FoundationDB physical backend (#4900)

This commit is contained in:
Julien Blache 2018-07-16 07:18:09 -07:00 committed by Jeff Mitchell
parent 493752334a
commit c8fb9ed6a8
8 changed files with 1562 additions and 6 deletions

View File

@ -16,30 +16,36 @@ GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor)
GO_VERSION_MIN=1.10
CGO_ENABLED=0
ifneq ($(FDB_ENABLED), )
CGO_ENABLED=1
BUILD_TAGS+=foundationdb
endif
default: dev
# bin generates the releasable binaries for Vault
bin: prep
@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS) ui' sh -c "'$(CURDIR)/scripts/build.sh'"
@CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' sh -c "'$(CURDIR)/scripts/build.sh'"
# dev creates binaries for testing Vault locally. These are put
# into ./bin/ as well as $GOPATH/bin
dev: prep
@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
@CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
dev-ui: prep
@CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS) ui' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
@CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
dev-dynamic: prep
@CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'"
testtravis: BUILD_TAGS=travis
testtravis: BUILD_TAGS+=travis
testtravis: test
testracetravis: BUILD_TAGS=travis
testracetravis: BUILD_TAGS+=travis
testracetravis: testrace
# test runs the unit tests and vets the code
test: prep
@CGO_ENABLED=0 \
@CGO_ENABLED=$(CGO_ENABLED) \
VAULT_ADDR= \
VAULT_TOKEN= \
VAULT_DEV_ROOT_TOKEN_ID= \

View File

@ -59,6 +59,7 @@ import (
physDynamoDB "github.com/hashicorp/vault/physical/dynamodb"
physEtcd "github.com/hashicorp/vault/physical/etcd"
physFile "github.com/hashicorp/vault/physical/file"
physFoundationDB "github.com/hashicorp/vault/physical/foundationdb"
physGCS "github.com/hashicorp/vault/physical/gcs"
physInmem "github.com/hashicorp/vault/physical/inmem"
physManta "github.com/hashicorp/vault/physical/manta"
@ -146,6 +147,7 @@ var (
"etcd": physEtcd.NewEtcdBackend,
"file_transactional": physFile.NewTransactionalFileBackend,
"file": physFile.NewFileBackend,
"foundationdb": physFoundationDB.NewFDBBackend,
"gcs": physGCS.NewBackend,
"inmem_ha": physInmem.NewInmemHA,
"inmem_transactional_ha": physInmem.NewTransactionalInmemHA,

View File

@ -0,0 +1,34 @@
# FoundationDB storage backend
Extra steps are required to produce a Vault build containing the FoundationDB
backend; attempts to use the backend on a build produced without following
this procedure will fail with a descriptive error message at runtime.
## Installing the Go bindings
You will need to install the FoundationDB Go bindings to build the FoundationDB
backend. Make sure you have the FoundationDB client library installed on your
system, along with Mono (core is enough), then install the Go bindings using
the `fdb-go-install.sh` script:
```
$ physical/foundationdb/fdb-go-install.sh install
```
## Building Vault
To build Vault with the FoundationDB backend, add FDB_ENABLED=1 when invoking
`make`, e.g.
```
$ make dev FDB_ENABLED=1
```
## Running tests
Similarly, add FDB_ENABLED=1 to your `make` invocation when running tests,
e.g.
```
$ make test TEST=./physical/foundationdb FDB_ENABLED=1
```
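By default, the test suite spins up a throwaway FoundationDB container via Docker
and writes a temporary cluster file for it. To run the tests against an existing
cluster instead, point them at your cluster file through the
`FOUNDATIONDB_CLUSTER_FILE` environment variable read by `foundationdb_test.go`
(the path below is illustrative):
```
$ FOUNDATIONDB_CLUSTER_FILE=/etc/foundationdb/fdb.cluster \
make test TEST=./physical/foundationdb FDB_ENABLED=1
```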

View File

@ -0,0 +1,330 @@
#!/bin/bash -eu
#
# fdb-go-install.sh
#
# Installs the FoundationDB Go bindings for a client. This will download
# the repository from the remote repo, either into the go path or into
# a destination directory of your choosing, at the appropriate semantic
# version. It will then build a few generated files that need to be
# present for the go build to work. At the end, it prints advice on
# flags to set in your go environment so that other packages may
# successfully use this library.
#
DESTDIR="${DESTDIR:-}"
FDBVER="${FDBVER:-5.1.0}"
REMOTE="${REMOTE:-github.com}"
FDBREPO="${FDBREPO:-apple/foundationdb}"
status=0
platform=$(uname)
if [[ "${platform}" == "Darwin" ]] ; then
FDBLIBDIR="${FDBLIBDIR:-/usr/local/lib}"
libfdbc="libfdb_c.dylib"
elif [[ "${platform}" == "Linux" ]] ; then
libfdbc="libfdb_c.so"
custom_libdir="${FDBLIBDIR:-}"
FDBLIBDIR=""
if [[ -z "${custom_libdir}" ]]; then
search_libdirs=( '/usr/lib' '/usr/lib64' )
else
search_libdirs=( "${custom_libdir}" )
fi
for libdir in "${search_libdirs[@]}" ; do
if [[ -e "${libdir}/${libfdbc}" ]]; then
FDBLIBDIR="${libdir}"
break
fi
done
if [[ -z "${FDBLIBDIR}" ]]; then
echo "The FoundationDB C library could not be found in any of:"
for libdir in "${search_libdirs[@]}" ; do
echo " ${libdir}"
done
echo "Your installation may be incomplete, or you need to set a custom FDBLIBDIR."
let status="${status} + 1"
fi
else
echo "Unsupported platform ${platform}".
echo "At the moment, only macOS and Linux are supported by this script."
let status="${status} + 1"
fi
filedir=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
destdir=""
function printUsage() {
echo "Usage: fdb-go-install.sh <cmd>"
echo
echo "cmd: One of the commands to run. The options are:"
echo " install Download the FDB go bindings and install them"
echo " localinstall Install a into the go path a local copy of the repo"
echo " download Download but do not prepare the FoundationDB bindings"
echo " help Print this help message and then quit"
echo
echo "Command Line Options:"
echo " --fdbver <version> FoundationDB semantic version (default is ${FDBVER})"
echo " -d/--dest-dir <dest> Local location for the repo (default is to place in go path)"
echo
echo "Environment Variable Options:"
echo " REMOTE Remote repository to download from (currently ${REMOTE})"
echo " FDBREPO Repository of FoundationDB library to download (currently ${FDBREPO})"
echo " FDBLIBDIR Directory within which should be the FoundationDB c library (currently ${FDBLIBDIR})"
}
function parseArgs() {
local status=0
if [[ "${#}" -lt 0 ]] ; then
printUsage
let status="${status} + 1"
else
operation="${1}"
shift
if [[ "${operation}" != "install" ]] && [[ "${operation}" != "localinstall" ]] && [[ "${operation}" != "download" ]] && [[ "${operation}" != "help" ]] ; then
echo "Unknown command: ${operation}"
printUsage
let status="${status} + 1"
fi
fi
while [[ "${#}" -gt 0 ]] && [[ "${status}" -eq 0 ]] ; do
local key="${1}"
case "${key}" in
--fdbver)
if [[ "${#}" -lt 2 ]] ; then
echo "No version specified with --fdbver flag"
printUsage
let status="${status} + 1"
else
FDBVER="${2}"
fi
shift
;;
-d|--dest-dir)
if [[ "${#}" -lt 2 ]] ; then
echo "No destination specified with ${key} flag"
printUsage
let status="${status} + 1"
else
destdir="${2}"
fi
shift
;;
*)
echo "Unrecognized argument ${key}"
printUsage
let status="${status} + 1"
esac
shift
done
return "${status}"
}
function checkBin() {
if [[ "${#}" -lt 1 ]] ; then
echo "Usage: checkBin <binary>"
return 1
else
if [[ -n $(which "${1}") ]] ; then
return 0
else
return 1
fi
fi
}
if [[ "${status}" -gt 0 ]] ; then
# We have already failed.
:
elif [[ "${#}" -lt 1 ]] ; then
printUsage
else
required_bins=( 'go' 'git' 'make' 'mono' )
missing_bins=()
for bin in "${required_bins[@]}" ; do
if ! checkBin "${bin}" ; then
missing_bins+=("${bin}")
let status="${status} + 1"
fi
done
if [[ "${status}" -gt 0 ]] ; then
echo "Missing binaries: ${missing_bins[*]}"
elif ! parseArgs ${@} ; then
let status="${status} + 1"
elif [[ "${operation}" == "help" ]] ; then
printUsage
else
# Add go-specific environment variables.
eval $(go env)
golibdir=$(dirname "${GOPATH}/src/${REMOTE}/${FDBREPO}")
if [[ -z "${destdir}" ]] ; then
if [[ "${operation}" == "localinstall" ]] ; then
# Assume it's the local directory.
destdir=$(cd "${filedir}/../../.." && pwd)
else
destdir="${golibdir}"
fi
fi
if [[ ! -d "${destdir}" ]] ; then
cmd=( 'mkdir' '-p' "${destdir}" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"
echo "Could not create destination directory ${destdir}."
fi
fi
# Step 1: Make sure repository is present.
if [[ "${status}" -eq 0 ]] ; then
destdir=$( cd "${destdir}" && pwd ) # Get absolute path of destination dir.
fdbdir="${destdir}/foundationdb"
if [[ ! -d "${destdir}" ]] ; then
cmd=("mkdir" "-p" "${destdir}")
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
echo "Could not create destination directory ${destdir}."
let status="${status} + 1"
fi
fi
fi
if [[ "${operation}" == "localinstall" ]] ; then
# No download occurs in this case.
:
else
if [[ -d "${fdbdir}" ]] ; then
echo "Directory ${fdbdir} already exists ; checking out appropriate tag"
cmd1=( 'git' '-C' "${fdbdir}" 'fetch' 'origin' )
cmd2=( 'git' '-C' "${fdbdir}" 'checkout' "release-${FDBVER}" )
if ! echo "${cmd1[*]}" || ! "${cmd1[@]}" ; then
let status="${status} + 1"
echo "Could not pull latest changes from origin"
elif ! echo "${cmd2[*]}" || ! "${cmd2[@]}" ; then
let status="${status} + 1"
echo "Could not checkout tag release-${FDBVER}."
fi
else
echo "Downloading foundation repository into ${destdir}:"
cmd=( 'git' '-C' "${destdir}" 'clone' '--branch' "release-${FDBVER}" "https://${REMOTE}/${FDBREPO}.git" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"
echo "Could not download repository."
fi
fi
fi
# Step 2: Build generated things.
if [[ "${operation}" == "download" ]] ; then
# The generated files are not created under a strict download.
:
elif [[ "${status}" -eq 0 ]] ; then
echo "Building generated files."
# FoundationDB starting with 5.2 can figure that out on its own
if [ -e '/usr/bin/mcs' ]; then
MCS_BIN=/usr/bin/mcs
else
MCS_BIN=/usr/bin/dmcs
fi
cmd=( 'make' '-C' "${fdbdir}" 'bindings/c/foundationdb/fdb_c_options.g.h' "MCS=$MCS_BIN" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"
echo "Could not generate required c header"
else
infile="${fdbdir}/fdbclient/vexillographer/fdb.options"
outfile="${fdbdir}/bindings/go/src/fdb/generated.go"
cmd=( 'go' 'run' "${fdbdir}/bindings/go/src/_util/translate_fdb_options.go" )
echo "${cmd[*]} < ${infile} > ${outfile}"
if ! "${cmd[@]}" < "${infile}" > "${outfile}" ; then
let status="${status} + 1"
echo "Could not generate generated go file."
fi
fi
fi
# Step 3: Add to go path.
if [[ "${operation}" == "download" ]] ; then
# The files are not moved under a strict download.
:
elif [[ "${status}" -eq 0 ]] ; then
linkpath="${GOPATH}/src/${REMOTE}/${FDBREPO}"
if [[ "${linkpath}" == "${fdbdir}" ]] ; then
# Downloaded directly into go path. Skip making the link.
:
elif [[ -e "${linkpath}" ]] ; then
echo "Warning: link path (${linkpath}) already exists. Leaving in place."
else
dirpath=$(dirname "${linkpath}")
if [[ ! -d "${dirpath}" ]] ; then
cmd=( 'mkdir' '-p' "${dirpath}" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"
echo "Could not create directory for link."
fi
fi
if [[ "${status}" -eq 0 ]] ; then
cmd=( 'ln' '-s' "${fdbdir}" "${linkpath}" )
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"
echo "Could not create link within go path."
fi
fi
fi
fi
# Step 4: Build the binaries.
if [[ "${operation}" == "download" ]] ; then
# Do not install if only downloading
:
elif [[ "${status}" -eq 0 ]] ; then
cgo_cppflags="-I${linkpath}/bindings/c"
cgo_cflags="-g -O2"
cgo_ldflags="-L${FDBLIBDIR}"
fdb_go_path="${REMOTE}/${FDBREPO}/bindings/go/src"
if ! CGO_CPPFLAGS="${cgo_cppflags}" CGO_CFLAGS="${cgo_cflags}" CGO_LDFLAGS="${cgo_ldflags}" go install "${fdb_go_path}/fdb" "${fdb_go_path}/fdb/tuple" "${fdb_go_path}/fdb/subspace" "${fdb_go_path}/fdb/directory" ; then
let status="${status} + 1"
echo "Could not build FoundationDB go libraries."
fi
fi
# Step 5: Explain CGO flags.
if [[ "${status}" -eq 0 && ("${operation}" == "localinstall" || "${operation}" == "install" ) ]] ; then
echo
echo "The FoundationDB go bindings were successfully installed."
echo "To build packages which use the go bindings, you will need to"
echo "set the following environment variables:"
echo " CGO_CPPFLAGS=\"${cgo_cppflags}\""
echo " CGO_CFLAGS=\"${cgo_cflags}\""
echo " CGO_LDFLAGS=\"${cgo_ldflags}\""
fi
fi
fi
exit "${status}"

View File

@ -0,0 +1,836 @@
// +build foundationdb
package foundationdb
import (
"context"
"fmt"
"strconv"
"strings"
"sync"
"time"
"bytes"
"encoding/binary"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-uuid"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"github.com/armon/go-metrics"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/physical"
)
const (
// The namespace under our top directory containing keys only for list operations
metaKeysNamespace = "_meta-keys"
// The namespace under our top directory containing the actual data
dataNamespace = "_data"
// The namespace under our top directory containing locks
lockNamespace = "_lock"
// Path hierarchy markers
// - an entry in a directory (included in list)
dirEntryMarker = "/\x01"
// - a path component (excluded from list)
dirPathMarker = "/\x02"
)
var (
// 64bit 1 and -1 for FDB atomic Add()
atomicArgOne = []byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
atomicArgMinusOne = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
)
// Verify FDBBackend satisfies the correct interfaces
var _ physical.Backend = (*FDBBackend)(nil)
var _ physical.Transactional = (*FDBBackend)(nil)
var _ physical.HABackend = (*FDBBackend)(nil)
var _ physical.Lock = (*FDBBackendLock)(nil)
// FDBBackend is a physical backend that stores data at a specific
// prefix within FoundationDB.
type FDBBackend struct {
logger log.Logger
haEnabled bool
db fdb.Database
metaKeysSpace subspace.Subspace
dataSpace subspace.Subspace
lockSpace subspace.Subspace
instanceUUID string
}
func concat(a []byte, b ...byte) []byte {
r := make([]byte, len(a)+len(b))
copy(r, a)
copy(r[len(a):], b)
return r
}
func decoratePrefix(prefix string) ([]byte, error) {
pathElements := strings.Split(prefix, "/")
decoratedPrefix := strings.Join(pathElements[:len(pathElements)-1], dirPathMarker)
return []byte(decoratedPrefix + dirEntryMarker), nil
}
// Turn a path string into a decorated byte array to be used as (part of) a key
// foo /\x01foo
// foo/ /\x01foo/
// foo/bar /\x02foo/\x01bar
// foo/bar/ /\x02foo/\x01bar/
// foo/bar/baz /\x02foo/\x02bar/\x01baz
// foo/bar/baz/ /\x02foo/\x02bar/\x01baz/
// foo/bar/baz/quux /\x02foo/\x02bar/\x02baz/\x01quux
// This allows for range queries to retrieve the "directory" listing. The
// decoratePrefix() function builds the path leading up to the leaf.
func decoratePath(path string) ([]byte, error) {
if path == "" {
return nil, fmt.Errorf("Invalid empty path")
}
path = "/" + path
isDir := strings.HasSuffix(path, "/")
path = strings.TrimRight(path, "/")
lastSlash := strings.LastIndexByte(path, '/')
decoratedPrefix, err := decoratePrefix(path[:lastSlash+1])
if err != nil {
return nil, err
}
leaf := path[lastSlash+1:]
if isDir {
leaf += "/"
}
return concat(decoratedPrefix, []byte(leaf)...), nil
}
// Turn a decorated byte array back into a path string
func undecoratePath(decoratedPath []byte) string {
ret := strings.Replace(string(decoratedPath), dirPathMarker, "/", -1)
ret = strings.Replace(ret, dirEntryMarker, "/", -1)
return strings.TrimLeft(ret, "/")
}
// NewFDBBackend constructs a FoundationDB backend storing keys in the
// top-level directory designated by path
func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
// Get the top-level directory name
path, ok := conf["path"]
if !ok {
path = "vault"
}
logger.Debug("config path set", "path", path)
dirPath := strings.Split(strings.Trim(path, "/"), "/")
// FoundationDB API version
fdbApiVersionStr, ok := conf["api_version"]
if !ok {
return nil, fmt.Errorf("FoundationDB API version not specified")
}
fdbApiVersionInt, err := strconv.Atoi(fdbApiVersionStr)
if err != nil {
return nil, errwrap.Wrapf("failed to parse fdb_api_version parameter: {{err}}", err)
}
logger.Debug("FoundationDB API version set", "fdb_api_version", fdbApiVersionInt)
// FoundationDB cluster file
fdbClusterFile, ok := conf["cluster_file"]
if !ok {
return nil, fmt.Errorf("FoundationDB cluster file not specified")
}
haEnabled := false
haEnabledStr, ok := conf["ha_enabled"]
if ok {
haEnabled, err = strconv.ParseBool(haEnabledStr)
if err != nil {
return nil, errwrap.Wrapf("failed to parse ha_enabled parameter: {{err}}", err)
}
}
instanceUUID, err := uuid.GenerateUUID()
if err != nil {
return nil, errwrap.Wrapf("could not generate instance UUID: {{err}}", err)
}
logger.Debug("Instance UUID", "uuid", instanceUUID)
if err := fdb.APIVersion(fdbApiVersionInt); err != nil {
return nil, errwrap.Wrapf("failed to set FDB API version: {{err}}", err)
}
db, err := fdb.Open(fdbClusterFile, []byte("DB"))
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("failed to open database with cluster file '%s': {{err}}", fdbClusterFile), err)
}
topDir, err := directory.CreateOrOpen(db, dirPath, nil)
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("failed to create/open top-level directory '%s': {{err}}", path), err)
}
// Setup the backend
f := &FDBBackend{
logger: logger,
haEnabled: haEnabled,
db: db,
metaKeysSpace: topDir.Sub(metaKeysNamespace),
dataSpace: topDir.Sub(dataNamespace),
lockSpace: topDir.Sub(lockNamespace),
instanceUUID: instanceUUID,
}
return f, nil
}
// Increase refcount on directories in the path, from the bottom -> up
func (f *FDBBackend) incDirsRefcount(tr fdb.Transaction, path string) error {
pathElements := strings.Split(strings.TrimRight(path, "/"), "/")
for i := len(pathElements) - 1; i != 0; i-- {
dPath, err := decoratePath(strings.Join(pathElements[:i], "/") + "/")
if err != nil {
return errwrap.Wrapf("error incrementing directories refcount: {{err}}", err)
}
// Atomic +1
tr.Add(fdb.Key(concat(f.metaKeysSpace.Bytes(), dPath...)), atomicArgOne)
tr.Add(fdb.Key(concat(f.dataSpace.Bytes(), dPath...)), atomicArgOne)
}
return nil
}
type DirsDecTodo struct {
fkey fdb.Key
future fdb.FutureByteSlice
}
// Decrease refcount on directories in the path, from the bottom -> up, and remove empty ones
func (f *FDBBackend) decDirsRefcount(tr fdb.Transaction, path string) error {
pathElements := strings.Split(strings.TrimRight(path, "/"), "/")
dirsTodo := make([]DirsDecTodo, 0, len(pathElements)*2)
for i := len(pathElements) - 1; i != 0; i-- {
dPath, err := decoratePath(strings.Join(pathElements[:i], "/") + "/")
if err != nil {
return errwrap.Wrapf("error decrementing directories refcount: {{err}}", err)
}
metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), dPath...))
dirsTodo = append(dirsTodo, DirsDecTodo{
fkey: metaFKey,
future: tr.Get(metaFKey),
})
dataFKey := fdb.Key(concat(f.dataSpace.Bytes(), dPath...))
dirsTodo = append(dirsTodo, DirsDecTodo{
fkey: dataFKey,
future: tr.Get(dataFKey),
})
}
for _, todo := range dirsTodo {
value, err := todo.future.Get()
if err != nil {
return errwrap.Wrapf("error getting directory refcount while decrementing: {{err}}", err)
}
// The directory entry does not exist; this is not expected
if value == nil {
return fmt.Errorf("non-existent directory while decrementing directory refcount")
}
var count int64
err = binary.Read(bytes.NewReader(value), binary.LittleEndian, &count)
if err != nil {
return errwrap.Wrapf("error reading directory refcount while decrementing: {{err}}", err)
}
if count > 1 {
// Atomic -1
tr.Add(todo.fkey, atomicArgMinusOne)
} else {
// Directory is empty, remove it
tr.Clear(todo.fkey)
}
}
return nil
}
func (f *FDBBackend) internalPut(tr fdb.Transaction, decoratedPath []byte, path string, value []byte) error {
// Check that the meta key exists before blindly increasing the refcounts
// in the directory hierarchy; this protects against commit_unknown_result
// and other similar cases where a previous transaction may have gone
// through without us knowing for sure.
metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), decoratedPath...))
metaFuture := tr.Get(metaFKey)
dataFKey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...))
tr.Set(dataFKey, value)
value, err := metaFuture.Get()
if err != nil {
return errwrap.Wrapf("Put error while getting meta key: {{err}}", err)
}
if value == nil {
tr.Set(metaFKey, []byte{})
return f.incDirsRefcount(tr, path)
}
return nil
}
func (f *FDBBackend) internalClear(tr fdb.Transaction, decoratedPath []byte, path string) error {
// Same as above - check existence of the meta key before taking any
// action, to protect against a possible previous commit_unknown_result
// error.
metaFKey := fdb.Key(concat(f.metaKeysSpace.Bytes(), decoratedPath...))
value, err := tr.Get(metaFKey).Get()
if err != nil {
return errwrap.Wrapf("Delete error while getting meta key: {{err}}", err)
}
if value != nil {
dataFKey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...))
tr.Clear(dataFKey)
tr.Clear(metaFKey)
return f.decDirsRefcount(tr, path)
}
return nil
}
type TxnTodo struct {
decoratedPath []byte
op *physical.TxnEntry
}
// Used to run multiple entries via a transaction
func (f *FDBBackend) Transaction(ctx context.Context, txns []*physical.TxnEntry) error {
if len(txns) == 0 {
return nil
}
todo := make([]*TxnTodo, len(txns))
for i, op := range txns {
if op.Operation != physical.DeleteOperation && op.Operation != physical.PutOperation {
return fmt.Errorf("%q is not a supported transaction operation", op.Operation)
}
decoratedPath, err := decoratePath(op.Entry.Key)
if err != nil {
return errwrap.Wrapf(fmt.Sprintf("could not build decorated path for transaction item %s: {{err}}", op.Entry.Key), err)
}
todo[i] = &TxnTodo{
decoratedPath: decoratedPath,
op: op,
}
}
_, err := f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
for _, txnTodo := range todo {
var err error
switch txnTodo.op.Operation {
case physical.DeleteOperation:
err = f.internalClear(tr, txnTodo.decoratedPath, txnTodo.op.Entry.Key)
case physical.PutOperation:
err = f.internalPut(tr, txnTodo.decoratedPath, txnTodo.op.Entry.Key, txnTodo.op.Entry.Value)
}
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("operation %s failed for transaction item %s: {{err}}", txnTodo.op.Operation, txnTodo.op.Entry.Key), err)
}
}
return nil, nil
})
if err != nil {
return errwrap.Wrapf("transaction failed: {{err}}", err)
}
return nil
}
// Put is used to insert or update an entry
func (f *FDBBackend) Put(ctx context.Context, entry *physical.Entry) error {
defer metrics.MeasureSince([]string{"foundationdb", "put"}, time.Now())
decoratedPath, err := decoratePath(entry.Key)
if err != nil {
return errwrap.Wrapf(fmt.Sprintf("could not build decorated path to put item %s: {{err}}", entry.Key), err)
}
_, err = f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
err := f.internalPut(tr, decoratedPath, entry.Key, entry.Value)
if err != nil {
return nil, err
}
return nil, nil
})
if err != nil {
return errwrap.Wrapf(fmt.Sprintf("put failed for item %s: {{err}}", entry.Key), err)
}
return nil
}
// Get is used to fetch an entry
// Return nil for non-existent keys
func (f *FDBBackend) Get(ctx context.Context, key string) (*physical.Entry, error) {
defer metrics.MeasureSince([]string{"foundationdb", "get"}, time.Now())
decoratedPath, err := decoratePath(key)
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("could not build decorated path to get item %s: {{err}}", key), err)
}
fkey := fdb.Key(concat(f.dataSpace.Bytes(), decoratedPath...))
value, err := f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
value, err := rtr.Get(fkey).Get()
if err != nil {
return nil, err
}
return value, nil
})
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("get failed for item %s: {{err}}", key), err)
}
if value.([]byte) == nil {
return nil, nil
}
return &physical.Entry{
Key: key,
Value: value.([]byte),
}, nil
}
// Delete is used to permanently delete an entry
func (f *FDBBackend) Delete(ctx context.Context, key string) error {
defer metrics.MeasureSince([]string{"foundationdb", "delete"}, time.Now())
decoratedPath, err := decoratePath(key)
if err != nil {
return errwrap.Wrapf(fmt.Sprintf("could not build decorated path to delete item %s: {{err}}", key), err)
}
_, err = f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
err := f.internalClear(tr, decoratedPath, key)
if err != nil {
return nil, err
}
return nil, nil
})
if err != nil {
return errwrap.Wrapf(fmt.Sprintf("delete failed for item %s: {{err}}", key), err)
}
return nil
}
// List is used to list all the keys under a given
// prefix, up to the next prefix.
// Return empty string slice for non-existent directories
func (f *FDBBackend) List(ctx context.Context, prefix string) ([]string, error) {
defer metrics.MeasureSince([]string{"foundationdb", "list"}, time.Now())
prefix = strings.TrimRight("/"+prefix, "/") + "/"
decoratedPrefix, err := decoratePrefix(prefix)
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("could not build decorated path to list prefix %s: {{err}}", prefix), err)
}
// The beginning of the range is /\x02foo/\x02bar/\x01 (the decorated prefix) to list foo/bar/
rangeBegin := fdb.Key(concat(f.metaKeysSpace.Bytes(), decoratedPrefix...))
rangeEnd := fdb.Key(concat(rangeBegin, 0xff))
pathRange := fdb.KeyRange{Begin: rangeBegin, End: rangeEnd}
keyPrefixLen := len(rangeBegin)
content, err := f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
dirList := make([]string, 0, 0)
ri := rtr.GetRange(pathRange, fdb.RangeOptions{Mode: fdb.StreamingModeWantAll}).Iterator()
for ri.Advance() {
kv := ri.MustGet()
// Strip length of the rangeBegin key off the FDB key, yielding
// the part of the key we're interested in, which does not need
// to be undecorated, by construction.
dirList = append(dirList, string(kv.Key[keyPrefixLen:]))
}
return dirList, nil
})
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("could not list prefix %s: {{err}}", prefix), err)
}
return content.([]string), nil
}
type FDBBackendLock struct {
f *FDBBackend
key string
value string
fkey fdb.Key
lock sync.Mutex
}
// LockWith is used for mutual exclusion based on the given key.
func (f *FDBBackend) LockWith(key, value string) (physical.Lock, error) {
return &FDBBackendLock{
f: f,
key: key,
value: value,
fkey: f.lockSpace.Pack(tuple.Tuple{key}),
}, nil
}
func (f *FDBBackend) HAEnabled() bool {
return f.haEnabled
}
const (
// Position of elements in the lock content tuple
lockContentValueIdx = 0
lockContentOwnerIdx = 1
lockContentExpiresIdx = 2
// Number of elements in the lock content tuple
lockTupleContentElts = 3
lockTTL = 15 * time.Second
lockRenewInterval = 5 * time.Second
lockAcquireRetryInterval = 5 * time.Second
)
type FDBBackendLockContent struct {
value string
ownerUUID string
expires time.Time
}
func packLock(content *FDBBackendLockContent) []byte {
t := tuple.Tuple{content.value, content.ownerUUID, content.expires.UnixNano()}
return t.Pack()
}
func unpackLock(tupleContent []byte) (*FDBBackendLockContent, error) {
t, err := tuple.Unpack(tupleContent)
if err != nil {
return nil, err
}
if len(t) != lockTupleContentElts {
return nil, fmt.Errorf("unexpected lock content, len %d != %d", len(t), lockTupleContentElts)
}
return &FDBBackendLockContent{
value: t[lockContentValueIdx].(string),
ownerUUID: t[lockContentOwnerIdx].(string),
expires: time.Unix(0, t[lockContentExpiresIdx].(int64)),
}, nil
}
func (fl *FDBBackendLock) getLockContent(tr fdb.Transaction) (*FDBBackendLockContent, error) {
tupleContent, err := tr.Get(fl.fkey).Get()
if err != nil {
return nil, err
}
// Lock doesn't exist
if tupleContent == nil {
return nil, fmt.Errorf("non-existent lock %s", fl.key)
}
content, err := unpackLock(tupleContent)
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("failed to unpack lock %s: {{err}}", fl.key), err)
}
return content, nil
}
func (fl *FDBBackendLock) setLockContent(tr fdb.Transaction, content *FDBBackendLockContent) {
tr.Set(fl.fkey, packLock(content))
}
func (fl *FDBBackendLock) isOwned(content *FDBBackendLockContent) bool {
return content.ownerUUID == fl.f.instanceUUID
}
func (fl *FDBBackendLock) isExpired(content *FDBBackendLockContent) bool {
return time.Now().After(content.expires)
}
func (fl *FDBBackendLock) acquireTryLock(acquired chan struct{}, errors chan error) (bool, error) {
wonTheRace, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tupleContent, err := tr.Get(fl.fkey).Get()
if err != nil {
return nil, errwrap.Wrapf("could not read lock: {{err}}", err)
}
// Lock exists
if tupleContent != nil {
content, err := unpackLock(tupleContent)
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("failed to unpack lock %s: {{err}}", fl.key), err)
}
if fl.isOwned(content) {
return nil, fmt.Errorf("lock %s already held", fl.key)
}
// The lock already exists, is not owned by us, and is not expired
if !fl.isExpired(content) {
return false, nil
}
}
// Lock doesn't exist, or exists but is expired, we can go ahead
content := &FDBBackendLockContent{
value: fl.value,
ownerUUID: fl.f.instanceUUID,
expires: time.Now().Add(lockTTL),
}
fl.setLockContent(tr, content)
return true, nil
})
if err != nil {
errors <- err
return false, err
}
if wonTheRace.(bool) {
close(acquired)
}
return wonTheRace.(bool), nil
}
func (fl *FDBBackendLock) acquireLock(abandon chan struct{}, acquired chan struct{}, errors chan error) {
ticker := time.NewTicker(lockAcquireRetryInterval)
defer ticker.Stop()
lockAcquired, err := fl.acquireTryLock(acquired, errors)
if lockAcquired || err != nil {
return
}
for {
select {
case <-abandon:
return
case <-ticker.C:
lockAcquired, err := fl.acquireTryLock(acquired, errors)
if lockAcquired || err != nil {
return
}
}
}
}
func (fl *FDBBackendLock) maintainLock(lost <-chan struct{}) {
ticker := time.NewTicker(lockRenewInterval)
for {
select {
case <-ticker.C:
_, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
content, err := fl.getLockContent(tr)
if err != nil {
return nil, err
}
// We don't own the lock
if !fl.isOwned(content) {
return nil, fmt.Errorf("lost lock %s", fl.key)
}
// The lock is expired
if fl.isExpired(content) {
return nil, fmt.Errorf("lock %s expired", fl.key)
}
content.expires = time.Now().Add(lockTTL)
fl.setLockContent(tr, content)
return nil, nil
})
if err != nil {
fl.f.logger.Error("lock maintain", "error", err)
}
// Failure to renew the lock will cause another node to take over
// and the watch to fire. DB errors will also be caught by the watch.
case <-lost:
ticker.Stop()
return
}
}
}
func (fl *FDBBackendLock) watchLock(lost chan struct{}) {
for {
watch, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
content, err := fl.getLockContent(tr)
if err != nil {
return nil, err
}
// We don't own the lock
if !fl.isOwned(content) {
return nil, fmt.Errorf("lost lock %s", fl.key)
}
// The lock is expired
if fl.isExpired(content) {
return nil, fmt.Errorf("lock %s expired", fl.key)
}
// Set FDB watch on the lock
future := tr.Watch(fl.fkey)
return future, nil
})
if err != nil {
fl.f.logger.Error("lock watch", "error", err)
break
}
// Wait for the watch to fire, and go again
watch.(fdb.FutureNil).Get()
}
close(lost)
}
func (fl *FDBBackendLock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
fl.lock.Lock()
defer fl.lock.Unlock()
var (
// Inform the lock owner that we lost the lock
lost = make(chan struct{})
// Tell our watch and renewal routines the lock has been abandoned
abandon = make(chan struct{})
// Feedback from lock acquisition routine
acquired = make(chan struct{})
errors = make(chan error)
)
// try to acquire the lock asynchronously
go fl.acquireLock(abandon, acquired, errors)
select {
case <-acquired:
// Maintain the lock after initial acquisition
go fl.maintainLock(lost)
// Watch the lock for changes
go fl.watchLock(lost)
case err := <-errors:
// Initial acquisition failed
close(abandon)
return nil, err
case <-stopCh:
// Prospective lock owner cancelling lock acquisition
close(abandon)
return nil, nil
}
return lost, nil
}
func (fl *FDBBackendLock) Unlock() error {
fl.lock.Lock()
defer fl.lock.Unlock()
_, err := fl.f.db.Transact(func(tr fdb.Transaction) (interface{}, error) {
content, err := fl.getLockContent(tr)
if err != nil {
return nil, errwrap.Wrapf("could not get lock content: {{err}}", err)
}
// We don't own the lock
if !fl.isOwned(content) {
return nil, nil
}
tr.Clear(fl.fkey)
return nil, nil
})
if err != nil {
return errwrap.Wrapf("unlock failed: {{err}}", err)
}
return nil
}
func (fl *FDBBackendLock) Value() (bool, string, error) {
tupleContent, err := fl.f.db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
tupleContent, err := rtr.Get(fl.fkey).Get()
if err != nil {
return nil, errwrap.Wrapf("could not read lock: {{err}}", err)
}
return tupleContent, nil
})
if err != nil {
return false, "", errwrap.Wrapf(fmt.Sprintf("get lock value failed for lock %s: {{err}}", fl.key), err)
}
if tupleContent.([]byte) == nil {
return false, "", nil
}
content, err := unpackLock(tupleContent.([]byte))
if err != nil {
return false, "", errwrap.Wrapf(fmt.Sprintf("get lock value failed to unpack lock %s: {{err}}", fl.key), err)
}
return true, content.value, nil
}

View File

@ -0,0 +1,199 @@
// +build foundationdb
package foundationdb
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-uuid"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/physical"
dockertest "gopkg.in/ory-am/dockertest.v3"
)
func connectToFoundationDB(clusterFile string) (*fdb.Database, error) {
if err := fdb.APIVersion(510); err != nil {
return nil, errwrap.Wrapf("failed to set FDB API version: {{err}}", err)
}
db, err := fdb.Open(clusterFile, []byte("DB"))
if err != nil {
return nil, errwrap.Wrapf("failed to open database: {{err}}", err)
}
return &db, nil
}
func cleanupTopDir(clusterFile, topDir string) error {
db, err := connectToFoundationDB(clusterFile)
if err != nil {
return errwrap.Wrapf("could not connect to FDB for cleanup: {{err}}", err)
}
if _, err := directory.Root().Remove(db, []string{topDir}); err != nil {
return errwrap.Wrapf("could not remove directory: {{err}}", err)
}
return nil
}
func TestFoundationDBPathDecoration(t *testing.T) {
cases := map[string][]byte{
"foo": []byte("/\x01foo"),
"foo/": []byte("/\x01foo/"),
"foo/bar": []byte("/\x02foo/\x01bar"),
"foo/bar/": []byte("/\x02foo/\x01bar/"),
"foo/bar/baz": []byte("/\x02foo/\x02bar/\x01baz"),
"foo/bar/baz/": []byte("/\x02foo/\x02bar/\x01baz/"),
"foo/bar/baz/quux": []byte("/\x02foo/\x02bar/\x02baz/\x01quux"),
}
for path, expected := range cases {
decorated, err := decoratePath(path)
if err != nil {
t.Fatalf("path %s error: %s", path, err)
}
if !bytes.Equal(expected, decorated) {
t.Fatalf("path %s expected %v got %v", path, expected, decorated)
}
undecorated := undecoratePath(decorated)
if undecorated != path {
t.Fatalf("expected %s got %s", path, undecorated)
}
}
}
func TestFoundationDBBackend(t *testing.T) {
if testing.Short() {
t.Skipf("skipping in short mode")
}
testUUID, err := uuid.GenerateUUID()
if err != nil {
t.Fatalf("foundationdb: could not generate UUID to top-level directory: %s", err)
}
topDir := fmt.Sprintf("vault-test-%s", testUUID)
var clusterFile string
clusterFile = os.Getenv("FOUNDATIONDB_CLUSTER_FILE")
if clusterFile == "" {
var cleanup func()
cleanup, clusterFile = prepareFoundationDBTestDirectory(t, topDir)
defer cleanup()
}
// Remove the test data once done
defer func() {
if err := cleanupTopDir(clusterFile, topDir); err != nil {
t.Fatalf("foundationdb: could not cleanup test data at end of test: %s", err)
}
}()
// Remove any leftover test data before starting
if err := cleanupTopDir(clusterFile, topDir); err != nil {
t.Fatalf("foundationdb: could not cleanup test data before starting test: %s", err)
}
// Run vault tests
logger := logging.NewVaultLogger(log.Debug)
b, err := NewFDBBackend(map[string]string{
"path": topDir,
"api_version": "510",
"cluster_file": clusterFile,
}, logger)
if err != nil {
t.Fatalf("foundationdb: failed to create new backend: %s", err)
}
physical.ExerciseBackend(t, b)
physical.ExerciseBackend_ListPrefix(t, b)
physical.ExerciseTransactionalBackend(t, b)
ha1, ok := b.(physical.HABackend)
if !ok {
t.Fatalf("foundationdb does not implement HABackend")
}
b2, err := NewFDBBackend(map[string]string{
"path": topDir,
"api_version": "510",
"cluster_file": clusterFile,
}, logger)
if err != nil {
t.Fatalf("foundationdb: failed to create new backend for HA test: %s", err)
}
ha2 := b2.(physical.HABackend)
physical.ExerciseHABackend(t, ha1, ha2)
}
func prepareFoundationDBTestDirectory(t *testing.T, topDir string) (func(), string) {
pool, err := dockertest.NewPool("")
if err != nil {
t.Fatalf("foundationdb: failed to connect to docker: %s", err)
}
resource, err := pool.Run("foundationdb", "5.1.7", nil)
if err != nil {
t.Fatalf("foundationdb: could not start container: %s", err)
}
tmpFile, err := ioutil.TempFile("", topDir)
if err != nil {
t.Fatalf("foundationdb: could not create temporary file for cluster file: %s", err)
}
clusterFile := tmpFile.Name()
cleanup := func() {
pool.Purge(resource)
os.Remove(clusterFile)
}
setup := func() error {
connectString := fmt.Sprintf("foundationdb:foundationdb@127.0.0.1:%s", resource.GetPort("4500/tcp"))
if err := tmpFile.Truncate(0); err != nil {
return errwrap.Wrapf("could not truncate cluster file: {{err}}", err)
}
_, err := tmpFile.WriteAt([]byte(connectString), 0)
if err != nil {
return errwrap.Wrapf("could not write cluster file: {{err}}", err)
}
if _, err := connectToFoundationDB(clusterFile); err != nil {
return errwrap.Wrapf("could not connect to FoundationDB after starting container: %s", err)
}
return nil
}
if err := pool.Retry(setup); err != nil {
cleanup()
t.Fatalf("foundationdb: could not setup container: %s", err)
}
tmpFile.Close()
return cleanup, clusterFile
}

View File

@ -0,0 +1,15 @@
// +build !foundationdb
package foundationdb
import (
"fmt"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/physical"
)
func NewFDBBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) {
return nil, fmt.Errorf("FoundationDB backend not available in this Vault build")
}

View File

@ -0,0 +1,134 @@
---
layout: "docs"
page_title: "FoundationDB - Storage Backends - Configuration"
sidebar_current: "docs-configuration-storage-foundationdb"
description: |-
The FoundationDB storage backend is used to persist Vault's data in the
FoundationDB KV store.
---
# FoundationDB Storage Backend
The FoundationDB storage backend is used to persist Vault's data in
the [FoundationDB][foundationdb] KV store.
The backend needs to be explicitly enabled at build time, and is not available
in the standard Vault binary distribution. Please refer to the documentation
accompanying the backend's source in the Vault source tree.
- **High Availability** - the FoundationDB storage backend supports high
availability. The HA implementation relies on the clocks of the Vault
nodes in the cluster being properly synchronized; clock skew can cause
contention on the locks.
```hcl
storage "foundationdb" {
api_version = 510
cluster_file = "/path/to/fdb.cluster"
path = "vault-top-level-directory"
ha_enabled = "true"
}
```
## `foundationdb` Parameters
- `api_version` `(int)` - The FoundationDB API version to use; this is a
required parameter and doesn't have a default value. Future versions will
impose a minimum API version to access newer features.
- `cluster_file` `(string)` - The path to the cluster file containing the
connection data for the target cluster; this is a required parameter and
doesn't have a default value.
- `path` `(string: "vault")` - The path of the top-level FoundationDB directory
(using the directory layer) under which the Vault data will reside.
- `ha_enabled` `(string: "false")` - Whether or not to enable Vault
high-availability mode using the FoundationDB backend.
## `foundationdb` Tips
### Cluster file
The FoundationDB client expects to be able to update the cluster file at
runtime, to keep it current with changes happening to the cluster.
It does so by first writing a new cluster file alongside the current one,
then atomically renaming it into place.
This means the cluster file and the directory it resides in must be writable
by the user Vault is running as. You probably want to isolate the cluster
file into its own directory.
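For example, a minimal sketch of such a setup (the `/etc/vault-fdb` directory,
paths, and the `vault` user below are illustrative, not prescriptive):
```
# Give Vault its own writable copy of the cluster file
$ sudo install -d -o vault -g vault /etc/vault-fdb
$ sudo install -o vault -g vault -m 0644 /etc/foundationdb/fdb.cluster /etc/vault-fdb/fdb.cluster
```
Then point `cluster_file` at the copy (here, `/etc/vault-fdb/fdb.cluster`) in
the `storage "foundationdb"` stanza.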
### Multi-version client
The FoundationDB client library version is tightly coupled to the server
version; during cluster upgrades, multiple server versions will be running
in the cluster, and the client must cope with that situation.
To handle this, the (primary) client library can load a different version
of the client library to connect to a particular server; this is referred
to as the [multi-version client][multi-ver-client] feature.
#### Client setup with `LD_LIBRARY_PATH`
If you do not use mlock, you can use `LD_LIBRARY_PATH` to point the dynamic
linker at the location of the primary client library.
```
$ export LD_LIBRARY_PATH=/dest/dir/for/primary:$LD_LIBRARY_PATH
$ export FDB_NETWORK_OPTION_EXTERNAL_CLIENT_DIRECTORY=/dest/dir/for/secondary
$ /path/to/bin/vault ...
```
#### Client setup with `RPATH`
When running Vault with mlock, the Vault binary must have capabilities set to
allow the use of mlock.
```
# setcap cap_ipc_lock=+ep /path/to/bin/vault
$ getcap /path/to/bin/vault
/path/to/bin/vault = cap_ipc_lock+ep
```
The presence of the capabilities will cause the dynamic linker to ignore
`LD_LIBRARY_PATH`, for security reasons.
In that case, we have to set an `RPATH` on the Vault binary at build time
to replace the use of `LD_LIBRARY_PATH`.
When building Vault, pass the `-r /dest/dir/for/primary` option to the Go
linker, for instance:
```
$ make dev FDB_ENABLED=1 LD_FLAGS="-r /dest/dir/for/primary "
```
(Note the trailing space in the variable value above).
You can verify `RPATH` is set on the Vault binary using `readelf`:
```
$ readelf -d /path/to/bin/vault | grep RPATH
0x000000000000000f (RPATH) Library rpath: [/dest/dir/for/primary]
```
With the client libraries installed:
```
$ ldd /path/to/bin/vault
...
libfdb_c.so => /dest/dir/for/primary/libfdb_c.so (0x00007f270ad05000)
...
```
Now run Vault:
```
$ export FDB_NETWORK_OPTION_EXTERNAL_CLIENT_DIRECTORY=/dest/dir/for/secondary
$ /path/to/bin/vault ...
```
[foundationdb]: https://www.foundationdb.org
[multi-ver-client]: https://apple.github.io/foundationdb/api-general.html#multi-version-client-api