From 361b10b5dde77fff563c957337ef2f9d7ea61bec Mon Sep 17 00:00:00 2001 From: Jared Wasinger Date: Sun, 8 Apr 2018 03:57:01 -0700 Subject: [PATCH 001/627] add unit tests: limits configuration should be reloadable --- agent/agent_endpoint_test.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 940304b54..f4a800b81 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -281,6 +281,10 @@ func TestAgent_Reload(t *testing.T) { handler = "true" } ] + limits = { + rpc_rate=1 + rpc_max_burst=100 + } `) defer a.Shutdown() @@ -302,6 +306,10 @@ func TestAgent_Reload(t *testing.T) { name = "redis-reloaded" } ] + limits = { + rpc_rate=2 + rpc_max_burst=200 + } `, }) @@ -312,6 +320,14 @@ func TestAgent_Reload(t *testing.T) { t.Fatal("missing redis-reloaded service") } + if a.config.RPCRateLimit != 2 { + t.Fatalf("RPC rate not set correctly. Got %v. Want 2", a.config.RPCRateLimit) + } + + if a.config.RPCMaxBurst != 200 { + t.Fatalf("RPC max burst not set correctly. Got %v. 
Want 200", a.config.RPCMaxBurst) + } + for _, wp := range a.watchPlans { if !wp.IsStopped() { t.Fatalf("Reloading configs should stop watch plans of the previous configuration") From 9a1737a5f07e9490074c5b91463b5b93b41471cb Mon Sep 17 00:00:00 2001 From: Jared Wasinger Date: Sun, 8 Apr 2018 14:28:29 -0700 Subject: [PATCH 002/627] agent: reload limits upon restart --- agent/agent.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/agent/agent.go b/agent/agent.go index 800302c1a..88dbf8d99 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2479,6 +2479,11 @@ func (a *Agent) DisableNodeMaintenance() { a.logger.Printf("[INFO] agent: Node left maintenance mode") } +func (a *Agent) loadLimits(conf *config.RuntimeConfig) { + a.config.RPCRateLimit = conf.RPCRateLimit + a.config.RPCMaxBurst = conf.RPCMaxBurst +} + func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error { // Bulk update the services and checks a.PauseSync() @@ -2513,6 +2518,8 @@ func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error { return fmt.Errorf("Failed reloading watches: %v", err) } + a.loadLimits(newCfg) + // Update filtered metrics metrics.UpdateFilter(newCfg.TelemetryAllowedPrefixes, newCfg.TelemetryBlockedPrefixes) From a6df13c6a3e39c950aa808399ea94fdd98e1d6db Mon Sep 17 00:00:00 2001 From: Geoffrey Grosenbach Date: Wed, 23 May 2018 14:59:31 -0700 Subject: [PATCH 003/627] Minor clarification of server nodes In **Node Removal** section, clarify that server nodes are being discussed. --- website/source/docs/guides/deployment.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/guides/deployment.html.md b/website/source/docs/guides/deployment.html.md index 28c473ed7..97341404d 100644 --- a/website/source/docs/guides/deployment.html.md +++ b/website/source/docs/guides/deployment.html.md @@ -167,7 +167,7 @@ Failed nodes will be automatically removed after 72 hours. 
This can happen if a This sequence can be accelerated with the [`force-leave`](https://www.consul.io/docs/commands/force-leave.html) command. Nodes running as servers will be removed from the Raft quorum. Force-leave may also be used to remove nodes that have accidentally joined the datacenter. Force-leave can only be applied to the nodes in its respective datacenter and cannot be executed on the nodes outside the datacenter. -Alternately, nodes can also be removed using `remove-peer` if `force-leave` is not effective in removing the nodes. +Alternately, server nodes can also be removed using `remove-peer` if `force-leave` is not effective in removing the nodes. $ consul operator raft remove-peer -address=x.x.x.x:8300 From 4b9c62b12633db42bfa3e8302ce40ae0f85f2237 Mon Sep 17 00:00:00 2001 From: Junpei Tsuji Date: Wed, 30 May 2018 13:56:56 +0900 Subject: [PATCH 004/627] demo: Added udp port forwarding --- demo/docker-compose-cluster/docker-compose.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/demo/docker-compose-cluster/docker-compose.yml b/demo/docker-compose-cluster/docker-compose.yml index 9a0993927..d9b022b3b 100644 --- a/demo/docker-compose-cluster/docker-compose.yml +++ b/demo/docker-compose-cluster/docker-compose.yml @@ -27,6 +27,7 @@ services: - "8400:8400" - "8500:8500" - "8600:8600" + - "8600:8600/udp" command: "agent -server -bootstrap-expect 3 -ui -client 0.0.0.0" networks: From d7a0d61e7dac1e7bc649b01622069f2065428e7e Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Fri, 8 Jun 2018 10:20:54 -0400 Subject: [PATCH 005/627] Initial progress on build system updates --- GNUmakefile | 51 +- build-support/docker/Build-Go.dockerfile | 16 + .../docker/Build-UI-Legacy.dockerfile | 16 + build-support/docker/Build-UI.dockerfile | 14 + build-support/docker/Makefile | 21 + build-support/scripts/build.sh | 40 ++ build-support/scripts/functions.sh | 464 ++++++++++++++++++ ui-v2/GNUmakefile | 9 +- ui-v2/config/environment.js | 14 + 9 files changed, 638 
insertions(+), 7 deletions(-) create mode 100644 build-support/docker/Build-Go.dockerfile create mode 100644 build-support/docker/Build-UI-Legacy.dockerfile create mode 100644 build-support/docker/Build-UI.dockerfile create mode 100644 build-support/docker/Makefile create mode 100755 build-support/scripts/build.sh create mode 100644 build-support/scripts/functions.sh diff --git a/GNUmakefile b/GNUmakefile index bebe8bce5..a02db4066 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -20,13 +20,27 @@ GOOS=$(shell go env GOOS) GOARCH=$(shell go env GOARCH) GOPATH=$(shell go env GOPATH) +ASSETFS_PATH?=agent/bindata_assetfs.go # Get the git commit -GIT_COMMIT=$(shell git rev-parse --short HEAD) -GIT_DIRTY=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true) -GIT_DESCRIBE=$(shell git describe --tags --always) +GIT_COMMIT?=$(shell git rev-parse --short HEAD) +GIT_DIRTY?=$(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true) +GIT_DESCRIBE?=$(shell git describe --tags --always) GIT_IMPORT=github.com/hashicorp/consul/version GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) -X $(GIT_IMPORT).GitDescribe=$(GIT_DESCRIBE) +GO_BUILD_TAG?=consul-build-go +UI_BUILD_TAG?=consul-build-ui +UI_LEGACY_BUILD_TAG?=consul-build-ui-legacy +BUILD_CONTAINER_NAME?=consul-builder + +export GO_BUILD_TAG +export UI_BUILD_TAG +export UI_LEGACY_BUILD_TAG +export BUILD_CONTAINER_NAME +export GIT_COMMIT +export GIT_DIRTY +export GIT_DESCRIBE +export GOTAGS export GOLDFLAGS # all builds binaries for all targets @@ -121,10 +135,37 @@ ui: # also run as part of the release build script when it verifies that there are no # changes to the UI assets that aren't checked in. static-assets: - @go-bindata-assetfs -pkg agent -prefix pkg -o agent/bindata_assetfs.go ./pkg/web_ui/... + @go-bindata-assetfs -pkg agent -prefix pkg -o $(ASSETFS_PATH) ./pkg/web_ui/... 
$(MAKE) format tools: go get -u -v $(GOTOOLS) -.PHONY: all ci bin dev dist cov test cover format vet ui static-assets tools vendorfmt +docker-images: + @$(MAKE) -C build-support/docker images + +go-build-image: + @$(MAKE) -C build-support/docker go-build-image + +ui-build-image: + @$(MAKE) -C build-support/docker ui-build-image + +ui-legacy-build-image: + @$(MAKE) -C build-support/docker ui-legacy-build-image + +static-assets-docker: go-build-image + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh assetfs + +go-docker: go-build-image + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh consul + +ui-docker: ui-build-image + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh ui + +ui-legacy-docker: ui-legacy-build-image + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh ui-legacy + +release-docker: ui-docker ui-legacy-docker static-assets-docker go-docker + +.PHONY: all ci bin dev dist cov test cover format vet ui static-assets tools vendorfmt +.PHONY: docker-images go-build-iamge ui-build-image ui-legacy-build-image static-assets-docker go-docker ui-docker ui-legacy-docker release-docker diff --git a/build-support/docker/Build-Go.dockerfile b/build-support/docker/Build-Go.dockerfile new file mode 100644 index 000000000..ea0aa25fb --- /dev/null +++ b/build-support/docker/Build-Go.dockerfile @@ -0,0 +1,16 @@ +ARG GOLANG_VERSION=1.10.1 +FROM golang:${GOLANG_VERSION} + +ARG GOTOOLS="github.com/elazarl/go-bindata-assetfs/... \ + github.com/hashicorp/go-bindata/... 
\ + github.com/magiconair/vendorfmt/cmd/vendorfmt \ + github.com/mitchellh/gox \ + golang.org/x/tools/cmd/cover \ + golang.org/x/tools/cmd/stringer \ + github.com/axw/gocov/gocov \ + gopkg.in/matm/v1/gocov-html" + +RUN go get -u -v ${GOTOOLS} && mkdir -p ${GOPATH}/src/github.com/hashicorp/consul + +WORKDIR $GOPATH/src/github.com/hashicorp/consul + diff --git a/build-support/docker/Build-UI-Legacy.dockerfile b/build-support/docker/Build-UI-Legacy.dockerfile new file mode 100644 index 000000000..7f3a9e6b8 --- /dev/null +++ b/build-support/docker/Build-UI-Legacy.dockerfile @@ -0,0 +1,16 @@ +FROM ubuntu:bionic + +RUN mkdir -p /consul-src/ui + +RUN apt-get update -y && \ + apt-get install --no-install-recommends -y -q \ + build-essential \ + git \ + ruby \ + ruby-dev \ + zip \ + zlib1g-dev && \ + gem install bundler + +WORKDIR /consul-src/ui +CMD make dist diff --git a/build-support/docker/Build-UI.dockerfile b/build-support/docker/Build-UI.dockerfile new file mode 100644 index 000000000..9a4a0198b --- /dev/null +++ b/build-support/docker/Build-UI.dockerfile @@ -0,0 +1,14 @@ +ARG ALPINE_VERSION=3.7 +FROM alpine:${ALPINE_VERSION} + +ARG NODEJS_VERSION=8.9.3-r1 +ARG MAKE_VERSION=4.2.1-r0 +ARG YARN_VERSION=1.7.0 + +RUN apk update && \ + apk add nodejs=${NODEJS_VERSION} nodejs-npm=${NODEJS_VERSION} make=${MAKE_VERSION} && \ + npm install --global yarn@${YARN_VERSION} && \ + mkdir /consul-src + +WORKDIR /consul-src +CMD make init build diff --git a/build-support/docker/Makefile b/build-support/docker/Makefile new file mode 100644 index 000000000..d9af9ed5a --- /dev/null +++ b/build-support/docker/Makefile @@ -0,0 +1,21 @@ +ifeq ($(FORCE_REBUILD),1) +NOCACHE=--no-cache +else +NOCACHE= +endif +GO_BUILD_TAG?=consul-build-go +UI_BUILD_TAG?=consul-build-ui +UI_LEGACY_BUILD_TAG?=consul-build-ui-legacy + +images: go-build-image ui-build-image ui-legacy-build-image + +go-build-image: + docker build $(NOCACHE) -t $(GO_BUILD_TAG) -f Build-Go.dockerfile . 
+ +ui-build-image: + docker build $(NOCACHE) -t $(UI_BUILD_TAG) -f Build-UI.dockerfile . + +ui-legacy-build-image: + docker build $(NOCACHE) -t $(UI_LEGACY_BUILD_TAG) -f Build-UI-Legacy.dockerfile . + +.PHONY: images go-build-image ui-build-image ui-legacy-build-image diff --git a/build-support/scripts/build.sh b/build-support/scripts/build.sh new file mode 100755 index 000000000..f1c387b22 --- /dev/null +++ b/build-support/scripts/build.sh @@ -0,0 +1,40 @@ +#!/bin/bash +pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null +SCRIPT_DIR=$(pwd) +pushd ../.. > /dev/null +SOURCE_DIR=$(pwd) +popd > /dev/null +popd > /dev/null + +source "${SCRIPT_DIR}/functions.sh" + +function main { + case "$1" in + consul ) + build_consul "${SOURCE_DIR}" "${GO_BUILD_TAG}" + return $? + ;; + ui ) + build_ui "${SOURCE_DIR}" "${UI_BUILD_TAG}" + return $? + ;; + ui-legacy ) + build_ui_legacy "${SOURCE_DIR}" "${UI_LEGACY_BUILD_TAG}" + return $? + ;; + version ) + parse_version "${SOURCE_DIR}" + return $? + ;; + assetfs ) + build_assetfs "${SOURCE_DIR}" "${GO_BUILD_TAG}" + return $? + ;; + *) + echo "Unkown build: '$1' - possible values are 'consul', 'ui', 'ui-legacy', 'version' and 'assetfs'" 1>&2 + return 1 + esac +} + +main $@ +exit $? 
\ No newline at end of file diff --git a/build-support/scripts/functions.sh b/build-support/scripts/functions.sh new file mode 100644 index 000000000..2b44d2401 --- /dev/null +++ b/build-support/scripts/functions.sh @@ -0,0 +1,464 @@ +# GPG Key ID to use for publically released builds +HASHICORP_GPG_KEY="348FFC4C" + +UI_BUILD_CONTAINER_DEFAULT="consul-build-ui" +UI_LEGACY_BUILD_CONTAINER_DEFAULT="consul-build-ui-legacy" + +function is_set { + # Arguments: + # $1 - string value to check its truthiness + # + # Return: + # 0 - is truthy (backwards I know but allows syntax like `if is_set ` to work) + # 1 - is not truthy + + local val=$(tr '[:upper:]' '[:lower:]' <<< "$1") + case $val in + 1 | t | true | y | yes) + return 0 + ;; + *) + return 1 + ;; + esac +} + +function have_gpg_key { + # Arguments: + # $1 - GPG Key id to check if we have installed + # + # Return: + # 0 - success (we can use this key for signing) + # * - failure (key cannot be used) + + gpg --list-secret-keys $1 >dev/null 2>&1 + return $? +} + +function parse_version { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - boolean value for whether to omit the release version from the version string + # + # Return: + # 0 - success (will write the version to stdout) + # * - error (no version output) + # + # Notes: + # If the GIT_DESCRIBE environment variable is present then it is used as the version + # If the GIT_COMMIT environment variable is preset it will be added to the end of + # the version string. + + local vfile="${1}/version/version.go" + + # ensure the version file exists + if ! test -f "${vfile}" + then + echo "Error - File not found: ${vfile}" 1>&2 + return 1 + fi + + # Get the main version out of the source file + version=$(awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) + + # override the version from source with the value of the GIT_DESCRIBE env var if present + if test -n "$GIT_DESCRIBE" + then + version=$GIT_DESCRIBE + fi + + if ! 
is_set $2 + then + # Get the release version out of the source file + release=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) + + # When no GIT_DESCRIBE env var is present and no release is in the source then we + # are definitely in dev mode + if test -z "$GIT_DESCRIBE" -a -z "$release" + then + release="dev" + fi + + # Add the release to the version + if test -n "$release" + then + version="${version}-${release}" + + # add the git commit to the version + if test -n "$GIT_COMMIT" + then + version="${version} (${GIT_COMMIT})" + fi + fi + fi + + # Output the version + echo "$version" | tr -d "'" + return 0 +} + +function get_version { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - Whether the release version should be parsed from source (optional) + # + # Returns: + # 0 - success (the version is also echoed to stdout) + # 1 - error + # + # Notes: + # If a VERSION environment variable is present it will override any parsing of the version from the source + # In addition to processing the main version.go, version_*.go files will be processed if they have + # a Go build tag that matches the one in the GOTAGS environment variable. This tag processing is + # primitive though and will not match complex build tags in the files with negation etc. 
+ + local vers="$VERSION" + if test -z "$vers" + then + # parse the OSS version from version.go + vers="$(parse_version ${1} ${2})" + + # try to determine the version if we have build tags + for tag in "$GOTAGS" + do + for file in $(ls ${1}/version/version_*.go | sort) + do + if grep -q "// +build $tag" $file + then + vers=$(awk -F\" '/Version =/ {print $2; exit}' < $file ) + fi + done + done + fi + + if test -z "$vers" + then + return 1 + else + echo $vers + return 0 + fi +} + +function tag_release { + # Arguments: + # $1 - Version string to use for tagging the release + # $2 - Alternative GPG key id used for signing the release commit (optional) + # + # Returns: + # 0 - success + # * - error + # + # Notes: + # If the RELEASE_UNSIGNED environment variable is set then no gpg signing will occur + + if ! test -d "$1" + then + echo "ERROR: '$1' is not a directory. tag_release must be called with the path to the top level source as the first argument'" 1>&2 + return 1 + fi + + if test -z "$2" + then + echo "ERROR: tag_release must be called with a version number as the second argument" 1>&2 + return 1 + fi + + # determine whether the gpg key to use is being overridden + local gpg_key=${HASHICORP_GPG_KEY} + if test -n "$3" + then + gpg_key=$3 + fi + + pushd "$1" > /dev/null + local ret=0 + + # perform an usngined release if requested (mainly for testing locally) + if is_set "$RELEASE_UNSIGNED" + then + ( + git commit --allow-empty -a -m "Release v${2}" && + git tag -a -m "Version ${2}" "v${2}" master + ) + ret=$? + # perform a signed release (official releases should do this) + elif have_gpg_key ${gpg_key} + then + ( + git commit --allow-empty -a --gpg-sign=${gpg_key} -m "Release v${2}" && + git tag -a -m "Version ${2}" -s -u ${gpg_key} "v${2}" master + ) + ret=$? 
+ # unsigned release not requested and gpg key isn't useable + else + echo "ERROR: GPG key ${gpg_key} is not in the local keychain - to continue set RELEASE_UNSIGNED=1 in the env" + ret=1 + fi + popd > /dev/null + return $ret +} + +function build_ui { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - The docker image to run the build within (optional) + # + # Returns: + # 0 - success + # * - error + # + # Notes: + # Use the GIT_COMMIT environment variable to pass off to the build + + if ! test -d "$1" + then + echo "ERROR: '$1' is not a directory. build_ui must be called with the path to the top level source as the first argument'" 1>&2 + return 1 + fi + + local image_name=${UI_BUILD_CONTAINER_DEFAULT} + if test -n "$2" + then + image_name="$2" + fi + + local sdir="$1" + local ui_dir="${1}/ui-v2" + + # parse the version + version=$(parse_version "${sdir}") + + # make sure we run within the ui dir + pushd ${ui_dir} > /dev/null + + echo "Creating the UI Build Container" + local container_id=$(docker create -it -e "CONSUL_GIT_SHA=${GIT_COMMIT}" -e "CONSUL_VERSION=${version}" ${image_name}) + local ret=$? + if test $ret -eq 0 + then + echo "Copying the source from '${ui_dir}' to /consul-src within the container" + ( + docker cp . ${container_id}:/consul-src && + echo "Running build in container" && docker start -i ${container_id} && + rm -rf ${1}/ui-v2/dist && + echo "Copying back artifacts" && docker cp ${container_id}:/consul-src/dist ${1}/ui-v2/dist + ) + ret=$? + docker rm ${container_id} > /dev/null + fi + + if test $ret -eq 0 + then + rm -rf ${1}/pkg/web_ui/v2 + cp -r ${1}/ui-v2/dist ${1}/pkg/web_ui/v2 + fi + popd > /dev/null + return $ret +} + +function build_ui_legacy { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - The docker image to run the build within (optional) + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + echo "ERROR: '$1' is not a directory. 
build_ui_legacy must be called with the path to the top level source as the first argument'" 1>&2 + return 1 + fi + + local sdir="$1" + local ui_legacy_dir="${sdir}/ui" + + local image_name=${UI_LEGACY_BUILD_CONTAINER_DEFAULT} + if test -n "$2" + then + image_name="$2" + fi + + pushd ${ui_legacy_dir} > /dev/null + echo "Creating the Legacy UI Build Container" + rm -r ${sdir}/pkg/web_ui/v1 >/dev/null 2>&1 + mkdir -p ${sdir}/pkg/web_ui/v1 + local container_id=$(docker create -it ${image_name}) + local ret=$? + if test $ret -eq 0 + then + echo "Copying the source from '${ui_legacy_dir}' to /consul-src/ui within the container" + ( + docker cp . ${container_id}:/consul-src/ui && + echo "Running build in container" && + docker start -i ${container_id} && + echo "Copying back artifacts" && + docker cp ${container_id}:/consul-src/pkg/web_ui ${sdir}/pkg/web_ui/v1 + ) + ret=$? + docker rm ${container_id} > /dev/null + fi + popd > /dev/null + return $ret +} + +function build_assetfs { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - The docker image to run the build within (optional) + # + # Returns: + # 0 - success + # * - error + # + # Note: + # The GIT_COMMIT, GIT_DIRTY and GIT_DESCRIBE environment variables will be used if present + + if ! test -d "$1" + then + echo "ERROR: '$1' is not a directory. build_assetfs must be called with the path to the top level source as the first argument'" 1>&2 + return 1 + fi + + local sdir="$1" + local image_name=${GO_BUILD_CONTAINER_DEFAULT} + if test -n "$2" + then + image_name="$2" + fi + + pushd ${sdir} > /dev/null + echo "Creating the Go Build Container" + local container_id=$(docker create -it -e GIT_COMMIT=${GIT_COMMIT} -e GIT_DIRTY=${GIT_DIRTY} -e GIT_DESCRIBE=${GIT_DESCRIBE} ${image_name} make static-assets ASSETFS_PATH=bindata_assetfs.go) + local ret=$? 
+ if test $ret -eq 0 + then + echo "Copying the sources from '${sdir}/(pkg|GNUmakefile)' to /go/src/github.com/hashicorp/consul/pkg" + ( + tar -c pkg/web_ui GNUmakefile | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul && + echo "Running build in container" && docker start -i ${container_id} && + echo "Copying back artifacts" && docker cp ${container_id}:/go/src/github.com/hashicorp/consul/bindata_assetfs.go ${sdir}/agent/bindata_assetfs.go + ) + ret=$? + docker rm ${container_id} > /dev/null + fi + popd >/dev/null + return $ret +} + +function build_consul { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - The docker image to run the build within (optional) + # + # Returns: + # 0 - success + # * - error + # + # Note: + # The GOLDFLAGS and GOTAGS environment variables will be used if set + # If the CONSUL_DEV environment var is truthy only the local platform/architecture is built. + # If the XC_OS or the XC_ARCH environment vars are present then only those platforms/architectures + # will be built. Otherwise all supported platform/architectures are built + + if ! test -d "$1" + then + echo "ERROR: '$1' is not a directory. build_consul must be called with the path to the top level source as the first argument'" 1>&2 + return 1 + fi + + local sdir="$1" + local image_name=${GO_BUILD_CONTAINER_DEFAULT} + if test -n "$2" + then + image_name="$2" + fi + + pushd ${sdir} > /dev/null + echo "Creating the Go Build Container" + if is_set "${CONSUL_DEV}" + then + XC_OS=$(go_env GOOS) + XC_ARCH=$(go env GOARCH) + else + XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"} + XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"} + fi + + local container_id=$(docker create -it ${image_name} gox -os="${XC_OS}" -arch="${XC_ARCH}" -osarch="!darwin/arm !darwin/arm64" -ldflags "${GOLDFLAGS}" -output "pkg/{{.OS}}_{{.Arch}}/consul" -tags="${GOTAGS}") + ret=$? 
+ + if test $ret -eq 0 + then + echo "Copying the source from '${sdir}' to /go/src/github.com/hashicorp/consul/pkg" + ( + tar -c $(ls | grep -v "ui\|ui-v2\|website\|bin\|.git") | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul && + echo "Running build in container" && + docker start -i ${container_id} && + echo "Copying back artifacts" && + docker cp ${container_id}:/go/src/github.com/hashicorp/consul/pkg/ pkg.new + ) + ret=$? + docker rm ${container_id} > /dev/null + + DEV_PLATFORM="./pkg.new/$(go env GOOS)_$(go env GOARCH)" + for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f) + do + cp ${F} bin/ + cp ${F} ${GOPATH}/bin + done + + cp -r pkg.new/* pkg/ + rm -r pkg.new + fi + popd > /dev/null + return $ret +} + +function package_release { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - Version to use in the names of the zip files (optional) + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + echo "ERROR: '$1' is not a directory. package_release must be called with the path to the top level source as the first argument'" 1>&2 + return 1 + fi + + local vers="${2}" + if test -z "${vers}" + then + vers=$(get_version $1 false) + ret=$? + if test "$ret" -ne 0 + then + echo "ERROR: failed to determine the version." 1>&2 + return $ret + fi + fi + + local sdir="$1" + local ret=0 + for platform in $(find "${sdir}/pkg" -mindepth 1 -maxdepth 1 -type d) + do + local os_arch=$(basename $platform) + pushd "${platform}" > /dev/null + zip "${sdir}/pkg/dist/consul_${vers}_${os_arch}.zip" ./* + ret=$? 
+ popd > /dev/null + + if test "$ret" -ne 0 + then + break + fi + done + + return $ret +} \ No newline at end of file diff --git a/ui-v2/GNUmakefile b/ui-v2/GNUmakefile index 9b5cb8ba9..70d15d384 100644 --- a/ui-v2/GNUmakefile +++ b/ui-v2/GNUmakefile @@ -3,8 +3,13 @@ ROOT:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) server: yarn run start -dist: +init: + yarn install + +build: yarn run build + +dist: build mv dist ../pkg/web_ui/v2 lint: @@ -12,4 +17,4 @@ lint: format: yarn run format:js -.PHONY: server dist lint format +.PHONY: server build dist lint format diff --git a/ui-v2/config/environment.js b/ui-v2/config/environment.js index d1ac0949b..b72e7b00d 100644 --- a/ui-v2/config/environment.js +++ b/ui-v2/config/environment.js @@ -29,12 +29,19 @@ module.exports = function(environment) { }; ENV = Object.assign({}, ENV, { CONSUL_GIT_SHA: (function() { + if (process.env.CONSUL_GIT_SHA) { + return process.env.CONSUL_GIT_SHA + } + return require('child_process') .execSync('git rev-parse --short HEAD') .toString() .trim(); })(), CONSUL_VERSION: (function() { + if (process.env.CONSUL_VERSION) { + return process.env.CONSUL_VERSION + } // see /scripts/dist.sh:8 const version_go = `${path.dirname(path.dirname(__dirname))}/version/version.go`; const contents = fs.readFileSync(version_go).toString(); @@ -46,6 +53,13 @@ module.exports = function(environment) { .trim() .split('"')[1]; })(), + CONSUL_BINARY_TYPE: (function() { + if (process.env.CONSUL_BINARY_TYPE) { + return process.env.CONSUL_BINARY_TYPE + } + + return "oss" + }), CONSUL_DOCUMENTATION_URL: 'https://www.consul.io/docs', CONSUL_COPYRIGHT_URL: 'https://www.hashicorp.com', CONSUL_COPYRIGHT_YEAR: '2018', From 6604828009f099db846af971297b4027a29ae93f Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 11 Jun 2018 11:49:04 -0400 Subject: [PATCH 006/627] Add configuration entry to control including TXT records for node meta in DNS responses If set to false, the only way to retrieve TXT records for node 
meta is to specifically query for TXT records. --- agent/config/builder.go | 10 +- agent/config/config.go | 1 + agent/config/runtime.go | 5 + agent/dns.go | 21 ++- agent/dns_test.go | 168 ++++++++++++++++++++++ website/source/docs/agent/options.html.md | 5 + 6 files changed, 205 insertions(+), 5 deletions(-) diff --git a/agent/config/builder.go b/agent/config/builder.go index 6048dab92..c96183961 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -592,6 +592,7 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { DNSRecursors: dnsRecursors, DNSServiceTTL: dnsServiceTTL, DNSUDPAnswerLimit: b.intVal(c.DNS.UDPAnswerLimit), + DNSNodeMetaTXT: b.boolValWithDefault(c.DNS.NodeMetaTXT, true), // HTTP HTTPPort: httpPort, @@ -1010,13 +1011,18 @@ func (b *Builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition { } } -func (b *Builder) boolVal(v *bool) bool { +func (b *Builder) boolValWithDefault(v *bool, default_val bool) bool { if v == nil { - return false + return default_val } + return *v } +func (b *Builder) boolVal(v *bool) bool { + return b.boolValWithDefault(v, false) +} + func (b *Builder) durationVal(name string, v *string) (d time.Duration) { if v == nil { return 0 diff --git a/agent/config/config.go b/agent/config/config.go index 79d274d0d..48227d955 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -360,6 +360,7 @@ type DNS struct { RecursorTimeout *string `json:"recursor_timeout,omitempty" hcl:"recursor_timeout" mapstructure:"recursor_timeout"` ServiceTTL map[string]string `json:"service_ttl,omitempty" hcl:"service_ttl" mapstructure:"service_ttl"` UDPAnswerLimit *int `json:"udp_answer_limit,omitempty" hcl:"udp_answer_limit" mapstructure:"udp_answer_limit"` + NodeMetaTXT *bool `json:"additional_node_meta_txt,omitempty" hcl:"additional_node_meta_txt" mapstructure:"additional_node_meta_txt"` } type HTTPConfig struct { diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 66e7e79e7..c1df5a2d5 
100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -281,6 +281,11 @@ type RuntimeConfig struct { // hcl: dns_config { udp_answer_limit = int } DNSUDPAnswerLimit int + // DNSNodeMetaTXT controls whether DNS queries will synthesize + // TXT records for the node metadata and add them when not specifically + // request (query type = TXT). If unset this will default to true + DNSNodeMetaTXT bool + // DNSRecursors can be set to allow the DNS servers to recursively // resolve non-consul domains. // diff --git a/agent/dns.go b/agent/dns.go index 1d3c46d97..1b8c2e20c 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -51,6 +51,7 @@ type dnsConfig struct { ServiceTTL map[string]time.Duration UDPAnswerLimit int ARecordLimit int + NodeMetaTXT bool } // DNSServer is used to wrap an Agent and expose various @@ -109,6 +110,7 @@ func GetDNSConfig(conf *config.RuntimeConfig) *dnsConfig { SegmentName: conf.SegmentName, ServiceTTL: conf.DNSServiceTTL, UDPAnswerLimit: conf.DNSUDPAnswerLimit, + NodeMetaTXT: conf.DNSNodeMetaTXT, } } @@ -671,7 +673,20 @@ func (d *DNSServer) formatNodeRecord(node *structs.Node, addr, qName string, qTy } } - if node != nil && (qType == dns.TypeANY || qType == dns.TypeTXT) { + node_meta_txt := true + + if node == nil { + node_meta_txt = false + } else if qType == dns.TypeANY { + // Since any RR type is requested allow the configuration to + // determine whether or not node meta gets added as TXT records + node_meta_txt = d.config.NodeMetaTXT + } else if qType != dns.TypeTXT { + // qType isn't TXT or ANY so avoid emitting the TXT records + node_meta_txt = false + } + + if node_meta_txt { for key, value := range node.Meta { txt := value if !strings.HasPrefix(strings.ToLower(key), "rfc1035-") { @@ -782,8 +797,8 @@ func (d *DNSServer) trimTCPResponse(req, resp *dns.Msg) (trimmed bool) { originalNumRecords := len(resp.Answer) // It is not possible to return more than 4k records even with compression - // Since we are performing binary search it 
is not a big deal, but it - // improves a bit performance, even with binary search + // Since we are performing binary search it is not a big deal, but it + // improves a bit performance, even with binary search truncateAt := 4096 if req.Question[0].Qtype == dns.TypeSRV { // More than 1024 SRV records do not fit in 64k diff --git a/agent/dns_test.go b/agent/dns_test.go index 41aca8e0e..454d598c3 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -472,6 +472,51 @@ func TestDNS_NodeLookup_TXT(t *testing.T) { } } +func TestDNS_NodeLookup_TXT_DontSuppress(t *testing.T) { + a := NewTestAgent(t.Name(), `dns_config = { additional_node_meta_txt = false }`) + defer a.Shutdown() + + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "google", + Address: "127.0.0.1", + NodeMeta: map[string]string{ + "rfc1035-00": "value0", + "key0": "value1", + }, + } + + var out struct{} + if err := a.RPC("Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("google.node.consul.", dns.TypeTXT) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Should have the 1 TXT record reply + if len(in.Answer) != 2 { + t.Fatalf("Bad: %#v", in) + } + + txtRec, ok := in.Answer[0].(*dns.TXT) + if !ok { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if len(txtRec.Txt) != 1 { + t.Fatalf("Bad: %#v", in.Answer[0]) + } + if txtRec.Txt[0] != "value0" && txtRec.Txt[0] != "key0=value1" { + t.Fatalf("Bad: %#v", in.Answer[0]) + } +} + func TestDNS_NodeLookup_ANY(t *testing.T) { a := NewTestAgent(t.Name(), ``) defer a.Shutdown() @@ -513,6 +558,42 @@ func TestDNS_NodeLookup_ANY(t *testing.T) { } +func TestDNS_NodeLookup_ANY_SuppressTXT(t *testing.T) { + a := NewTestAgent(t.Name(), `dns_config = { additional_node_meta_txt = false }`) + defer a.Shutdown() + + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.1", + NodeMeta: 
map[string]string{ + "key": "value", + }, + } + + var out struct{} + if err := a.RPC("Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("bar.node.consul.", dns.TypeANY) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAnswer := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, + A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 + }, + } + verify.Values(t, "answer", in.Answer, wantAnswer) +} + func TestDNS_EDNS0(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") @@ -4613,6 +4694,93 @@ func TestDNS_ServiceLookup_FilterACL(t *testing.T) { } } +func TestDNS_ServiceLookup_MetaTXT(t *testing.T) { + a := NewTestAgent(t.Name(), `dns_config = { additional_node_meta_txt = true }`) + defer a.Shutdown() + + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.1", + NodeMeta: map[string]string{ + "key": "value", + }, + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"master"}, + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC("Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("db.service.consul.", dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAdditional := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, + A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 + }, + &dns.TXT{ + Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xa}, + Txt: []string{"key=value"}, + }, + } + verify.Values(t, "additional", in.Extra, wantAdditional) +} + +func TestDNS_ServiceLookup_SuppressTXT(t *testing.T) { + a := NewTestAgent(t.Name(), 
`dns_config = { additional_node_meta_txt = false }`) + defer a.Shutdown() + + // Register a node with a service. + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: "bar", + Address: "127.0.0.1", + NodeMeta: map[string]string{ + "key": "value", + }, + Service: &structs.NodeService{ + Service: "db", + Tags: []string{"master"}, + Port: 12345, + }, + } + + var out struct{} + if err := a.RPC("Catalog.Register", args, &out); err != nil { + t.Fatalf("err: %v", err) + } + + m := new(dns.Msg) + m.SetQuestion("db.service.consul.", dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + if err != nil { + t.Fatalf("err: %v", err) + } + + wantAdditional := []dns.RR{ + &dns.A{ + Hdr: dns.RR_Header{Name: "bar.node.dc1.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, + A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 + }, + } + verify.Values(t, "additional", in.Extra, wantAdditional) +} + func TestDNS_AddressLookup(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") diff --git a/website/source/docs/agent/options.html.md b/website/source/docs/agent/options.html.md index 4badb25ca..1e1e274b9 100644 --- a/website/source/docs/agent/options.html.md +++ b/website/source/docs/agent/options.html.md @@ -777,6 +777,11 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass [RFC 6724](https://tools.ietf.org/html/rfc6724) and as a result it should be increasingly uncommon to need to change this value with modern resolvers). + + * `additional_node_meta_txt` - If set + to false, node metadata will not be synthesized into TXT records and returned except for queries specifically for + TXT records. By default, TXT records will be generated for node queries with an ANY query type or for SRV queries + of services. * `domain` Equivalent to the [`-domain` command-line flag](#_domain). 
From c589991452f1525a54a2851b3f7122529c8272da Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 11 Jun 2018 15:51:17 -0400 Subject: [PATCH 007/627] Apply the limits to the clients rpcLimiter --- agent/agent.go | 17 ++++++++++++++--- agent/consul/client.go | 17 +++++++++++++---- agent/consul/server.go | 6 ++++++ 3 files changed, 33 insertions(+), 7 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 88dbf8d99..caf515084 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -73,6 +73,7 @@ type delegate interface { SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io.Writer, replyFn structs.SnapshotReplyFn) error Shutdown() error Stats() map[string]map[string]string + ReloadConfig(config *consul.Config) error } // notifier is called after a successful JoinLAN. @@ -2480,8 +2481,8 @@ func (a *Agent) DisableNodeMaintenance() { } func (a *Agent) loadLimits(conf *config.RuntimeConfig) { - a.config.RPCRateLimit = conf.RPCRateLimit - a.config.RPCMaxBurst = conf.RPCMaxBurst + a.config.RPCRateLimit = conf.RPCRateLimit + a.config.RPCMaxBurst = conf.RPCMaxBurst } func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error { @@ -2518,7 +2519,17 @@ func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error { return fmt.Errorf("Failed reloading watches: %v", err) } - a.loadLimits(newCfg) + a.loadLimits(newCfg) + + // create the config for the rpc server/client + consulCfg, err := a.consulConfig() + if err != nil { + return err + } + + if err := a.delegate.ReloadConfig(consulCfg); err != nil { + return err + } // Update filtered metrics metrics.UpdateFilter(newCfg.TelemetryAllowedPrefixes, newCfg.TelemetryBlockedPrefixes) diff --git a/agent/consul/client.go b/agent/consul/client.go index 96baeb174..84e1a7319 100644 --- a/agent/consul/client.go +++ b/agent/consul/client.go @@ -7,6 +7,7 @@ import ( "os" "strconv" "sync" + "sync/atomic" "time" "github.com/armon/go-metrics" @@ -56,7 +57,7 @@ type Client struct { // rpcLimiter is used to rate limit the 
total number of RPCs initiated // from an agent. - rpcLimiter *rate.Limiter + rpcLimiter atomic.Value // eventCh is used to receive events from the // serf cluster in the datacenter @@ -125,12 +126,13 @@ func NewClientLogger(config *Config, logger *log.Logger) (*Client, error) { c := &Client{ config: config, connPool: connPool, - rpcLimiter: rate.NewLimiter(config.RPCRate, config.RPCMaxBurst), eventCh: make(chan serf.Event, serfEventBacklog), logger: logger, shutdownCh: make(chan struct{}), } + c.rpcLimiter.Store(rate.NewLimiter(config.RPCRate, config.RPCMaxBurst)) + // Initialize the LAN Serf c.serf, err = c.setupSerf(config.SerfLANConfig, c.eventCh, serfLANSnapshot) @@ -251,7 +253,7 @@ TRY: // Enforce the RPC limit. metrics.IncrCounter([]string{"consul", "client", "rpc"}, 1) metrics.IncrCounter([]string{"client", "rpc"}, 1) - if !c.rpcLimiter.Allow() { + if !c.rpcLimiter.Load().(*rate.Limiter).Allow() { metrics.IncrCounter([]string{"consul", "client", "rpc", "exceeded"}, 1) metrics.IncrCounter([]string{"client", "rpc", "exceeded"}, 1) return structs.ErrRPCRateExceeded @@ -295,7 +297,7 @@ func (c *Client) SnapshotRPC(args *structs.SnapshotRequest, in io.Reader, out io // Enforce the RPC limit. 
metrics.IncrCounter([]string{"consul", "client", "rpc"}, 1) metrics.IncrCounter([]string{"client", "rpc"}, 1) - if !c.rpcLimiter.Allow() { + if !c.rpcLimiter.Load().(*rate.Limiter).Allow() { metrics.IncrCounter([]string{"consul", "client", "rpc", "exceeded"}, 1) metrics.IncrCounter([]string{"client", "rpc", "exceeded"}, 1) return structs.ErrRPCRateExceeded @@ -360,3 +362,10 @@ func (c *Client) GetLANCoordinate() (lib.CoordinateSet, error) { cs := lib.CoordinateSet{c.config.Segment: lan} return cs, nil } + +// ReloadConfig is used to have the Client do an online reload of +// relevant configuration information +func (c *Client) ReloadConfig(config *Config) error { + c.rpcLimiter.Store(rate.NewLimiter(config.RPCRate, config.RPCMaxBurst)) + return nil +} diff --git a/agent/consul/server.go b/agent/consul/server.go index 128f67081..b1aca96a6 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -1040,6 +1040,12 @@ func (s *Server) GetLANCoordinate() (lib.CoordinateSet, error) { return cs, nil } +// ReloadConfig is used to have the Server do an online reload of +// relevant configuration information +func (s *Server) ReloadConfig(config *Config) error { + return nil +} + // Atomically sets a readiness state flag when leadership is obtained, to indicate that server is past its barrier write func (s *Server) setConsistentReadReady() { atomic.StoreInt32(&s.readyForConsistentReads, 1) From 35ffa2276c51b8279241c4b504ffc6a88b7c59ba Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 11 Jun 2018 15:54:55 -0400 Subject: [PATCH 008/627] Update docs about rpc limits being reloadable --- website/source/docs/agent/options.html.md | 1 + 1 file changed, 1 insertion(+) diff --git a/website/source/docs/agent/options.html.md b/website/source/docs/agent/options.html.md index 0dcf866e2..cc01aef4a 100644 --- a/website/source/docs/agent/options.html.md +++ b/website/source/docs/agent/options.html.md @@ -1483,3 +1483,4 @@ items which are reloaded include: * Node Metadata * 
Metric Prefix Filter * Discard Check Output +* RPC rate limiting \ No newline at end of file From c41fa6c01021c9f9e27819f8b86c1b1d55693ee1 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 11 Jun 2018 16:23:51 -0400 Subject: [PATCH 009/627] Add a Client ReloadConfig test --- agent/consul/client_test.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/agent/consul/client_test.go b/agent/consul/client_test.go index 20647b3f6..f61541b5e 100644 --- a/agent/consul/client_test.go +++ b/agent/consul/client_test.go @@ -15,6 +15,8 @@ import ( "github.com/hashicorp/consul/testutil/retry" "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/serf/serf" + "github.com/stretchr/testify/require" + "golang.org/x/time/rate" ) func testClientConfig(t *testing.T) (string, *Config) { @@ -665,3 +667,25 @@ func TestClient_Encrypted(t *testing.T) { t.Fatalf("should be encrypted") } } + +func TestClient_Reload(t *testing.T) { + t.Parallel() + dir1, c := testClientWithConfig(t, func(c *Config) { + c.RPCRate = 500 + c.RPCMaxBurst = 5000 + }) + defer os.RemoveAll(dir1) + defer c.Shutdown() + + limiter := c.rpcLimiter.Load().(*rate.Limiter) + require.Equal(t, rate.Limit(500), limiter.Limit()) + require.Equal(t, 5000, limiter.Burst()) + + c.config.RPCRate = 1000 + c.config.RPCMaxBurst = 10000 + + require.NoError(t, c.ReloadConfig(c.config)) + limiter = c.rpcLimiter.Load().(*rate.Limiter) + require.Equal(t, rate.Limit(1000), limiter.Limit()) + require.Equal(t, 10000, limiter.Burst()) +} From f9d0323c0bcb54cc65b9ca27115bc043f86f02b9 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 11 Jun 2018 16:27:39 -0400 Subject: [PATCH 010/627] Fixup a weird merge problem --- agent/agent.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 6d9f80252..cacd08521 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1848,11 +1848,6 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *structs.CheckType, 
check.CheckID, checks.MinInterval)) chkType.Interval = checks.MinInterval } - if chkType.Script != "" { - a.logger.Printf("[WARN] agent: check %q has the 'script' field, which has been deprecated "+ - "and replaced with the 'args' field. See https://www.consul.io/docs/agent/checks.html", - check.CheckID) - } if a.dockerClient == nil { dc, err := checks.NewDockerClient(os.Getenv("DOCKER_HOST"), checks.BufSize) @@ -1890,11 +1885,6 @@ func (a *Agent) AddCheck(check *structs.HealthCheck, chkType *structs.CheckType, check.CheckID, checks.MinInterval) chkType.Interval = checks.MinInterval } - if chkType.Script != "" { - a.logger.Printf("[WARN] agent: check %q has the 'script' field, which has been deprecated "+ - "and replaced with the 'args' field. See https://www.consul.io/docs/agent/checks.html", - check.CheckID) - } monitor := &checks.CheckMonitor{ Notify: a.State, From 351841c7b2acd6aa16d51e0fba374bf5b505f773 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Tue, 12 Jun 2018 16:55:52 -0400 Subject: [PATCH 011/627] Redo the build system MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Improvements: - More modular - Building within docker doesn’t use volumes so can be run on a remote docker host - Build containers include only minimal context so they only rarely need to be rebuilt and most of the time can be used from the cache. - 3 build containers instead of 1. One based off of the upstream golang containers for building go stuff with all our required GOTOOLS installed. One like the old container based off ubuntu bionic for building the old UI (didn’t bother creating a much better container as this shouldn’t be needed once we completely remove the legacy UI). One for building the new UI. Its alpine based with all the node, ember, yarn stuff installed. 
- Top level makefile has the ability to do a container based build without running make dist - Can build for arbitrary platforms at the top level using: make consul-docker XC_OS=… XC_ARCH=… - overridable functionality to allow for customizations to the enterprise build (like to generate multiple binaries) - unified how we compile our go. always use gox even for dev-builds or rather always use the tooling around our scripts which will make sure things get copied to the correct places throughout the filesystem. --- GNUmakefile | 32 +- build-support/functions/00-vars.sh | 20 ++ build-support/functions/01-util.sh | 186 ++++++++++ build-support/functions/02-build.sh | 382 +++++++++++++++++++++ build-support/functions/03-release.sh | 299 ++++++++++++++++ build-support/scripts/build.sh | 368 +++++++++++++++++++- build-support/scripts/functions.sh | 475 +------------------------- scripts/build.sh | 77 ----- scripts/consul-builder/Dockerfile | 34 -- scripts/dist.sh | 63 ---- scripts/dist_build.sh | 46 --- scripts/fixup_times.sh | 10 - scripts/ui.sh | 18 - scripts/ui_build.sh | 31 -- scripts/vagrant-linux-priv-go.sh | 42 --- 15 files changed, 1273 insertions(+), 810 deletions(-) create mode 100644 build-support/functions/00-vars.sh create mode 100644 build-support/functions/01-util.sh create mode 100644 build-support/functions/02-build.sh create mode 100644 build-support/functions/03-release.sh delete mode 100755 scripts/build.sh delete mode 100644 scripts/consul-builder/Dockerfile delete mode 100755 scripts/dist.sh delete mode 100755 scripts/dist_build.sh delete mode 100755 scripts/fixup_times.sh delete mode 100755 scripts/ui.sh delete mode 100755 scripts/ui_build.sh delete mode 100755 scripts/vagrant-linux-priv-go.sh diff --git a/GNUmakefile b/GNUmakefile index a02db4066..ba7b16d06 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -33,6 +33,10 @@ UI_BUILD_TAG?=consul-build-ui UI_LEGACY_BUILD_TAG?=consul-build-ui-legacy BUILD_CONTAINER_NAME?=consul-builder +DIST_TAG?=1 
+DIST_BUILD?=1 +DIST_SIGN?=1 + export GO_BUILD_TAG export UI_BUILD_TAG export UI_LEGACY_BUILD_TAG @@ -47,18 +51,13 @@ export GOLDFLAGS all: bin bin: tools - @mkdir -p bin/ - @GOTAGS='$(GOTAGS)' sh -c "'$(CURDIR)/scripts/build.sh'" + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh consul-local # dev creates binaries for testing locally - these are put into ./bin and $GOPATH dev: changelogfmt vendorfmt dev-build dev-build: - @echo "--> Building consul" - mkdir -p pkg/$(GOOS)_$(GOARCH)/ bin/ - go install -ldflags '$(GOLDFLAGS)' -tags '$(GOTAGS)' - cp $(GOPATH)/bin/consul bin/ - cp $(GOPATH)/bin/consul pkg/$(GOOS)_$(GOARCH) + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh consul-local -o '$(GOOS)' -a '$(GOARCH)' vendorfmt: @echo "--> Formatting vendor/vendor.json" @@ -71,12 +70,11 @@ changelogfmt: # linux builds a linux package independent of the source platform linux: - mkdir -p pkg/linux_amd64/ - GOOS=linux GOARCH=amd64 go build -ldflags '$(GOLDFLAGS)' -tags '$(GOTAGS)' -o pkg/linux_amd64/consul + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh consul-local -o linux -a amd64 # dist builds binaries for all platforms and packages them for distribution dist: - @GOTAGS='$(GOTAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'" + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh release -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' cov: gocov test $(GOFILES) | gocov-html > /tmp/coverage.html @@ -128,8 +126,7 @@ vet: # Build the static web ui and build static assets inside a Docker container, the # same way a release build works. This implicitly does a "make static-assets" at # the end. -ui: - @sh -c "'$(CURDIR)/scripts/ui.sh'" +ui: ui-legacy-docker ui-docker static-assets # If you've run "make ui" manually then this will get called for you. 
This is # also run as part of the release build script when it verifies that there are no @@ -141,6 +138,12 @@ static-assets: tools: go get -u -v $(GOTOOLS) +version: + @echo -n "Version without release: " + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh version + @echo -n "Version with release: " + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh version -R + docker-images: @$(MAKE) -C build-support/docker images @@ -156,7 +159,7 @@ ui-legacy-build-image: static-assets-docker: go-build-image @$(SHELL) $(CURDIR)/build-support/scripts/build.sh assetfs -go-docker: go-build-image +consul-docker: go-build-image @$(SHELL) $(CURDIR)/build-support/scripts/build.sh consul ui-docker: ui-build-image @@ -165,7 +168,6 @@ ui-docker: ui-build-image ui-legacy-docker: ui-legacy-build-image @$(SHELL) $(CURDIR)/build-support/scripts/build.sh ui-legacy -release-docker: ui-docker ui-legacy-docker static-assets-docker go-docker .PHONY: all ci bin dev dist cov test cover format vet ui static-assets tools vendorfmt -.PHONY: docker-images go-build-iamge ui-build-image ui-legacy-build-image static-assets-docker go-docker ui-docker ui-legacy-docker release-docker +.PHONY: docker-images go-build-image ui-build-image ui-legacy-build-image static-assets-docker consul-docker ui-docker ui-legacy-docker version diff --git a/build-support/functions/00-vars.sh b/build-support/functions/00-vars.sh new file mode 100644 index 000000000..2b6c53afa --- /dev/null +++ b/build-support/functions/00-vars.sh @@ -0,0 +1,20 @@ +# GPG Key ID to use for publically released builds +HASHICORP_GPG_KEY="348FFC4C" + +# Default Image Names +UI_BUILD_CONTAINER_DEFAULT="consul-build-ui" +UI_LEGACY_BUILD_CONTAINER_DEFAULT="consul-build-ui-legacy" +GO_BUILD_CONTAINER_DEFAULT="consul-build-go" + +# Whether to colorize shell output +COLORIZE=1 + + +# determine GOPATH and the first GOPATH to use for intalling binaries +GOPATH=${GOPATH:-$(go env GOPATH)} +case $(uname) in + CYGWIN*) + GOPATH="$(cygpath $GOPATH)" + ;; 
+esac +MAIN_GOPATH=$(cut -d: -f1 <<< "${GOPATH}") diff --git a/build-support/functions/01-util.sh b/build-support/functions/01-util.sh new file mode 100644 index 000000000..34a79740b --- /dev/null +++ b/build-support/functions/01-util.sh @@ -0,0 +1,186 @@ +function err { + if test "${COLORIZE}" -eq 1 + then + tput bold + tput setaf 1 + fi + + echo $@ 1>&2 + + if test "${COLORIZE}" -eq 1 + then + tput sgr0 + fi +} + +function status { + if test "${COLORIZE}" -eq 1 + then + tput bold + tput setaf 4 + fi + + echo $@ + + if test "${COLORIZE}" -eq 1 + then + tput sgr0 + fi +} + +function status_stage { + if test "${COLORIZE}" -eq 1 + then + tput bold + tput setaf 2 + fi + + echo $@ + + if test "${COLORIZE}" -eq 1 + then + tput sgr0 + fi +} + +function is_set { + # Arguments: + # $1 - string value to check its truthiness + # + # Return: + # 0 - is truthy (backwards I know but allows syntax like `if is_set ` to work) + # 1 - is not truthy + + local val=$(tr '[:upper:]' '[:lower:]' <<< "$1") + case $val in + 1 | t | true | y | yes) + return 0 + ;; + *) + return 1 + ;; + esac +} + +function have_gpg_key { + # Arguments: + # $1 - GPG Key id to check if we have installed + # + # Return: + # 0 - success (we can use this key for signing) + # * - failure (key cannot be used) + + gpg --list-secret-keys $1 >dev/null 2>&1 + return $? +} + +function parse_version { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - boolean value for whether to omit the release version from the version string + # + # Return: + # 0 - success (will write the version to stdout) + # * - error (no version output) + # + # Notes: + # If the GOTAGS environment variable is present then it is used to determine which + # version file to use for parsing. + # If the GIT_DESCRIBE environment variable is present then it is used as the version + # If the GIT_COMMIT environment variable is preset it will be added to the end of + # the version string. 
+ + local vfile="${1}/version/version.go" + + # ensure the version file exists + if ! test -f "${vfile}" + then + err "Error - File not found: ${vfile}" + return 1 + fi + + # Get the main version out of the source file + version_main=$(awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) + release_main=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) + + # try to determine the version if we have build tags + for tag in "$GOTAGS" + do + for vfile in $(ls "${1}/version/version_*.go" 2> /dev/null| sort) + do + if grep -q "// +build $tag" $file + then + version_main=$(awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) + release_main=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) + fi + done + done + + version= + + # override the version from source with the value of the GIT_DESCRIBE env var if present + if test -n "$GIT_DESCRIBE" + then + version=$GIT_DESCRIBE + fi + + if ! 
is_set $2 + then + # Get the release version out of the source file + release=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) + + # When no GIT_DESCRIBE env var is present and no release is in the source then we + # are definitely in dev mode + if test -z "$GIT_DESCRIBE" -a -z "$release" + then + release="dev" + fi + + # Add the release to the version + if test -n "$release" + then + version="${version}-${release}" + + # add the git commit to the version + if test -n "$GIT_COMMIT" + then + version="${version} (${GIT_COMMIT})" + fi + fi + fi + + # Output the version + echo "$version" | tr -d "'" + return 0 +} + +function get_version { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - Whether the release version should be parsed from source (optional) + # + # Returns: + # 0 - success (the version is also echoed to stdout) + # 1 - error + # + # Notes: + # If a VERSION environment variable is present it will override any parsing of the version from the source + # In addition to processing the main version.go, version_*.go files will be processed if they have + # a Go build tag that matches the one in the GOTAGS environment variable. This tag processing is + # primitive though and will not match complex build tags in the files with negation etc. + + local vers="$VERSION" + if test -z "$vers" + then + # parse the OSS version from version.go + vers="$(parse_version ${1} ${2})" + fi + + if test -z "$vers" + then + return 1 + else + echo $vers + return 0 + fi +} \ No newline at end of file diff --git a/build-support/functions/02-build.sh b/build-support/functions/02-build.sh new file mode 100644 index 000000000..166a297e7 --- /dev/null +++ b/build-support/functions/02-build.sh @@ -0,0 +1,382 @@ +function refresh_docker_images { + # Arguments: + # $1 - Path to top level Consul source + # $2 - Which make target to invoke (optional) + # + # Return: + # 0 - success + # * - failure + + if ! 
test -d "$1" + then + err "ERROR: '$1' is not a directory. refresh_docker_images must be called with the path to the top level source as the first argument'" + return 1 + fi + + local sdir="$1" + local targets="$2" + + test -n "${targets}" || targets="images" + + make -C "${sdir}/build-support/docker" $targets + return $? +} + +function build_ui { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - The docker image to run the build within (optional) + # + # Returns: + # 0 - success + # * - error + # + # Notes: + # Use the GIT_COMMIT environment variable to pass off to the build + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. build_ui must be called with the path to the top level source as the first argument'" + return 1 + fi + + local image_name=${UI_BUILD_CONTAINER_DEFAULT} + if test -n "$2" + then + image_name="$2" + fi + + local sdir="$1" + local ui_dir="${1}/ui-v2" + + # parse the version + version=$(parse_version "${sdir}") + + local commit_hash="${GIT_COMMIT}" + if test -z "${commit_hash}" + then + commit_hash=$(git rev-parse --short HEAD) + fi + + # make sure we run within the ui dir + pushd ${ui_dir} > /dev/null + + status "Creating the UI Build Container with image: ${image_name}" + local container_id=$(docker create -it -e "CONSUL_GIT_SHA=${commit_hash}" -e "CONSUL_VERSION=${version}" ${image_name}) + local ret=$? + if test $ret -eq 0 + then + status "Copying the source from '${ui_dir}' to /consul-src within the container" + ( + docker cp . ${container_id}:/consul-src && + status "Running build in container" && docker start -i ${container_id} && + rm -rf ${1}/ui-v2/dist && + status "Copying back artifacts" && docker cp ${container_id}:/consul-src/dist ${1}/ui-v2/dist + ) + ret=$? 
+ docker rm ${container_id} > /dev/null + fi + + if test $ret -eq 0 + then + rm -rf ${1}/pkg/web_ui/v2 + cp -r ${1}/ui-v2/dist ${1}/pkg/web_ui/v2 + fi + popd > /dev/null + return $ret +} + +function build_ui_legacy { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - The docker image to run the build within (optional) + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. build_ui_legacy must be called with the path to the top level source as the first argument'" + return 1 + fi + + local sdir="$1" + local ui_legacy_dir="${sdir}/ui" + + local image_name=${UI_LEGACY_BUILD_CONTAINER_DEFAULT} + if test -n "$2" + then + image_name="$2" + fi + + pushd ${ui_legacy_dir} > /dev/null + status "Creating the Legacy UI Build Container with image: ${image_name}" + rm -r ${sdir}/pkg/web_ui/v1 >/dev/null 2>&1 + mkdir -p ${sdir}/pkg/web_ui/v1 + local container_id=$(docker create -it ${image_name}) + local ret=$? + if test $ret -eq 0 + then + status "Copying the source from '${ui_legacy_dir}' to /consul-src/ui within the container" + ( + docker cp . ${container_id}:/consul-src/ui && + status "Running build in container" && + docker start -i ${container_id} && + status "Copying back artifacts" && + docker cp ${container_id}:/consul-src/pkg/web_ui ${sdir}/pkg/web_ui/v1 + ) + ret=$? + docker rm ${container_id} > /dev/null + fi + popd > /dev/null + return $ret +} + +function build_assetfs { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - The docker image to run the build within (optional) + # + # Returns: + # 0 - success + # * - error + # + # Note: + # The GIT_COMMIT, GIT_DIRTY and GIT_DESCRIBE environment variables will be used if present + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. 
build_assetfs must be called with the path to the top level source as the first argument'" + return 1 + fi + + local sdir="$1" + local image_name=${GO_BUILD_CONTAINER_DEFAULT} + if test -n "$2" + then + image_name="$2" + fi + + pushd ${sdir} > /dev/null + status "Creating the Go Build Container with image: ${image_name}" + local container_id=$(docker create -it -e GIT_COMMIT=${GIT_COMMIT} -e GIT_DIRTY=${GIT_DIRTY} -e GIT_DESCRIBE=${GIT_DESCRIBE} ${image_name} make static-assets ASSETFS_PATH=bindata_assetfs.go) + local ret=$? + if test $ret -eq 0 + then + status "Copying the sources from '${sdir}/(pkg|GNUmakefile)' to /go/src/github.com/hashicorp/consul/pkg" + ( + tar -c pkg/web_ui GNUmakefile | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul && + status "Running build in container" && docker start -i ${container_id} && + status "Copying back artifacts" && docker cp ${container_id}:/go/src/github.com/hashicorp/consul/bindata_assetfs.go ${sdir}/agent/bindata_assetfs.go + ) + ret=$? + docker rm ${container_id} > /dev/null + fi + popd >/dev/null + return $ret +} + +function build_consul_post { + # Arguments + # $1 - Path to the top level Consul source + # $2 - build suffix (Optional) + # + # Returns: + # 0 - success + # * - error + # + # Notes: + # pkg/bin is where to place binary packages + # pkg.bin.new is where the just built binaries are located + # bin is where to place the local systems versions + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. 
build_consul_post must be called with the path to the top level source as the first argument'" + return 1 + fi + + local sdir="$1" + + pushd "${sdir}" > /dev/null + + # recreate the pkg dir + rm -r pkg/bin/* 2> /dev/null + mkdir -p pkg/bin 2> /dev/null + + # move all files in pkg.new into pkg + cp -r pkg.bin.new/* pkg/bin/ + rm -r pkg.bin.new + + DEV_PLATFORM="./pkg/bin/$(go env GOOS)_$(go env GOARCH)${2}" + for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f) + do + # recreate the bin dir + rm -r bin/* 2> /dev/null + mkdir -p bin 2> /dev/null + + cp ${F} bin/ + cp ${F} ${MAIN_GOPATH}/bin + done + + popd > /dev/null + + return 0 +} + +function build_consul { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - build suffix (optional - must specify if needing to specify the docker image) + # $3 - The docker image to run the build within (optional) + # + # Returns: + # 0 - success + # * - error + # + # Note: + # The GOLDFLAGS and GOTAGS environment variables will be used if set + # If the CONSUL_DEV environment var is truthy only the local platform/architecture is built. + # If the XC_OS or the XC_ARCH environment vars are present then only those platforms/architectures + # will be built. Otherwise all supported platform/architectures are built + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. 
build_consul must be called with the path to the top level source as the first argument'" + return 1 + fi + + local sdir="$1" + local build_suffix="$2" + local image_name=${GO_BUILD_CONTAINER_DEFAULT} + if test -n "$3" + then + image_name="$3" + fi + + pushd ${sdir} > /dev/null + status "Creating the Go Build Container with image: ${image_name}" + if is_set "${CONSUL_DEV}" + then + if test -z "${XC_OS}" + then + XC_OS=$(go env GOOS) + fi + + if test -z "${XC_ARCH}" + then + XC_ARCH=$(go env GOARCH) + fi + fi + XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"} + XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"} + + local container_id=$(docker create -it -e CGO_ENABLED=0 ${image_name} gox -os="${XC_OS}" -arch="${XC_ARCH}" -osarch="!darwin/arm !darwin/arm64" -ldflags "${GOLDFLAGS}" -output "pkg/bin/{{.OS}}_{{.Arch}}${build_suffix}/consul" -tags="${GOTAGS}") + ret=$? + + if test $ret -eq 0 + then + status "Copying the source from '${sdir}' to /go/src/github.com/hashicorp/consul/pkg" + ( + tar -c $(ls | grep -v "ui\|ui-v2\|website\|bin\|.git") | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul && + status "Running build in container" && + docker start -i ${container_id} && + status "Copying back artifacts" && + docker cp ${container_id}:/go/src/github.com/hashicorp/consul/pkg/bin pkg.bin.new + ) + ret=$? + docker rm ${container_id} > /dev/null + + if test $ret -eq 0 + then + build_consul_post "${sdir}" "${build_suffix}" + ret=$? + else + rm -r pkg.bin.new 2> /dev/null + fi + fi + popd > /dev/null + return $ret +} + +function build_consul_local { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - Space separated string of OSes to build. If empty will use env vars for determination. + # $3 - Space separated string of architectures to build. If empty will use env vars for determination. 
+ # $4 - build suffix (optional) + # + # Returns: + # 0 - success + # * - error + # + # Note: + # The GOLDFLAGS and GOTAGS environment variables will be used if set + # If the CONSUL_DEV environment var is truthy only the local platform/architecture is built. + # If the XC_OS or the XC_ARCH environment vars are present then only those platforms/architectures + # will be built. Otherwise all supported platform/architectures are built + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. build_consul must be called with the path to the top level source as the first argument'" + return 1 + fi + + local sdir="$1" + local build_os="$2" + local build_arch="$3" + local build_suffix="$4" + + pushd ${sdir} > /dev/null + if is_set "${CONSUL_DEV}" + then + if test -z "${XC_OS}" + then + XC_OS=$(go env GOOS) + fi + + if test -z "${XC_ARCH}" + then + XC_ARCH=$(go env GOARCH) + fi + fi + XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"} + XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"} + + if test -z "${build_os}" + then + build_os="${XC_OS}" + fi + + if test -z "${build_arch}" + then + build_arch="${XC_ARCH}" + fi + + status_stage "==> Building Consul - OSes: ${build_os}, Architectures: ${build_arch}" + mkdir pkg.bin.new 2> /dev/null + CGO_ENABLED=0 gox \ + -os="${build_os}" \ + -arch="${build_arch}" \ + -osarch="!darwin/arm !darwin/arm64" \ + -ldflags="${GOLDFLAGS}" \ + -output "pkg.bin.new/{{.OS}}_{{.Arch}}${build_suffix}/consul" \ + -tags="${GOTAGS}" \ + . + + if test $? -ne 0 + then + err "ERROR: Failed to build Consul" + rm -r pkg.bin.new + return 1 + fi + + build_consul_post "${sdir}" "${build_suffix}" + if test $? 
-ne 0 + then + err "ERROR: Failed postprocessing Consul binaries" + return 1 + fi + return 0 +} \ No newline at end of file diff --git a/build-support/functions/03-release.sh b/build-support/functions/03-release.sh new file mode 100644 index 000000000..c58281c9c --- /dev/null +++ b/build-support/functions/03-release.sh @@ -0,0 +1,299 @@ +function tag_release { + # Arguments: + # $1 - Path to top level consul source + # $2 - Version string to use for tagging the release + # $3 - Alternative GPG key id used for signing the release commit (optional) + # + # Returns: + # 0 - success + # * - error + # + # Notes: + # If the RELEASE_UNSIGNED environment variable is set then no gpg signing will occur + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. tag_release must be called with the path to the top level source as the first argument'" + return 1 + fi + + if test -z "$2" + then + err "ERROR: tag_release must be called with a version number as the second argument" + return 1 + fi + + # determine whether the gpg key to use is being overridden + local gpg_key=${HASHICORP_GPG_KEY} + if test -n "$3" + then + gpg_key=$3 + fi + + pushd "$1" > /dev/null + local ret=0 + + # perform an usngined release if requested (mainly for testing locally) + if is_set "$RELEASE_UNSIGNED" + then + ( + git commit --allow-empty -a -m "Release v${2}" && + git tag -a -m "Version ${2}" "v${2}" master + ) + ret=$? + # perform a signed release (official releases should do this) + elif have_gpg_key ${gpg_key} + then + ( + git commit --allow-empty -a --gpg-sign=${gpg_key} -m "Release v${2}" && + git tag -a -m "Version ${2}" -s -u ${gpg_key} "v${2}" master + ) + ret=$? 
+ # unsigned release not requested and gpg key isn't useable + else + err "ERROR: GPG key ${gpg_key} is not in the local keychain - to continue set RELEASE_UNSIGNED=1 in the env" + ret=1 + fi + popd > /dev/null + return $ret +} + +function package_release { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - Version to use in the names of the zip files (optional) + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. package_release must be called with the path to the top level source as the first argument'" + return 1 + fi + + local vers="${2}" + if test -z "${vers}" + then + vers=$(get_version $1 true) + ret=$? + if test "$ret" -ne 0 + then + err "ERROR: failed to determine the version." + return $ret + fi + fi + + local sdir="$1" + local ret=0 + + rm -rf "${sdir}/pkg/dist" > /dev/null 2>&1 + mkdir -p "${sdir}/pkg/dist" >/dev/null 2>&1 + for platform in $(find "${sdir}/pkg/bin" -mindepth 1 -maxdepth 1 -type d) + do + local os_arch=$(basename $platform) + local dest="${sdir}/pkg/dist/consul_${vers}_${os_arch}.zip" + status "Compressing ${os_arch} directory into ${dest}" + pushd "${platform}" > /dev/null + zip "${sdir}/pkg/dist/consul_${vers}_${os_arch}.zip" ./* + ret=$? + popd > /dev/null + + if test "$ret" -ne 0 + then + break + fi + done + + return $ret +} + +function shasum_release { + # Arguments: + # $1 - Path to directory containing the files to shasum + # $2 - File to output sha sums to + # + # Returns: + # 0 - success + # * - failure + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory and shasum_release requires passing a directory as the first argument" + return 1 + fi + + if test -z "$2" + then + err "ERROR: shasum_release requires a second argument to be the filename to output the shasums to but none was given" + return 1 + fi + + pushd $1 > /dev/null + shasum -a256 * > "$2" + ret=$? 
+ popd >/dev/null + + return $ret +} + +function sign_release { + # Arguments: + # $1 - File to sign + # $2 - Alternative GPG key to use for signing + # + # Returns: + # 0 - success + # * - failure + + # determine whether the gpg key to use is being overridden + local gpg_key=${HASHICORP_GPG_KEY} + if test -n "$2" + then + gpg_key=$2 + fi + + gpg --default-key "${gpg_key}" --detach-sig "$1" + return $? +} + +function build_consul_release { + build_consul "$1" "" "$2" +} + +function build_release { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - boolean whether to tag the release yet + # $3 - boolean whether to build the binaries + # $4 - boolean whether to generate the sha256 sums + # $5 - alternative gpg key to use for signing operations (optional) + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. build_release must be called with the path to the top level source as the first argument'" + return 1 + fi + + if test -z "$2" -o -z "$3" -o -z "$4" + then + err "ERROR: build_release requires 4 arguments to be specified: " + return 1 + fi + + local sdir="$1" + local do_tag="$2" + local do_build="$3" + local do_sha256="$4" + local gpg_key="$5" + + local vers=$(get_version ${sdir} true) + if test $? -ne 0 + then + err "Please specify a version (couldn't find one based on build tags)." + return 1 + fi + + # Make sure we arent in dev mode + unset CONSUL_DEV + + if is_set "${do_build}" + then + status_stage "==> Refreshing Docker Build Images" + refresh_docker_images "${sdir}" + if test $? -ne 0 + then + err "ERROR: Failed to refresh docker images" + return 1 + fi + + status_stage "==> Building Legacy UI for version ${vers}" + build_ui_legacy "${sdir}" "${UI_LEGACY_BUILD_TAG}" + if test $? -ne 0 + then + err "ERROR: Failed to build the legacy ui" + return 1 + fi + + status_stage "==> Building UI for version ${vers}" + build_ui "${sdir}" "${UI_BUILD_TAG}" + if test $? 
-ne 0 + then + err "ERROR: Failed to build the ui" + return 1 + fi + + status_stage "==> Building Static Assets for version ${vers}" + build_assetfs "${sdir}" "${GO_BUILD_TAG}" + if test $? -ne 0 + then + err "ERROR: Failed to build the static assets" + return 1 + fi + + if is_set "${do_tag}" + then + git add "${sdir}/agent/bindata_assetfs.go" + if test $? -ne 0 + then + err "ERROR: Failed to git add the assetfs file" + return 1 + fi + fi + fi + + if is_set "${do_tag}" + then + status_stage "==> Tagging version ${vers}" + tag_release "${sdir}" "${vers}" "${gpg_key}" + if test $? -ne 0 + then + err "ERROR: Failed to tag the release" + return 1 + fi + fi + + if is_set "${do_build}" + then + status_stage "==> Building Consul for version ${vers}" + build_consul_release "${sdir}" "${GO_BUILD_TAG}" + if test $? -ne 0 + then + err "ERROR: Failed to build the Consul binaries" + return 1 + fi + + status_stage "==> Packaging up release binaries" + package_release "${sdir}" "${vers}" + if test $? -ne 0 + then + err "ERROR: Failed to package the release binaries" + return 1 + fi + fi + + status_stage "==> Generating SHA 256 Hashes for Binaries" + shasum_release "${sdir}/pkg/dist" "consul_${vers}_SHA256SUMS" + if test $? -ne 0 + then + err "ERROR: Failed to generate SHA 256 hashes for the release" + return 1 + fi + + if is_set "${do_sha256}" + then + sign_release "${sdir}/pkg/dist/consul_${vers}_SHA256SUMS" "${gpg_key}" + if test $? -ne 0 + then + err "ERROR: Failed to sign the SHA 256 hashes file" + return 1 + fi + fi + + return 0 +} \ No newline at end of file diff --git a/build-support/scripts/build.sh b/build-support/scripts/build.sh index f1c387b22..10fc797c9 100755 --- a/build-support/scripts/build.sh +++ b/build-support/scripts/build.sh @@ -1,39 +1,381 @@ #!/bin/bash +SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null SCRIPT_DIR=$(pwd) pushd ../.. 
> /dev/null SOURCE_DIR=$(pwd) popd > /dev/null +pushd ../functions > /dev/null +FN_DIR=$(pwd) +popd > /dev/null popd > /dev/null source "${SCRIPT_DIR}/functions.sh" +function can_parse_option { + local allowed="$1" + local command="$2" + local options="$3" + + if test ${allowed} -ne 1 + then + err "ERROR: subcommand ${command} does not support the ${options} options" + return 1 + fi + return 0 +} + +function check_duplicate { + local is_dup="$1" + local command="$2" + local options="$3" + + if test ${is_dup} -ne 0 + then + err "ERROR: options ${options} may not be given more than once to the subcommand ${command}" + return 1 + fi + return 0 +} + +function option_check { + can_parse_option "$1" "$3" "$4" && check_duplicate "$2" "$3" "$4" + return $? +} + +function get_option_value { + # Arguments: + # $1 - bool whether the option should be allowed + # $2 - bool whether the option has been specified already + # $3 - the option value + # $4 - the command being executed + # $5 - the option names to use for logging + # + # Returns: + # 0 - success + # * - failure + + option_check "$1" "$2" "$4" "$5" || return 1 + + if test -z "$3" + then + err "ERROR: options ${5} for subcommand ${4} require an argument but none was provided" + return 1 + fi + + echo "$3" + return 0 +} + +function usage { +cat <<-EOF +Usage: ${SCRIPT_NAME} [] + +Subcommands: + assetfs: Builds the bindata_assetfs.go file from previously build UI artifacts + + Options: + -i | --image IMAGE Alternative Docker image to run the build within. + Defaults to ${GO_BUILD_CONTAINER_DEFAULT} + + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" + + -r | --refresh Enables refreshing the docker image prior to building. + + consul: Builds the main Consul binary. This assumes the assetfs is up to date: + + Options: + -i | --image IMAGE Alternative Docker image to run the build within. + Defaults to ${GO_BUILD_CONTAINER_DEFAULT} + + -s | --source DIR Path to source to build. 
+ Defaults to "${SOURCE_DIR}" + + -r | --refresh Enables refreshing the docker image prior to building. + + consul-local: Builds the main Consul binary on the local system (no docker) + + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" + + -o | --build-os OS Space separated string of OSes to build + + -a | --build-arch ARCH Space separated string of architectures to build + + release: Performs a release build. + + Options: + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" + + -t | --tag BOOL Whether to add a release commit and tag the build + Defaults to 1. + + -b | --build BOOL Whether to perform the build of the ui's, assetfs and + binaries. Defaults to 1. + + -S | --sign BOOL Whether to sign the generated SHA256SUMS file. + Defaults to 1. + + -g | --gpg-key KEY Alternative GPG key to use for signing operations. + Defaults to ${HASHICORP_GPG_KEY} + + ui: Builds the latest UI. + + Options: + -i | --image IMAGE Alternative Docker image to run the build within. + Defaults to ${UI_BUILD_CONTAINER_DEFAULT} + + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" + + -r | --refresh Enables refreshing the docker image prior to building. + + ui-legacy: Builds the legacy UI + + Options: + -i | --image IMAGE Alternative Docker image to run the build within. + Defaults to ${UI_LEGACY_BUILD_CONTAINER_DEFAULT} + + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" + + -r | --refresh Enables refreshing the docker image prior to building. + + version: Prints out the version parsed from source. + + Options: + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" +EOF +} + function main { - case "$1" in - consul ) - build_consul "${SOURCE_DIR}" "${GO_BUILD_TAG}" - return $? 
+ declare build_fn + declare sdir + declare image + declare -i refresh_docker=0 + declare -i rel_tag + declare -i rel_build + declare -i rel_sign + declare rel_gpg_key="" + declare build_os + declare build_arch + declare -i vers_release + + declare -i use_refresh=1 + declare -i default_refresh=0 + declare -i use_sdir=1 + declare default_sdir="${SOURCE_DIR}" + declare -i use_image=0 + declare default_image="" + declare -i use_rel=0 + declare -i default_rel_tag=1 + declare -i default_rel_build=1 + declare -i default_rel_sign=1 + declare default_rel_gpg_key="${HASHICORP_GPG_KEY}" + declare -i use_xc=0 + declare default_build_os="" + declare default_build_arch="" + declare -i use_vers_rel + declare -i default_vers_rel=1 + + declare command="$1" + shift + + case "${command}" in + consul ) + use_image=1 + default_image="${GO_BUILD_CONTAINER_DEFAULT}" + ;; + consul-local ) + use_xc=1 ;; ui ) - build_ui "${SOURCE_DIR}" "${UI_BUILD_TAG}" - return $? + use_image=1 + default_image="${UI_BUILD_CONTAINER_DEFAULT}" ;; ui-legacy ) - build_ui_legacy "${SOURCE_DIR}" "${UI_LEGACY_BUILD_TAG}" - return $? + use_image=1 + default_image="${UI_LEGACY_BUILD_CONTAINER_DEFAULT}" ;; version ) - parse_version "${SOURCE_DIR}" - return $? + use_refresh=0 + use_vers_rel=1 ;; assetfs ) - build_assetfs "${SOURCE_DIR}" "${GO_BUILD_TAG}" - return $? 
+         use_image=1
+         default_image="${GO_BUILD_CONTAINER_DEFAULT}"
+         ;;
+      release )
+         use_rel=1
+         use_refresh=0
+         ;;
+      -h | --help)
+         usage
+         return 0
          ;;
       *)
-         echo "Unkown build: '$1' - possible values are 'consul', 'ui', 'ui-legacy', 'version' and 'assetfs'" 1>&2
+         err "Unknown subcommand: '${command}' - possible values are 'consul', 'ui', 'ui-legacy', 'assetfs', 'version' and 'release'"
          return 1
+         ;;
+   esac
+
+   declare -i have_image_arg=0
+   declare -i have_sdir_arg=0
+   declare -i have_rel_tag_arg=0
+   declare -i have_rel_build_arg=0
+   declare -i have_rel_sign_arg=0
+   declare -i have_rel_gpg_key_arg=0
+   declare -i have_refresh_arg=0
+   declare -i have_build_os_arg=0
+   declare -i have_build_arch_arg=0
+   declare -i have_vers_rel_arg=0
+
+   while test $# -gt 0
+   do
+      case $1 in
+         -h | --help )
+            usage
+            return 0
+            ;;
+         -o | --build-os )
+            build_os=$(get_option_value "${use_xc}" "${have_build_os_arg}" "$2" "${command}" "-o/--build-os") || return 1
+            have_build_os_arg=1
+            shift 2
+            ;;
+         -a | --build-arch)
+            build_arch=$(get_option_value "${use_xc}" "${have_build_arch_arg}" "$2" "${command}" "-a/--build-arch") || return 1
+            have_build_arch_arg=1
+            shift 2
+            ;;
+         -R | --release )
+            option_check "${use_vers_rel}" "${have_vers_rel_arg}" "${command}" "-R/--release" || return 1
+            have_vers_rel_arg=1
+            vers_release=0
+            shift
+            ;;
+         -r | --refresh)
+            option_check "${use_refresh}" "${have_refresh_arg}" "${command}" "-r/--refresh" || return 1
+            have_refresh_arg=1
+            refresh_docker=1
+            shift
+            ;;
+         -i | --image )
+            image=$(get_option_value "${use_image}" "${have_image_arg}" "$2" "${command}" "-i/--image") || return 1
+            have_image_arg=1
+            shift 2
+            ;;
+         -s | --source )
+            sdir=$(get_option_value "${use_sdir}" "${have_sdir_arg}" "$2" "${command}" "-s/--source") || return 1
+            if !
 test -d "${sdir}"
+            then
+               err "ERROR: -s/--source is not a path to a top level directory"
+               return 1
+            fi
+            have_sdir_arg=1
+            shift 2
+            ;;
+         -t | --tag )
+            rel_tag=$(get_option_value "${use_rel}" "${have_rel_tag_arg}" "$2" "${command}" "-t/--tag") || return 1
+            have_rel_tag_arg=1
+            shift 2
+            ;;
+         -b | --build )
+            rel_build=$(get_option_value "${use_rel}" "${have_rel_build_arg}" "$2" "${command}" "-b/--build") || return 1
+            have_rel_build_arg=1
+            shift 2
+            ;;
+         -S | --sign )
+            rel_sign=$(get_option_value "${use_rel}" "${have_rel_sign_arg}" "$2" "${command}" "-S/--sign") || return 1
+            have_rel_sign_arg=1
+            shift 2
+            ;;
+         -g | --gpg-key )
+            rel_gpg_key=$(get_option_value "${use_rel}" "${have_rel_gpg_key_arg}" "$2" "${command}" "-g/--gpg-key") || return 1
+            have_rel_gpg_key_arg=1; shift 2
+            ;;
+         *)
+            err "ERROR: Unknown option '$1' for subcommand ${command}"
+            return 1
+            ;;
+      esac
+   done
+
+   test $have_image_arg -ne 1 && image="${default_image}"
+   test $have_sdir_arg -ne 1 && sdir="${default_sdir}"
+   test $have_rel_tag_arg -ne 1 && rel_tag="${default_rel_tag}"
+   test $have_rel_build_arg -ne 1 && rel_build="${default_rel_build}"
+   test $have_rel_sign_arg -ne 1 && rel_sign="${default_rel_sign}"
+   test $have_rel_gpg_key_arg -ne 1 && rel_gpg_key="${default_rel_gpg_key}"
+   test $have_refresh_arg -ne 1 && refresh_docker="${default_refresh}"
+   test $have_build_os_arg -ne 1 && build_os="${default_build_os}"
+   test $have_build_arch_arg -ne 1 && build_arch="${default_build_arch}"
+   test $have_vers_rel_arg -ne 1 && vers_release="${default_vers_rel}"
+
+   case "${command}" in
+      consul )
+         if is_set "${refresh_docker}"
+         then
+            status_stage "==> Refreshing Consul build container image"
+            export GO_BUILD_TAG=${image}
+            refresh_docker_images ${sdir} go-build-image || return 1
+         fi
+         status_stage "==> Building Consul"
+         build_consul "${sdir}" "" "${image}" || return 1
+         ;;
+      consul-local )
+         build_consul_local "${sdir}" "${build_os}" "${build_arch}" "" || return 1
+         ;;
+      ui )
+
+         if is_set "${refresh_docker}"
+ then + status_stage "==> Refreshing UI build container image" + export UI_BUILD_TAG=${image} + refresh_docker_images ${sdir} ui-build-image || return 1 + fi + status_stage "==> Building UI" + build_ui "${sdir}" "${image}" || return 1 + ;; + ui-legacy ) + if is_set "${refresh_docker}" + then + status_stage "==> Refreshing Legacy UI build container image" + export UI_LEGACY_BUILD_TAG=${image} + refresh_docker_images ${sdir} ui-legacy-build-image || return 1 + fi + status_stage "==> Building Legacy UI" + build_ui_legacy "${sdir}" "${image}" || return 1 + ;; + version ) + parse_version "${sdir}" "${vers_release}"|| return 1 + ;; + assetfs ) + if is_set "${refresh_docker}" + then + status_stage "==> Refreshing Consul build container image" + export GO_BUILD_TAG="${image}" + refresh_docker_images ${sdir} go-build-image || return 1 + fi + status_stage "==> Build Static Assets" + build_assetfs "${sdir}" "${image}" || return 1 + ;; + release ) + if is_set "${refresh_docker}" + then + refresh_docker_images ${sdir} || return 1 + fi + build_release "${sdir}" "${rel_tag}" "${rel_build}" "${rel_sign}" "${rel_gpg_key}" || return 1 + ;; + *) + err "Unkown subcommand: '$1' - possible values are 'consul', 'ui', 'ui-legacy', 'assetfs', version' and 'release'" + return 1 + ;; esac + + return 0 } main $@ diff --git a/build-support/scripts/functions.sh b/build-support/scripts/functions.sh index 2b44d2401..ec473662c 100644 --- a/build-support/scripts/functions.sh +++ b/build-support/scripts/functions.sh @@ -1,464 +1,17 @@ -# GPG Key ID to use for publically released builds -HASHICORP_GPG_KEY="348FFC4C" +# +# NOTE: This file is meant to be sourced from other bash scripts/shells +# +# It provides all the scripting around building Consul and the release process -UI_BUILD_CONTAINER_DEFAULT="consul-build-ui" -UI_LEGACY_BUILD_CONTAINER_DEFAULT="consul-build-ui-legacy" +pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null +pushd ../functions > /dev/null +FUNC_DIR=$(pwd) +popd > /dev/null +popd > 
/dev/null -function is_set { - # Arguments: - # $1 - string value to check its truthiness - # - # Return: - # 0 - is truthy (backwards I know but allows syntax like `if is_set ` to work) - # 1 - is not truthy - - local val=$(tr '[:upper:]' '[:lower:]' <<< "$1") - case $val in - 1 | t | true | y | yes) - return 0 - ;; - *) - return 1 - ;; - esac -} +func_sources=$(find ${FUNC_DIR} -type f -mindepth 1 -maxdepth 1 -name "*.sh" | sort -n) -function have_gpg_key { - # Arguments: - # $1 - GPG Key id to check if we have installed - # - # Return: - # 0 - success (we can use this key for signing) - # * - failure (key cannot be used) - - gpg --list-secret-keys $1 >dev/null 2>&1 - return $? -} - -function parse_version { - # Arguments: - # $1 - Path to the top level Consul source - # $2 - boolean value for whether to omit the release version from the version string - # - # Return: - # 0 - success (will write the version to stdout) - # * - error (no version output) - # - # Notes: - # If the GIT_DESCRIBE environment variable is present then it is used as the version - # If the GIT_COMMIT environment variable is preset it will be added to the end of - # the version string. - - local vfile="${1}/version/version.go" - - # ensure the version file exists - if ! test -f "${vfile}" - then - echo "Error - File not found: ${vfile}" 1>&2 - return 1 - fi - - # Get the main version out of the source file - version=$(awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) - - # override the version from source with the value of the GIT_DESCRIBE env var if present - if test -n "$GIT_DESCRIBE" - then - version=$GIT_DESCRIBE - fi - - if ! 
is_set $2 - then - # Get the release version out of the source file - release=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) - - # When no GIT_DESCRIBE env var is present and no release is in the source then we - # are definitely in dev mode - if test -z "$GIT_DESCRIBE" -a -z "$release" - then - release="dev" - fi - - # Add the release to the version - if test -n "$release" - then - version="${version}-${release}" - - # add the git commit to the version - if test -n "$GIT_COMMIT" - then - version="${version} (${GIT_COMMIT})" - fi - fi - fi - - # Output the version - echo "$version" | tr -d "'" - return 0 -} - -function get_version { - # Arguments: - # $1 - Path to the top level Consul source - # $2 - Whether the release version should be parsed from source (optional) - # - # Returns: - # 0 - success (the version is also echoed to stdout) - # 1 - error - # - # Notes: - # If a VERSION environment variable is present it will override any parsing of the version from the source - # In addition to processing the main version.go, version_*.go files will be processed if they have - # a Go build tag that matches the one in the GOTAGS environment variable. This tag processing is - # primitive though and will not match complex build tags in the files with negation etc. 
- - local vers="$VERSION" - if test -z "$vers" - then - # parse the OSS version from version.go - vers="$(parse_version ${1} ${2})" - - # try to determine the version if we have build tags - for tag in "$GOTAGS" - do - for file in $(ls ${1}/version/version_*.go | sort) - do - if grep -q "// +build $tag" $file - then - vers=$(awk -F\" '/Version =/ {print $2; exit}' < $file ) - fi - done - done - fi - - if test -z "$vers" - then - return 1 - else - echo $vers - return 0 - fi -} - -function tag_release { - # Arguments: - # $1 - Version string to use for tagging the release - # $2 - Alternative GPG key id used for signing the release commit (optional) - # - # Returns: - # 0 - success - # * - error - # - # Notes: - # If the RELEASE_UNSIGNED environment variable is set then no gpg signing will occur - - if ! test -d "$1" - then - echo "ERROR: '$1' is not a directory. tag_release must be called with the path to the top level source as the first argument'" 1>&2 - return 1 - fi - - if test -z "$2" - then - echo "ERROR: tag_release must be called with a version number as the second argument" 1>&2 - return 1 - fi - - # determine whether the gpg key to use is being overridden - local gpg_key=${HASHICORP_GPG_KEY} - if test -n "$3" - then - gpg_key=$3 - fi - - pushd "$1" > /dev/null - local ret=0 - - # perform an usngined release if requested (mainly for testing locally) - if is_set "$RELEASE_UNSIGNED" - then - ( - git commit --allow-empty -a -m "Release v${2}" && - git tag -a -m "Version ${2}" "v${2}" master - ) - ret=$? - # perform a signed release (official releases should do this) - elif have_gpg_key ${gpg_key} - then - ( - git commit --allow-empty -a --gpg-sign=${gpg_key} -m "Release v${2}" && - git tag -a -m "Version ${2}" -s -u ${gpg_key} "v${2}" master - ) - ret=$? 
- # unsigned release not requested and gpg key isn't useable - else - echo "ERROR: GPG key ${gpg_key} is not in the local keychain - to continue set RELEASE_UNSIGNED=1 in the env" - ret=1 - fi - popd > /dev/null - return $ret -} - -function build_ui { - # Arguments: - # $1 - Path to the top level Consul source - # $2 - The docker image to run the build within (optional) - # - # Returns: - # 0 - success - # * - error - # - # Notes: - # Use the GIT_COMMIT environment variable to pass off to the build - - if ! test -d "$1" - then - echo "ERROR: '$1' is not a directory. build_ui must be called with the path to the top level source as the first argument'" 1>&2 - return 1 - fi - - local image_name=${UI_BUILD_CONTAINER_DEFAULT} - if test -n "$2" - then - image_name="$2" - fi - - local sdir="$1" - local ui_dir="${1}/ui-v2" - - # parse the version - version=$(parse_version "${sdir}") - - # make sure we run within the ui dir - pushd ${ui_dir} > /dev/null - - echo "Creating the UI Build Container" - local container_id=$(docker create -it -e "CONSUL_GIT_SHA=${GIT_COMMIT}" -e "CONSUL_VERSION=${version}" ${image_name}) - local ret=$? - if test $ret -eq 0 - then - echo "Copying the source from '${ui_dir}' to /consul-src within the container" - ( - docker cp . ${container_id}:/consul-src && - echo "Running build in container" && docker start -i ${container_id} && - rm -rf ${1}/ui-v2/dist && - echo "Copying back artifacts" && docker cp ${container_id}:/consul-src/dist ${1}/ui-v2/dist - ) - ret=$? - docker rm ${container_id} > /dev/null - fi - - if test $ret -eq 0 - then - rm -rf ${1}/pkg/web_ui/v2 - cp -r ${1}/ui-v2/dist ${1}/pkg/web_ui/v2 - fi - popd > /dev/null - return $ret -} - -function build_ui_legacy { - # Arguments: - # $1 - Path to the top level Consul source - # $2 - The docker image to run the build within (optional) - # - # Returns: - # 0 - success - # * - error - - if ! test -d "$1" - then - echo "ERROR: '$1' is not a directory. 
build_ui_legacy must be called with the path to the top level source as the first argument'" 1>&2 - return 1 - fi - - local sdir="$1" - local ui_legacy_dir="${sdir}/ui" - - local image_name=${UI_LEGACY_BUILD_CONTAINER_DEFAULT} - if test -n "$2" - then - image_name="$2" - fi - - pushd ${ui_legacy_dir} > /dev/null - echo "Creating the Legacy UI Build Container" - rm -r ${sdir}/pkg/web_ui/v1 >/dev/null 2>&1 - mkdir -p ${sdir}/pkg/web_ui/v1 - local container_id=$(docker create -it ${image_name}) - local ret=$? - if test $ret -eq 0 - then - echo "Copying the source from '${ui_legacy_dir}' to /consul-src/ui within the container" - ( - docker cp . ${container_id}:/consul-src/ui && - echo "Running build in container" && - docker start -i ${container_id} && - echo "Copying back artifacts" && - docker cp ${container_id}:/consul-src/pkg/web_ui ${sdir}/pkg/web_ui/v1 - ) - ret=$? - docker rm ${container_id} > /dev/null - fi - popd > /dev/null - return $ret -} - -function build_assetfs { - # Arguments: - # $1 - Path to the top level Consul source - # $2 - The docker image to run the build within (optional) - # - # Returns: - # 0 - success - # * - error - # - # Note: - # The GIT_COMMIT, GIT_DIRTY and GIT_DESCRIBE environment variables will be used if present - - if ! test -d "$1" - then - echo "ERROR: '$1' is not a directory. build_assetfs must be called with the path to the top level source as the first argument'" 1>&2 - return 1 - fi - - local sdir="$1" - local image_name=${GO_BUILD_CONTAINER_DEFAULT} - if test -n "$2" - then - image_name="$2" - fi - - pushd ${sdir} > /dev/null - echo "Creating the Go Build Container" - local container_id=$(docker create -it -e GIT_COMMIT=${GIT_COMMIT} -e GIT_DIRTY=${GIT_DIRTY} -e GIT_DESCRIBE=${GIT_DESCRIBE} ${image_name} make static-assets ASSETFS_PATH=bindata_assetfs.go) - local ret=$? 
- if test $ret -eq 0 - then - echo "Copying the sources from '${sdir}/(pkg|GNUmakefile)' to /go/src/github.com/hashicorp/consul/pkg" - ( - tar -c pkg/web_ui GNUmakefile | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul && - echo "Running build in container" && docker start -i ${container_id} && - echo "Copying back artifacts" && docker cp ${container_id}:/go/src/github.com/hashicorp/consul/bindata_assetfs.go ${sdir}/agent/bindata_assetfs.go - ) - ret=$? - docker rm ${container_id} > /dev/null - fi - popd >/dev/null - return $ret -} - -function build_consul { - # Arguments: - # $1 - Path to the top level Consul source - # $2 - The docker image to run the build within (optional) - # - # Returns: - # 0 - success - # * - error - # - # Note: - # The GOLDFLAGS and GOTAGS environment variables will be used if set - # If the CONSUL_DEV environment var is truthy only the local platform/architecture is built. - # If the XC_OS or the XC_ARCH environment vars are present then only those platforms/architectures - # will be built. Otherwise all supported platform/architectures are built - - if ! test -d "$1" - then - echo "ERROR: '$1' is not a directory. build_consul must be called with the path to the top level source as the first argument'" 1>&2 - return 1 - fi - - local sdir="$1" - local image_name=${GO_BUILD_CONTAINER_DEFAULT} - if test -n "$2" - then - image_name="$2" - fi - - pushd ${sdir} > /dev/null - echo "Creating the Go Build Container" - if is_set "${CONSUL_DEV}" - then - XC_OS=$(go_env GOOS) - XC_ARCH=$(go env GOARCH) - else - XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"} - XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"} - fi - - local container_id=$(docker create -it ${image_name} gox -os="${XC_OS}" -arch="${XC_ARCH}" -osarch="!darwin/arm !darwin/arm64" -ldflags "${GOLDFLAGS}" -output "pkg/{{.OS}}_{{.Arch}}/consul" -tags="${GOTAGS}") - ret=$? 
- - if test $ret -eq 0 - then - echo "Copying the source from '${sdir}' to /go/src/github.com/hashicorp/consul/pkg" - ( - tar -c $(ls | grep -v "ui\|ui-v2\|website\|bin\|.git") | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul && - echo "Running build in container" && - docker start -i ${container_id} && - echo "Copying back artifacts" && - docker cp ${container_id}:/go/src/github.com/hashicorp/consul/pkg/ pkg.new - ) - ret=$? - docker rm ${container_id} > /dev/null - - DEV_PLATFORM="./pkg.new/$(go env GOOS)_$(go env GOARCH)" - for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f) - do - cp ${F} bin/ - cp ${F} ${GOPATH}/bin - done - - cp -r pkg.new/* pkg/ - rm -r pkg.new - fi - popd > /dev/null - return $ret -} - -function package_release { - # Arguments: - # $1 - Path to the top level Consul source - # $2 - Version to use in the names of the zip files (optional) - # - # Returns: - # 0 - success - # * - error - - if ! test -d "$1" - then - echo "ERROR: '$1' is not a directory. package_release must be called with the path to the top level source as the first argument'" 1>&2 - return 1 - fi - - local vers="${2}" - if test -z "${vers}" - then - vers=$(get_version $1 false) - ret=$? - if test "$ret" -ne 0 - then - echo "ERROR: failed to determine the version." 1>&2 - return $ret - fi - fi - - local sdir="$1" - local ret=0 - for platform in $(find "${sdir}/pkg" -mindepth 1 -maxdepth 1 -type d) - do - local os_arch=$(basename $platform) - pushd "${platform}" > /dev/null - zip "${sdir}/pkg/dist/consul_${vers}_${os_arch}.zip" ./* - ret=$? 
- popd > /dev/null - - if test "$ret" -ne 0 - then - break - fi - done - - return $ret -} \ No newline at end of file +for src in $func_sources +do + source $src +done \ No newline at end of file diff --git a/scripts/build.sh b/scripts/build.sh deleted file mode 100755 index 038b210fd..000000000 --- a/scripts/build.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash -# -# This script builds the application from source for multiple platforms. -set -e - -export CGO_ENABLED=0 - -# Get the parent directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" - -# Change into that directory -cd "$DIR" - -# Determine the arch/os combos we're building for -XC_ARCH=${XC_ARCH:-"386 amd64 arm arm64"} -XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"} - -# Delete the old dir -echo "==> Removing old directory..." -rm -f bin/* -rm -rf pkg/* -mkdir -p bin/ - -# If it's dev mode, only build for ourself -if [ "${CONSUL_DEV}x" != "x" ]; then - XC_OS=$(go env GOOS) - XC_ARCH=$(go env GOARCH) -fi - -# Build! -echo "==> Building..." -"`which gox`" \ - -os="${XC_OS}" \ - -arch="${XC_ARCH}" \ - -osarch="!darwin/arm !darwin/arm64" \ - -ldflags "${GOLDFLAGS}" \ - -output "pkg/{{.OS}}_{{.Arch}}/consul" \ - -tags="${GOTAGS}" \ - . - -# Move all the compiled things to the $GOPATH/bin -GOPATH=${GOPATH:-$(go env GOPATH)} -case $(uname) in - CYGWIN*) - GOPATH="$(cygpath $GOPATH)" - ;; -esac -OLDIFS=$IFS -IFS=: MAIN_GOPATH=($GOPATH) -IFS=$OLDIFS - -# Copy our OS/Arch to the bin/ directory -DEV_PLATFORM="./pkg/$(go env GOOS)_$(go env GOARCH)" -for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f); do - cp ${F} bin/ - cp ${F} ${MAIN_GOPATH}/bin/ -done - -if [ "${CONSUL_DEV}x" = "x" ]; then - # Zip and copy to the dist dir - echo "==> Packaging..." 
- for PLATFORM in $(find ./pkg -mindepth 1 -maxdepth 1 -type d); do - OSARCH=$(basename ${PLATFORM}) - echo "--> ${OSARCH}" - - pushd $PLATFORM >/dev/null 2>&1 - zip ../${OSARCH}.zip ./* - popd >/dev/null 2>&1 - done -fi - -# Done! -echo -echo "==> Results:" -ls -hl bin/ diff --git a/scripts/consul-builder/Dockerfile b/scripts/consul-builder/Dockerfile deleted file mode 100644 index 24a6a21df..000000000 --- a/scripts/consul-builder/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -FROM ubuntu:bionic - -ENV GOVERSION 1.10.1 - -RUN apt-get update -y && \ - apt-get install --no-install-recommends -y -q \ - build-essential \ - ca-certificates \ - curl \ - git \ - ruby \ - ruby-dev \ - zip \ - zlib1g-dev \ - nodejs \ - npm && \ - gem install bundler && \ - npm install --global yarn && \ - npm install --global ember-cli - -RUN mkdir /goroot && \ - mkdir /gopath && \ - curl https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz | \ - tar xzf - -C /goroot --strip-components=1 - -# We want to ensure that release builds never have any cgo dependencies so we -# switch that off at the highest level. -ENV CGO_ENABLED 0 -ENV GOPATH /gopath -ENV GOROOT /goroot -ENV PATH $GOROOT/bin:$GOPATH/bin:$PATH - -RUN mkdir -p $GOPATH/src/github.com/hashicorp/consul -WORKDIR $GOPATH/src/github.com/hashicorp/consul diff --git a/scripts/dist.sh b/scripts/dist.sh deleted file mode 100755 index 81e8ac935..000000000 --- a/scripts/dist.sh +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the version from the environment, or try to figure it out from the build tags. -# We process the files in the same order Go does to find the last matching tag. -if [ -z $VERSION ]; then - # get the OSS version from version.go - VERSION=$(awk -F\" '/Version =/ { print $2; exit }' Building version $VERSION..." - -# Get the parent directory of where this script is. 
-SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" - -# Change into that dir because we expect that. -cd $DIR - -# Generate the tag. -if [ -z $NOTAG ]; then - echo "==> Tagging..." - git commit --allow-empty -a --gpg-sign=348FFC4C -m "Release v$VERSION" - git tag -a -m "Version $VERSION" -s -u 348FFC4C "v${VERSION}" master -fi - -# Do a hermetic build inside a Docker container. -if [ -z $NOBUILD ]; then - docker build -t hashicorp/consul-builder scripts/consul-builder/ - docker run --rm -e "GOTAGS=$GOTAGS" -v "$(pwd)":/gopath/src/github.com/hashicorp/consul hashicorp/consul-builder ./scripts/dist_build.sh -fi - -# Zip all the files. -rm -rf ./pkg/dist -mkdir -p ./pkg/dist -for FILENAME in $(find ./pkg -mindepth 1 -maxdepth 1 -type f); do - FILENAME=$(basename $FILENAME) - cp ./pkg/${FILENAME} ./pkg/dist/consul_${VERSION}_${FILENAME} -done - -# Make the checksums. -pushd ./pkg/dist -shasum -a256 * > ./consul_${VERSION}_SHA256SUMS -if [ -z $NOSIGN ]; then - echo "==> Signing..." - gpg --default-key 348FFC4C --detach-sig ./consul_${VERSION}_SHA256SUMS -fi -popd - -exit 0 diff --git a/scripts/dist_build.sh b/scripts/dist_build.sh deleted file mode 100755 index 586eaafdf..000000000 --- a/scripts/dist_build.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the parent directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" - -# Change into that dir because we expect that. -cd $DIR - -# Make sure build tools are available. -make tools - -# # Build the standalone version of the web assets for the sanity check. -# pushd ui -# bundle -# make dist -# popd - -# pushd ui-v2 -# yarn install -# make dist -# popd - -# # Fixup the timestamps to match what's checked in. 
This will allow us to cleanly -# # verify that the checked-in content is up to date without spurious diffs of the -# # file mod times. -# pushd pkg -# cat ../agent/bindata_assetfs.go | ../scripts/fixup_times.sh -# popd - -# # Regenerate the built-in web assets. If there are any diffs after doing this -# # then we know something is up. -# make static-assets -# if ! git diff --quiet agent/bindata_assetfs.go; then -# echo "Checked-in web assets are out of date, build aborted" -# exit 1 -# fi - -# Now we are ready to do a clean build of everything. We no longer distribute the -# web UI so it's ok that gets blown away as part of this. -rm -rf pkg -make all - -exit 0 diff --git a/scripts/fixup_times.sh b/scripts/fixup_times.sh deleted file mode 100755 index 43994a0e6..000000000 --- a/scripts/fixup_times.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash -set -e -regex='bindataFileInfo.*name: \"(.+)\".*time.Unix.(.+),' -while read line; do - if [[ $line =~ $regex ]]; then - file=${BASH_REMATCH[1]} - ts=${BASH_REMATCH[2]} - touch --date @$ts $file - fi -done diff --git a/scripts/ui.sh b/scripts/ui.sh deleted file mode 100755 index 321ed6c1e..000000000 --- a/scripts/ui.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the parent directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" - -# Change into that dir because we expect that. -cd $DIR - -# Do a hermetic build inside a Docker container. 
-if [ -z $NOBUILD ]; then - docker build -t hashicorp/consul-builder scripts/consul-builder/ - docker run --rm -v "$(pwd)":/gopath/src/github.com/hashicorp/consul hashicorp/consul-builder ./scripts/ui_build.sh -fi - -exit 0 diff --git a/scripts/ui_build.sh b/scripts/ui_build.sh deleted file mode 100755 index c578d396c..000000000 --- a/scripts/ui_build.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the parent directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" - -# Change into that dir because we expect that. -cd $DIR - -# Make sure build tools are available. -make tools - -# Build the web assets. -echo "Building the V1 UI" -pushd ui -bundle -make dist -popd - -echo "Building the V2 UI" -pushd ui-v2 -yarn install -make dist -popd - -# Make the static assets using the container version of the builder -make static-assets - -exit 0 diff --git a/scripts/vagrant-linux-priv-go.sh b/scripts/vagrant-linux-priv-go.sh deleted file mode 100755 index 9ead97c31..000000000 --- a/scripts/vagrant-linux-priv-go.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash - -function install_go() { - local go_version=1.9.1 - local download= - - download="https://storage.googleapis.com/golang/go${go_version}.linux-amd64.tar.gz" - - if [ -d /usr/local/go ] ; then - return - fi - - wget -q -O /tmp/go.tar.gz ${download} - - tar -C /tmp -xf /tmp/go.tar.gz - sudo mv /tmp/go /usr/local - sudo chown -R root:root /usr/local/go -} - -install_go - -# Ensure that the GOPATH tree is owned by vagrant:vagrant -mkdir -p /opt/gopath -chown -R vagrant:vagrant /opt/gopath - -# Ensure Go is on PATH -if [ ! -e /usr/bin/go ] ; then - ln -s /usr/local/go/bin/go /usr/bin/go -fi -if [ ! -e /usr/bin/gofmt ] ; then - ln -s /usr/local/go/bin/gofmt /usr/bin/gofmt -fi - - -# Ensure new sessions know about GOPATH -if [ ! 
-f /etc/profile.d/gopath.sh ] ; then - cat < /etc/profile.d/gopath.sh -export GOPATH="/opt/gopath" -export PATH="/opt/gopath/bin:\$PATH" -EOT - chmod 755 /etc/profile.d/gopath.sh -fi From 237c78d4a4d69f263d850e4b2bc5e3643402f334 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Wed, 13 Jun 2018 15:10:02 -0400 Subject: [PATCH 012/627] Add more functionality related to verifying a build and publishing --- GNUmakefile | 13 +- build-support/functions/00-vars.sh | 19 ++ build-support/functions/01-util.sh | 292 ++++++++++++++++++++++++-- build-support/functions/02-build.sh | 4 +- build-support/functions/03-release.sh | 141 +++++++++++-- build-support/functions/04-publish.sh | 135 ++++++++++++ build-support/scripts/build.sh | 85 +++++--- 7 files changed, 622 insertions(+), 67 deletions(-) create mode 100644 build-support/functions/04-publish.sh diff --git a/GNUmakefile b/GNUmakefile index ba7b16d06..4872efa34 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -75,6 +75,9 @@ linux: # dist builds binaries for all platforms and packages them for distribution dist: @$(SHELL) $(CURDIR)/build-support/scripts/build.sh release -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' + +publish: + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh publish cov: gocov test $(GOFILES) | gocov-html > /tmp/coverage.html @@ -139,11 +142,15 @@ tools: go get -u -v $(GOTOOLS) version: - @echo -n "Version without release: " + @echo -n "Version: " @$(SHELL) $(CURDIR)/build-support/scripts/build.sh version - @echo -n "Version with release: " + @echo -n "Version + release: " @$(SHELL) $(CURDIR)/build-support/scripts/build.sh version -R - + @echo -n "Version + git: " + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh version -G + @echo -n "Version + release + git: " + @$(SHELL) $(CURDIR)/build-support/scripts/build.sh version -R -G + docker-images: @$(MAKE) -C build-support/docker images diff --git a/build-support/functions/00-vars.sh b/build-support/functions/00-vars.sh index 2b6c53afa..b039b658a 
100644 --- a/build-support/functions/00-vars.sh +++ b/build-support/functions/00-vars.sh @@ -18,3 +18,22 @@ case $(uname) in ;; esac MAIN_GOPATH=$(cut -d: -f1 <<< "${GOPATH}") + + +# Build debugging output is off by default +if test -z "${BUILD_DEBUG}" +then + BUILD_DEBUG=0 +fi + +# default publish host is github.com - only really useful to use something else for testing +if test -z "${PUBLISH_GIT_HOST}" +then + PUBLISH_GIT_HOST=github.com +fi + +# default publish repo is hashicorp/consul - useful to override for testing as well as in the enterprise repo +if test -z "${PUBLISH_GIT_REPO}" +then + PUBLISH_GIT_REPO=hashicorp/consul.git +fi \ No newline at end of file diff --git a/build-support/functions/01-util.sh b/build-support/functions/01-util.sh index 34a79740b..bd8220f7f 100644 --- a/build-support/functions/01-util.sh +++ b/build-support/functions/01-util.sh @@ -5,7 +5,7 @@ function err { tput setaf 1 fi - echo $@ 1>&2 + echo "$@" 1>&2 if test "${COLORIZE}" -eq 1 then @@ -20,7 +20,7 @@ function status { tput setaf 4 fi - echo $@ + echo "$@" if test "${COLORIZE}" -eq 1 then @@ -35,7 +35,7 @@ function status_stage { tput setaf 2 fi - echo $@ + echo "$@" if test "${COLORIZE}" -eq 1 then @@ -43,6 +43,21 @@ function status_stage { fi } +function debug { + if is_set "${BUILD_DEBUG}" + then + if test "${COLORIZE}" -eq 1 + then + tput setaf 6 + fi + echo "$@" + if test "${COLORIZE}" -eq 1 + then + tput sgr0 + fi + fi +} + function is_set { # Arguments: # $1 - string value to check its truthiness @@ -70,14 +85,15 @@ function have_gpg_key { # 0 - success (we can use this key for signing) # * - failure (key cannot be used) - gpg --list-secret-keys $1 >dev/null 2>&1 + gpg --list-secret-keys $1 > /dev/null 2>&1 return $? 
} function parse_version { # Arguments: # $1 - Path to the top level Consul source - # $2 - boolean value for whether to omit the release version from the version string + # $2 - boolean value for whether the release version should be parsed from the source + # $3 - boolean whether to use GIT_DESCRIBE and GIT_COMMIT environment variables # # Return: # 0 - success (will write the version to stdout) @@ -86,9 +102,6 @@ function parse_version { # Notes: # If the GOTAGS environment variable is present then it is used to determine which # version file to use for parsing. - # If the GIT_DESCRIBE environment variable is present then it is used as the version - # If the GIT_COMMIT environment variable is preset it will be added to the end of - # the version string. local vfile="${1}/version/version.go" @@ -99,6 +112,28 @@ function parse_version { return 1 fi + local include_release="$2" + local use_git_env="$3" + + local git_version="" + local git_commit="" + + if test -z "${include_release}" + then + include_release=true + fi + + if test -z "${use_git_env}" + then + use_git_env=true + fi + + if is_set "${use_git_env}" + then + git_version="${GIT_DESCRIBE}" + git_commit="${GIT_COMMIT}" + fi + # Get the main version out of the source file version_main=$(awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) release_main=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) @@ -116,22 +151,22 @@ function parse_version { done done - version= - # override the version from source with the value of the GIT_DESCRIBE env var if present - if test -n "$GIT_DESCRIBE" + if test -n "${git_version}" then - version=$GIT_DESCRIBE + version="${git_version}" + else + version="${version_main}" fi - if ! 
is_set $2 + if is_set "${include_release}" then # Get the release version out of the source file - release=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) + release="${release_main}" # When no GIT_DESCRIBE env var is present and no release is in the source then we # are definitely in dev mode - if test -z "$GIT_DESCRIBE" -a -z "$release" + if test -z "${git_version}" -a -z "$release" then release="dev" fi @@ -142,9 +177,9 @@ function parse_version { version="${version}-${release}" # add the git commit to the version - if test -n "$GIT_COMMIT" + if test -n "${git_commit}" then - version="${version} (${GIT_COMMIT})" + version="${version} (${git_commit})" fi fi fi @@ -158,6 +193,7 @@ function get_version { # Arguments: # $1 - Path to the top level Consul source # $2 - Whether the release version should be parsed from source (optional) + # $3 - Whether to use GIT_DESCRIBE and GIT_COMMIT environment variables # # Returns: # 0 - success (the version is also echoed to stdout) @@ -173,7 +209,7 @@ function get_version { if test -z "$vers" then # parse the OSS version from version.go - vers="$(parse_version ${1} ${2})" + vers="$(parse_version ${1} ${2} ${3})" fi if test -z "$vers" @@ -183,4 +219,224 @@ function get_version { echo $vers return 0 fi +} + +function git_branch { + # Arguments: + # $1 - Path to the git repo (optional - assumes pwd is git repo otherwise) + # + # Returns: + # 0 - success + # * - failure + # + # Notes: + # Echos the current branch to stdout when successful + + local gdir="$(pwd)" + if test -d "$1" + then + gdir="$1" + fi + + pushd "${gdir}" > /dev/null + + local ret=0 + local head="$(git status -b --porcelain=v2 | awk '{if ($1 == "#" && $2 =="branch.head") { print $3 }}')" || ret=1 + + popd > /dev/null + + test ${ret} -eq 0 && echo "$head" + return ${ret} +} + +function git_upstream { + # Arguments: + # $1 - Path to the git repo (optional - assumes pwd is git repo otherwise) + # + # Returns: + # 0 - success 
+ # * - failure + # + # Notes: + # Echos the current upstream branch to stdout when successful + + local gdir="$(pwd)" + if test -d "$1" + then + gdir="$1" + fi + + pushd "${gdir}" > /dev/null + + local ret=0 + local head="$(git status -b --porcelain=v2 | awk '{if ($1 == "#" && $2 =="branch.upstream") { print $3 }}')" || ret=1 + + popd > /dev/null + + test ${ret} -eq 0 && echo "$head" + return ${ret} +} + +function git_log_summary { + # Arguments: + # $1 - Path to the git repo (optional - assumes pwd is git repo otherwise) + # + # Returns: + # 0 - success + # * - failure + # + + local gdir="$(pwd)" + if test -d "$1" + then + gdir="$1" + fi + + pushd "${gdir}" > /dev/null + + local ret=0 + + local head=$(git_branch) || ret=1 + local upstream=$(git_upstream) || ret=1 + local rev_range="${head}...${upstream}" + + if test ${ret} -eq 0 + then + status "Git Changes:" + git log --pretty=oneline ${rev_range} || ret=1 + + fi + return $ret +} + +function normalize_git_url { + url="${1#https://}" + url="${url#git@}" + url="${url%.git}" + url="$(sed -e 's/\([^\/:]*\)[:\/]\(.*\)/\1:\2/' <<< "${url}")" + echo "$url" + return 0 +} + +function find_git_remote { + # Arguments: + # $1 - Path to the top level Consul source + # + # Returns: + # 0 - success + # * - error + # + # Note: + # The remote name to use for publishing will be echoed to stdout upon success + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. 
find_git_remote must be called with the path to the top level source as the first argument'" + return 1 + fi + + need_url=$(normalize_git_url "${PUBLISH_GIT_HOST}:${PUBLISH_GIT_REPO}") + + pushd "$1" > /dev/null + + local ret=1 + for remote in $(git remote) + do + url=$(git remote get-url --push ${remote}) || continue + url=$(normalize_git_url "${url}") + + if test "${url}" == "${need_url}" + then + echo "${remote}" + ret=0 + break + fi + done + + popd > /dev/null + return $ret +} + +function confirm_git_push_changes { + # Arguments: + # $1 - Path to git repo + # + # Returns: + # 0 - success + # * - error + # + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. confirm_git_push_changes must be called with the path to a git repo as the first argument'" + return 1 + fi + + pushd "${1}" > /dev/null + + + declare -i ret=0 + git_log_summary || ret=1 + if test ${ret} -eq 0 + then + # put a empty line between the git changes and the prompt + echo "" + + local answer="" + + while true + do + case "${answer}" in + [yY]* ) + status "Changes Accepted" + ret=0 + break + ;; + [nN]* ) + err "Changes Rejected" + ret=1 + break + ;; + * ) + read -p "Are these changes correct? [y/n]: " answer + ;; + esac + done + fi + + popd > /dev/null + return $ret +} + +function is_git_clean { + # Arguments: + # $1 - Path to git repo + # $2 - boolean whether the git status should be output when not clean + # + # Returns: + # 0 - success + # * - error + # + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. 
is_git_clean must be called with the path to a git repo as the first argument'" + return 1 + fi + + local output_status="$2" + + pushd "${1}" > /dev/null + + local ret=0 + test -z "$(git status --porcelain=v2 2> /dev/null)" || ret=1 + + if is_set "${output_status}" && test "$ret" -ne 0 + then + err "Git repo is not clean" + # --porcelain=v1 is the same as --short except uncolorized + git status --porcelain=v1 + fi + popd > /dev/null + return ${ret} } \ No newline at end of file diff --git a/build-support/functions/02-build.sh b/build-support/functions/02-build.sh index 166a297e7..c980b74df 100644 --- a/build-support/functions/02-build.sh +++ b/build-support/functions/02-build.sh @@ -77,7 +77,7 @@ function build_ui { docker rm ${container_id} > /dev/null fi - if test $ret -eq 0 + if test ${ret} -eq 0 then rm -rf ${1}/pkg/web_ui/v2 cp -r ${1}/ui-v2/dist ${1}/pkg/web_ui/v2 @@ -124,7 +124,7 @@ function build_ui_legacy { status "Running build in container" && docker start -i ${container_id} && status "Copying back artifacts" && - docker cp ${container_id}:/consul-src/pkg/web_ui ${sdir}/pkg/web_ui/v1 + docker cp ${container_id}:/consul-src/pkg/web_ui/v1/. ${sdir}/pkg/web_ui/v1 ) ret=$? docker rm ${container_id} > /dev/null diff --git a/build-support/functions/03-release.sh b/build-support/functions/03-release.sh index c58281c9c..4d762f858 100644 --- a/build-support/functions/03-release.sh +++ b/build-support/functions/03-release.sh @@ -33,12 +33,17 @@ function tag_release { pushd "$1" > /dev/null local ret=0 + local branch_to_tag=$(git_branch) || ret=1 + # perform an usngined release if requested (mainly for testing locally) - if is_set "$RELEASE_UNSIGNED" + if test ${ret} -ne 0 + then + err "ERROR: Failed to determine git branch to tag" + elif is_set "$RELEASE_UNSIGNED" then ( git commit --allow-empty -a -m "Release v${2}" && - git tag -a -m "Version ${2}" "v${2}" master + git tag -a -m "Version ${2}" "v${2}" "${branch_to_tag}" ) ret=$? 
# perform a signed release (official releases should do this) @@ -46,7 +51,7 @@ function tag_release { then ( git commit --allow-empty -a --gpg-sign=${gpg_key} -m "Release v${2}" && - git tag -a -m "Version ${2}" -s -u ${gpg_key} "v${2}" master + git tag -a -m "Version ${2}" -s -u ${gpg_key} "v${2}" "${branch_to_tag}" ) ret=$? # unsigned release not requested and gpg key isn't useable @@ -72,11 +77,14 @@ function package_release { err "ERROR: '$1' is not a directory. package_release must be called with the path to the top level source as the first argument'" return 1 fi - + + local sdir="${1}" + local ret=0 local vers="${2}" + if test -z "${vers}" then - vers=$(get_version $1 true) + vers=$(get_version "${sdir}" true false) ret=$? if test "$ret" -ne 0 then @@ -85,9 +93,6 @@ function package_release { fi fi - local sdir="$1" - local ret=0 - rm -rf "${sdir}/pkg/dist" > /dev/null 2>&1 mkdir -p "${sdir}/pkg/dist" >/dev/null 2>&1 for platform in $(find "${sdir}/pkg/bin" -mindepth 1 -maxdepth 1 -type d) @@ -154,10 +159,101 @@ function sign_release { gpg_key=$2 fi - gpg --default-key "${gpg_key}" --detach-sig "$1" + gpg --default-key "${gpg_key}" --detach-sig --yes -v "$1" return $? 
} +function check_release { + # Arguments: + # $1 - Path to the release files + # $2 - Version to expect + # $3 - boolean whether to expect the signature file + # + # Returns: + # 0 - success + # * - failure + + declare -i ret=0 + + declare -a expected_files + + expected_files+=("consul_${2}_SHA256SUMS") + echo "check sig: $3" + if is_set "$3" + then + expected_files+=("consul_${2}_SHA256SUMS.sig") + fi + + expected_files+=("consul_${2}_darwin_386.zip") + expected_files+=("consul_${2}_darwin_amd64.zip") + expected_files+=("consul_${2}_freebsd_386.zip") + expected_files+=("consul_${2}_freebsd_amd64.zip") + expected_files+=("consul_${2}_freebsd_arm.zip") + expected_files+=("consul_${2}_linux_386.zip") + expected_files+=("consul_${2}_linux_amd64.zip") + expected_files+=("consul_${2}_linux_arm.zip") + expected_files+=("consul_${2}_linux_arm64.zip") + expected_files+=("consul_${2}_solaris_amd64.zip") + expected_files+=("consul_${2}_windows_386.zip") + expected_files+=("consul_${2}_windows_amd64.zip") + + declare -a found_files + + status_stage "==> Verifying release contents - ${2}" + debug "Expecting Files:" + for fname in "${expected_files[@]}" + do + debug " $fname" + done + + pushd "$1" > /dev/null + for actual_fname in $(ls) + do + local found=0 + for i in "${!expected_files[@]}" + do + local expected_fname="${expected_files[i]}" + if test "${expected_fname}" == "${actual_fname}" + then + # remove from the expected_files array + unset 'expected_files[i]' + + # append to the list of found files + found_files+=("${expected_fname}") + + # mark it as found so we dont error + found=1 + break + fi + done + + if test $found -ne 1 + then + err "ERROR: Release build has an extra file: ${actual_fname}" + ret=1 + fi + done + popd > /dev/null + + for fname in "${expected_files[@]}" + do + err "ERROR: Release build is missing a file: $fname" + ret=1 + done + + + if test $ret -eq 0 + then + status "Release build contents:" + for fname in "${found_files[@]}" + do + echo " $fname" 
+ done + fi + + return $ret +} + function build_consul_release { build_consul "$1" "" "$2" } @@ -192,13 +288,33 @@ function build_release { local do_sha256="$4" local gpg_key="$5" - local vers=$(get_version ${sdir} true) + if test -z "${gpg_key}" + then + gpg_key=${HASHICORP_GPG_KEY} + fi + + local vers="$(get_version ${sdir} true false)" if test $? -ne 0 then err "Please specify a version (couldn't find one based on build tags)." return 1 fi + if ! is_git_clean "${sdir}" true && ! is_set "${ALLOW_DIRTY_GIT}" + then + err "ERROR: Refusing to build because Git is dirty. Set ALLOW_DIRTY_GIT=1 in the environment to proceed anyways" + return 1 + fi + + if ! is_set "${RELEASE_UNSIGNED}" + then + if ! have_gpg_key "${gpg_key}" + then + err "ERROR: Aborting build because no useable GPG key is present. Set RELEASE_UNSIGNED=1 to bypass this check" + return 1 + fi + fi + # Make sure we arent in dev mode unset CONSUL_DEV @@ -295,5 +411,6 @@ function build_release { fi fi - return 0 -} \ No newline at end of file + check_release "${sdir}/pkg/dist" "${vers}" "${do_sha256}" + return $? +} diff --git a/build-support/functions/04-publish.sh b/build-support/functions/04-publish.sh new file mode 100644 index 000000000..ba4e571a1 --- /dev/null +++ b/build-support/functions/04-publish.sh @@ -0,0 +1,135 @@ +function hashicorp_release { + # Arguments: + # $1 - Path to directory containing all of the release artifacts + # + # Returns: + # 0 - success + # * - failure + # + # Notes: + # Requires the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables + # to be set + + status "Uploading files" + hc-releases upload "${1}" || return 1 + + status "Publishing the release" + hc-releases publish || return 1 + + return 0 +} + +function push_git_release { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - Tag to push + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. 
push_git_release must be called with the path to the top level source as the first argument'" + return 1 + fi + + local sdir="$1" + local ret=0 + + # find the correct remote corresponding to the desired repo (basically prevent pushing enterprise to oss or oss to enterprise) + local remote=$(find_git_remote "${sdir}") || return 1 + local head=$(git_branch "${sdir}") || return 1 + local upstream=$(git_upstream "${sdir}") || return 1 + status "Using git remote: ${remote}" + + # upstream branch for this branch does not track the remote we need to push to + if test "${upstream#${remote}}" == "${upstream}" + then + err "ERROR: Upstream branch '${upstream}' does not track the correct remote '${remote}'" + return 1 + fi + + pushd "${sdir}" > /dev/null + + status "Pushing local branch ${head} to ${upstream}" + if ! git push "${remote}" + then + err "ERROR: Failed to push to remote: ${remote}" + ret=1 + fi + + status "Pushing tag ${2} to ${remote}" + if test "${ret}" -eq 0 && ! git push "${remote}" "${2}" + then + err "ERROR: Failed to push tag ${2} to ${remote}" + ret = 1 + fi + + popd > /dev/null + + + return $ret +} + + +function publish_release { + # Arguments: + # $1 - Path to top level Consul source that contains the built release + # $2 - boolean whether to publish to git upstream + # $3 - boolean whether to publish to releases.hashicorp.com + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. publish_release must be called with the path to the top level source as the first argument'" + return 1 + fi + + local sdir="$1" + local pub_git="$2" + local pub_hc_releases="$3" + + if test -z "${pub_git}" + then + pub_git=1 + fi + + if test -z "${pub_hc_releases}" + then + pub_hc_releases=1 + fi + + local vers="$(get_version ${sdir} true false)" + if test $? -ne 0 + then + err "Please specify a version (couldn't parse one from the source)." 
+ return 1 + fi + + status_page "==> Verifying release files" + check_release "${sdir}/pkg/dist" "${vers}" true + + status_stage "==> Confirming Git is clean" + is_git_clean "$1" true || return 1 + + status_stage "==> Confirming Git Changes" + confirm_git_push_changes "$1" || return 1 + + if is_set "${pub_git}" + then + status_stage "==> Pushing to Git" + push_git_release "$1" "v${vers}" || return 1 + fi + + if is_set "${pub_hc_releases}" + then + status_stage "==> Publishing to releases.hashicorp.com" + hashicorp_release "${sdir}/pkg/dist" || return 1 + fi + + return 0 +} \ No newline at end of file diff --git a/build-support/scripts/build.sh b/build-support/scripts/build.sh index 10fc797c9..ab2e938c3 100755 --- a/build-support/scripts/build.sh +++ b/build-support/scripts/build.sh @@ -103,6 +103,11 @@ Subcommands: -a | --build-arch ARCH Space separated string of architectures to build + publish: Publishes a release build. + + -s | --source DIR Path to the source to build. + Defaults to "${SOURCE_DIR}" + release: Performs a release build. 
Options: @@ -163,6 +168,7 @@ function main { declare build_os declare build_arch declare -i vers_release + declare -i vers_git declare -i use_refresh=1 declare -i default_refresh=0 @@ -178,13 +184,18 @@ function main { declare -i use_xc=0 declare default_build_os="" declare default_build_arch="" - declare -i use_vers_rel - declare -i default_vers_rel=1 + declare -i use_version_args + declare -i default_vers_rel=0 + declare -i default_vers_git=0 declare command="$1" shift case "${command}" in + assetfs ) + use_image=1 + default_image="${GO_BUILD_CONTAINER_DEFAULT}" + ;; consul ) use_image=1 default_image="${GO_BUILD_CONTAINER_DEFAULT}" @@ -192,6 +203,13 @@ function main { consul-local ) use_xc=1 ;; + publish ) + use_refresh=0 + ;; + release ) + use_rel=1 + use_refresh=0 + ;; ui ) use_image=1 default_image="${UI_BUILD_CONTAINER_DEFAULT}" @@ -202,15 +220,7 @@ function main { ;; version ) use_refresh=0 - use_vers_rel=1 - ;; - assetfs ) - use_image=1 - default_image="${GO_BUILD_CONTAINER_DEFAULT}" - ;; - release ) - use_rel=1 - use_refresh=0 + use_version_args=1 ;; -h | --help) usage @@ -232,6 +242,7 @@ function main { declare -i have_build_os_arg=0 declare -i have_build_arch_arg=0 declare -i have_vers_rel_arg=0 + declare -i have_vers_git_arg=0 while test $# -gt 0 do @@ -251,9 +262,15 @@ function main { shift 2 ;; -R | --release ) - option_check "${use_vers_rel}" "${have_vers_rel_arg}" "${command}" "-R/--release" || return 1 + option_check "${use_version_args}" "${have_vers_rel_arg}" "${command}" "-R/--release" || return 1 have_vers_rel_arg=1 - vers_release=0 + vers_release=1 + shift + ;; + -G | --git ) + option_check "${use_version_args}" "${have_vers_git_arg}" "${command}" "-G/--git" || return 1 + have_vers_git_arg=1 + vers_git=1 shift ;; -r | --refresh) @@ -313,8 +330,19 @@ function main { test $have_build_os_arg -ne 1 && build_os="${default_build_os}" test $have_build_arch_arg -ne 1 && build_arch="${default_build_os}" test $have_vers_rel_arg -ne 1 && 
vers_release="${default_vers_rel}" + test $have_vers_git_arg -ne 1 && vers_git="${default_vers_git}" case "${command}" in + assetfs ) + if is_set "${refresh_docker}" + then + status_stage "==> Refreshing Consul build container image" + export GO_BUILD_TAG="${image}" + refresh_docker_images ${sdir} go-build-image || return 1 + fi + status_stage "==> Build Static Assets" + build_assetfs "${sdir}" "${image}" || return 1 + ;; consul ) if is_set "${refresh_docker}" then @@ -328,6 +356,16 @@ function main { consul-local ) build_consul_local "${sdir}" "${build_os}" "${build_arch}" "" || return 1 ;; + publish ) + publish_release "${sdir}" true true || return 1 + ;; + release ) + if is_set "${refresh_docker}" + then + refresh_docker_images ${sdir} || return 1 + fi + build_release "${sdir}" "${rel_tag}" "${rel_build}" "${rel_sign}" "${rel_gpg_key}" || return 1 + ;; ui ) if is_set "${refresh_docker}" @@ -350,27 +388,10 @@ function main { build_ui_legacy "${sdir}" "${image}" || return 1 ;; version ) - parse_version "${sdir}" "${vers_release}"|| return 1 - ;; - assetfs ) - if is_set "${refresh_docker}" - then - status_stage "==> Refreshing Consul build container image" - export GO_BUILD_TAG="${image}" - refresh_docker_images ${sdir} go-build-image || return 1 - fi - status_stage "==> Build Static Assets" - build_assetfs "${sdir}" "${image}" || return 1 - ;; - release ) - if is_set "${refresh_docker}" - then - refresh_docker_images ${sdir} || return 1 - fi - build_release "${sdir}" "${rel_tag}" "${rel_build}" "${rel_sign}" "${rel_gpg_key}" || return 1 + parse_version "${sdir}" "${vers_release}" "${vers_git}" || return 1 ;; *) - err "Unkown subcommand: '$1' - possible values are 'consul', 'ui', 'ui-legacy', 'assetfs', version' and 'release'" + err "Unkown subcommand: '$1' - possible values are 'assetfs', consul', 'consul-local' 'publish', 'release', 'ui', 'ui-legacy' and 'version'" return 1 ;; esac From d99ca00d74775aae23e20f0567efbbf171e74ec4 Mon Sep 17 00:00:00 2001 From: Matt 
Keeler Date: Thu, 14 Jun 2018 11:04:04 -0400 Subject: [PATCH 013/627] Move some things around and add in consul version confirmation to publishing checks --- build-support/docker/Makefile | 13 ++- build-support/functions/01-util.sh | 51 ----------- build-support/functions/04-publish.sh | 116 +++++++++++++++++++++++++- 3 files changed, 124 insertions(+), 56 deletions(-) diff --git a/build-support/docker/Makefile b/build-support/docker/Makefile index d9af9ed5a..01c468959 100644 --- a/build-support/docker/Makefile +++ b/build-support/docker/Makefile @@ -7,15 +7,22 @@ GO_BUILD_TAG?=consul-build-go UI_BUILD_TAG?=consul-build-ui UI_LEGACY_BUILD_TAG?=consul-build-ui-legacy +DOCKER_BUILD_QUIET?=1 +ifeq (${DOCKER_BUILD_QUIET},1) +QUIET=-q +else +QUIET= +endif + images: go-build-image ui-build-image ui-legacy-build-image go-build-image: - docker build $(NOCACHE) -t $(GO_BUILD_TAG) -f Build-Go.dockerfile . + docker build $(NOCACHE) $(QUIET) -t $(GO_BUILD_TAG) -f Build-Go.dockerfile . ui-build-image: - docker build $(NOCACHE) -t $(UI_BUILD_TAG) -f Build-UI.dockerfile . + docker build $(NOCACHE) $(QUIET) -t $(UI_BUILD_TAG) -f Build-UI.dockerfile . ui-legacy-build-image: - docker build $(NOCACHE) -t $(UI_LEGACY_BUILD_TAG) -f Build-UI-Legacy.dockerfile . + docker build $(NOCACHE) $(QUIET) -t $(UI_LEGACY_BUILD_TAG) -f Build-UI-Legacy.dockerfile . .PHONY: images go-build-image ui-build-image ui-legacy-build-image diff --git a/build-support/functions/01-util.sh b/build-support/functions/01-util.sh index bd8220f7f..9e15005d8 100644 --- a/build-support/functions/01-util.sh +++ b/build-support/functions/01-util.sh @@ -357,57 +357,6 @@ function find_git_remote { return $ret } -function confirm_git_push_changes { - # Arguments: - # $1 - Path to git repo - # - # Returns: - # 0 - success - # * - error - # - - if ! test -d "$1" - then - err "ERROR: '$1' is not a directory. 
confirm_git_push_changes must be called with the path to a git repo as the first argument'" - return 1 - fi - - pushd "${1}" > /dev/null - - - declare -i ret=0 - git_log_summary || ret=1 - if test ${ret} -eq 0 - then - # put a empty line between the git changes and the prompt - echo "" - - local answer="" - - while true - do - case "${answer}" in - [yY]* ) - status "Changes Accepted" - ret=0 - break - ;; - [nN]* ) - err "Changes Rejected" - ret=1 - break - ;; - * ) - read -p "Are these changes correct? [y/n]: " answer - ;; - esac - done - fi - - popd > /dev/null - return $ret -} - function is_git_clean { # Arguments: # $1 - Path to git repo diff --git a/build-support/functions/04-publish.sh b/build-support/functions/04-publish.sh index ba4e571a1..02abdcdc6 100644 --- a/build-support/functions/04-publish.sh +++ b/build-support/functions/04-publish.sh @@ -72,6 +72,115 @@ function push_git_release { return $ret } +function confirm_git_push_changes { + # Arguments: + # $1 - Path to git repo + # + # Returns: + # 0 - success + # * - error + # + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. confirm_git_push_changes must be called with the path to a git repo as the first argument'" + return 1 + fi + + pushd "${1}" > /dev/null + + + declare -i ret=0 + git_log_summary || ret=1 + if test ${ret} -eq 0 + then + # put a empty line between the git changes and the prompt + echo "" + + local answer="" + + while true + do + case "${answer}" in + [yY]* ) + status "Changes Accepted" + ret=0 + break + ;; + [nN]* ) + err "Changes Rejected" + ret=1 + break + ;; + * ) + read -p "Are these changes correct? [y/n]: " answer + ;; + esac + done + fi + + popd > /dev/null + return $ret +} + +function confirm_consul_version { + # Arguments: + # $1 - Path to the release files + # $2 - Version to look for + # + # Returns: + # 0 - success + # * - error + + local zfile="${1}/consul_${2}_$(go env GOOS)_$(go env GOARCH).zip" + + if ! 
local tfile="$(mktemp -t "consul_")"
$html.classList.remove('ember-loading'); +}; export default Route.extend({ init: function() { this._super(...arguments); @@ -24,7 +27,7 @@ export default Route.extend({ const controller = this.controllerFor('application'); controller.setProperties(model); transition.promise.finally(function() { - $html.classList.remove('ember-loading'); + removeLoading(); controller.setProperties({ loading: false, dc: model.dc, @@ -51,11 +54,13 @@ export default Route.extend({ dc: error.status.toString().indexOf('5') !== 0 ? get(this, 'repo').getActive() : null, }) .then(model => { + removeLoading(); next(() => { this.controllerFor('error').setProperties(model); }); }) .catch(e => { + removeLoading(); next(() => { this.controllerFor('error').setProperties({ error: error }); }); From 3ef05bd71901a283e9a1e9f885c21df9dfcd06a0 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 13 Jun 2018 14:20:39 +0100 Subject: [PATCH 015/627] Move healthcheck text down by 1px --- ui-v2/app/styles/components/healthchecked-resource.scss | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui-v2/app/styles/components/healthchecked-resource.scss b/ui-v2/app/styles/components/healthchecked-resource.scss index 14e7b1c14..3bc9fff99 100644 --- a/ui-v2/app/styles/components/healthchecked-resource.scss +++ b/ui-v2/app/styles/components/healthchecked-resource.scss @@ -90,7 +90,7 @@ } %healthchecked-resource li a { padding: 3px 15px; - padding-top: 4px; + padding-top: 5px; padding-left: 39px; height: 31px; } From 171e95d2655c3bf8cbdb53af71ab61f930e2cae4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 26 Feb 2018 19:53:13 -0800 Subject: [PATCH 016/627] Readme for Fork Instructions --- README.md | 105 +++++++++++++++++++++++++----------------------------- 1 file changed, 48 insertions(+), 57 deletions(-) diff --git a/README.md b/README.md index 1e29e765f..1d7c55f37 100644 --- a/README.md +++ b/README.md @@ -1,75 +1,66 @@ -# Consul [![Build 
Status](https://travis-ci.org/hashicorp/consul.svg?branch=master)](https://travis-ci.org/hashicorp/consul) [![Join the chat at https://gitter.im/hashicorp-consul/Lobby](https://badges.gitter.im/hashicorp-consul/Lobby.svg)](https://gitter.im/hashicorp-consul/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +**This is a temporary README. We'll restore the old README prior to PR upstream.** -* Website: https://www.consul.io -* Chat: [Gitter](https://gitter.im/hashicorp-consul/Lobby) -* Mailing list: [Google Groups](https://groups.google.com/group/consul-tool/) +# Consul Connect -Consul is a tool for service discovery and configuration. Consul is -distributed, highly available, and extremely scalable. +This repository is the forked repository for Consul Connect work to happen +in private prior to public release. This README will explain how to safely +use this fork, how to bring in upstream changes, etc. -Consul provides several key features: +## Cloning -* **Service Discovery** - Consul makes it simple for services to register - themselves and to discover other services via a DNS or HTTP interface. - External services such as SaaS providers can be registered as well. +To use this repository, clone it into your GOPATH as usual but you must +**rename `consul-connect` to `consul`** so that Go imports continue working +as usual. -* **Health Checking** - Health Checking enables Consul to quickly alert - operators about any issues in a cluster. The integration with service - discovery prevents routing traffic to unhealthy hosts and enables service - level circuit breakers. +## Important: Never Modify Master -* **Key/Value Storage** - A flexible key/value store enables storing - dynamic configuration, feature flagging, coordination, leader election and - more. The simple HTTP API makes it easy to use anywhere. +**NEVER MODIFY MASTER! 
NEVER MODIFY MASTER!** -* **Multi-Datacenter** - Consul is built to be datacenter aware, and can - support any number of regions without complex configuration. +We want to keep the "master" branch equivalent to OSS master. This will make +rebasing easy for master. Instead, we'll use the branch `f-connect`. All +feature branches should branch from `f-connect` and make PRs against +`f-connect`. -Consul runs on Linux, Mac OS X, FreeBSD, Solaris, and Windows. A commercial -version called [Consul Enterprise](https://www.hashicorp.com/products/consul) -is also available. +When we're ready to merge back to upstream, we can make a single mega PR +merging `f-connect` into OSS master. This way we don't have a sudden mega +push to master on OSS. -## Quick Start +## Creating a Feature Branch -An extensive quick start is viewable on the Consul website: +To create a feature branch, branch from `f-connect`: -https://www.consul.io/intro/getting-started/install.html - -## Documentation - -Full, comprehensive documentation is viewable on the Consul website: - -https://www.consul.io/docs - -## Developing Consul - -If you wish to work on Consul itself, you'll first need [Go](https://golang.org) -installed (version 1.9+ is _required_). Make sure you have Go properly installed, -including setting up your [GOPATH](https://golang.org/doc/code.html#GOPATH). - -Next, clone this repository into `$GOPATH/src/github.com/hashicorp/consul` and -then just type `make`. In a few moments, you'll have a working `consul` executable: - -``` -$ make -... -$ bin/consul -... +```sh +git checkout f-connect +git checkout -b my-new-branch ``` -*Note: `make` will build all os/architecture combinations. Set the environment variable `CONSUL_DEV=1` to build it just for your local machine's os/architecture, or use `make dev`.* +All merged Connect features will be in `f-connect`, so you want to work +from that branch. When making a PR for your feature branch, target the +`f-connect` branch as the merge target. 
You can do this by using the dropdowns +in the GitHub UI when creating a PR. -*Note: `make` will also place a copy of the binary in the first part of your `$GOPATH`.* +## Syncing Upstream -You can run tests by typing `make test`. The test suite may fail if -over-parallelized, so if you are seeing stochastic failures try -`GOTEST_FLAGS="-p 2 -parallel 2" make test`. +First update our local master: -If you make any changes to the code, run `make format` in order to automatically -format the code according to Go standards. +```sh +# This has to happen on forked master +git checkout master -## Vendoring +# Add upstream to OSS Consul +git remote add upstream https://github.com/hashicorp/consul.git -Consul currently uses [govendor](https://github.com/kardianos/govendor) for -vendoring and [vendorfmt](https://github.com/magiconair/vendorfmt) for formatting -`vendor.json` to a more merge-friendly "one line per package" format. +# Fetch it +git fetch upstream + +# Rebase forked master onto upstream. This should have no changes since +# we're never modifying master. 
git rebase upstream/master
+func intentionsTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: intentionsTableName, + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + AllowMissing: false, + Unique: true, + Indexer: &memdb.UUIDFieldIndex{ + Field: "ID", + }, + }, + "destination": &memdb.IndexSchema{ + Name: "destination", + AllowMissing: true, + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "DestinationNS", + Lowercase: true, + }, + &memdb.StringFieldIndex{ + Field: "DestinationName", + Lowercase: true, + }, + }, + }, + }, + "source": &memdb.IndexSchema{ + Name: "source", + AllowMissing: true, + Unique: true, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "SourceNS", + Lowercase: true, + }, + &memdb.StringFieldIndex{ + Field: "SourceName", + Lowercase: true, + }, + }, + }, + }, + }, + } +} + +func init() { + registerSchema(intentionsTableSchema) +} + +// IntentionSet creates or updates an intention. +func (s *Store) IntentionSet(idx uint64, ixn *structs.Intention) error { + tx := s.db.Txn(true) + defer tx.Abort() + + if err := s.intentionSetTxn(tx, idx, ixn); err != nil { + return err + } + + tx.Commit() + return nil +} + +// intentionSetTxn is the inner method used to insert an intention with +// the proper indexes into the state store. 
return fmt.Errorf("failed intention lookup: %s", err)
	// Insert the intention; this has a valid ID so creation should succeed.
	// Inserting an intention with an empty ID is disallowed.
This defines +// the allowed or denied behavior of a connection between two services using +// Connect. +type Intention struct { + // ID is the UUID-based ID for the intention, always generated by Consul. + ID string + + // SourceNS, SourceName are the namespace and name, respectively, of + // the source service. Either of these may be the wildcard "*", but only + // the full value can be a wildcard. Partial wildcards are not allowed. + // The source may also be a non-Consul service, as specified by SourceType. + // + // DestinationNS, DestinationName is the same, but for the destination + // service. The same rules apply. The destination is always a Consul + // service. + SourceNS, SourceName string + DestinationNS, DestinationName string + + // SourceType is the type of the value for the source. + SourceType IntentionSourceType + + // Action is whether this is a whitelist or blacklist intention. + Action IntentionAction + + // DefaultAddr, DefaultPort of the local listening proxy (if any) to + // make this connection. + DefaultAddr string + DefaultPort int + + // Meta is arbitrary metadata associated with the intention. This is + // opaque to Consul but is served in API responses. + Meta map[string]string + + // CreatedAt and UpdatedAt keep track of when this record was created + // or modified. + CreatedAt, UpdatedAt time.Time + + RaftIndex +} + +// IntentionAction is the action that the intention represents. This +// can be "allow" or "deny" to whitelist or blacklist intentions. +type IntentionAction string + +const ( + IntentionActionAllow IntentionAction = "allow" + IntentionActionDeny IntentionAction = "deny" +) + +// IntentionSourceType is the type of the source within an intention. +type IntentionSourceType string + +const ( + // IntentionSourceConsul is a service within the Consul catalog. 
+ IntentionSourceConsul IntentionSourceType = "consul" +) From 8b0ac7d9c5891c4a2671eda365738670038bf902 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Feb 2018 09:53:21 -0800 Subject: [PATCH 018/627] agent/consul/state: list intentions --- agent/consul/state/intention.go | 22 ++++++++++ agent/consul/state/intention_test.go | 63 ++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index 040761e2c..844a1a509 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -67,6 +67,28 @@ func init() { registerSchema(intentionsTableSchema) } +// Intentions returns the list of all intentions. +func (s *Store) Intentions(ws memdb.WatchSet) (uint64, structs.Intentions, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + // Get the index + idx := maxIndexTxn(tx, intentionsTableName) + + // Get all intentions + iter, err := tx.Get(intentionsTableName, "id") + if err != nil { + return 0, nil, fmt.Errorf("failed intention lookup: %s", err) + } + ws.Add(iter.WatchCh()) + + var results structs.Intentions + for ixn := iter.Next(); ixn != nil; ixn = iter.Next() { + results = append(results, ixn.(*structs.Intention)) + } + return idx, results, nil +} + // IntentionSet creates or updates an intention. func (s *Store) IntentionSet(idx uint64, ixn *structs.Intention) error { tx := s.db.Txn(true) diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index 1c168c3bc..e4794bba7 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -120,3 +120,66 @@ func TestStore_IntentionSet_emptyId(t *testing.T) { t.Fatalf("bad") } } + +func TestStore_IntentionsList(t *testing.T) { + s := testStateStore(t) + + // Querying with no results returns nil. 
+ ws := memdb.NewWatchSet() + idx, res, err := s.Intentions(ws) + if idx != 0 || res != nil || err != nil { + t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) + } + + // Create some intentions + ixns := structs.Intentions{ + &structs.Intention{ + ID: testUUID(), + }, + &structs.Intention{ + ID: testUUID(), + }, + } + + // Force deterministic sort order + ixns[0].ID = "a" + ixns[0].ID[1:] + ixns[1].ID = "b" + ixns[1].ID[1:] + + // Create + for i, ixn := range ixns { + if err := s.IntentionSet(uint64(1+i), ixn); err != nil { + t.Fatalf("err: %s", err) + } + } + if !watchFired(ws) { + t.Fatalf("bad") + } + + // Read it back and verify. + expected := structs.Intentions{ + &structs.Intention{ + ID: ixns[0].ID, + RaftIndex: structs.RaftIndex{ + CreateIndex: 1, + ModifyIndex: 1, + }, + }, + &structs.Intention{ + ID: ixns[1].ID, + RaftIndex: structs.RaftIndex{ + CreateIndex: 2, + ModifyIndex: 2, + }, + }, + } + idx, actual, err := s.Intentions(nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if idx != 2 { + t.Fatalf("bad index: %d", idx) + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %v", actual) + } +} From b19a289596fdd94afabb188c6448f46adb7521f7 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Feb 2018 10:04:27 -0800 Subject: [PATCH 019/627] agent/consul: start Intention RPC endpoints, starting with List --- agent/consul/intention_endpoint.go | 36 +++++++++++++++++++++++++ agent/consul/intention_endpoint_test.go | 36 +++++++++++++++++++++++++ agent/consul/server_oss.go | 1 + agent/structs/intention.go | 9 +++++++ 4 files changed, 82 insertions(+) create mode 100644 agent/consul/intention_endpoint.go create mode 100644 agent/consul/intention_endpoint_test.go diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go new file mode 100644 index 000000000..7737d06dd --- /dev/null +++ b/agent/consul/intention_endpoint.go @@ -0,0 +1,36 @@ +package consul + +import ( + 
"github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/go-memdb" +) + +// Intention manages the Connect intentions. +type Intention struct { + // srv is a pointer back to the server. + srv *Server +} + +func (s *Intention) List( + args *structs.DCSpecificRequest, + reply *structs.IndexedIntentions) error { + // Forward if necessary + if done, err := s.srv.forward("Intention.List", args, args, reply); done { + return err + } + + return s.srv.blockingQuery( + &args.QueryOptions, &reply.QueryMeta, + func(ws memdb.WatchSet, state *state.Store) error { + index, ixns, err := state.Intentions(ws) + if err != nil { + return err + } + + reply.Index, reply.Intentions = index, ixns + // filterACL + return nil + }, + ) +} diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go new file mode 100644 index 000000000..13242374c --- /dev/null +++ b/agent/consul/intention_endpoint_test.go @@ -0,0 +1,36 @@ +package consul + +import ( + "os" + "testing" + + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/testrpc" + "github.com/hashicorp/net-rpc-msgpackrpc" +) + +func TestIntentionList(t *testing.T) { + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + + codec := rpcClient(t, s1) + defer codec.Close() + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Test with no intentions inserted yet + { + req := &structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var resp structs.IndexedIntentions + if err := msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + if len(resp.Intentions) != 0 { + t.Fatalf("bad: %v", resp) + } + } +} diff --git a/agent/consul/server_oss.go b/agent/consul/server_oss.go index 05c02e46c..e633c2699 100644 --- a/agent/consul/server_oss.go +++ b/agent/consul/server_oss.go @@ -5,6 +5,7 @@ func init() { registerEndpoint(func(s *Server) interface{} { 
return &Catalog{s} }) registerEndpoint(func(s *Server) interface{} { return NewCoordinate(s) }) registerEndpoint(func(s *Server) interface{} { return &Health{s} }) + registerEndpoint(func(s *Server) interface{} { return &Intention{s} }) registerEndpoint(func(s *Server) interface{} { return &Internal{s} }) registerEndpoint(func(s *Server) interface{} { return &KVS{s} }) registerEndpoint(func(s *Server) interface{} { return &Operator{s} }) diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 646fb3f64..7837ad431 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -60,3 +60,12 @@ const ( // IntentionSourceConsul is a service within the Consul catalog. IntentionSourceConsul IntentionSourceType = "consul" ) + +// Intentions is a list of intentions. +type Intentions []*Intention + +// IndexedIntentions represents a list of intentions for RPC responses. +type IndexedIntentions struct { + Intentions Intentions + QueryMeta +} From 48b9a43f1d2cf32b4694e37ea810e0b3a5726165 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Feb 2018 10:28:07 -0800 Subject: [PATCH 020/627] agent/consul: Intention.Apply, FSM methods, very little validation --- agent/consul/fsm/commands_oss.go | 24 +++++++++++ agent/consul/intention_endpoint.go | 54 +++++++++++++++++++++++++ agent/consul/intention_endpoint_test.go | 31 ++++++++++++++ agent/structs/intention.go | 30 ++++++++++++++ agent/structs/structs.go | 1 + 5 files changed, 140 insertions(+) diff --git a/agent/consul/fsm/commands_oss.go b/agent/consul/fsm/commands_oss.go index ede04eef6..c90f185e0 100644 --- a/agent/consul/fsm/commands_oss.go +++ b/agent/consul/fsm/commands_oss.go @@ -20,6 +20,7 @@ func init() { registerCommand(structs.PreparedQueryRequestType, (*FSM).applyPreparedQueryOperation) registerCommand(structs.TxnRequestType, (*FSM).applyTxn) registerCommand(structs.AutopilotRequestType, (*FSM).applyAutopilotUpdate) + registerCommand(structs.IntentionRequestType, 
(*FSM).applyIntentionOperation) } func (c *FSM) applyRegister(buf []byte, index uint64) interface{} { @@ -246,3 +247,26 @@ func (c *FSM) applyAutopilotUpdate(buf []byte, index uint64) interface{} { } return c.state.AutopilotSetConfig(index, &req.Config) } + +// applyIntentionOperation applies the given intention operation to the state store. +func (c *FSM) applyIntentionOperation(buf []byte, index uint64) interface{} { + var req structs.IntentionRequest + if err := structs.Decode(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "intention"}, time.Now(), + []metrics.Label{{Name: "op", Value: string(req.Op)}}) + defer metrics.MeasureSinceWithLabels([]string{"fsm", "intention"}, time.Now(), + []metrics.Label{{Name: "op", Value: string(req.Op)}}) + switch req.Op { + case structs.IntentionOpCreate, structs.IntentionOpUpdate: + return c.state.IntentionSet(index, req.Intention) + case structs.IntentionOpDelete: + panic("TODO") + //return c.state.PreparedQueryDelete(index, req.Query.ID) + default: + c.logger.Printf("[WARN] consul.fsm: Invalid Intention operation '%s'", req.Op) + return fmt.Errorf("Invalid Intention operation '%s'", req.Op) + } +} diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 7737d06dd..8d07b4e7b 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -1,9 +1,13 @@ package consul import ( + "time" + + "github.com/armon/go-metrics" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-uuid" ) // Intention manages the Connect intentions. @@ -12,6 +16,56 @@ type Intention struct { srv *Server } +// Apply creates or updates an intention in the data store. 
+func (s *Intention) Apply( + args *structs.IntentionRequest, + reply *string) error { + if done, err := s.srv.forward("Intention.Apply", args, args, reply); done { + return err + } + defer metrics.MeasureSince([]string{"consul", "intention", "apply"}, time.Now()) + defer metrics.MeasureSince([]string{"intention", "apply"}, time.Now()) + + // If no ID is provided, generate a new ID. This must be done prior to + // appending to the Raft log, because the ID is not deterministic. Once + // the entry is in the log, the state update MUST be deterministic or + // the followers will not converge. + if args.Op == structs.IntentionOpCreate && args.Intention.ID == "" { + state := s.srv.fsm.State() + for { + var err error + args.Intention.ID, err = uuid.GenerateUUID() + if err != nil { + s.srv.logger.Printf("[ERR] consul.intention: UUID generation failed: %v", err) + return err + } + + _, ixn, err := state.IntentionGet(nil, args.Intention.ID) + if err != nil { + s.srv.logger.Printf("[ERR] consul.intention: intention lookup failed: %v", err) + return err + } + if ixn == nil { + break + } + } + } + *reply = args.Intention.ID + + // Commit + resp, err := s.srv.raftApply(structs.IntentionRequestType, args) + if err != nil { + s.srv.logger.Printf("[ERR] consul.intention: Apply failed %v", err) + return err + } + if respErr, ok := resp.(error); ok { + return respErr + } + + return nil +} + +// List returns all the intentions. 
func (s *Intention) List( args *structs.DCSpecificRequest, reply *structs.IndexedIntentions) error { diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 13242374c..51fa635e3 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -9,6 +9,37 @@ import ( "github.com/hashicorp/net-rpc-msgpackrpc" ) +func TestIntentionApply_new(t *testing.T) { + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: &structs.Intention{ + SourceName: "test", + }, + } + var reply string + + // Create + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + if reply == "" { + t.Fatal("reply should be non-empty") + } + + // TODO test read +} + func TestIntentionList(t *testing.T) { t.Parallel() dir1, s1 := testServer(t) diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 7837ad431..81f07080c 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -69,3 +69,33 @@ type IndexedIntentions struct { Intentions Intentions QueryMeta } + +// IntentionOp is the operation for a request related to intentions. +type IntentionOp string + +const ( + IntentionOpCreate IntentionOp = "create" + IntentionOpUpdate IntentionOp = "update" + IntentionOpDelete IntentionOp = "delete" +) + +// IntentionRequest is used to create, update, and delete intentions. +type IntentionRequest struct { + // Datacenter is the target for this request. + Datacenter string + + // Op is the type of operation being requested. + Op IntentionOp + + // Intention is the intention. 
+ Intention *Intention + + // WriteRequest is a common struct containing ACL tokens and other + // write-related common elements for requests. + WriteRequest +} + +// RequestDatacenter returns the datacenter for a given request. +func (q *IntentionRequest) RequestDatacenter() string { + return q.Datacenter +} diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 77075b3e3..8a1860912 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -39,6 +39,7 @@ const ( AutopilotRequestType = 9 AreaRequestType = 10 ACLBootstrapRequestType = 11 // FSM snapshots only. + IntentionRequestType = 12 ) const ( From 2a8a2f8167caef17f019e24ba18553f4b69d5a15 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Feb 2018 10:44:49 -0800 Subject: [PATCH 021/627] agent/consul: Intention.Get endpoint --- agent/consul/intention_endpoint.go | 31 +++++++++++++++++++++++++ agent/consul/intention_endpoint_test.go | 25 +++++++++++++++++++- agent/structs/intention.go | 17 ++++++++++++++ 3 files changed, 72 insertions(+), 1 deletion(-) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 8d07b4e7b..3313c03f7 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -65,6 +65,37 @@ func (s *Intention) Apply( return nil } +// Get returns a single intention by ID. 
+func (s *Intention) Get( + args *structs.IntentionQueryRequest, + reply *structs.IndexedIntentions) error { + // Forward if necessary + if done, err := s.srv.forward("Intention.Get", args, args, reply); done { + return err + } + + return s.srv.blockingQuery( + &args.QueryOptions, + &reply.QueryMeta, + func(ws memdb.WatchSet, state *state.Store) error { + index, ixn, err := state.IntentionGet(ws, args.IntentionID) + if err != nil { + return err + } + if ixn == nil { + return ErrQueryNotFound + } + + reply.Index = index + reply.Intentions = structs.Intentions{ixn} + + // TODO: acl filtering + + return nil + }, + ) +} + // List returns all the intentions. func (s *Intention) List( args *structs.DCSpecificRequest, diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 51fa635e3..5a2405d89 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -2,6 +2,7 @@ package consul import ( "os" + "reflect" "testing" "github.com/hashicorp/consul/agent/structs" @@ -37,7 +38,29 @@ func TestIntentionApply_new(t *testing.T) { t.Fatal("reply should be non-empty") } - // TODO test read + // Read + ixn.Intention.ID = reply + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: ixn.Intention.ID, + } + var resp structs.IndexedIntentions + if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if len(resp.Intentions) != 1 { + t.Fatalf("bad: %v", resp) + } + actual := resp.Intentions[0] + if resp.Index != actual.ModifyIndex { + t.Fatalf("bad index: %d", resp.Index) + } + actual.CreateIndex, actual.ModifyIndex = 0, 0 + if !reflect.DeepEqual(actual, ixn.Intention) { + t.Fatalf("bad: %v", actual) + } + } } func TestIntentionList(t *testing.T) { diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 81f07080c..cce6e3e0f 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ 
-99,3 +99,20 @@ type IntentionRequest struct { func (q *IntentionRequest) RequestDatacenter() string { return q.Datacenter } + +// IntentionQueryRequest is used to query intentions. +type IntentionQueryRequest struct { + // Datacenter is the target this request is intended for. + Datacenter string + + // IntentionID is the ID of a specific intention. + IntentionID string + + // Options for queries + QueryOptions +} + +// RequestDatacenter returns the datacenter for a given request. +func (q *IntentionQueryRequest) RequestDatacenter() string { + return q.Datacenter +} From 4003bca543c00e2d9519912fb0e765d02ae558d6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Feb 2018 11:36:54 -0800 Subject: [PATCH 022/627] agent: GET /v1/connect/intentions endpoint --- agent/http_oss.go | 1 + agent/intentions_endpoint.go | 30 +++++++++++++ agent/intentions_endpoint_test.go | 71 +++++++++++++++++++++++++++++++ 3 files changed, 102 insertions(+) create mode 100644 agent/intentions_endpoint.go create mode 100644 agent/intentions_endpoint_test.go diff --git a/agent/http_oss.go b/agent/http_oss.go index 4a2017d28..28ece14ae 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -39,6 +39,7 @@ func init() { registerEndpoint("/v1/catalog/services", []string{"GET"}, (*HTTPServer).CatalogServices) registerEndpoint("/v1/catalog/service/", []string{"GET"}, (*HTTPServer).CatalogServiceNodes) registerEndpoint("/v1/catalog/node/", []string{"GET"}, (*HTTPServer).CatalogNodeServices) + registerEndpoint("/v1/connect/intentions", []string{"GET"}, (*HTTPServer).IntentionList) registerEndpoint("/v1/coordinate/datacenters", []string{"GET"}, (*HTTPServer).CoordinateDatacenters) registerEndpoint("/v1/coordinate/nodes", []string{"GET"}, (*HTTPServer).CoordinateNodes) registerEndpoint("/v1/coordinate/node/", []string{"GET"}, (*HTTPServer).CoordinateNode) diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go new file mode 100644 index 000000000..0cdd0dc43 --- /dev/null 
+++ b/agent/intentions_endpoint.go @@ -0,0 +1,30 @@ +package agent + +import ( + "net/http" + + "github.com/hashicorp/consul/agent/structs" +) + +// /v1/connect/intentions +func (s *HTTPServer) IntentionList(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "GET" { + return nil, MethodNotAllowedError{req.Method, []string{"GET"}} + } + + var args structs.DCSpecificRequest + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { + return nil, nil + } + + var reply structs.IndexedIntentions + if err := s.agent.RPC("Intention.List", &args, &reply); err != nil { + return nil, err + } + + // Use empty list instead of nil. + if reply.Intentions == nil { + reply.Intentions = make(structs.Intentions, 0) + } + return reply.Intentions, nil +} diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go new file mode 100644 index 000000000..2e56eabf7 --- /dev/null +++ b/agent/intentions_endpoint_test.go @@ -0,0 +1,71 @@ +package agent + +import ( + "net/http" + "net/http/httptest" + "reflect" + "sort" + "testing" + + "github.com/hashicorp/consul/agent/structs" +) + +func TestIntentionsList_empty(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Make sure an empty list is non-nil. 
+ req, _ := http.NewRequest("GET", "/v1/connect/intentions", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionList(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + + value := obj.(structs.Intentions) + if value == nil || len(value) != 0 { + t.Fatalf("bad: %v", value) + } +} + +func TestIntentionsList_values(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Create some intentions + for _, v := range []string{"foo", "bar"} { + req := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: &structs.Intention{SourceName: v}, + } + var reply string + if err := a.RPC("Intention.Apply", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + } + + // Request + req, _ := http.NewRequest("GET", "/v1/connect/intentions", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionList(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + + value := obj.(structs.Intentions) + if len(value) != 2 { + t.Fatalf("bad: %v", value) + } + + expected := []string{"bar", "foo"} + actual := []string{value[0].SourceName, value[1].SourceName} + sort.Strings(actual) + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } +} From c78b82f43b5573c51c9cb6c902e39beb770087a1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Feb 2018 14:02:00 -0800 Subject: [PATCH 023/627] agent: POST /v1/connect/intentions --- agent/http_oss.go | 2 +- agent/intentions_endpoint.go | 47 ++++++++++++++++++++++++++++--- agent/intentions_endpoint_test.go | 40 ++++++++++++++++++++++++++ agent/structs/intention.go | 2 +- 4 files changed, 85 insertions(+), 6 deletions(-) diff --git a/agent/http_oss.go b/agent/http_oss.go index 28ece14ae..61bef8d2a 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -39,7 +39,7 @@ func init() { registerEndpoint("/v1/catalog/services", []string{"GET"}, (*HTTPServer).CatalogServices) 
registerEndpoint("/v1/catalog/service/", []string{"GET"}, (*HTTPServer).CatalogServiceNodes) registerEndpoint("/v1/catalog/node/", []string{"GET"}, (*HTTPServer).CatalogNodeServices) - registerEndpoint("/v1/connect/intentions", []string{"GET"}, (*HTTPServer).IntentionList) + registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionList) registerEndpoint("/v1/coordinate/datacenters", []string{"GET"}, (*HTTPServer).CoordinateDatacenters) registerEndpoint("/v1/coordinate/nodes", []string{"GET"}, (*HTTPServer).CoordinateNodes) registerEndpoint("/v1/coordinate/node/", []string{"GET"}, (*HTTPServer).CoordinateNode) diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 0cdd0dc43..62340e7e7 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -1,16 +1,29 @@ package agent import ( + "fmt" "net/http" "github.com/hashicorp/consul/agent/structs" ) -// /v1/connect/intentions -func (s *HTTPServer) IntentionList(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - if req.Method != "GET" { - return nil, MethodNotAllowedError{req.Method, []string{"GET"}} +// /v1/connection/intentions +func (s *HTTPServer) IntentionEndpoint(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + switch req.Method { + case "GET": + return s.IntentionList(resp, req) + + case "POST": + return s.IntentionCreate(resp, req) + + default: + return nil, MethodNotAllowedError{req.Method, []string{"GET", "POST"}} } +} + +// GET /v1/connect/intentions +func (s *HTTPServer) IntentionList(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Method is tested in IntentionEndpoint var args structs.DCSpecificRequest if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { @@ -28,3 +41,29 @@ func (s *HTTPServer) IntentionList(resp http.ResponseWriter, req *http.Request) } return reply.Intentions, nil } + +// POST /v1/connect/intentions +func (s *HTTPServer) 
IntentionCreate(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Method is tested in IntentionEndpoint + + args := structs.IntentionRequest{ + Op: structs.IntentionOpCreate, + } + s.parseDC(req, &args.Datacenter) + s.parseToken(req, &args.Token) + if err := decodeBody(req, &args.Intention, nil); err != nil { + resp.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(resp, "Request decode failed: %v", err) + return nil, nil + } + + var reply string + if err := s.agent.RPC("Intention.Apply", &args, &reply); err != nil { + return nil, err + } + + return intentionCreateResponse{reply}, nil +} + +// intentionCreateResponse is the response structure for creating an intention. +type intentionCreateResponse struct{ ID string } diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index 2e56eabf7..db6a16580 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -69,3 +69,43 @@ func TestIntentionsList_values(t *testing.T) { t.Fatalf("bad: %#v", actual) } } + +func TestIntentionsCreate_good(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Make sure an empty list is non-nil. 
+ args := &structs.Intention{SourceName: "foo"} + req, _ := http.NewRequest("POST", "/v1/connect/intentions", jsonReader(args)) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionCreate(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + + value := obj.(intentionCreateResponse) + if value.ID == "" { + t.Fatalf("bad: %v", value) + } + + // Read the value + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: value.ID, + } + var resp structs.IndexedIntentions + if err := a.RPC("Intention.Get", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if len(resp.Intentions) != 1 { + t.Fatalf("bad: %v", resp) + } + actual := resp.Intentions[0] + if actual.SourceName != "foo" { + t.Fatalf("bad: %#v", actual) + } + } +} diff --git a/agent/structs/intention.go b/agent/structs/intention.go index cce6e3e0f..7255bc8f1 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -39,7 +39,7 @@ type Intention struct { // CreatedAt and UpdatedAt keep track of when this record was created // or modified. 
- CreatedAt, UpdatedAt time.Time + CreatedAt, UpdatedAt time.Time `mapstructure:"-"` RaftIndex } From 37572829abac05f6680dfe4bef44dbccbe677747 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Feb 2018 15:54:48 -0800 Subject: [PATCH 024/627] agent: GET /v1/connect/intentions/:id --- agent/consul/intention_endpoint.go | 8 ++++- agent/http_oss.go | 3 +- agent/intentions_endpoint.go | 48 ++++++++++++++++++++++++++++++ agent/intentions_endpoint_test.go | 43 ++++++++++++++++++++++++++ 4 files changed, 100 insertions(+), 2 deletions(-) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 3313c03f7..030c9922d 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -1,6 +1,7 @@ package consul import ( + "errors" "time" "github.com/armon/go-metrics" @@ -10,6 +11,11 @@ import ( "github.com/hashicorp/go-uuid" ) +var ( + // ErrIntentionNotFound is returned if the intention lookup failed. + ErrIntentionNotFound = errors.New("Intention not found") +) + // Intention manages the Connect intentions. type Intention struct { // srv is a pointer back to the server. 
@@ -83,7 +89,7 @@ func (s *Intention) Get( return err } if ixn == nil { - return ErrQueryNotFound + return ErrIntentionNotFound } reply.Index = index diff --git a/agent/http_oss.go b/agent/http_oss.go index 61bef8d2a..0170a0075 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -39,7 +39,8 @@ func init() { registerEndpoint("/v1/catalog/services", []string{"GET"}, (*HTTPServer).CatalogServices) registerEndpoint("/v1/catalog/service/", []string{"GET"}, (*HTTPServer).CatalogServiceNodes) registerEndpoint("/v1/catalog/node/", []string{"GET"}, (*HTTPServer).CatalogNodeServices) - registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionList) + registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionEndpoint) + registerEndpoint("/v1/connect/intentions/", []string{"GET"}, (*HTTPServer).IntentionSpecific) registerEndpoint("/v1/coordinate/datacenters", []string{"GET"}, (*HTTPServer).CoordinateDatacenters) registerEndpoint("/v1/coordinate/nodes", []string{"GET"}, (*HTTPServer).CoordinateNodes) registerEndpoint("/v1/coordinate/node/", []string{"GET"}, (*HTTPServer).CoordinateNode) diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 62340e7e7..d5d6b6495 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -3,7 +3,9 @@ package agent import ( "fmt" "net/http" + "strings" + "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" ) @@ -65,5 +67,51 @@ func (s *HTTPServer) IntentionCreate(resp http.ResponseWriter, req *http.Request return intentionCreateResponse{reply}, nil } +// IntentionSpecific handles the endpoint for /v1/connection/intentions/:id +func (s *HTTPServer) IntentionSpecific(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + id := strings.TrimPrefix(req.URL.Path, "/v1/connect/intentions/") + + switch req.Method { + case "GET": + return s.IntentionSpecificGet(id, resp, req) + + case "PUT": + 
panic("TODO") + + case "DELETE": + panic("TODO") + + default: + return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}} + } +} + +// GET /v1/connect/intentions/:id +func (s *HTTPServer) IntentionSpecificGet(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Method is tested in IntentionEndpoint + + args := structs.IntentionQueryRequest{ + IntentionID: id, + } + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { + return nil, nil + } + + var reply structs.IndexedIntentions + if err := s.agent.RPC("Intention.Get", &args, &reply); err != nil { + // We have to check the string since the RPC sheds the error type + if err.Error() == consul.ErrIntentionNotFound.Error() { + resp.WriteHeader(http.StatusNotFound) + fmt.Fprint(resp, err.Error()) + return nil, nil + } + + return nil, err + } + + // TODO: validate length + return reply.Intentions[0], nil +} + // intentionCreateResponse is the response structure for creating an intention. 
type intentionCreateResponse struct{ ID string } diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index db6a16580..0bd956842 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -1,6 +1,7 @@ package agent import ( + "fmt" "net/http" "net/http/httptest" "reflect" @@ -109,3 +110,45 @@ func TestIntentionsCreate_good(t *testing.T) { } } } + +func TestIntentionsSpecificGet_good(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // The intention + ixn := &structs.Intention{SourceName: "foo"} + + // Create an intention directly + var reply string + { + req := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: ixn, + } + if err := a.RPC("Intention.Apply", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + } + + // Get the value + req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/connect/intentions/%s", reply), nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionSpecific(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + + value := obj.(*structs.Intention) + if value.ID != reply { + t.Fatalf("bad: %v", value) + } + + ixn.ID = value.ID + ixn.RaftIndex = value.RaftIndex + if !reflect.DeepEqual(value, ixn) { + t.Fatalf("bad (got, want):\n\n%#v\n\n%#v", value, ixn) + } +} From f219c766cbf42c4b7b71e7f7f7e2cccfc1c23b7c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Feb 2018 21:11:35 -0800 Subject: [PATCH 025/627] agent/consul: support updating intentions --- agent/consul/intention_endpoint.go | 13 ++++ agent/consul/intention_endpoint_test.go | 90 +++++++++++++++++++++++++ 2 files changed, 103 insertions(+) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 030c9922d..118dfb5b9 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -2,6 +2,7 @@ package consul import ( "errors" + "fmt" "time" 
"github.com/armon/go-metrics" @@ -58,6 +59,18 @@ func (s *Intention) Apply( } *reply = args.Intention.ID + // If this is not a create, then we have to verify the ID. + if args.Op != structs.IntentionOpCreate { + state := s.srv.fsm.State() + _, ixn, err := state.IntentionGet(nil, args.Intention.ID) + if err != nil { + return fmt.Errorf("Intention lookup failed: %v", err) + } + if ixn == nil { + return fmt.Errorf("Cannot modify non-existent intention: '%s'", args.Intention.ID) + } + } + // Commit resp, err := s.srv.raftApply(structs.IntentionRequestType, args) if err != nil { diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 5a2405d89..6049c5f35 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -3,6 +3,7 @@ package consul import ( "os" "reflect" + "strings" "testing" "github.com/hashicorp/consul/agent/structs" @@ -10,6 +11,7 @@ import ( "github.com/hashicorp/net-rpc-msgpackrpc" ) +// Test basic creation func TestIntentionApply_new(t *testing.T) { t.Parallel() dir1, s1 := testServer(t) @@ -63,6 +65,94 @@ func TestIntentionApply_new(t *testing.T) { } } +// Test basic updating +func TestIntentionApply_updateGood(t *testing.T) { + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: &structs.Intention{ + SourceName: "test", + }, + } + var reply string + + // Create + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + if reply == "" { + t.Fatal("reply should be non-empty") + } + + // Update + ixn.Op = structs.IntentionOpUpdate + ixn.Intention.ID = reply + ixn.Intention.SourceName = "bar" + if err := msgpackrpc.CallWithCodec(codec, 
"Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + + // Read + ixn.Intention.ID = reply + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: ixn.Intention.ID, + } + var resp structs.IndexedIntentions + if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if len(resp.Intentions) != 1 { + t.Fatalf("bad: %v", resp) + } + actual := resp.Intentions[0] + actual.CreateIndex, actual.ModifyIndex = 0, 0 + if !reflect.DeepEqual(actual, ixn.Intention) { + t.Fatalf("bad: %v", actual) + } + } +} + +// Shouldn't be able to update a non-existent intention +func TestIntentionApply_updateNonExist(t *testing.T) { + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpUpdate, + Intention: &structs.Intention{ + ID: generateUUID(), + SourceName: "test", + }, + } + var reply string + + // Create + err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) + if err == nil || !strings.Contains(err.Error(), "Cannot modify non-existent intention") { + t.Fatalf("bad: %v", err) + } +} + func TestIntentionList(t *testing.T) { t.Parallel() dir1, s1 := testServer(t) From 32ad54369c05a8274d140ccb7ba4b150e61e23a0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Feb 2018 21:16:45 -0800 Subject: [PATCH 026/627] agent/consul: creating intention must not have ID set --- agent/consul/intention_endpoint.go | 6 ++++- agent/consul/intention_endpoint_test.go | 29 +++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 118dfb5b9..fc552afd9 100644 --- a/agent/consul/intention_endpoint.go +++ 
b/agent/consul/intention_endpoint.go @@ -37,7 +37,11 @@ func (s *Intention) Apply( // appending to the Raft log, because the ID is not deterministic. Once // the entry is in the log, the state update MUST be deterministic or // the followers will not converge. - if args.Op == structs.IntentionOpCreate && args.Intention.ID == "" { + if args.Op == structs.IntentionOpCreate { + if args.Intention.ID != "" { + return fmt.Errorf("ID must be empty when creating a new intention") + } + state := s.srv.fsm.State() for { var err error diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 6049c5f35..e0b4762de 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -65,6 +65,35 @@ func TestIntentionApply_new(t *testing.T) { } } +// Shouldn't be able to create with an ID set +func TestIntentionApply_createWithID(t *testing.T) { + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: &structs.Intention{ + ID: generateUUID(), + SourceName: "test", + }, + } + var reply string + + // Create + err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) + if err == nil || !strings.Contains(err.Error(), "ID must be empty") { + t.Fatalf("bad: %v", err) + } +} + // Test basic updating func TestIntentionApply_updateGood(t *testing.T) { t.Parallel() From 95e1c92edf098cd8b051ec3ced6344e93b732fce Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Feb 2018 21:21:59 -0800 Subject: [PATCH 027/627] agent/consul/state,fsm: support for deleting intentions --- agent/consul/fsm/commands_oss.go | 3 +- agent/consul/state/intention.go | 36 ++++++++++++++++++++ agent/consul/state/intention_test.go | 50 
++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+), 2 deletions(-) diff --git a/agent/consul/fsm/commands_oss.go b/agent/consul/fsm/commands_oss.go index c90f185e0..51f127899 100644 --- a/agent/consul/fsm/commands_oss.go +++ b/agent/consul/fsm/commands_oss.go @@ -263,8 +263,7 @@ func (c *FSM) applyIntentionOperation(buf []byte, index uint64) interface{} { case structs.IntentionOpCreate, structs.IntentionOpUpdate: return c.state.IntentionSet(index, req.Intention) case structs.IntentionOpDelete: - panic("TODO") - //return c.state.PreparedQueryDelete(index, req.Query.ID) + return c.state.IntentionDelete(index, req.Intention.ID) default: c.logger.Printf("[WARN] consul.fsm: Invalid Intention operation '%s'", req.Op) return fmt.Errorf("Invalid Intention operation '%s'", req.Op) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index 844a1a509..ea2ee3fd5 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -156,3 +156,39 @@ func (s *Store) IntentionGet(ws memdb.WatchSet, id string) (uint64, *structs.Int return idx, result, nil } + +// IntentionDelete deletes the given intention by ID. +func (s *Store) IntentionDelete(idx uint64, id string) error { + tx := s.db.Txn(true) + defer tx.Abort() + + if err := s.intentionDeleteTxn(tx, idx, id); err != nil { + return fmt.Errorf("failed intention delete: %s", err) + } + + tx.Commit() + return nil +} + +// intentionDeleteTxn is the inner method used to delete a intention +// with the proper indexes into the state store. +func (s *Store) intentionDeleteTxn(tx *memdb.Txn, idx uint64, queryID string) error { + // Pull the query. + wrapped, err := tx.First(intentionsTableName, "id", queryID) + if err != nil { + return fmt.Errorf("failed intention lookup: %s", err) + } + if wrapped == nil { + return nil + } + + // Delete the query and update the index. 
+ if err := tx.Delete(intentionsTableName, wrapped); err != nil { + return fmt.Errorf("failed intention delete: %s", err) + } + if err := tx.Insert("index", &IndexEntry{intentionsTableName, idx}); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + + return nil +} diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index e4794bba7..d1494d5e0 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -121,6 +121,56 @@ func TestStore_IntentionSet_emptyId(t *testing.T) { } } +func TestStore_IntentionDelete(t *testing.T) { + s := testStateStore(t) + + // Call Get to populate the watch set + ws := memdb.NewWatchSet() + _, _, err := s.IntentionGet(ws, testUUID()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Create + ixn := &structs.Intention{ID: testUUID()} + if err := s.IntentionSet(1, ixn); err != nil { + t.Fatalf("err: %s", err) + } + + // Make sure the index got updated. + if idx := s.maxIndex(intentionsTableName); idx != 1 { + t.Fatalf("bad index: %d", idx) + } + if !watchFired(ws) { + t.Fatalf("bad") + } + + // Delete + if err := s.IntentionDelete(2, ixn.ID); err != nil { + t.Fatalf("err: %s", err) + } + + // Make sure the index got updated. + if idx := s.maxIndex(intentionsTableName); idx != 2 { + t.Fatalf("bad index: %d", idx) + } + if !watchFired(ws) { + t.Fatalf("bad") + } + + // Sanity check to make sure it's not there. 
+ idx, actual, err := s.IntentionGet(nil, ixn.ID) + if err != nil { + t.Fatalf("err: %s", err) + } + if idx != 2 { + t.Fatalf("bad index: %d", idx) + } + if actual != nil { + t.Fatalf("bad: %v", actual) + } +} + func TestStore_IntentionsList(t *testing.T) { s := testStateStore(t) From bebe6870ffa088dedc788a0dd1f88cb50b90495c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 1 Mar 2018 15:48:48 -0800 Subject: [PATCH 028/627] agent/consul: test that Apply works to delete an intention --- agent/consul/intention_endpoint_test.go | 51 +++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index e0b4762de..53aef35cd 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -182,6 +182,57 @@ func TestIntentionApply_updateNonExist(t *testing.T) { } } +// Test basic deleting +func TestIntentionApply_deleteGood(t *testing.T) { + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: &structs.Intention{ + SourceName: "test", + }, + } + var reply string + + // Create + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + if reply == "" { + t.Fatal("reply should be non-empty") + } + + // Delete + ixn.Op = structs.IntentionOpDelete + ixn.Intention.ID = reply + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + + // Read + ixn.Intention.ID = reply + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: ixn.Intention.ID, + } + var resp structs.IndexedIntentions + err := msgpackrpc.CallWithCodec(codec, 
"Intention.Get", req, &resp) + if err == nil || !strings.Contains(err.Error(), ErrIntentionNotFound.Error()) { + t.Fatalf("err: %v", err) + } + } +} + func TestIntentionList(t *testing.T) { t.Parallel() dir1, s1 := testServer(t) From cae7bca448af147625a736b44790b49f87afa5cc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 1 Mar 2018 15:54:03 -0800 Subject: [PATCH 029/627] agent: DELETE /v1/connect/intentions/:id --- agent/intentions_endpoint.go | 21 +++++++++- agent/intentions_endpoint_test.go | 68 +++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index d5d6b6495..9f974309e 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -79,7 +79,7 @@ func (s *HTTPServer) IntentionSpecific(resp http.ResponseWriter, req *http.Reque panic("TODO") case "DELETE": - panic("TODO") + return s.IntentionSpecificDelete(id, resp, req) default: return nil, MethodNotAllowedError{req.Method, []string{"GET", "PUT", "DELETE"}} @@ -113,5 +113,24 @@ func (s *HTTPServer) IntentionSpecificGet(id string, resp http.ResponseWriter, r return reply.Intentions[0], nil } +// DELETE /v1/connect/intentions/:id +func (s *HTTPServer) IntentionSpecificDelete(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Method is tested in IntentionEndpoint + + args := structs.IntentionRequest{ + Op: structs.IntentionOpDelete, + Intention: &structs.Intention{ID: id}, + } + s.parseDC(req, &args.Datacenter) + s.parseToken(req, &args.Token) + + var reply string + if err := s.agent.RPC("Intention.Apply", &args, &reply); err != nil { + return nil, err + } + + return nil, nil +} + // intentionCreateResponse is the response structure for creating an intention. 
type intentionCreateResponse struct{ ID string } diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index 0bd956842..d38fc6c43 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -6,6 +6,7 @@ import ( "net/http/httptest" "reflect" "sort" + "strings" "testing" "github.com/hashicorp/consul/agent/structs" @@ -152,3 +153,70 @@ func TestIntentionsSpecificGet_good(t *testing.T) { t.Fatalf("bad (got, want):\n\n%#v\n\n%#v", value, ixn) } } + +func TestIntentionsSpecificDelete_good(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // The intention + ixn := &structs.Intention{SourceName: "foo"} + + // Create an intention directly + var reply string + { + req := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: ixn, + } + if err := a.RPC("Intention.Apply", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + } + + // Sanity check that the intention exists + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: reply, + } + var resp structs.IndexedIntentions + if err := a.RPC("Intention.Get", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if len(resp.Intentions) != 1 { + t.Fatalf("bad: %v", resp) + } + actual := resp.Intentions[0] + if actual.SourceName != "foo" { + t.Fatalf("bad: %#v", actual) + } + } + + // Delete the intention + req, _ := http.NewRequest("DELETE", fmt.Sprintf("/v1/connect/intentions/%s", reply), nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionSpecific(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + if obj != nil { + t.Fatalf("obj should be nil: %v", err) + } + + // Verify the intention is gone + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: reply, + } + var resp structs.IndexedIntentions + err := a.RPC("Intention.Get", req, &resp) + if err == nil || !strings.Contains(err.Error(), "not found") { + 
t.Fatalf("err: %v", err) + } + } + +} From a91fadb9710789b2bf5d885ca792bee0123bc6fa Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 1 Mar 2018 16:53:31 -0800 Subject: [PATCH 030/627] agent: PUT /v1/connect/intentions/:id --- agent/intentions_endpoint.go | 29 +++++++++++++++- agent/intentions_endpoint_test.go | 55 +++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 9f974309e..40a9f2282 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -76,7 +76,7 @@ func (s *HTTPServer) IntentionSpecific(resp http.ResponseWriter, req *http.Reque return s.IntentionSpecificGet(id, resp, req) case "PUT": - panic("TODO") + return s.IntentionSpecificUpdate(id, resp, req) case "DELETE": return s.IntentionSpecificDelete(id, resp, req) @@ -113,6 +113,33 @@ func (s *HTTPServer) IntentionSpecificGet(id string, resp http.ResponseWriter, r return reply.Intentions[0], nil } +// PUT /v1/connect/intentions/:id +func (s *HTTPServer) IntentionSpecificUpdate(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Method is tested in IntentionEndpoint + + args := structs.IntentionRequest{ + Op: structs.IntentionOpUpdate, + } + s.parseDC(req, &args.Datacenter) + s.parseToken(req, &args.Token) + if err := decodeBody(req, &args.Intention, nil); err != nil { + resp.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(resp, "Request decode failed: %v", err) + return nil, nil + } + + // Use the ID from the URL + args.Intention.ID = id + + var reply string + if err := s.agent.RPC("Intention.Apply", &args, &reply); err != nil { + return nil, err + } + + return nil, nil + +} + // DELETE /v1/connect/intentions/:id func (s *HTTPServer) IntentionSpecificDelete(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Method is tested in IntentionEndpoint diff --git a/agent/intentions_endpoint_test.go 
b/agent/intentions_endpoint_test.go index d38fc6c43..c3753ea97 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -154,6 +154,61 @@ func TestIntentionsSpecificGet_good(t *testing.T) { } } +func TestIntentionsSpecificUpdate_good(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // The intention + ixn := &structs.Intention{SourceName: "foo"} + + // Create an intention directly + var reply string + { + req := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: ixn, + } + if err := a.RPC("Intention.Apply", &req, &reply); err != nil { + t.Fatalf("err: %s", err) + } + } + + // Update the intention + ixn.ID = "bogus" + ixn.SourceName = "bar" + req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/connect/intentions/%s", reply), jsonReader(ixn)) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionSpecific(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + if obj != nil { + t.Fatalf("obj should be nil: %v", err) + } + + // Read the value + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: reply, + } + var resp structs.IndexedIntentions + if err := a.RPC("Intention.Get", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if len(resp.Intentions) != 1 { + t.Fatalf("bad: %v", resp) + } + actual := resp.Intentions[0] + if actual.SourceName != "bar" { + t.Fatalf("bad: %#v", actual) + } + } +} + func TestIntentionsSpecificDelete_good(t *testing.T) { t.Parallel() From 231f7328bd6782bc47cef614d2026ce4bf5206f6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 2 Mar 2018 11:53:40 -0800 Subject: [PATCH 031/627] agent/structs: IntentionPrecedenceSorter for sorting based on precedence --- agent/structs/intention.go | 80 +++++++++++++++++++++++++++++++++ agent/structs/intention_test.go | 72 +++++++++++++++++++++++++++++ 2 files changed, 152 insertions(+) create mode 100644 agent/structs/intention_test.go 
diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 7255bc8f1..14b5e0b8e 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -4,6 +4,11 @@ import ( "time" ) +const ( + // IntentionWildcard is the wildcard value. + IntentionWildcard = "*" +) + // Intention defines an intention for the Connect Service Graph. This defines // the allowed or denied behavior of a connection between two services using // Connect. @@ -100,6 +105,16 @@ func (q *IntentionRequest) RequestDatacenter() string { return q.Datacenter } +// IntentionMatchType is the target for a match request. For example, +// matching by source will look for all intentions that match the given +// source value. +type IntentionMatchType string + +const ( + IntentionMatchSource IntentionMatchType = "source" + IntentionMatchDestination IntentionMatchType = "destination" +) + // IntentionQueryRequest is used to query intentions. type IntentionQueryRequest struct { // Datacenter is the target this request is intended for. @@ -108,6 +123,12 @@ type IntentionQueryRequest struct { // IntentionID is the ID of a specific intention. IntentionID string + // MatchBy and MatchNames are used to match a namespace/name pair + // to a set of intentions. The list of MatchNames is an OR list, + // all matching intentions are returned together. + MatchBy IntentionMatchType + MatchNames []string + // Options for queries QueryOptions } @@ -116,3 +137,62 @@ type IntentionQueryRequest struct { func (q *IntentionQueryRequest) RequestDatacenter() string { return q.Datacenter } + +// IntentionQueryMatch are the parameters for performing a match request +// against the state store. +type IntentionQueryMatch struct { + Type IntentionMatchType + Entries []IntentionMatchEntry +} + +// IntentionMatchEntry is a single entry for matching an intention. 
+type IntentionMatchEntry struct { + Namespace string + Name string +} + +// IntentionPrecedenceSorter takes a list of intentions and sorts them +// based on the match precedence rules for intentions. The intentions +// closer to the head of the list have higher precedence. i.e. index 0 has +// the highest precedence. +type IntentionPrecedenceSorter Intentions + +func (s IntentionPrecedenceSorter) Len() int { return len(s) } +func (s IntentionPrecedenceSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s IntentionPrecedenceSorter) Less(i, j int) bool { + a, b := s[i], s[j] + + // First test the # of exact values in destination, since precedence + // is destination-oriented. + aExact := s.countExact(a.DestinationNS, a.DestinationName) + bExact := s.countExact(b.DestinationNS, b.DestinationName) + if aExact != bExact { + return aExact > bExact + } + + // Next test the # of exact values in source + aExact = s.countExact(a.SourceNS, a.SourceName) + bExact = s.countExact(b.SourceNS, b.SourceName) + return aExact > bExact +} + +// countExact counts the number of exact values (not wildcards) in +// the given namespace and name. +func (s IntentionPrecedenceSorter) countExact(ns, n string) int { + // If NS is wildcard, it must be zero since wildcards only follow exact + if ns == IntentionWildcard { + return 0 + } + + // Same reasoning as above, a wildcard can only follow an exact value + // and an exact value cannot follow a wildcard, so if name is a wildcard + // we must have exactly one. 
+ if n == IntentionWildcard { + return 1 + } + + return 2 +} diff --git a/agent/structs/intention_test.go b/agent/structs/intention_test.go new file mode 100644 index 000000000..19ac5811a --- /dev/null +++ b/agent/structs/intention_test.go @@ -0,0 +1,72 @@ +package structs + +import ( + "reflect" + "sort" + "testing" +) + +func TestIntentionPrecedenceSorter(t *testing.T) { + cases := []struct { + Name string + Input [][]string // SrcNS, SrcN, DstNS, DstN + Expected [][]string // Same structure as Input + }{ + { + "exhaustive list", + [][]string{ + {"*", "*", "exact", "*"}, + {"*", "*", "*", "*"}, + {"exact", "*", "exact", "exact"}, + {"*", "*", "exact", "exact"}, + {"exact", "exact", "*", "*"}, + {"exact", "exact", "exact", "exact"}, + {"exact", "exact", "exact", "*"}, + {"exact", "*", "exact", "*"}, + {"exact", "*", "*", "*"}, + }, + [][]string{ + {"exact", "exact", "exact", "exact"}, + {"exact", "*", "exact", "exact"}, + {"*", "*", "exact", "exact"}, + {"exact", "exact", "exact", "*"}, + {"exact", "*", "exact", "*"}, + {"*", "*", "exact", "*"}, + {"exact", "exact", "*", "*"}, + {"exact", "*", "*", "*"}, + {"*", "*", "*", "*"}, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + var input Intentions + for _, v := range tc.Input { + input = append(input, &Intention{ + SourceNS: v[0], + SourceName: v[1], + DestinationNS: v[2], + DestinationName: v[3], + }) + } + + // Sort + sort.Sort(IntentionPrecedenceSorter(input)) + + // Get back into a comparable form + var actual [][]string + for _, v := range input { + actual = append(actual, []string{ + v.SourceNS, + v.SourceName, + v.DestinationNS, + v.DestinationName, + }) + } + if !reflect.DeepEqual(actual, tc.Expected) { + t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", actual, tc.Expected) + } + }) + } +} From 987b7ce0a291a810b3934cb1bc1e9474cc5b8f2a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 2 Mar 2018 12:56:39 -0800 Subject: [PATCH 032/627] agent/consul/state: 
IntentionMatch for performing match resolution --- agent/consul/state/intention.go | 78 +++++++++++++++ agent/consul/state/intention_test.go | 136 +++++++++++++++++++++++++++ 2 files changed, 214 insertions(+) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index ea2ee3fd5..51f4e1e3b 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -2,6 +2,7 @@ package state import ( "fmt" + "sort" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" @@ -192,3 +193,80 @@ func (s *Store) intentionDeleteTxn(tx *memdb.Txn, idx uint64, queryID string) er return nil } + +// IntentionMatch returns the list of intentions that match the namespace and +// name for either a source or destination. This applies the resolution rules +// so wildcards will match any value. +// +// The returned value is the list of intentions in the same order as the +// entries in args. The intentions themselves are sorted based on the +// intention precedence rules. i.e. result[0][0] is the highest precedent +// rule to match for the first entry. +func (s *Store) IntentionMatch(ws memdb.WatchSet, args *structs.IntentionQueryMatch) (uint64, []structs.Intentions, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + // Get the table index. + idx := maxIndexTxn(tx, intentionsTableName) + + // Make all the calls and accumulate the results + results := make([]structs.Intentions, len(args.Entries)) + for i, entry := range args.Entries { + // Each search entry may require multiple queries to memdb, so this + // returns the arguments for each necessary Get. Note on performance: + // this is not the most optimal set of queries since we repeat some + // many times (such as */*). We can work on improving that in the + // future, the test cases shouldn't have to change for that. + getParams, err := s.intentionMatchGetParams(entry) + if err != nil { + return 0, nil, err + } + + // Perform each call and accumulate the result. 
+ var ixns structs.Intentions + for _, params := range getParams { + iter, err := tx.Get(intentionsTableName, string(args.Type), params...) + if err != nil { + return 0, nil, fmt.Errorf("failed intention lookup: %s", err) + } + + ws.Add(iter.WatchCh()) + + for ixn := iter.Next(); ixn != nil; ixn = iter.Next() { + ixns = append(ixns, ixn.(*structs.Intention)) + } + } + + // TODO: filter for uniques + + // Sort the results by precedence + sort.Sort(structs.IntentionPrecedenceSorter(ixns)) + + // Store the result + results[i] = ixns + } + + return idx, results, nil +} + +// intentionMatchGetParams returns the tx.Get parameters to find all the +// intentions for a certain entry. +func (s *Store) intentionMatchGetParams(entry structs.IntentionMatchEntry) ([][]interface{}, error) { + // We always query for "*/*" so include that. If the namespace is a + // wildcard, then we're actually done. + result := make([][]interface{}, 0, 3) + result = append(result, []interface{}{"*", "*"}) + if entry.Namespace == structs.IntentionWildcard { + return result, nil + } + + // Search for NS/* intentions. If we have a wildcard name, then we're done. + result = append(result, []interface{}{entry.Namespace, "*"}) + if entry.Name == structs.IntentionWildcard { + return result, nil + } + + // Search for the exact NS/N value. + result = append(result, []interface{}{entry.Namespace, entry.Name}) + return result, nil +} diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index d1494d5e0..2f4fee26b 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -233,3 +233,139 @@ func TestStore_IntentionsList(t *testing.T) { t.Fatalf("bad: %v", actual) } } + +// Test the matrix of match logic. +// +// Note that this doesn't need to test the intention sort logic exhaustively +// since this is tested in their sort implementation in the structs. 
+func TestStore_IntentionMatch_table(t *testing.T) { + type testCase struct { + Name string + Insert [][]string // List of intentions to insert + Query [][]string // List of intentions to match + Expected [][][]string // List of matches, where each match is a list of intentions + } + + cases := []testCase{ + { + "single exact namespace/name", + [][]string{ + {"foo", "*"}, + {"foo", "bar"}, + {"foo", "baz"}, // shouldn't match + {"bar", "bar"}, // shouldn't match + {"bar", "*"}, // shouldn't match + {"*", "*"}, + }, + [][]string{ + {"foo", "bar"}, + }, + [][][]string{ + { + {"foo", "bar"}, + {"foo", "*"}, + {"*", "*"}, + }, + }, + }, + + { + "multiple exact namespace/name", + [][]string{ + {"foo", "*"}, + {"foo", "bar"}, + {"foo", "baz"}, // shouldn't match + {"bar", "bar"}, + {"bar", "*"}, + }, + [][]string{ + {"foo", "bar"}, + {"bar", "bar"}, + }, + [][][]string{ + { + {"foo", "bar"}, + {"foo", "*"}, + }, + { + {"bar", "bar"}, + {"bar", "*"}, + }, + }, + }, + } + + // testRunner implements the test for a single case, but can be + // parameterized to run for both source and destination so we can + // test both cases. 
+ testRunner := func(t *testing.T, tc testCase, typ structs.IntentionMatchType) { + // Insert the set + s := testStateStore(t) + var idx uint64 = 1 + for _, v := range tc.Insert { + ixn := &structs.Intention{ID: testUUID()} + switch typ { + case structs.IntentionMatchDestination: + ixn.DestinationNS = v[0] + ixn.DestinationName = v[1] + case structs.IntentionMatchSource: + ixn.SourceNS = v[0] + ixn.SourceName = v[1] + } + + err := s.IntentionSet(idx, ixn) + if err != nil { + t.Fatalf("error inserting: %s", err) + } + + idx++ + } + + // Build the arguments + args := &structs.IntentionQueryMatch{Type: typ} + for _, q := range tc.Query { + args.Entries = append(args.Entries, structs.IntentionMatchEntry{ + Namespace: q[0], + Name: q[1], + }) + } + + // Match + _, matches, err := s.IntentionMatch(nil, args) + if err != nil { + t.Fatalf("error matching: %s", err) + } + + // Should have equal lengths + if len(matches) != len(tc.Expected) { + t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", tc.Expected, matches) + } + + // Verify matches + for i, expected := range tc.Expected { + var actual [][]string + for _, ixn := range matches[i] { + switch typ { + case structs.IntentionMatchDestination: + actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) + case structs.IntentionMatchSource: + actual = append(actual, []string{ixn.SourceNS, ixn.SourceName}) + } + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", actual, expected) + } + } + } + + for _, tc := range cases { + t.Run(tc.Name+" (destination)", func(t *testing.T) { + testRunner(t, tc, structs.IntentionMatchDestination) + }) + + t.Run(tc.Name+" (source)", func(t *testing.T) { + testRunner(t, tc, structs.IntentionMatchSource) + }) + } +} From e9d208bcb61f9b46924602ad29096093a15e2597 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 2 Mar 2018 13:40:03 -0800 Subject: [PATCH 033/627] agent/consul: RPC endpoint for Intention.Match --- 
agent/consul/intention_endpoint.go | 30 ++++++++++ agent/consul/intention_endpoint_test.go | 76 +++++++++++++++++++++++++ agent/structs/intention.go | 15 +++-- 3 files changed, 116 insertions(+), 5 deletions(-) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index fc552afd9..d13002722 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -142,3 +142,33 @@ func (s *Intention) List( }, ) } + +// Match returns the set of intentions that match the given source/destination. +func (s *Intention) Match( + args *structs.IntentionQueryRequest, + reply *structs.IndexedIntentionMatches) error { + // Forward if necessary + if done, err := s.srv.forward("Intention.Match", args, args, reply); done { + return err + } + + // TODO(mitchellh): validate + + return s.srv.blockingQuery( + &args.QueryOptions, + &reply.QueryMeta, + func(ws memdb.WatchSet, state *state.Store) error { + index, matches, err := state.IntentionMatch(ws, args.Match) + if err != nil { + return err + } + + reply.Index = index + reply.Matches = matches + + // TODO(mitchellh): acl filtering + + return nil + }, + ) +} diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 53aef35cd..65170ff7b 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -258,3 +258,79 @@ func TestIntentionList(t *testing.T) { } } } + +// Test basic matching. We don't need to exhaustively test inputs since this +// is tested in the agent/consul/state package. 
+func TestIntentionMatch_good(t *testing.T) { + t.Parallel() + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create some records + { + insert := [][]string{ + {"foo", "*"}, + {"foo", "bar"}, + {"foo", "baz"}, // shouldn't match + {"bar", "bar"}, // shouldn't match + {"bar", "*"}, // shouldn't match + {"*", "*"}, + } + + for _, v := range insert { + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: &structs.Intention{ + SourceNS: "default", + SourceName: "test", + DestinationNS: v[0], + DestinationName: v[1], + }, + } + + // Create + var reply string + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + } + } + + // Match + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Match: &structs.IntentionQueryMatch{ + Type: structs.IntentionMatchDestination, + Entries: []structs.IntentionMatchEntry{ + { + Namespace: "foo", + Name: "bar", + }, + }, + }, + } + var resp structs.IndexedIntentionMatches + if err := msgpackrpc.CallWithCodec(codec, "Intention.Match", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + if len(resp.Matches) != 1 { + t.Fatalf("bad: %#v", resp.Matches) + } + + expected := [][]string{{"foo", "bar"}, {"foo", "*"}, {"*", "*"}} + var actual [][]string + for _, ixn := range resp.Matches[0] { + actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", actual, expected) + } +} diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 14b5e0b8e..e2ad2fb92 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -75,6 +75,12 @@ type IndexedIntentions struct { QueryMeta } +// IndexedIntentionMatches represents the list of matches for a match query. 
+type IndexedIntentionMatches struct { + Matches []Intentions + QueryMeta +} + // IntentionOp is the operation for a request related to intentions. type IntentionOp string @@ -123,11 +129,10 @@ type IntentionQueryRequest struct { // IntentionID is the ID of a specific intention. IntentionID string - // MatchBy and MatchNames are used to match a namespace/name pair - // to a set of intentions. The list of MatchNames is an OR list, - // all matching intentions are returned together. - MatchBy IntentionMatchType - MatchNames []string + // Match is non-nil if we're performing a match query. A match will + // find intentions that "match" the given parameters. A match includes + // resolving wildcards. + Match *IntentionQueryMatch // Options for queries QueryOptions From 237da67da524f82e3fc578441a036a7dd110df8f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 2 Mar 2018 14:17:21 -0800 Subject: [PATCH 034/627] agent: GET /v1/connect/intentions/match --- agent/http_oss.go | 1 + agent/intentions_endpoint.go | 89 ++++++++++++++++ agent/intentions_endpoint_test.go | 167 +++++++++++++++++++++++++++++- 3 files changed, 256 insertions(+), 1 deletion(-) diff --git a/agent/http_oss.go b/agent/http_oss.go index 0170a0075..d3bb7adc4 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -40,6 +40,7 @@ func init() { registerEndpoint("/v1/catalog/service/", []string{"GET"}, (*HTTPServer).CatalogServiceNodes) registerEndpoint("/v1/catalog/node/", []string{"GET"}, (*HTTPServer).CatalogNodeServices) registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionEndpoint) + registerEndpoint("/v1/connect/intentions/match", []string{"GET"}, (*HTTPServer).IntentionMatch) registerEndpoint("/v1/connect/intentions/", []string{"GET"}, (*HTTPServer).IntentionSpecific) registerEndpoint("/v1/coordinate/datacenters", []string{"GET"}, (*HTTPServer).CoordinateDatacenters) registerEndpoint("/v1/coordinate/nodes", []string{"GET"}, (*HTTPServer).CoordinateNodes) 
diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 40a9f2282..a28488c3d 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -67,6 +67,70 @@ func (s *HTTPServer) IntentionCreate(resp http.ResponseWriter, req *http.Request return intentionCreateResponse{reply}, nil } +// GET /v1/connect/intentions/match +func (s *HTTPServer) IntentionMatch(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Test the method + if req.Method != "GET" { + return nil, MethodNotAllowedError{req.Method, []string{"GET"}} + } + + // Prepare args + args := &structs.IntentionQueryRequest{Match: &structs.IntentionQueryMatch{}} + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { + return nil, nil + } + + q := req.URL.Query() + + // Extract the "by" query parameter + if by, ok := q["by"]; !ok || len(by) != 1 { + return nil, fmt.Errorf("required query parameter 'by' not set") + } else { + switch v := structs.IntentionMatchType(by[0]); v { + case structs.IntentionMatchSource, structs.IntentionMatchDestination: + args.Match.Type = v + default: + return nil, fmt.Errorf("'by' parameter must be one of 'source' or 'destination'") + } + } + + // Extract all the match names + names, ok := q["name"] + if !ok || len(names) == 0 { + return nil, fmt.Errorf("required query parameter 'name' not set") + } + + // Build the entries in order. The order matters since that is the + // order of the returned responses. 
+ args.Match.Entries = make([]structs.IntentionMatchEntry, len(names)) + for i, n := range names { + entry, err := parseIntentionMatchEntry(n) + if err != nil { + return nil, fmt.Errorf("name %q is invalid: %s", n, err) + } + + args.Match.Entries[i] = entry + } + + var reply structs.IndexedIntentionMatches + if err := s.agent.RPC("Intention.Match", args, &reply); err != nil { + return nil, err + } + + // We must have an identical count of matches + if len(reply.Matches) != len(names) { + return nil, fmt.Errorf("internal error: match response count didn't match input count") + } + + // Use empty list instead of nil. + response := make(map[string]structs.Intentions) + for i, ixns := range reply.Matches { + response[names[i]] = ixns + } + + return response, nil +} + // IntentionSpecific handles the endpoint for /v1/connection/intentions/:id func (s *HTTPServer) IntentionSpecific(resp http.ResponseWriter, req *http.Request) (interface{}, error) { id := strings.TrimPrefix(req.URL.Path, "/v1/connect/intentions/") @@ -161,3 +225,28 @@ func (s *HTTPServer) IntentionSpecificDelete(id string, resp http.ResponseWriter // intentionCreateResponse is the response structure for creating an intention. type intentionCreateResponse struct{ ID string } + +// parseIntentionMatchEntry parses the query parameter for an intention +// match query entry. +func parseIntentionMatchEntry(input string) (structs.IntentionMatchEntry, error) { + var result structs.IntentionMatchEntry + + // TODO(mitchellh): when namespaces are introduced, set the default + // namespace to be the namespace of the requestor. + + // Get the index to the '/'. If it doesn't exist, we have just a name + // so just set that and return. 
+ idx := strings.IndexByte(input, '/') + if idx == -1 { + result.Name = input + return result, nil + } + + result.Namespace = input[:idx] + result.Name = input[idx+1:] + if strings.IndexByte(result.Name, '/') != -1 { + return result, fmt.Errorf("input can contain at most one '/'") + } + + return result, nil +} diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index c3753ea97..7e83846aa 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -72,6 +72,125 @@ func TestIntentionsList_values(t *testing.T) { } } +func TestIntentionsMatch_basic(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Create some intentions + { + insert := [][]string{ + {"foo", "*"}, + {"foo", "bar"}, + {"foo", "baz"}, // shouldn't match + {"bar", "bar"}, // shouldn't match + {"bar", "*"}, // shouldn't match + {"*", "*"}, + } + + for _, v := range insert { + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: &structs.Intention{ + SourceNS: "default", + SourceName: "test", + DestinationNS: v[0], + DestinationName: v[1], + }, + } + + // Create + var reply string + if err := a.RPC("Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + } + } + + // Request + req, _ := http.NewRequest("GET", + "/v1/connect/intentions/match?by=destination&name=foo/bar", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionMatch(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + + value := obj.(map[string]structs.Intentions) + if len(value) != 1 { + t.Fatalf("bad: %v", value) + } + + var actual [][]string + expected := [][]string{{"foo", "bar"}, {"foo", "*"}, {"*", "*"}} + for _, ixn := range value["foo/bar"] { + actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", actual, expected) + } +} + 
+func TestIntentionsMatch_noBy(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Request + req, _ := http.NewRequest("GET", + "/v1/connect/intentions/match?name=foo/bar", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionMatch(resp, req) + if err == nil || !strings.Contains(err.Error(), "by") { + t.Fatalf("err: %v", err) + } + if obj != nil { + t.Fatal("should have no response") + } +} + +func TestIntentionsMatch_byInvalid(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Request + req, _ := http.NewRequest("GET", + "/v1/connect/intentions/match?by=datacenter", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionMatch(resp, req) + if err == nil || !strings.Contains(err.Error(), "'by' parameter") { + t.Fatalf("err: %v", err) + } + if obj != nil { + t.Fatal("should have no response") + } +} + +func TestIntentionsMatch_noName(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Request + req, _ := http.NewRequest("GET", + "/v1/connect/intentions/match?by=source", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionMatch(resp, req) + if err == nil || !strings.Contains(err.Error(), "'name' not set") { + t.Fatalf("err: %v", err) + } + if obj != nil { + t.Fatal("should have no response") + } +} + func TestIntentionsCreate_good(t *testing.T) { t.Parallel() @@ -273,5 +392,51 @@ func TestIntentionsSpecificDelete_good(t *testing.T) { t.Fatalf("err: %v", err) } } - +} + +func TestParseIntentionMatchEntry(t *testing.T) { + cases := []struct { + Input string + Expected structs.IntentionMatchEntry + Err bool + }{ + { + "foo", + structs.IntentionMatchEntry{ + Name: "foo", + }, + false, + }, + + { + "foo/bar", + structs.IntentionMatchEntry{ + Namespace: "foo", + Name: "bar", + }, + false, + }, + + { + "foo/bar/baz", + structs.IntentionMatchEntry{}, + true, + }, + } + + for _, tc := range cases { + 
t.Run(tc.Input, func(t *testing.T) { + actual, err := parseIntentionMatchEntry(tc.Input) + if (err != nil) != tc.Err { + t.Fatalf("err: %s", err) + } + if err != nil { + return + } + + if !reflect.DeepEqual(actual, tc.Expected) { + t.Fatalf("bad: %#v", actual) + } + }) + } } From e630d65d9dadce73a591476da4ad06dac3e7195f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 3 Mar 2018 08:43:19 -0800 Subject: [PATCH 035/627] agent/consul: set CreatedAt, UpdatedAt on intentions --- agent/consul/intention_endpoint.go | 11 +++++ agent/consul/intention_endpoint_test.go | 61 +++++++++++++++++++++++++ agent/consul/state/intention.go | 4 +- agent/consul/state/intention_test.go | 33 +++++++++++++ 4 files changed, 108 insertions(+), 1 deletion(-) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index d13002722..70ad25183 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -33,6 +33,11 @@ func (s *Intention) Apply( defer metrics.MeasureSince([]string{"consul", "intention", "apply"}, time.Now()) defer metrics.MeasureSince([]string{"intention", "apply"}, time.Now()) + // Always set a non-nil intention to avoid nil-access below + if args.Intention == nil { + args.Intention = &structs.Intention{} + } + // If no ID is provided, generate a new ID. This must be done prior to // appending to the Raft log, because the ID is not deterministic. Once // the entry is in the log, the state update MUST be deterministic or @@ -60,6 +65,9 @@ func (s *Intention) Apply( break } } + + // Set the created at + args.Intention.CreatedAt = time.Now() } *reply = args.Intention.ID @@ -75,6 +83,9 @@ func (s *Intention) Apply( } } + // We always update the updatedat field. This has no effect for deletion. 
+ args.Intention.UpdatedAt = time.Now() + // Commit resp, err := s.srv.raftApply(structs.IntentionRequestType, args) if err != nil { diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 65170ff7b..8ff584d75 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -5,6 +5,7 @@ import ( "reflect" "strings" "testing" + "time" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testrpc" @@ -32,6 +33,9 @@ func TestIntentionApply_new(t *testing.T) { } var reply string + // Record now to check created at time + now := time.Now() + // Create if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { t.Fatalf("err: %v", err) @@ -58,7 +62,26 @@ func TestIntentionApply_new(t *testing.T) { if resp.Index != actual.ModifyIndex { t.Fatalf("bad index: %d", resp.Index) } + + // Test CreatedAt + { + timeDiff := actual.CreatedAt.Sub(now) + if timeDiff < 0 || timeDiff > 5*time.Second { + t.Fatalf("should set created at: %s", actual.CreatedAt) + } + } + + // Test UpdatedAt + { + timeDiff := actual.UpdatedAt.Sub(now) + if timeDiff < 0 || timeDiff > 5*time.Second { + t.Fatalf("should set updated at: %s", actual.CreatedAt) + } + } + actual.CreateIndex, actual.ModifyIndex = 0, 0 + actual.CreatedAt = ixn.Intention.CreatedAt + actual.UpdatedAt = ixn.Intention.UpdatedAt if !reflect.DeepEqual(actual, ixn.Intention) { t.Fatalf("bad: %v", actual) } @@ -123,6 +146,28 @@ func TestIntentionApply_updateGood(t *testing.T) { t.Fatal("reply should be non-empty") } + // Read CreatedAt + var createdAt time.Time + ixn.Intention.ID = reply + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: ixn.Intention.ID, + } + var resp structs.IndexedIntentions + if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if len(resp.Intentions) != 1 { + t.Fatalf("bad: %v", resp) + } + actual 
:= resp.Intentions[0] + createdAt = actual.CreatedAt + } + + // Sleep a bit so that the updated at will definitely be different, not much + time.Sleep(1 * time.Millisecond) + // Update ixn.Op = structs.IntentionOpUpdate ixn.Intention.ID = reply @@ -146,7 +191,23 @@ func TestIntentionApply_updateGood(t *testing.T) { t.Fatalf("bad: %v", resp) } actual := resp.Intentions[0] + + // Test CreatedAt + if !actual.CreatedAt.Equal(createdAt) { + t.Fatalf("should not modify created at: %s", actual.CreatedAt) + } + + // Test UpdatedAt + { + timeDiff := actual.UpdatedAt.Sub(createdAt) + if timeDiff <= 0 || timeDiff > 5*time.Second { + t.Fatalf("should set updated at: %s", actual.CreatedAt) + } + } + actual.CreateIndex, actual.ModifyIndex = 0, 0 + actual.CreatedAt = ixn.Intention.CreatedAt + actual.UpdatedAt = ixn.Intention.UpdatedAt if !reflect.DeepEqual(actual, ixn.Intention) { t.Fatalf("bad: %v", actual) } diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index 51f4e1e3b..ba3871ea5 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -117,7 +117,9 @@ func (s *Store) intentionSetTxn(tx *memdb.Txn, idx uint64, ixn *structs.Intentio return fmt.Errorf("failed intention looup: %s", err) } if existing != nil { - ixn.CreateIndex = existing.(*structs.Intention).CreateIndex + oldIxn := existing.(*structs.Intention) + ixn.CreateIndex = oldIxn.CreateIndex + ixn.CreatedAt = oldIxn.CreatedAt } else { ixn.CreateIndex = idx } diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index 2f4fee26b..1bfb6d248 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -3,6 +3,7 @@ package state import ( "reflect" "testing" + "time" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" @@ -121,6 +122,38 @@ func TestStore_IntentionSet_emptyId(t *testing.T) { } } +func TestStore_IntentionSet_updateCreatedAt(t *testing.T) { + s := 
testStateStore(t) + + // Build a valid intention + now := time.Now() + ixn := structs.Intention{ + ID: testUUID(), + CreatedAt: now, + } + + // Insert + if err := s.IntentionSet(1, &ixn); err != nil { + t.Fatalf("err: %s", err) + } + + // Change a value and test updating + ixnUpdate := ixn + ixnUpdate.CreatedAt = now.Add(10 * time.Second) + if err := s.IntentionSet(2, &ixnUpdate); err != nil { + t.Fatalf("err: %s", err) + } + + // Read it back and verify + _, actual, err := s.IntentionGet(nil, ixn.ID) + if err != nil { + t.Fatalf("err: %s", err) + } + if !actual.CreatedAt.Equal(now) { + t.Fatalf("bad: %#v", actual) + } +} + func TestStore_IntentionDelete(t *testing.T) { s := testStateStore(t) From 2b047fb09be75788fe6085d5226d9cef0cf8f5cb Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 3 Mar 2018 08:51:40 -0800 Subject: [PATCH 036/627] agent,agent/consul: set default namespaces --- agent/consul/intention_endpoint.go | 8 ++++++++ agent/consul/intention_endpoint_test.go | 10 ++++++++-- agent/intentions_endpoint.go | 1 + agent/intentions_endpoint_test.go | 11 +++++++++-- agent/structs/intention.go | 7 +++++++ 5 files changed, 33 insertions(+), 4 deletions(-) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 70ad25183..434c90628 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -86,6 +86,14 @@ func (s *Intention) Apply( // We always update the updatedat field. This has no effect for deletion. 
args.Intention.UpdatedAt = time.Now() + // Until we support namespaces, we force all namespaces to be default + if args.Intention.SourceNS == "" { + args.Intention.SourceNS = structs.IntentionDefaultNamespace + } + if args.Intention.DestinationNS == "" { + args.Intention.DestinationNS = structs.IntentionDefaultNamespace + } + // Commit resp, err := s.srv.raftApply(structs.IntentionRequestType, args) if err != nil { diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 8ff584d75..21f112f48 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -28,7 +28,10 @@ func TestIntentionApply_new(t *testing.T) { Datacenter: "dc1", Op: structs.IntentionOpCreate, Intention: &structs.Intention{ - SourceName: "test", + SourceNS: structs.IntentionDefaultNamespace, + SourceName: "test", + DestinationNS: structs.IntentionDefaultNamespace, + DestinationName: "test", }, } var reply string @@ -133,7 +136,10 @@ func TestIntentionApply_updateGood(t *testing.T) { Datacenter: "dc1", Op: structs.IntentionOpCreate, Intention: &structs.Intention{ - SourceName: "test", + SourceNS: structs.IntentionDefaultNamespace, + SourceName: "test", + DestinationNS: structs.IntentionDefaultNamespace, + DestinationName: "test", }, } var reply string diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index a28488c3d..39cf3e50b 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -230,6 +230,7 @@ type intentionCreateResponse struct{ ID string } // match query entry. func parseIntentionMatchEntry(input string) (structs.IntentionMatchEntry, error) { var result structs.IntentionMatchEntry + result.Namespace = structs.IntentionDefaultNamespace // TODO(mitchellh): when namespaces are introduced, set the default // namespace to be the namespace of the requestor. 
diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index 7e83846aa..a1c0413ed 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -238,7 +238,12 @@ func TestIntentionsSpecificGet_good(t *testing.T) { defer a.Shutdown() // The intention - ixn := &structs.Intention{SourceName: "foo"} + ixn := &structs.Intention{ + SourceNS: structs.IntentionDefaultNamespace, + SourceName: "foo", + DestinationNS: structs.IntentionDefaultNamespace, + DestinationName: "bar", + } // Create an intention directly var reply string @@ -268,6 +273,7 @@ func TestIntentionsSpecificGet_good(t *testing.T) { ixn.ID = value.ID ixn.RaftIndex = value.RaftIndex + ixn.CreatedAt, ixn.UpdatedAt = value.CreatedAt, value.UpdatedAt if !reflect.DeepEqual(value, ixn) { t.Fatalf("bad (got, want):\n\n%#v\n\n%#v", value, ixn) } @@ -403,7 +409,8 @@ func TestParseIntentionMatchEntry(t *testing.T) { { "foo", structs.IntentionMatchEntry{ - Name: "foo", + Namespace: structs.IntentionDefaultNamespace, + Name: "foo", }, false, }, diff --git a/agent/structs/intention.go b/agent/structs/intention.go index e2ad2fb92..b60c36625 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -7,6 +7,13 @@ import ( const ( // IntentionWildcard is the wildcard value. IntentionWildcard = "*" + + // IntentionDefaultNamespace is the default namespace value. + // NOTE(mitchellh): This is only meant to be a temporary constant. + // When namespaces are introduced, we should delete this constant and + // fix up all the places where this was used with the proper namespace + // value. + IntentionDefaultNamespace = "default" ) // Intention defines an intention for the Connect Service Graph. 
This defines From e81d1c88b7dc41439353de4ad5045e36f01a8435 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 3 Mar 2018 08:59:17 -0800 Subject: [PATCH 037/627] agent/consul/fsm: add tests for intention requests --- agent/consul/fsm/commands_oss_test.go | 102 ++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index ccf58a47f..b679cb313 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -1148,3 +1148,105 @@ func TestFSM_Autopilot(t *testing.T) { t.Fatalf("bad: %v", config.CleanupDeadServers) } } + +func TestFSM_Intention_CRUD(t *testing.T) { + t.Parallel() + + fsm, err := New(nil, os.Stderr) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Create a new intention. + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: &structs.Intention{ + ID: generateUUID(), + SourceNS: "default", + SourceName: "web", + DestinationNS: "default", + DestinationName: "db", + }, + } + + { + buf, err := structs.Encode(structs.IntentionRequestType, ixn) + if err != nil { + t.Fatalf("err: %v", err) + } + resp := fsm.Apply(makeLog(buf)) + if resp != nil { + t.Fatalf("resp: %v", resp) + } + } + + // Verify it's in the state store. 
+ { + _, actual, err := fsm.state.IntentionGet(nil, ixn.Intention.ID) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual.CreateIndex, actual.ModifyIndex = 0, 0 + actual.CreatedAt = ixn.Intention.CreatedAt + actual.UpdatedAt = ixn.Intention.UpdatedAt + if !reflect.DeepEqual(actual, ixn.Intention) { + t.Fatalf("bad: %v", actual) + } + } + + // Make an update + ixn.Op = structs.IntentionOpUpdate + ixn.Intention.SourceName = "api" + { + buf, err := structs.Encode(structs.IntentionRequestType, ixn) + if err != nil { + t.Fatalf("err: %v", err) + } + resp := fsm.Apply(makeLog(buf)) + if resp != nil { + t.Fatalf("resp: %v", resp) + } + } + + // Verify the update. + { + _, actual, err := fsm.state.IntentionGet(nil, ixn.Intention.ID) + if err != nil { + t.Fatalf("err: %s", err) + } + + actual.CreateIndex, actual.ModifyIndex = 0, 0 + actual.CreatedAt = ixn.Intention.CreatedAt + actual.UpdatedAt = ixn.Intention.UpdatedAt + if !reflect.DeepEqual(actual, ixn.Intention) { + t.Fatalf("bad: %v", actual) + } + } + + // Delete + ixn.Op = structs.IntentionOpDelete + { + buf, err := structs.Encode(structs.IntentionRequestType, ixn) + if err != nil { + t.Fatalf("err: %v", err) + } + resp := fsm.Apply(makeLog(buf)) + if resp != nil { + t.Fatalf("resp: %v", resp) + } + } + + // Make sure it's gone. 
+ { + _, actual, err := fsm.state.IntentionGet(nil, ixn.Intention.ID) + if err != nil { + t.Fatalf("err: %s", err) + } + + if actual != nil { + t.Fatalf("bad: %v", actual) + } + } +} From d34ee200de1aee7e3c600e1104545b0275b27313 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 3 Mar 2018 09:16:26 -0800 Subject: [PATCH 038/627] agent/consul: support intention description, meta is non-nil --- agent/consul/state/intention.go | 6 ++++ agent/consul/state/intention_test.go | 47 ++++++++++++++++++++++++++++ agent/structs/intention.go | 5 +++ 3 files changed, 58 insertions(+) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index ba3871ea5..b737318ee 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -125,6 +125,12 @@ func (s *Store) intentionSetTxn(tx *memdb.Txn, idx uint64, ixn *structs.Intentio } ixn.ModifyIndex = idx + // We always force meta to be non-nil so that we its an empty map. + // This makes it easy for API responses to not nil-check this everywhere. 
+ if ixn.Meta == nil { + ixn.Meta = make(map[string]string) + } + // Insert if err := tx.Insert(intentionsTableName, ixn); err != nil { return err diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index 1bfb6d248..dd7d9fcdf 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -154,6 +154,53 @@ func TestStore_IntentionSet_updateCreatedAt(t *testing.T) { } } +func TestStore_IntentionSet_metaNil(t *testing.T) { + s := testStateStore(t) + + // Build a valid intention + ixn := structs.Intention{ + ID: testUUID(), + } + + // Insert + if err := s.IntentionSet(1, &ixn); err != nil { + t.Fatalf("err: %s", err) + } + + // Read it back and verify + _, actual, err := s.IntentionGet(nil, ixn.ID) + if err != nil { + t.Fatalf("err: %s", err) + } + if actual.Meta == nil { + t.Fatal("meta should be non-nil") + } +} + +func TestStore_IntentionSet_metaSet(t *testing.T) { + s := testStateStore(t) + + // Build a valid intention + ixn := structs.Intention{ + ID: testUUID(), + Meta: map[string]string{"foo": "bar"}, + } + + // Insert + if err := s.IntentionSet(1, &ixn); err != nil { + t.Fatalf("err: %s", err) + } + + // Read it back and verify + _, actual, err := s.IntentionGet(nil, ixn.ID) + if err != nil { + t.Fatalf("err: %s", err) + } + if !reflect.DeepEqual(actual.Meta, ixn.Meta) { + t.Fatalf("bad: %#v", actual) + } +} + func TestStore_IntentionDelete(t *testing.T) { s := testStateStore(t) diff --git a/agent/structs/intention.go b/agent/structs/intention.go index b60c36625..0a7d8c5d4 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -23,6 +23,11 @@ type Intention struct { // ID is the UUID-based ID for the intention, always generated by Consul. ID string + // Description is a human-friendly description of this intention. + // It is opaque to Consul and is only stored and transferred in API + // requests. 
+ Description string + // SourceNS, SourceName are the namespace and name, respectively, of // the source service. Either of these may be the wildcard "*", but only // the full value can be a wildcard. Partial wildcards are not allowed. From 8e2462e3019477b7b80521d31a37fc24e6230a3e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 3 Mar 2018 09:43:37 -0800 Subject: [PATCH 039/627] agent/structs: Intention validation --- agent/consul/intention_endpoint.go | 5 ++ agent/consul/intention_endpoint_test.go | 13 ++- agent/structs/intention.go | 89 ++++++++++++++++++++ agent/structs/intention_test.go | 107 ++++++++++++++++++++++++ agent/structs/testing_intention.go | 15 ++++ 5 files changed, 227 insertions(+), 2 deletions(-) create mode 100644 agent/structs/testing_intention.go diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 434c90628..56f310e18 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -94,6 +94,11 @@ func (s *Intention) Apply( args.Intention.DestinationNS = structs.IntentionDefaultNamespace } + // Validate + if err := args.Intention.Validate(); err != nil { + return err + } + // Commit resp, err := s.srv.raftApply(structs.IntentionRequestType, args) if err != nil { diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 21f112f48..2ee49f207 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -32,6 +32,8 @@ func TestIntentionApply_new(t *testing.T) { SourceName: "test", DestinationNS: structs.IntentionDefaultNamespace, DestinationName: "test", + Action: structs.IntentionActionAllow, + Meta: map[string]string{}, }, } var reply string @@ -86,7 +88,7 @@ func TestIntentionApply_new(t *testing.T) { actual.CreatedAt = ixn.Intention.CreatedAt actual.UpdatedAt = ixn.Intention.UpdatedAt if !reflect.DeepEqual(actual, ixn.Intention) { - t.Fatalf("bad: %v", actual) + 
t.Fatalf("bad:\n\n%#v\n\n%#v", actual, ixn.Intention) } } } @@ -140,6 +142,8 @@ func TestIntentionApply_updateGood(t *testing.T) { SourceName: "test", DestinationNS: structs.IntentionDefaultNamespace, DestinationName: "test", + Action: structs.IntentionActionAllow, + Meta: map[string]string{}, }, } var reply string @@ -265,7 +269,11 @@ func TestIntentionApply_deleteGood(t *testing.T) { Datacenter: "dc1", Op: structs.IntentionOpCreate, Intention: &structs.Intention{ - SourceName: "test", + SourceNS: "test", + SourceName: "test", + DestinationNS: "test", + DestinationName: "test", + Action: structs.IntentionActionAllow, }, } var reply string @@ -358,6 +366,7 @@ func TestIntentionMatch_good(t *testing.T) { SourceName: "test", DestinationNS: v[0], DestinationName: v[1], + Action: structs.IntentionActionAllow, }, } diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 0a7d8c5d4..a8da939f7 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -1,7 +1,11 @@ package structs import ( + "fmt" + "strings" "time" + + "github.com/hashicorp/go-multierror" ) const ( @@ -61,6 +65,91 @@ type Intention struct { RaftIndex } +// Validate returns an error if the intention is invalid for inserting +// or updating. 
+func (x *Intention) Validate() error { + var result error + + // Empty values + if x.SourceNS == "" { + result = multierror.Append(result, fmt.Errorf("SourceNS must be set")) + } + if x.SourceName == "" { + result = multierror.Append(result, fmt.Errorf("SourceName must be set")) + } + if x.DestinationNS == "" { + result = multierror.Append(result, fmt.Errorf("DestinationNS must be set")) + } + if x.DestinationName == "" { + result = multierror.Append(result, fmt.Errorf("DestinationName must be set")) + } + + // Wildcard usage verification + if x.SourceNS != IntentionWildcard { + if strings.Contains(x.SourceNS, IntentionWildcard) { + result = multierror.Append(result, fmt.Errorf( + "SourceNS: wildcard character '*' cannot be used with partial values")) + } + } + if x.SourceName != IntentionWildcard { + if strings.Contains(x.SourceName, IntentionWildcard) { + result = multierror.Append(result, fmt.Errorf( + "SourceName: wildcard character '*' cannot be used with partial values")) + } + + if x.SourceNS == IntentionWildcard { + result = multierror.Append(result, fmt.Errorf( + "SourceName: exact value cannot follow wildcard namespace")) + } + } + if x.DestinationNS != IntentionWildcard { + if strings.Contains(x.DestinationNS, IntentionWildcard) { + result = multierror.Append(result, fmt.Errorf( + "DestinationNS: wildcard character '*' cannot be used with partial values")) + } + } + if x.DestinationName != IntentionWildcard { + if strings.Contains(x.DestinationName, IntentionWildcard) { + result = multierror.Append(result, fmt.Errorf( + "DestinationName: wildcard character '*' cannot be used with partial values")) + } + + if x.DestinationNS == IntentionWildcard { + result = multierror.Append(result, fmt.Errorf( + "DestinationName: exact value cannot follow wildcard namespace")) + } + } + + // Length of opaque values + if len(x.Description) > metaValueMaxLength { + result = multierror.Append(result, fmt.Errorf( + "Description exceeds maximum length %d", 
metaValueMaxLength)) + } + if len(x.Meta) > metaMaxKeyPairs { + result = multierror.Append(result, fmt.Errorf( + "Meta exceeds maximum element count %d", metaMaxKeyPairs)) + } + for k, v := range x.Meta { + if len(k) > metaKeyMaxLength { + result = multierror.Append(result, fmt.Errorf( + "Meta key %q exceeds maximum length %d", k, metaKeyMaxLength)) + } + if len(v) > metaValueMaxLength { + result = multierror.Append(result, fmt.Errorf( + "Meta value for key %q exceeds maximum length %d", k, metaValueMaxLength)) + } + } + + switch x.Action { + case IntentionActionAllow, IntentionActionDeny: + default: + result = multierror.Append(result, fmt.Errorf( + "Action must be set to 'allow' or 'deny'")) + } + + return result +} + // IntentionAction is the action that the intention represents. This // can be "allow" or "deny" to whitelist or blacklist intentions. type IntentionAction string diff --git a/agent/structs/intention_test.go b/agent/structs/intention_test.go index 19ac5811a..500b24d5a 100644 --- a/agent/structs/intention_test.go +++ b/agent/structs/intention_test.go @@ -3,9 +3,116 @@ package structs import ( "reflect" "sort" + "strings" "testing" ) +func TestIntentionValidate(t *testing.T) { + cases := []struct { + Name string + Modify func(*Intention) + Err string + }{ + { + "long description", + func(x *Intention) { + x.Description = strings.Repeat("x", metaValueMaxLength+1) + }, + "description exceeds", + }, + + { + "no action set", + func(x *Intention) { x.Action = "" }, + "action must be set", + }, + + { + "no SourceNS", + func(x *Intention) { x.SourceNS = "" }, + "SourceNS must be set", + }, + + { + "no SourceName", + func(x *Intention) { x.SourceName = "" }, + "SourceName must be set", + }, + + { + "no DestinationNS", + func(x *Intention) { x.DestinationNS = "" }, + "DestinationNS must be set", + }, + + { + "no DestinationName", + func(x *Intention) { x.DestinationName = "" }, + "DestinationName must be set", + }, + + { + "SourceNS partial wildcard", + func(x 
*Intention) { x.SourceNS = "foo*" }, + "partial value", + }, + + { + "SourceName partial wildcard", + func(x *Intention) { x.SourceName = "foo*" }, + "partial value", + }, + + { + "SourceName exact following wildcard", + func(x *Intention) { + x.SourceNS = "*" + x.SourceName = "foo" + }, + "follow wildcard", + }, + + { + "DestinationNS partial wildcard", + func(x *Intention) { x.DestinationNS = "foo*" }, + "partial value", + }, + + { + "DestinationName partial wildcard", + func(x *Intention) { x.DestinationName = "foo*" }, + "partial value", + }, + + { + "DestinationName exact following wildcard", + func(x *Intention) { + x.DestinationNS = "*" + x.DestinationName = "foo" + }, + "follow wildcard", + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + ixn := TestIntention(t) + tc.Modify(ixn) + + err := ixn.Validate() + if (err != nil) != (tc.Err != "") { + t.Fatalf("err: %s", err) + } + if err == nil { + return + } + if !strings.Contains(strings.ToLower(err.Error()), strings.ToLower(tc.Err)) { + t.Fatalf("err: %s", err) + } + }) + } +} + func TestIntentionPrecedenceSorter(t *testing.T) { cases := []struct { Name string diff --git a/agent/structs/testing_intention.go b/agent/structs/testing_intention.go new file mode 100644 index 000000000..b653c1174 --- /dev/null +++ b/agent/structs/testing_intention.go @@ -0,0 +1,15 @@ +package structs + +import ( + "github.com/mitchellh/go-testing-interface" +) + +// TestIntention returns a valid, uninserted (no ID set) intention. 
+func TestIntention(t testing.T) *Intention { + return &Intention{ + SourceNS: "eng", + SourceName: "api", + DestinationNS: "eng", + DestinationName: "db", + } +} From 04bd4af99c92253c9db264cd992caa5f40d44a5f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 3 Mar 2018 09:55:27 -0800 Subject: [PATCH 040/627] agent/consul: set default intention SourceType, validate it --- agent/consul/intention_endpoint.go | 5 +++ agent/consul/intention_endpoint_test.go | 57 +++++++++++++++++++++++++ agent/structs/intention.go | 7 +++ agent/structs/intention_test.go | 12 ++++++ agent/structs/testing_intention.go | 2 + 5 files changed, 83 insertions(+) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 56f310e18..c3723d708 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -86,6 +86,11 @@ func (s *Intention) Apply( // We always update the updatedat field. This has no effect for deletion. args.Intention.UpdatedAt = time.Now() + // Default source type + if args.Intention.SourceType == "" { + args.Intention.SourceType = structs.IntentionSourceConsul + } + // Until we support namespaces, we force all namespaces to be default if args.Intention.SourceNS == "" { args.Intention.SourceNS = structs.IntentionDefaultNamespace diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 2ee49f207..3c38e695a 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -33,6 +33,7 @@ func TestIntentionApply_new(t *testing.T) { DestinationNS: structs.IntentionDefaultNamespace, DestinationName: "test", Action: structs.IntentionActionAllow, + SourceType: structs.IntentionSourceConsul, Meta: map[string]string{}, }, } @@ -93,6 +94,61 @@ func TestIntentionApply_new(t *testing.T) { } } +// Test the source type defaults +func TestIntentionApply_defaultSourceType(t *testing.T) { + t.Parallel() + dir1, s1 := testServer(t) + defer 
os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: &structs.Intention{ + SourceNS: structs.IntentionDefaultNamespace, + SourceName: "test", + DestinationNS: structs.IntentionDefaultNamespace, + DestinationName: "test", + Action: structs.IntentionActionAllow, + }, + } + var reply string + + // Create + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + if reply == "" { + t.Fatal("reply should be non-empty") + } + + // Read + ixn.Intention.ID = reply + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: ixn.Intention.ID, + } + var resp structs.IndexedIntentions + if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if len(resp.Intentions) != 1 { + t.Fatalf("bad: %v", resp) + } + + actual := resp.Intentions[0] + if actual.SourceType != structs.IntentionSourceConsul { + t.Fatalf("bad:\n\n%#v\n\n%#v", actual, ixn.Intention) + } + } +} + // Shouldn't be able to create with an ID set func TestIntentionApply_createWithID(t *testing.T) { t.Parallel() @@ -143,6 +199,7 @@ func TestIntentionApply_updateGood(t *testing.T) { DestinationNS: structs.IntentionDefaultNamespace, DestinationName: "test", Action: structs.IntentionActionAllow, + SourceType: structs.IntentionSourceConsul, Meta: map[string]string{}, }, } diff --git a/agent/structs/intention.go b/agent/structs/intention.go index a8da939f7..579fef6c1 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -147,6 +147,13 @@ func (x *Intention) Validate() error { "Action must be set to 'allow' or 'deny'")) } + switch x.SourceType { + case IntentionSourceConsul: + default: + result = multierror.Append(result, fmt.Errorf( + 
"SourceType must be set to 'consul'")) + } + return result } diff --git a/agent/structs/intention_test.go b/agent/structs/intention_test.go index 500b24d5a..ec0a2de66 100644 --- a/agent/structs/intention_test.go +++ b/agent/structs/intention_test.go @@ -92,6 +92,18 @@ func TestIntentionValidate(t *testing.T) { }, "follow wildcard", }, + + { + "SourceType is not set", + func(x *Intention) { x.SourceType = "" }, + "SourceType must", + }, + + { + "SourceType is other", + func(x *Intention) { x.SourceType = IntentionSourceType("other") }, + "SourceType must", + }, } for _, tc := range cases { diff --git a/agent/structs/testing_intention.go b/agent/structs/testing_intention.go index b653c1174..930e27869 100644 --- a/agent/structs/testing_intention.go +++ b/agent/structs/testing_intention.go @@ -11,5 +11,7 @@ func TestIntention(t testing.T) *Intention { SourceName: "api", DestinationNS: "eng", DestinationName: "db", + Action: IntentionActionAllow, + SourceType: IntentionSourceConsul, } } From 37f66e47ed2824331e48a9fd423ee2541bda80cc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 3 Mar 2018 10:12:05 -0800 Subject: [PATCH 041/627] agent: use testing intention to get valid intentions --- agent/consul/intention_endpoint.go | 9 ++++++--- agent/intentions_endpoint.go | 8 +++++++- agent/intentions_endpoint_test.go | 28 ++++++++++++---------------- agent/structs/testing_intention.go | 1 + 4 files changed, 26 insertions(+), 20 deletions(-) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index c3723d708..825c89c59 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -99,9 +99,12 @@ func (s *Intention) Apply( args.Intention.DestinationNS = structs.IntentionDefaultNamespace } - // Validate - if err := args.Intention.Validate(); err != nil { - return err + // Validate. We do not validate on delete since it is valid to only + // send an ID in that case. 
+ if args.Op != structs.IntentionOpDelete { + if err := args.Intention.Validate(); err != nil { + return err + } } // Commit diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 39cf3e50b..72a1cba67 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -173,7 +173,13 @@ func (s *HTTPServer) IntentionSpecificGet(id string, resp http.ResponseWriter, r return nil, err } - // TODO: validate length + // This shouldn't happen since the called API documents it shouldn't, + // but we check since the alternative if it happens is a panic. + if len(reply.Intentions) == 0 { + resp.WriteHeader(http.StatusNotFound) + return nil, nil + } + return reply.Intentions[0], nil } diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index a1c0413ed..c236491e8 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -43,8 +43,10 @@ func TestIntentionsList_values(t *testing.T) { req := structs.IntentionRequest{ Datacenter: "dc1", Op: structs.IntentionOpCreate, - Intention: &structs.Intention{SourceName: v}, + Intention: structs.TestIntention(t), } + req.Intention.SourceName = v + var reply string if err := a.RPC("Intention.Apply", &req, &reply); err != nil { t.Fatalf("err: %s", err) @@ -93,13 +95,10 @@ func TestIntentionsMatch_basic(t *testing.T) { ixn := structs.IntentionRequest{ Datacenter: "dc1", Op: structs.IntentionOpCreate, - Intention: &structs.Intention{ - SourceNS: "default", - SourceName: "test", - DestinationNS: v[0], - DestinationName: v[1], - }, + Intention: structs.TestIntention(t), } + ixn.Intention.DestinationNS = v[0] + ixn.Intention.DestinationName = v[1] // Create var reply string @@ -198,7 +197,8 @@ func TestIntentionsCreate_good(t *testing.T) { defer a.Shutdown() // Make sure an empty list is non-nil. 
- args := &structs.Intention{SourceName: "foo"} + args := structs.TestIntention(t) + args.SourceName = "foo" req, _ := http.NewRequest("POST", "/v1/connect/intentions", jsonReader(args)) resp := httptest.NewRecorder() obj, err := a.srv.IntentionCreate(resp, req) @@ -238,12 +238,7 @@ func TestIntentionsSpecificGet_good(t *testing.T) { defer a.Shutdown() // The intention - ixn := &structs.Intention{ - SourceNS: structs.IntentionDefaultNamespace, - SourceName: "foo", - DestinationNS: structs.IntentionDefaultNamespace, - DestinationName: "bar", - } + ixn := structs.TestIntention(t) // Create an intention directly var reply string @@ -286,7 +281,7 @@ func TestIntentionsSpecificUpdate_good(t *testing.T) { defer a.Shutdown() // The intention - ixn := &structs.Intention{SourceName: "foo"} + ixn := structs.TestIntention(t) // Create an intention directly var reply string @@ -341,7 +336,8 @@ func TestIntentionsSpecificDelete_good(t *testing.T) { defer a.Shutdown() // The intention - ixn := &structs.Intention{SourceName: "foo"} + ixn := structs.TestIntention(t) + ixn.SourceName = "foo" // Create an intention directly var reply string diff --git a/agent/structs/testing_intention.go b/agent/structs/testing_intention.go index 930e27869..a946a3243 100644 --- a/agent/structs/testing_intention.go +++ b/agent/structs/testing_intention.go @@ -13,5 +13,6 @@ func TestIntention(t testing.T) *Intention { DestinationName: "db", Action: IntentionActionAllow, SourceType: IntentionSourceConsul, + Meta: map[string]string{}, } } From 027dad867288efe7c449342d33bf2640e9f29188 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 3 Mar 2018 10:13:16 -0800 Subject: [PATCH 042/627] agent/consul/state: remove TODO --- agent/consul/state/intention.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index b737318ee..3e83af4d1 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -245,8 +245,6 @@ 
func (s *Store) IntentionMatch(ws memdb.WatchSet, args *structs.IntentionQueryMa } } - // TODO: filter for uniques - // Sort the results by precedence sort.Sort(structs.IntentionPrecedenceSorter(ixns)) From 3a00564411478a0ce4e3af05c0dcdae990c70392 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 4 Mar 2018 19:10:19 -0800 Subject: [PATCH 043/627] agent/consul/state: need to set Meta for intentions for tests --- agent/consul/state/intention_test.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index dd7d9fcdf..7f5376509 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -32,7 +32,8 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { // Build a valid intention ixn := &structs.Intention{ - ID: testUUID(), + ID: testUUID(), + Meta: map[string]string{}, } // Inserting a with empty ID is disallowed. @@ -50,7 +51,8 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { // Read it back out and verify it. expected := &structs.Intention{ - ID: ixn.ID, + ID: ixn.ID, + Meta: map[string]string{}, RaftIndex: structs.RaftIndex{ CreateIndex: 1, ModifyIndex: 1, @@ -264,10 +266,12 @@ func TestStore_IntentionsList(t *testing.T) { // Create some intentions ixns := structs.Intentions{ &structs.Intention{ - ID: testUUID(), + ID: testUUID(), + Meta: map[string]string{}, }, &structs.Intention{ - ID: testUUID(), + ID: testUUID(), + Meta: map[string]string{}, }, } @@ -288,14 +292,16 @@ func TestStore_IntentionsList(t *testing.T) { // Read it back and verify. 
expected := structs.Intentions{ &structs.Intention{ - ID: ixns[0].ID, + ID: ixns[0].ID, + Meta: map[string]string{}, RaftIndex: structs.RaftIndex{ CreateIndex: 1, ModifyIndex: 1, }, }, &structs.Intention{ - ID: ixns[1].ID, + ID: ixns[1].ID, + Meta: map[string]string{}, RaftIndex: structs.RaftIndex{ CreateIndex: 2, ModifyIndex: 2, From 67b017c95c0b1eb246ed26cc60777d381a234c45 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 4 Mar 2018 19:14:19 -0800 Subject: [PATCH 044/627] agent/consul/fsm: switch tests to use structs.TestIntention --- agent/consul/fsm/commands_oss_test.go | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index b679cb313..d18d7651e 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -1161,14 +1161,9 @@ func TestFSM_Intention_CRUD(t *testing.T) { ixn := structs.IntentionRequest{ Datacenter: "dc1", Op: structs.IntentionOpCreate, - Intention: &structs.Intention{ - ID: generateUUID(), - SourceNS: "default", - SourceName: "web", - DestinationNS: "default", - DestinationName: "db", - }, + Intention: structs.TestIntention(t), } + ixn.Intention.ID = generateUUID() { buf, err := structs.Encode(structs.IntentionRequestType, ixn) From 6f33b2d0703d92dc465a6257fe946c72b3b64117 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 6 Mar 2018 09:04:44 -0800 Subject: [PATCH 045/627] agent: use UTC time for intention times, move empty list check to agent/consul --- agent/consul/intention_endpoint.go | 8 ++++++-- agent/consul/intention_endpoint_test.go | 3 +++ agent/intentions_endpoint.go | 4 ---- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 825c89c59..6653d5502 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -67,7 +67,7 @@ func (s *Intention) Apply( } // Set the 
created at - args.Intention.CreatedAt = time.Now() + args.Intention.CreatedAt = time.Now().UTC() } *reply = args.Intention.ID @@ -84,7 +84,7 @@ func (s *Intention) Apply( } // We always update the updatedat field. This has no effect for deletion. - args.Intention.UpdatedAt = time.Now() + args.Intention.UpdatedAt = time.Now().UTC() // Default source type if args.Intention.SourceType == "" { @@ -169,6 +169,10 @@ func (s *Intention) List( } reply.Index, reply.Intentions = index, ixns + if reply.Intentions == nil { + reply.Intentions = make(structs.Intentions, 0) + } + // filterACL return nil }, diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 3c38e695a..751b7894c 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -384,6 +384,9 @@ func TestIntentionList(t *testing.T) { if err := msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp); err != nil { t.Fatalf("err: %v", err) } + if resp.Intentions == nil { + t.Fatal("should not be nil") + } if len(resp.Intentions) != 0 { t.Fatalf("bad: %v", resp) diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 72a1cba67..5196f06c5 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -37,10 +37,6 @@ func (s *HTTPServer) IntentionList(resp http.ResponseWriter, req *http.Request) return nil, err } - // Use empty list instead of nil. 
- if reply.Intentions == nil { - reply.Intentions = make(structs.Intentions, 0) - } return reply.Intentions, nil } From f07340e94f4b686d682e9b6519b4c04216323ae4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 6 Mar 2018 09:31:21 -0800 Subject: [PATCH 046/627] agent/consul/fsm,state: snapshot/restore for intentions --- agent/consul/fsm/snapshot_oss.go | 33 ++++++++ agent/consul/fsm/snapshot_oss_test.go | 23 ++++++ agent/consul/state/intention.go | 28 +++++++ agent/consul/state/intention_test.go | 105 ++++++++++++++++++++++++++ 4 files changed, 189 insertions(+) diff --git a/agent/consul/fsm/snapshot_oss.go b/agent/consul/fsm/snapshot_oss.go index be7bfc5a8..1dde3ab0b 100644 --- a/agent/consul/fsm/snapshot_oss.go +++ b/agent/consul/fsm/snapshot_oss.go @@ -20,6 +20,7 @@ func init() { registerRestorer(structs.CoordinateBatchUpdateType, restoreCoordinates) registerRestorer(structs.PreparedQueryRequestType, restorePreparedQuery) registerRestorer(structs.AutopilotRequestType, restoreAutopilot) + registerRestorer(structs.IntentionRequestType, restoreIntention) } func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) error { @@ -44,6 +45,9 @@ func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) err if err := s.persistAutopilot(sink, encoder); err != nil { return err } + if err := s.persistIntentions(sink, encoder); err != nil { + return err + } return nil } @@ -258,6 +262,24 @@ func (s *snapshot) persistAutopilot(sink raft.SnapshotSink, return nil } +func (s *snapshot) persistIntentions(sink raft.SnapshotSink, + encoder *codec.Encoder) error { + ixns, err := s.state.Intentions() + if err != nil { + return err + } + + for _, ixn := range ixns { + if _, err := sink.Write([]byte{byte(structs.IntentionRequestType)}); err != nil { + return err + } + if err := encoder.Encode(ixn); err != nil { + return err + } + } + return nil +} + func restoreRegistration(header *snapshotHeader, restore *state.Restore, decoder 
*codec.Decoder) error { var req structs.RegisterRequest if err := decoder.Decode(&req); err != nil { @@ -364,3 +386,14 @@ func restoreAutopilot(header *snapshotHeader, restore *state.Restore, decoder *c } return nil } + +func restoreIntention(header *snapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { + var req structs.Intention + if err := decoder.Decode(&req); err != nil { + return err + } + if err := restore.Intention(&req); err != nil { + return err + } + return nil +} diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index 8b8544420..759f825b1 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -98,6 +98,17 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { t.Fatalf("err: %s", err) } + // Intentions + ixn := structs.TestIntention(t) + ixn.ID = generateUUID() + ixn.RaftIndex = structs.RaftIndex{ + CreateIndex: 14, + ModifyIndex: 14, + } + if err := fsm.state.IntentionSet(14, ixn); err != nil { + t.Fatalf("err: %s", err) + } + // Snapshot snap, err := fsm.Snapshot() if err != nil { @@ -260,6 +271,18 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { t.Fatalf("bad: %#v, %#v", restoredConf, autopilotConf) } + // Verify intentions are restored. + _, ixns, err := fsm2.state.Intentions(nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(ixns) != 1 { + t.Fatalf("bad: %#v", ixns) + } + if !reflect.DeepEqual(ixns[0], ixn) { + t.Fatalf("bad: %#v", ixns[0]) + } + // Snapshot snap, err = fsm2.Snapshot() if err != nil { diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index 3e83af4d1..bc8bb0213 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -68,6 +68,34 @@ func init() { registerSchema(intentionsTableSchema) } +// Intentions is used to pull all the intentions from the snapshot. 
+func (s *Snapshot) Intentions() (structs.Intentions, error) { + ixns, err := s.tx.Get(intentionsTableName, "id") + if err != nil { + return nil, err + } + + var ret structs.Intentions + for wrapped := ixns.Next(); wrapped != nil; wrapped = ixns.Next() { + ret = append(ret, wrapped.(*structs.Intention)) + } + + return ret, nil +} + +// Intention is used when restoring from a snapshot. +func (s *Restore) Intention(ixn *structs.Intention) error { + // Insert the intention + if err := s.tx.Insert(intentionsTableName, ixn); err != nil { + return fmt.Errorf("failed restoring intention: %s", err) + } + if err := indexUpdateMaxTxn(s.tx, ixn.ModifyIndex, intentionsTableName); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + + return nil +} + // Intentions returns the list of all intentions. func (s *Store) Intentions(ws memdb.WatchSet) (uint64, structs.Intentions, error) { tx := s.db.Txn(false) diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index 7f5376509..eb56ff04b 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -455,3 +455,108 @@ func TestStore_IntentionMatch_table(t *testing.T) { }) } } + +func TestStore_Intention_Snapshot_Restore(t *testing.T) { + s := testStateStore(t) + + // Create some intentions. + ixns := structs.Intentions{ + &structs.Intention{ + DestinationName: "foo", + }, + &structs.Intention{ + DestinationName: "bar", + }, + &structs.Intention{ + DestinationName: "baz", + }, + } + + // Force the sort order of the UUIDs before we create them so the + // order is deterministic. + id := testUUID() + ixns[0].ID = "a" + id[1:] + ixns[1].ID = "b" + id[1:] + ixns[2].ID = "c" + id[1:] + + // Now create + for i, ixn := range ixns { + if err := s.IntentionSet(uint64(4+i), ixn); err != nil { + t.Fatalf("err: %s", err) + } + } + + // Snapshot the queries. + snap := s.Snapshot() + defer snap.Close() + + // Alter the real state store. 
+ if err := s.IntentionDelete(7, ixns[0].ID); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the snapshot. + if idx := snap.LastIndex(); idx != 6 { + t.Fatalf("bad index: %d", idx) + } + expected := structs.Intentions{ + &structs.Intention{ + ID: ixns[0].ID, + DestinationName: "foo", + Meta: map[string]string{}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 4, + ModifyIndex: 4, + }, + }, + &structs.Intention{ + ID: ixns[1].ID, + DestinationName: "bar", + Meta: map[string]string{}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 5, + ModifyIndex: 5, + }, + }, + &structs.Intention{ + ID: ixns[2].ID, + DestinationName: "baz", + Meta: map[string]string{}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 6, + ModifyIndex: 6, + }, + }, + } + dump, err := snap.Intentions() + if err != nil { + t.Fatalf("err: %s", err) + } + if !reflect.DeepEqual(dump, expected) { + t.Fatalf("bad: %#v", dump[0]) + } + + // Restore the values into a new state store. + func() { + s := testStateStore(t) + restore := s.Restore() + for _, ixn := range dump { + if err := restore.Intention(ixn); err != nil { + t.Fatalf("err: %s", err) + } + } + restore.Commit() + + // Read the restored values back out and verify that they match. 
+ idx, actual, err := s.Intentions(nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if idx != 6 { + t.Fatalf("bad index: %d", idx) + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %v", actual) + } + }() +} From 1d0b4ceedbd5cf397d3439fa98e265ff790f26b8 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 6 Mar 2018 10:35:20 -0800 Subject: [PATCH 047/627] agent: convert all intention tests to testify/assert --- agent/consul/fsm/commands_oss_test.go | 58 ++---- agent/consul/fsm/snapshot_oss_test.go | 19 +- agent/consul/intention_endpoint_test.go | 187 ++++++------------ agent/consul/state/intention_test.go | 247 +++++++----------------- agent/intentions_endpoint_test.go | 179 ++++++----------- agent/structs/intention_test.go | 19 +- 6 files changed, 218 insertions(+), 491 deletions(-) diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index d18d7651e..acf67c5fb 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/go-uuid" "github.com/hashicorp/serf/coordinate" "github.com/pascaldekloe/goe/verify" + "github.com/stretchr/testify/assert" ) func generateUUID() (ret string) { @@ -1152,10 +1153,9 @@ func TestFSM_Autopilot(t *testing.T) { func TestFSM_Intention_CRUD(t *testing.T) { t.Parallel() + assert := assert.New(t) fsm, err := New(nil, os.Stderr) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) // Create a new intention. ixn := structs.IntentionRequest{ @@ -1167,28 +1167,19 @@ func TestFSM_Intention_CRUD(t *testing.T) { { buf, err := structs.Encode(structs.IntentionRequestType, ixn) - if err != nil { - t.Fatalf("err: %v", err) - } - resp := fsm.Apply(makeLog(buf)) - if resp != nil { - t.Fatalf("resp: %v", resp) - } + assert.Nil(err) + assert.Nil(fsm.Apply(makeLog(buf))) } // Verify it's in the state store. 
{ _, actual, err := fsm.state.IntentionGet(nil, ixn.Intention.ID) - if err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(err) actual.CreateIndex, actual.ModifyIndex = 0, 0 actual.CreatedAt = ixn.Intention.CreatedAt actual.UpdatedAt = ixn.Intention.UpdatedAt - if !reflect.DeepEqual(actual, ixn.Intention) { - t.Fatalf("bad: %v", actual) - } + assert.Equal(ixn.Intention, actual) } // Make an update @@ -1196,52 +1187,33 @@ func TestFSM_Intention_CRUD(t *testing.T) { ixn.Intention.SourceName = "api" { buf, err := structs.Encode(structs.IntentionRequestType, ixn) - if err != nil { - t.Fatalf("err: %v", err) - } - resp := fsm.Apply(makeLog(buf)) - if resp != nil { - t.Fatalf("resp: %v", resp) - } + assert.Nil(err) + assert.Nil(fsm.Apply(makeLog(buf))) } // Verify the update. { _, actual, err := fsm.state.IntentionGet(nil, ixn.Intention.ID) - if err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(err) actual.CreateIndex, actual.ModifyIndex = 0, 0 actual.CreatedAt = ixn.Intention.CreatedAt actual.UpdatedAt = ixn.Intention.UpdatedAt - if !reflect.DeepEqual(actual, ixn.Intention) { - t.Fatalf("bad: %v", actual) - } + assert.Equal(ixn.Intention, actual) } // Delete ixn.Op = structs.IntentionOpDelete { buf, err := structs.Encode(structs.IntentionRequestType, ixn) - if err != nil { - t.Fatalf("err: %v", err) - } - resp := fsm.Apply(makeLog(buf)) - if resp != nil { - t.Fatalf("resp: %v", resp) - } + assert.Nil(err) + assert.Nil(fsm.Apply(makeLog(buf))) } // Make sure it's gone. 
{ _, actual, err := fsm.state.IntentionGet(nil, ixn.Intention.ID) - if err != nil { - t.Fatalf("err: %s", err) - } - - if actual != nil { - t.Fatalf("bad: %v", actual) - } + assert.Nil(err) + assert.Nil(actual) } } diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index 759f825b1..63f1ab1d3 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -13,10 +13,13 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/lib" "github.com/pascaldekloe/goe/verify" + "github.com/stretchr/testify/assert" ) func TestFSM_SnapshotRestore_OSS(t *testing.T) { t.Parallel() + + assert := assert.New(t) fsm, err := New(nil, os.Stderr) if err != nil { t.Fatalf("err: %v", err) @@ -105,9 +108,7 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { CreateIndex: 14, ModifyIndex: 14, } - if err := fsm.state.IntentionSet(14, ixn); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(fsm.state.IntentionSet(14, ixn)) // Snapshot snap, err := fsm.Snapshot() @@ -273,15 +274,9 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { // Verify intentions are restored. 
_, ixns, err := fsm2.state.Intentions(nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if len(ixns) != 1 { - t.Fatalf("bad: %#v", ixns) - } - if !reflect.DeepEqual(ixns[0], ixn) { - t.Fatalf("bad: %#v", ixns[0]) - } + assert.Nil(err) + assert.Len(ixns, 1) + assert.Equal(ixn, ixns[0]) // Snapshot snap, err = fsm2.Snapshot() diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 751b7894c..2ba5b04c3 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -2,19 +2,20 @@ package consul import ( "os" - "reflect" - "strings" "testing" "time" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/stretchr/testify/assert" ) // Test basic creation func TestIntentionApply_new(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServer(t) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -43,12 +44,8 @@ func TestIntentionApply_new(t *testing.T) { now := time.Now() // Create - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if reply == "" { - t.Fatal("reply should be non-empty") - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) + assert.NotEmpty(reply) // Read ixn.Intention.ID = reply @@ -58,45 +55,25 @@ func TestIntentionApply_new(t *testing.T) { IntentionID: ixn.Intention.ID, } var resp structs.IndexedIntentions - if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if len(resp.Intentions) != 1 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp)) + assert.Len(resp.Intentions, 1) actual := resp.Intentions[0] - if resp.Index != actual.ModifyIndex { - t.Fatalf("bad index: %d", resp.Index) - } - - // Test CreatedAt - { - timeDiff := actual.CreatedAt.Sub(now) 
- if timeDiff < 0 || timeDiff > 5*time.Second { - t.Fatalf("should set created at: %s", actual.CreatedAt) - } - } - - // Test UpdatedAt - { - timeDiff := actual.UpdatedAt.Sub(now) - if timeDiff < 0 || timeDiff > 5*time.Second { - t.Fatalf("should set updated at: %s", actual.CreatedAt) - } - } + assert.Equal(resp.Index, actual.ModifyIndex) + assert.WithinDuration(now, actual.CreatedAt, 5*time.Second) + assert.WithinDuration(now, actual.UpdatedAt, 5*time.Second) actual.CreateIndex, actual.ModifyIndex = 0, 0 actual.CreatedAt = ixn.Intention.CreatedAt actual.UpdatedAt = ixn.Intention.UpdatedAt - if !reflect.DeepEqual(actual, ixn.Intention) { - t.Fatalf("bad:\n\n%#v\n\n%#v", actual, ixn.Intention) - } + assert.Equal(ixn.Intention, actual) } } // Test the source type defaults func TestIntentionApply_defaultSourceType(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServer(t) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -120,12 +97,8 @@ func TestIntentionApply_defaultSourceType(t *testing.T) { var reply string // Create - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if reply == "" { - t.Fatal("reply should be non-empty") - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) + assert.NotEmpty(reply) // Read ixn.Intention.ID = reply @@ -135,23 +108,18 @@ func TestIntentionApply_defaultSourceType(t *testing.T) { IntentionID: ixn.Intention.ID, } var resp structs.IndexedIntentions - if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if len(resp.Intentions) != 1 { - t.Fatalf("bad: %v", resp) - } - + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp)) + assert.Len(resp.Intentions, 1) actual := resp.Intentions[0] - if actual.SourceType != structs.IntentionSourceConsul { - t.Fatalf("bad:\n\n%#v\n\n%#v", actual, ixn.Intention) - } + 
assert.Equal(structs.IntentionSourceConsul, actual.SourceType) } } // Shouldn't be able to create with an ID set func TestIntentionApply_createWithID(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServer(t) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -173,14 +141,15 @@ func TestIntentionApply_createWithID(t *testing.T) { // Create err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) - if err == nil || !strings.Contains(err.Error(), "ID must be empty") { - t.Fatalf("bad: %v", err) - } + assert.NotNil(err) + assert.Contains(err, "ID must be empty") } // Test basic updating func TestIntentionApply_updateGood(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServer(t) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -206,12 +175,8 @@ func TestIntentionApply_updateGood(t *testing.T) { var reply string // Create - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if reply == "" { - t.Fatal("reply should be non-empty") - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) + assert.NotEmpty(reply) // Read CreatedAt var createdAt time.Time @@ -222,12 +187,8 @@ func TestIntentionApply_updateGood(t *testing.T) { IntentionID: ixn.Intention.ID, } var resp structs.IndexedIntentions - if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if len(resp.Intentions) != 1 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp)) + assert.Len(resp.Intentions, 1) actual := resp.Intentions[0] createdAt = actual.CreatedAt } @@ -239,9 +200,7 @@ func TestIntentionApply_updateGood(t *testing.T) { ixn.Op = structs.IntentionOpUpdate ixn.Intention.ID = reply ixn.Intention.SourceName = "bar" - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + 
assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) // Read ixn.Intention.ID = reply @@ -251,39 +210,24 @@ func TestIntentionApply_updateGood(t *testing.T) { IntentionID: ixn.Intention.ID, } var resp structs.IndexedIntentions - if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if len(resp.Intentions) != 1 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp)) + assert.Len(resp.Intentions, 1) actual := resp.Intentions[0] - - // Test CreatedAt - if !actual.CreatedAt.Equal(createdAt) { - t.Fatalf("should not modify created at: %s", actual.CreatedAt) - } - - // Test UpdatedAt - { - timeDiff := actual.UpdatedAt.Sub(createdAt) - if timeDiff <= 0 || timeDiff > 5*time.Second { - t.Fatalf("should set updated at: %s", actual.CreatedAt) - } - } + assert.Equal(createdAt, actual.CreatedAt) + assert.WithinDuration(time.Now(), actual.UpdatedAt, 5*time.Second) actual.CreateIndex, actual.ModifyIndex = 0, 0 actual.CreatedAt = ixn.Intention.CreatedAt actual.UpdatedAt = ixn.Intention.UpdatedAt - if !reflect.DeepEqual(actual, ixn.Intention) { - t.Fatalf("bad: %v", actual) - } + assert.Equal(ixn.Intention, actual) } } // Shouldn't be able to update a non-existent intention func TestIntentionApply_updateNonExist(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServer(t) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -305,14 +249,15 @@ func TestIntentionApply_updateNonExist(t *testing.T) { // Create err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) - if err == nil || !strings.Contains(err.Error(), "Cannot modify non-existent intention") { - t.Fatalf("bad: %v", err) - } + assert.NotNil(err) + assert.Contains(err, "Cannot modify non-existent intention") } // Test basic deleting func TestIntentionApply_deleteGood(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServer(t) defer 
os.RemoveAll(dir1) defer s1.Shutdown() @@ -336,19 +281,13 @@ func TestIntentionApply_deleteGood(t *testing.T) { var reply string // Create - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } - if reply == "" { - t.Fatal("reply should be non-empty") - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) + assert.NotEmpty(reply) // Delete ixn.Op = structs.IntentionOpDelete ixn.Intention.ID = reply - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) // Read ixn.Intention.ID = reply @@ -359,14 +298,15 @@ func TestIntentionApply_deleteGood(t *testing.T) { } var resp structs.IndexedIntentions err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp) - if err == nil || !strings.Contains(err.Error(), ErrIntentionNotFound.Error()) { - t.Fatalf("err: %v", err) - } + assert.NotNil(err) + assert.Contains(err, ErrIntentionNotFound.Error()) } } func TestIntentionList(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServer(t) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -381,16 +321,9 @@ func TestIntentionList(t *testing.T) { Datacenter: "dc1", } var resp structs.IndexedIntentions - if err := msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if resp.Intentions == nil { - t.Fatal("should not be nil") - } - - if len(resp.Intentions) != 0 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) + assert.NotNil(resp.Intentions) + assert.Len(resp.Intentions, 0) } } @@ -398,6 +331,8 @@ func TestIntentionList(t *testing.T) { // is tested in the agent/consul/state package. 
func TestIntentionMatch_good(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServer(t) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -432,9 +367,7 @@ func TestIntentionMatch_good(t *testing.T) { // Create var reply string - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) } } @@ -452,21 +385,13 @@ func TestIntentionMatch_good(t *testing.T) { }, } var resp structs.IndexedIntentionMatches - if err := msgpackrpc.CallWithCodec(codec, "Intention.Match", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - - if len(resp.Matches) != 1 { - t.Fatalf("bad: %#v", resp.Matches) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Match", req, &resp)) + assert.Len(resp.Matches, 1) expected := [][]string{{"foo", "bar"}, {"foo", "*"}, {"*", "*"}} var actual [][]string for _, ixn := range resp.Matches[0] { actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) } - - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", actual, expected) - } + assert.Equal(expected, actual) } diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index eb56ff04b..d4c63647a 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -1,34 +1,34 @@ package state import ( - "reflect" "testing" "time" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" + "github.com/stretchr/testify/assert" ) func TestStore_IntentionGet_none(t *testing.T) { + assert := assert.New(t) s := testStateStore(t) // Querying with no results returns nil. 
ws := memdb.NewWatchSet() idx, res, err := s.IntentionGet(ws, testUUID()) - if idx != 0 || res != nil || err != nil { - t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) - } + assert.Equal(idx, uint64(0)) + assert.Nil(res) + assert.Nil(err) } func TestStore_IntentionSetGet_basic(t *testing.T) { + assert := assert.New(t) s := testStateStore(t) // Call Get to populate the watch set ws := memdb.NewWatchSet() _, _, err := s.IntentionGet(ws, testUUID()) - if err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(err) // Build a valid intention ixn := &structs.Intention{ @@ -37,17 +37,11 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { } // Inserting a with empty ID is disallowed. - if err := s.IntentionSet(1, ixn); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.IntentionSet(1, ixn)) // Make sure the index got updated. - if idx := s.maxIndex(intentionsTableName); idx != 1 { - t.Fatalf("bad index: %d", idx) - } - if !watchFired(ws) { - t.Fatalf("bad") - } + assert.Equal(s.maxIndex(intentionsTableName), uint64(1)) + assert.True(watchFired(ws), "watch fired") // Read it back out and verify it. expected := &structs.Intention{ @@ -61,70 +55,48 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { ws = memdb.NewWatchSet() idx, actual, err := s.IntentionGet(ws, ixn.ID) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != expected.CreateIndex { - t.Fatalf("bad index: %d", idx) - } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %v", actual) - } + assert.Nil(err) + assert.Equal(expected.CreateIndex, idx) + assert.Equal(expected, actual) // Change a value and test updating ixn.SourceNS = "foo" - if err := s.IntentionSet(2, ixn); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.IntentionSet(2, ixn)) // Make sure the index got updated. 
- if idx := s.maxIndex(intentionsTableName); idx != 2 { - t.Fatalf("bad index: %d", idx) - } - if !watchFired(ws) { - t.Fatalf("bad") - } + assert.Equal(s.maxIndex(intentionsTableName), uint64(2)) + assert.True(watchFired(ws), "watch fired") // Read it back and verify the data was updated expected.SourceNS = ixn.SourceNS expected.ModifyIndex = 2 ws = memdb.NewWatchSet() idx, actual, err = s.IntentionGet(ws, ixn.ID) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != expected.ModifyIndex { - t.Fatalf("bad index: %d", idx) - } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } + assert.Nil(err) + assert.Equal(expected.ModifyIndex, idx) + assert.Equal(expected, actual) } func TestStore_IntentionSet_emptyId(t *testing.T) { + assert := assert.New(t) s := testStateStore(t) ws := memdb.NewWatchSet() _, _, err := s.IntentionGet(ws, testUUID()) - if err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(err) // Inserting a with empty ID is disallowed. - if err := s.IntentionSet(1, &structs.Intention{}); err == nil { - t.Fatalf("expected %#v, got: %#v", ErrMissingIntentionID, err) - } + err = s.IntentionSet(1, &structs.Intention{}) + assert.NotNil(err) + assert.Contains(err.Error(), ErrMissingIntentionID.Error()) // Index is not updated if nothing is saved. 
- if idx := s.maxIndex(intentionsTableName); idx != 0 { - t.Fatalf("bad index: %d", idx) - } - if watchFired(ws) { - t.Fatalf("bad") - } + assert.Equal(s.maxIndex(intentionsTableName), uint64(0)) + assert.False(watchFired(ws), "watch fired") } func TestStore_IntentionSet_updateCreatedAt(t *testing.T) { + assert := assert.New(t) s := testStateStore(t) // Build a valid intention @@ -135,28 +107,21 @@ func TestStore_IntentionSet_updateCreatedAt(t *testing.T) { } // Insert - if err := s.IntentionSet(1, &ixn); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.IntentionSet(1, &ixn)) // Change a value and test updating ixnUpdate := ixn ixnUpdate.CreatedAt = now.Add(10 * time.Second) - if err := s.IntentionSet(2, &ixnUpdate); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.IntentionSet(2, &ixnUpdate)) // Read it back and verify _, actual, err := s.IntentionGet(nil, ixn.ID) - if err != nil { - t.Fatalf("err: %s", err) - } - if !actual.CreatedAt.Equal(now) { - t.Fatalf("bad: %#v", actual) - } + assert.Nil(err) + assert.Equal(now, actual.CreatedAt) } func TestStore_IntentionSet_metaNil(t *testing.T) { + assert := assert.New(t) s := testStateStore(t) // Build a valid intention @@ -165,21 +130,16 @@ func TestStore_IntentionSet_metaNil(t *testing.T) { } // Insert - if err := s.IntentionSet(1, &ixn); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.IntentionSet(1, &ixn)) // Read it back and verify _, actual, err := s.IntentionGet(nil, ixn.ID) - if err != nil { - t.Fatalf("err: %s", err) - } - if actual.Meta == nil { - t.Fatal("meta should be non-nil") - } + assert.Nil(err) + assert.NotNil(actual.Meta) } func TestStore_IntentionSet_metaSet(t *testing.T) { + assert := assert.New(t) s := testStateStore(t) // Build a valid intention @@ -189,79 +149,55 @@ func TestStore_IntentionSet_metaSet(t *testing.T) { } // Insert - if err := s.IntentionSet(1, &ixn); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.IntentionSet(1, &ixn)) // Read it back and 
verify _, actual, err := s.IntentionGet(nil, ixn.ID) - if err != nil { - t.Fatalf("err: %s", err) - } - if !reflect.DeepEqual(actual.Meta, ixn.Meta) { - t.Fatalf("bad: %#v", actual) - } + assert.Nil(err) + assert.Equal(ixn.Meta, actual.Meta) } func TestStore_IntentionDelete(t *testing.T) { + assert := assert.New(t) s := testStateStore(t) // Call Get to populate the watch set ws := memdb.NewWatchSet() _, _, err := s.IntentionGet(ws, testUUID()) - if err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(err) // Create ixn := &structs.Intention{ID: testUUID()} - if err := s.IntentionSet(1, ixn); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.IntentionSet(1, ixn)) // Make sure the index got updated. - if idx := s.maxIndex(intentionsTableName); idx != 1 { - t.Fatalf("bad index: %d", idx) - } - if !watchFired(ws) { - t.Fatalf("bad") - } + assert.Equal(s.maxIndex(intentionsTableName), uint64(1)) + assert.True(watchFired(ws), "watch fired") // Delete - if err := s.IntentionDelete(2, ixn.ID); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.IntentionDelete(2, ixn.ID)) // Make sure the index got updated. - if idx := s.maxIndex(intentionsTableName); idx != 2 { - t.Fatalf("bad index: %d", idx) - } - if !watchFired(ws) { - t.Fatalf("bad") - } + assert.Equal(s.maxIndex(intentionsTableName), uint64(2)) + assert.True(watchFired(ws), "watch fired") // Sanity check to make sure it's not there. idx, actual, err := s.IntentionGet(nil, ixn.ID) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 2 { - t.Fatalf("bad index: %d", idx) - } - if actual != nil { - t.Fatalf("bad: %v", actual) - } + assert.Nil(err) + assert.Equal(idx, uint64(2)) + assert.Nil(actual) } func TestStore_IntentionsList(t *testing.T) { + assert := assert.New(t) s := testStateStore(t) // Querying with no results returns nil. 
ws := memdb.NewWatchSet() idx, res, err := s.Intentions(ws) - if idx != 0 || res != nil || err != nil { - t.Fatalf("expected (0, nil, nil), got: (%d, %#v, %#v)", idx, res, err) - } + assert.Nil(err) + assert.Nil(res) + assert.Equal(idx, uint64(0)) // Create some intentions ixns := structs.Intentions{ @@ -281,13 +217,9 @@ func TestStore_IntentionsList(t *testing.T) { // Create for i, ixn := range ixns { - if err := s.IntentionSet(uint64(1+i), ixn); err != nil { - t.Fatalf("err: %s", err) - } - } - if !watchFired(ws) { - t.Fatalf("bad") + assert.Nil(s.IntentionSet(uint64(1+i), ixn)) } + assert.True(watchFired(ws), "watch fired") // Read it back and verify. expected := structs.Intentions{ @@ -309,15 +241,9 @@ func TestStore_IntentionsList(t *testing.T) { }, } idx, actual, err := s.Intentions(nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 2 { - t.Fatalf("bad index: %d", idx) - } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %v", actual) - } + assert.Nil(err) + assert.Equal(idx, uint64(2)) + assert.Equal(expected, actual) } // Test the matrix of match logic. @@ -386,6 +312,7 @@ func TestStore_IntentionMatch_table(t *testing.T) { // test both cases. 
testRunner := func(t *testing.T, tc testCase, typ structs.IntentionMatchType) { // Insert the set + assert := assert.New(t) s := testStateStore(t) var idx uint64 = 1 for _, v := range tc.Insert { @@ -399,10 +326,7 @@ func TestStore_IntentionMatch_table(t *testing.T) { ixn.SourceName = v[1] } - err := s.IntentionSet(idx, ixn) - if err != nil { - t.Fatalf("error inserting: %s", err) - } + assert.Nil(s.IntentionSet(idx, ixn)) idx++ } @@ -418,14 +342,10 @@ func TestStore_IntentionMatch_table(t *testing.T) { // Match _, matches, err := s.IntentionMatch(nil, args) - if err != nil { - t.Fatalf("error matching: %s", err) - } + assert.Nil(err) // Should have equal lengths - if len(matches) != len(tc.Expected) { - t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", tc.Expected, matches) - } + assert.Len(matches, len(tc.Expected)) // Verify matches for i, expected := range tc.Expected { @@ -439,9 +359,7 @@ func TestStore_IntentionMatch_table(t *testing.T) { } } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", actual, expected) - } + assert.Equal(expected, actual) } } @@ -457,6 +375,7 @@ func TestStore_IntentionMatch_table(t *testing.T) { } func TestStore_Intention_Snapshot_Restore(t *testing.T) { + assert := assert.New(t) s := testStateStore(t) // Create some intentions. @@ -481,9 +400,7 @@ func TestStore_Intention_Snapshot_Restore(t *testing.T) { // Now create for i, ixn := range ixns { - if err := s.IntentionSet(uint64(4+i), ixn); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.IntentionSet(uint64(4+i), ixn)) } // Snapshot the queries. @@ -491,14 +408,10 @@ func TestStore_Intention_Snapshot_Restore(t *testing.T) { defer snap.Close() // Alter the real state store. - if err := s.IntentionDelete(7, ixns[0].ID); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.IntentionDelete(7, ixns[0].ID)) // Verify the snapshot. 
- if idx := snap.LastIndex(); idx != 6 { - t.Fatalf("bad index: %d", idx) - } + assert.Equal(snap.LastIndex(), uint64(6)) expected := structs.Intentions{ &structs.Intention{ ID: ixns[0].ID, @@ -529,34 +442,22 @@ func TestStore_Intention_Snapshot_Restore(t *testing.T) { }, } dump, err := snap.Intentions() - if err != nil { - t.Fatalf("err: %s", err) - } - if !reflect.DeepEqual(dump, expected) { - t.Fatalf("bad: %#v", dump[0]) - } + assert.Nil(err) + assert.Equal(expected, dump) // Restore the values into a new state store. func() { s := testStateStore(t) restore := s.Restore() for _, ixn := range dump { - if err := restore.Intention(ixn); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(restore.Intention(ixn)) } restore.Commit() // Read the restored values back out and verify that they match. idx, actual, err := s.Intentions(nil) - if err != nil { - t.Fatalf("err: %s", err) - } - if idx != 6 { - t.Fatalf("bad index: %d", idx) - } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %v", actual) - } + assert.Nil(err) + assert.Equal(idx, uint64(6)) + assert.Equal(expected, actual) }() } diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index c236491e8..4df0bf312 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -4,17 +4,17 @@ import ( "fmt" "net/http" "net/http/httptest" - "reflect" "sort" - "strings" "testing" "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/assert" ) func TestIntentionsList_empty(t *testing.T) { t.Parallel() + assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -22,19 +22,17 @@ func TestIntentionsList_empty(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/connect/intentions", nil) resp := httptest.NewRecorder() obj, err := a.srv.IntentionList(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) value := obj.(structs.Intentions) - if value == nil || len(value) != 0 { - 
t.Fatalf("bad: %v", value) - } + assert.NotNil(value) + assert.Len(value, 0) } func TestIntentionsList_values(t *testing.T) { t.Parallel() + assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -48,35 +46,28 @@ func TestIntentionsList_values(t *testing.T) { req.Intention.SourceName = v var reply string - if err := a.RPC("Intention.Apply", &req, &reply); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(a.RPC("Intention.Apply", &req, &reply)) } // Request req, _ := http.NewRequest("GET", "/v1/connect/intentions", nil) resp := httptest.NewRecorder() obj, err := a.srv.IntentionList(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) value := obj.(structs.Intentions) - if len(value) != 2 { - t.Fatalf("bad: %v", value) - } + assert.Len(value, 2) expected := []string{"bar", "foo"} actual := []string{value[0].SourceName, value[1].SourceName} sort.Strings(actual) - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) - } + assert.Equal(expected, actual) } func TestIntentionsMatch_basic(t *testing.T) { t.Parallel() + assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -102,9 +93,7 @@ func TestIntentionsMatch_basic(t *testing.T) { // Create var reply string - if err := a.RPC("Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(a.RPC("Intention.Apply", &ixn, &reply)) } } @@ -113,14 +102,10 @@ func TestIntentionsMatch_basic(t *testing.T) { "/v1/connect/intentions/match?by=destination&name=foo/bar", nil) resp := httptest.NewRecorder() obj, err := a.srv.IntentionMatch(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) value := obj.(map[string]structs.Intentions) - if len(value) != 1 { - t.Fatalf("bad: %v", value) - } + assert.Len(value, 1) var actual [][]string expected := [][]string{{"foo", "bar"}, {"foo", "*"}, {"*", "*"}} @@ -128,14 +113,13 @@ func TestIntentionsMatch_basic(t *testing.T) { actual = 
append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", actual, expected) - } + assert.Equal(expected, actual) } func TestIntentionsMatch_noBy(t *testing.T) { t.Parallel() + assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -144,17 +128,15 @@ func TestIntentionsMatch_noBy(t *testing.T) { "/v1/connect/intentions/match?name=foo/bar", nil) resp := httptest.NewRecorder() obj, err := a.srv.IntentionMatch(resp, req) - if err == nil || !strings.Contains(err.Error(), "by") { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatal("should have no response") - } + assert.NotNil(err) + assert.Contains(err.Error(), "by") + assert.Nil(obj) } func TestIntentionsMatch_byInvalid(t *testing.T) { t.Parallel() + assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -163,17 +145,15 @@ func TestIntentionsMatch_byInvalid(t *testing.T) { "/v1/connect/intentions/match?by=datacenter", nil) resp := httptest.NewRecorder() obj, err := a.srv.IntentionMatch(resp, req) - if err == nil || !strings.Contains(err.Error(), "'by' parameter") { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatal("should have no response") - } + assert.NotNil(err) + assert.Contains(err.Error(), "'by' parameter") + assert.Nil(obj) } func TestIntentionsMatch_noName(t *testing.T) { t.Parallel() + assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -182,17 +162,15 @@ func TestIntentionsMatch_noName(t *testing.T) { "/v1/connect/intentions/match?by=source", nil) resp := httptest.NewRecorder() obj, err := a.srv.IntentionMatch(resp, req) - if err == nil || !strings.Contains(err.Error(), "'name' not set") { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatal("should have no response") - } + assert.NotNil(err) + assert.Contains(err.Error(), "'name' not set") + assert.Nil(obj) } func TestIntentionsCreate_good(t *testing.T) { t.Parallel() 
+ assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -202,14 +180,10 @@ func TestIntentionsCreate_good(t *testing.T) { req, _ := http.NewRequest("POST", "/v1/connect/intentions", jsonReader(args)) resp := httptest.NewRecorder() obj, err := a.srv.IntentionCreate(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) value := obj.(intentionCreateResponse) - if value.ID == "" { - t.Fatalf("bad: %v", value) - } + assert.NotEqual("", value.ID) // Read the value { @@ -218,22 +192,17 @@ func TestIntentionsCreate_good(t *testing.T) { IntentionID: value.ID, } var resp structs.IndexedIntentions - if err := a.RPC("Intention.Get", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if len(resp.Intentions) != 1 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(a.RPC("Intention.Get", req, &resp)) + assert.Len(resp.Intentions, 1) actual := resp.Intentions[0] - if actual.SourceName != "foo" { - t.Fatalf("bad: %#v", actual) - } + assert.Equal("foo", actual.SourceName) } } func TestIntentionsSpecificGet_good(t *testing.T) { t.Parallel() + assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -248,35 +217,28 @@ func TestIntentionsSpecificGet_good(t *testing.T) { Op: structs.IntentionOpCreate, Intention: ixn, } - if err := a.RPC("Intention.Apply", &req, &reply); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(a.RPC("Intention.Apply", &req, &reply)) } // Get the value req, _ := http.NewRequest("GET", fmt.Sprintf("/v1/connect/intentions/%s", reply), nil) resp := httptest.NewRecorder() obj, err := a.srv.IntentionSpecific(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) value := obj.(*structs.Intention) - if value.ID != reply { - t.Fatalf("bad: %v", value) - } + assert.Equal(reply, value.ID) ixn.ID = value.ID ixn.RaftIndex = value.RaftIndex ixn.CreatedAt, ixn.UpdatedAt = value.CreatedAt, value.UpdatedAt - if !reflect.DeepEqual(value, ixn) { - t.Fatalf("bad (got, 
want):\n\n%#v\n\n%#v", value, ixn) - } + assert.Equal(ixn, value) } func TestIntentionsSpecificUpdate_good(t *testing.T) { t.Parallel() + assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -291,9 +253,7 @@ func TestIntentionsSpecificUpdate_good(t *testing.T) { Op: structs.IntentionOpCreate, Intention: ixn, } - if err := a.RPC("Intention.Apply", &req, &reply); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(a.RPC("Intention.Apply", &req, &reply)) } // Update the intention @@ -302,12 +262,8 @@ func TestIntentionsSpecificUpdate_good(t *testing.T) { req, _ := http.NewRequest("PUT", fmt.Sprintf("/v1/connect/intentions/%s", reply), jsonReader(ixn)) resp := httptest.NewRecorder() obj, err := a.srv.IntentionSpecific(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("obj should be nil: %v", err) - } + assert.Nil(err) + assert.Nil(obj) // Read the value { @@ -316,22 +272,17 @@ func TestIntentionsSpecificUpdate_good(t *testing.T) { IntentionID: reply, } var resp structs.IndexedIntentions - if err := a.RPC("Intention.Get", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if len(resp.Intentions) != 1 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(a.RPC("Intention.Get", req, &resp)) + assert.Len(resp.Intentions, 1) actual := resp.Intentions[0] - if actual.SourceName != "bar" { - t.Fatalf("bad: %#v", actual) - } + assert.Equal("bar", actual.SourceName) } } func TestIntentionsSpecificDelete_good(t *testing.T) { t.Parallel() + assert := assert.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -347,9 +298,7 @@ func TestIntentionsSpecificDelete_good(t *testing.T) { Op: structs.IntentionOpCreate, Intention: ixn, } - if err := a.RPC("Intention.Apply", &req, &reply); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(a.RPC("Intention.Apply", &req, &reply)) } // Sanity check that the intention exists @@ -359,28 +308,18 @@ func TestIntentionsSpecificDelete_good(t *testing.T) { 
IntentionID: reply, } var resp structs.IndexedIntentions - if err := a.RPC("Intention.Get", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if len(resp.Intentions) != 1 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(a.RPC("Intention.Get", req, &resp)) + assert.Len(resp.Intentions, 1) actual := resp.Intentions[0] - if actual.SourceName != "foo" { - t.Fatalf("bad: %#v", actual) - } + assert.Equal("foo", actual.SourceName) } // Delete the intention req, _ := http.NewRequest("DELETE", fmt.Sprintf("/v1/connect/intentions/%s", reply), nil) resp := httptest.NewRecorder() obj, err := a.srv.IntentionSpecific(resp, req) - if err != nil { - t.Fatalf("err: %v", err) - } - if obj != nil { - t.Fatalf("obj should be nil: %v", err) - } + assert.Nil(err) + assert.Nil(obj) // Verify the intention is gone { @@ -390,9 +329,8 @@ func TestIntentionsSpecificDelete_good(t *testing.T) { } var resp structs.IndexedIntentions err := a.RPC("Intention.Get", req, &resp) - if err == nil || !strings.Contains(err.Error(), "not found") { - t.Fatalf("err: %v", err) - } + assert.NotNil(err) + assert.Contains(err.Error(), "not found") } } @@ -429,17 +367,14 @@ func TestParseIntentionMatchEntry(t *testing.T) { for _, tc := range cases { t.Run(tc.Input, func(t *testing.T) { + assert := assert.New(t) actual, err := parseIntentionMatchEntry(tc.Input) - if (err != nil) != tc.Err { - t.Fatalf("err: %s", err) - } + assert.Equal(err != nil, tc.Err, err) if err != nil { return } - if !reflect.DeepEqual(actual, tc.Expected) { - t.Fatalf("bad: %#v", actual) - } + assert.Equal(tc.Expected, actual) }) } } diff --git a/agent/structs/intention_test.go b/agent/structs/intention_test.go index ec0a2de66..9db4ff255 100644 --- a/agent/structs/intention_test.go +++ b/agent/structs/intention_test.go @@ -1,10 +1,11 @@ package structs import ( - "reflect" "sort" "strings" "testing" + + "github.com/stretchr/testify/assert" ) func TestIntentionValidate(t *testing.T) { @@ -108,19 +109,17 @@ func 
TestIntentionValidate(t *testing.T) { for _, tc := range cases { t.Run(tc.Name, func(t *testing.T) { + assert := assert.New(t) ixn := TestIntention(t) tc.Modify(ixn) err := ixn.Validate() - if (err != nil) != (tc.Err != "") { - t.Fatalf("err: %s", err) - } + assert.Equal(err != nil, tc.Err != "", err) if err == nil { return } - if !strings.Contains(strings.ToLower(err.Error()), strings.ToLower(tc.Err)) { - t.Fatalf("err: %s", err) - } + + assert.Contains(strings.ToLower(err.Error()), strings.ToLower(tc.Err)) }) } } @@ -160,6 +159,8 @@ func TestIntentionPrecedenceSorter(t *testing.T) { for _, tc := range cases { t.Run(tc.Name, func(t *testing.T) { + assert := assert.New(t) + var input Intentions for _, v := range tc.Input { input = append(input, &Intention{ @@ -183,9 +184,7 @@ func TestIntentionPrecedenceSorter(t *testing.T) { v.DestinationName, }) } - if !reflect.DeepEqual(actual, tc.Expected) { - t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", actual, tc.Expected) - } + assert.Equal(tc.Expected, actual) }) } } From 10ebccba4507204f186f8fe4d2c4a07ddc154413 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 3 Mar 2018 16:14:33 -0800 Subject: [PATCH 048/627] acl: parsing intentions in service block --- acl/policy.go | 8 +++++ acl/policy_test.go | 82 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 90 insertions(+) diff --git a/acl/policy.go b/acl/policy.go index b8fc498b6..0796a1aec 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -73,6 +73,11 @@ type ServicePolicy struct { Name string `hcl:",key"` Policy string Sentinel Sentinel + + // Intentions is the policy for intentions where this service is the + // destination. This may be empty, in which case the Policy determines + // the intentions policy. 
+ Intentions string } func (s *ServicePolicy) GoString() string { @@ -197,6 +202,9 @@ func Parse(rules string, sentinel sentinel.Evaluator) (*Policy, error) { if !isPolicyValid(sp.Policy) { return nil, fmt.Errorf("Invalid service policy: %#v", sp) } + if sp.Intentions != "" && !isPolicyValid(sp.Intentions) { + return nil, fmt.Errorf("Invalid service intentions policy: %#v", sp) + } if err := isSentinelValid(sentinel, sp.Policy, sp.Sentinel); err != nil { return nil, fmt.Errorf("Invalid service Sentinel policy: %#v, got error:%v", sp, err) } diff --git a/acl/policy_test.go b/acl/policy_test.go index 37b8216f5..9d3ae8f69 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -6,6 +6,88 @@ import ( "testing" ) +func TestParse_table(t *testing.T) { + // Note that the table tests are newer than other tests. Many of the + // other aspects of policy parsing are tested in older tests below. New + // parsing tests should be added to this table as its easier to maintain. + cases := []struct { + Name string + Input string + Expected *Policy + Err string + }{ + { + "service no intentions", + ` +service "foo" { + policy = "write" +} + `, + &Policy{ + Services: []*ServicePolicy{ + { + Name: "foo", + Policy: "write", + }, + }, + }, + "", + }, + + { + "service intentions", + ` +service "foo" { + policy = "write" + intentions = "read" +} + `, + &Policy{ + Services: []*ServicePolicy{ + { + Name: "foo", + Policy: "write", + Intentions: "read", + }, + }, + }, + "", + }, + + { + "service intention: invalid value", + ` +service "foo" { + policy = "write" + intentions = "foo" +} + `, + nil, + "service intentions", + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + actual, err := Parse(tc.Input, nil) + if (err != nil) != (tc.Err != "") { + t.Fatalf("err: %s", err) + } + if err != nil { + if !strings.Contains(err.Error(), tc.Err) { + t.Fatalf("err: %s", err) + } + + return + } + + if !reflect.DeepEqual(actual, tc.Expected) { + t.Fatalf("bad: %#v", actual) 
+ } + }) + } +} + func TestACLPolicy_Parse_HCL(t *testing.T) { inp := ` agent "foo" { From 7b3c6fd8bdb2c82d42655f23a260669cf2732122 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 4 Mar 2018 00:38:04 -0800 Subject: [PATCH 049/627] acl: implement IntentionRead/Write methods on ACL interface --- acl/acl.go | 76 +++++++++++++++++++++++++++++++++++++++++++++++++ acl/acl_test.go | 44 ++++++++++++++++++++++++++-- 2 files changed, 118 insertions(+), 2 deletions(-) diff --git a/acl/acl.go b/acl/acl.go index 73bcc4fc3..4ac88f01c 100644 --- a/acl/acl.go +++ b/acl/acl.go @@ -60,6 +60,13 @@ type ACL interface { // EventWrite determines if a specific event may be fired. EventWrite(string) bool + // IntentionRead determines if a specific intention can be read. + IntentionRead(string) bool + + // IntentionWrite determines if a specific intention can be + // created, modified, or deleted. + IntentionWrite(string) bool + // KeyList checks for permission to list keys under a prefix KeyList(string) bool @@ -154,6 +161,14 @@ func (s *StaticACL) EventWrite(string) bool { return s.defaultAllow } +func (s *StaticACL) IntentionRead(string) bool { + return s.defaultAllow +} + +func (s *StaticACL) IntentionWrite(string) bool { + return s.defaultAllow +} + func (s *StaticACL) KeyRead(string) bool { return s.defaultAllow } @@ -275,6 +290,9 @@ type PolicyACL struct { // agentRules contains the agent policies agentRules *radix.Tree + // intentionRules contains the service intention policies + intentionRules *radix.Tree + // keyRules contains the key policies keyRules *radix.Tree @@ -308,6 +326,7 @@ func New(parent ACL, policy *Policy, sentinel sentinel.Evaluator) (*PolicyACL, e p := &PolicyACL{ parent: parent, agentRules: radix.New(), + intentionRules: radix.New(), keyRules: radix.New(), nodeRules: radix.New(), serviceRules: radix.New(), @@ -347,6 +366,25 @@ func New(parent ACL, policy *Policy, sentinel sentinel.Evaluator) (*PolicyACL, e sentinelPolicy: sp.Sentinel, } 
p.serviceRules.Insert(sp.Name, policyRule) + + // Determine the intention. The intention could be blank (not set). + // If the intention is not set, the value depends on the value of + // the service policy. + intention := sp.Intentions + if intention == "" { + switch sp.Policy { + case PolicyRead, PolicyWrite: + intention = PolicyRead + default: + intention = PolicyDeny + } + } + + policyRule = PolicyRule{ + aclPolicy: intention, + sentinelPolicy: sp.Sentinel, + } + p.intentionRules.Insert(sp.Name, policyRule) } // Load the session policy @@ -455,6 +493,44 @@ func (p *PolicyACL) EventWrite(name string) bool { return p.parent.EventWrite(name) } +// IntentionRead checks if writing (creating, updating, or deleting) of an +// intention is allowed. +func (p *PolicyACL) IntentionRead(prefix string) bool { + // Check for an exact rule or catch-all + _, rule, ok := p.intentionRules.LongestPrefix(prefix) + if ok { + pr := rule.(PolicyRule) + switch pr.aclPolicy { + case PolicyRead, PolicyWrite: + return true + default: + return false + } + } + + // No matching rule, use the parent. + return p.parent.IntentionRead(prefix) +} + +// IntentionWrite checks if writing (creating, updating, or deleting) of an +// intention is allowed. +func (p *PolicyACL) IntentionWrite(prefix string) bool { + // Check for an exact rule or catch-all + _, rule, ok := p.intentionRules.LongestPrefix(prefix) + if ok { + pr := rule.(PolicyRule) + switch pr.aclPolicy { + case PolicyWrite: + return true + default: + return false + } + } + + // No matching rule, use the parent. 
+ return p.parent.IntentionWrite(prefix) +} + // KeyRead returns if a key is allowed to be read func (p *PolicyACL) KeyRead(key string) bool { // Look for a matching rule diff --git a/acl/acl_test.go b/acl/acl_test.go index 02ae0efb4..85f35f606 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -53,6 +53,9 @@ func TestStaticACL(t *testing.T) { if !all.EventWrite("foobar") { t.Fatalf("should allow") } + if !all.IntentionWrite("foobar") { + t.Fatalf("should allow") + } if !all.KeyRead("foobar") { t.Fatalf("should allow") } @@ -123,6 +126,9 @@ func TestStaticACL(t *testing.T) { if none.EventWrite("") { t.Fatalf("should not allow") } + if none.IntentionWrite("foo") { + t.Fatalf("should not allow") + } if none.KeyRead("foobar") { t.Fatalf("should not allow") } @@ -187,6 +193,9 @@ func TestStaticACL(t *testing.T) { if !manage.EventWrite("foobar") { t.Fatalf("should allow") } + if !manage.IntentionWrite("foobar") { + t.Fatalf("should allow") + } if !manage.KeyRead("foobar") { t.Fatalf("should allow") } @@ -305,8 +314,14 @@ func TestPolicyACL(t *testing.T) { Policy: PolicyDeny, }, &ServicePolicy{ - Name: "barfoo", - Policy: PolicyWrite, + Name: "barfoo", + Policy: PolicyWrite, + Intentions: PolicyWrite, + }, + &ServicePolicy{ + Name: "intbaz", + Policy: PolicyWrite, + Intentions: PolicyDeny, }, }, } @@ -344,6 +359,31 @@ func TestPolicyACL(t *testing.T) { } } + // Test the intentions + type intentioncase struct { + inp string + read bool + write bool + } + icases := []intentioncase{ + {"other", true, false}, + {"foo", true, false}, + {"bar", false, false}, + {"foobar", true, false}, + {"barfo", false, false}, + {"barfoo", true, true}, + {"barfoo2", true, true}, + {"intbaz", false, false}, + } + for _, c := range icases { + if c.read != acl.IntentionRead(c.inp) { + t.Fatalf("Read fail: %#v", c) + } + if c.write != acl.IntentionWrite(c.inp) { + t.Fatalf("Write fail: %#v", c) + } + } + // Test the services type servicecase struct { inp string From 
c54be9bc09c90dc95be9f8059f34bec9ed29aa96 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 4 Mar 2018 00:39:56 -0800 Subject: [PATCH 050/627] agent/consul: Basic ACL on Intention.Apply --- agent/consul/intention_endpoint.go | 15 +++++ agent/consul/intention_endpoint_test.go | 89 +++++++++++++++++++++++++ agent/structs/intention.go | 7 ++ 3 files changed, 111 insertions(+) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 6653d5502..0440f17e4 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -6,6 +6,7 @@ import ( "time" "github.com/armon/go-metrics" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" @@ -71,6 +72,20 @@ func (s *Intention) Apply( } *reply = args.Intention.ID + // Get the ACL token for the request for the checks below. + rule, err := s.srv.resolveToken(args.Token) + if err != nil { + return err + } + + // Perform the ACL check + if prefix, ok := args.Intention.GetACLPrefix(); ok { + if rule != nil && !rule.IntentionWrite(prefix) { + s.srv.logger.Printf("[WARN] consul.intention: Operation on intention '%s' denied due to ACLs", args.Intention.ID) + return acl.ErrPermissionDenied + } + } + // If this is not a create, then we have to verify the ID. 
if args.Op != structs.IntentionOpCreate { state := s.srv.fsm.State() diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 2ba5b04c3..5edf904d7 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/net-rpc-msgpackrpc" @@ -303,6 +304,94 @@ func TestIntentionApply_deleteGood(t *testing.T) { } } +// Test apply with a deny ACL +func TestIntentionApply_aclDeny(t *testing.T) { + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create an ACL with write permissions + var token string + { + var rules = ` +service "foo" { + policy = "deny" + intentions = "write" +}` + + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: rules, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + ixn.Intention.DestinationName = "foobar" + + // Create without a token should error since default deny + var reply string + err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) + if !acl.IsErrPermissionDenied(err) { + t.Fatalf("bad: %v", err) + } + + // Now add the token and try again. 
+ ixn.WriteRequest.Token = token + if err = msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + + // Read + ixn.Intention.ID = reply + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: ixn.Intention.ID, + } + var resp structs.IndexedIntentions + if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if len(resp.Intentions) != 1 { + t.Fatalf("bad: %v", resp) + } + actual := resp.Intentions[0] + if resp.Index != actual.ModifyIndex { + t.Fatalf("bad index: %d", resp.Index) + } + + actual.CreateIndex, actual.ModifyIndex = 0, 0 + actual.CreatedAt = ixn.Intention.CreatedAt + actual.UpdatedAt = ixn.Intention.UpdatedAt + if !reflect.DeepEqual(actual, ixn.Intention) { + t.Fatalf("bad:\n\n%#v\n\n%#v", actual, ixn.Intention) + } + } +} + func TestIntentionList(t *testing.T) { t.Parallel() diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 579fef6c1..fb83f85da 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -157,6 +157,13 @@ func (x *Intention) Validate() error { return result } +// GetACLPrefix returns the prefix to look up the ACL policy for this +// intention, and a boolean noting whether the prefix is valid to check +// or not. You must check the ok value before using the prefix. +func (x *Intention) GetACLPrefix() (string, bool) { + return x.DestinationName, x.DestinationName != "" +} + // IntentionAction is the action that the intention represents. This // can be "allow" or "deny" to whitelist or blacklist intentions. 
type IntentionAction string From 14ca93e09c34286e11f014b442c11a7bfafc59f8 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 4 Mar 2018 00:55:23 -0800 Subject: [PATCH 051/627] agent/consul: tests for ACLs on Intention.Apply update/delete --- agent/consul/intention_endpoint_test.go | 153 ++++++++++++++++++++++++ 1 file changed, 153 insertions(+) diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 5edf904d7..946e66f0e 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -392,6 +392,159 @@ service "foo" { } } +// Test apply with delete and a default deny ACL +func TestIntentionApply_aclDelete(t *testing.T) { + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create an ACL with write permissions + var token string + { + var rules = ` +service "foo" { + policy = "deny" + intentions = "write" +}` + + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: rules, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + ixn.Intention.DestinationName = "foobar" + ixn.WriteRequest.Token = token + + // Create + var reply string + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("bad: %v", err) + } + + // Try to do a delete with no token; this should get rejected. 
+ ixn.Op = structs.IntentionOpDelete + ixn.Intention.ID = reply + ixn.WriteRequest.Token = "" + err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) + if !acl.IsErrPermissionDenied(err) { + t.Fatalf("bad: %v", err) + } + + // Try again with the original token. This should go through. + ixn.WriteRequest.Token = token + if err = msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + + // Verify it is gone + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: ixn.Intention.ID, + } + var resp structs.IndexedIntentions + err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp) + if err == nil || err.Error() != ErrIntentionNotFound.Error() { + t.Fatalf("err: %v", err) + } + } +} + +// Test apply with update and a default deny ACL +func TestIntentionApply_aclUpdate(t *testing.T) { + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create an ACL with write permissions + var token string + { + var rules = ` +service "foo" { + policy = "deny" + intentions = "write" +}` + + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: rules, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + ixn.Intention.DestinationName = "foobar" + ixn.WriteRequest.Token = token + + // Create + var reply string + if err := 
msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("bad: %v", err) + } + + // Try to do an update without a token; this should get rejected. + ixn.Op = structs.IntentionOpUpdate + ixn.Intention.ID = reply + ixn.WriteRequest.Token = "" + err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) + if !acl.IsErrPermissionDenied(err) { + t.Fatalf("bad: %v", err) + } + + // Try again with the original token; this should go through. + ixn.WriteRequest.Token = token + if err = msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } +} + func TestIntentionList(t *testing.T) { t.Parallel() From fd840da97a5fdb76544b10183d83677dad097a64 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 4 Mar 2018 11:35:39 -0800 Subject: [PATCH 052/627] agent/consul: Intention.Apply ACL on rename --- agent/consul/intention_endpoint.go | 9 ++ agent/consul/intention_endpoint_test.go | 109 ++++++++++++++++++++++++ 2 files changed, 118 insertions(+) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 0440f17e4..2a409dcbe 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -96,6 +96,15 @@ func (s *Intention) Apply( if ixn == nil { return fmt.Errorf("Cannot modify non-existent intention: '%s'", args.Intention.ID) } + + // Perform the ACL check that we have write to the old prefix too, + // which must be true to perform any rename. + if prefix, ok := ixn.GetACLPrefix(); ok { + if rule != nil && !rule.IntentionWrite(prefix) { + s.srv.logger.Printf("[WARN] consul.intention: Operation on intention '%s' denied due to ACLs", args.Intention.ID) + return acl.ErrPermissionDenied + } + } } // We always update the updatedat field. This has no effect for deletion. 
diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 946e66f0e..fd76bbb78 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -545,6 +545,115 @@ service "foo" { } } +// Test apply with a management token +func TestIntentionApply_aclManagement(t *testing.T) { + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + ixn.Intention.DestinationName = "foobar" + ixn.WriteRequest.Token = "root" + + // Create + var reply string + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("bad: %v", err) + } + ixn.Intention.ID = reply + + // Update + ixn.Op = structs.IntentionOpUpdate + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + + // Delete + ixn.Op = structs.IntentionOpDelete + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } +} + +// Test update changing the name where an ACL won't allow it +func TestIntentionApply_aclUpdateChange(t *testing.T) { + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create an ACL with write permissions + var token string + { + var rules = ` +service "foo" { + policy = "deny" + intentions = 
"write" +}` + + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: rules, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + ixn.Intention.DestinationName = "bar" + ixn.WriteRequest.Token = "root" + + // Create + var reply string + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("bad: %v", err) + } + + // Try to do an update without a token; this should get rejected. + ixn.Op = structs.IntentionOpUpdate + ixn.Intention.ID = reply + ixn.Intention.DestinationName = "foo" + ixn.WriteRequest.Token = token + err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) + if !acl.IsErrPermissionDenied(err) { + t.Fatalf("bad: %v", err) + } +} + func TestIntentionList(t *testing.T) { t.Parallel() From db44a98a2dd33444cdfb43a35259f76a9f9e9587 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 4 Mar 2018 11:53:52 -0800 Subject: [PATCH 053/627] agent/consul: Intention.Get ACLs --- agent/consul/acl.go | 30 +++++++++ agent/consul/intention_endpoint.go | 11 ++- agent/consul/intention_endpoint_test.go | 90 +++++++++++++++++++++++++ 3 files changed, 130 insertions(+), 1 deletion(-) diff --git a/agent/consul/acl.go b/agent/consul/acl.go index 1e95e62e4..ce3282b40 100644 --- a/agent/consul/acl.go +++ b/agent/consul/acl.go @@ -454,6 +454,33 @@ func (f *aclFilter) filterCoordinates(coords *structs.Coordinates) { *coords = c } +// filterIntentions is used to filter intentions based on ACL rules. 
+// We prune entries the user doesn't have access to, and we redact any tokens +// if the user doesn't have a management token. +func (f *aclFilter) filterIntentions(ixns *structs.Intentions) { + // Management tokens can see everything with no filtering. + if f.acl.ACLList() { + return + } + + // Otherwise, we need to see what the token has access to. + ret := make(structs.Intentions, 0, len(*ixns)) + for _, ixn := range *ixns { + // If no prefix ACL applies to this then filter it, since + // we know at this point the user doesn't have a management + // token, otherwise see what the policy says. + prefix, ok := ixn.GetACLPrefix() + if !ok || !f.acl.IntentionRead(prefix) { + f.logger.Printf("[DEBUG] consul: dropping intention %q from result due to ACLs", ixn.ID) + continue + } + + ret = append(ret, ixn) + } + + *ixns = ret +} + // filterNodeDump is used to filter through all parts of a node dump and // remove elements the provided ACL token cannot access. func (f *aclFilter) filterNodeDump(dump *structs.NodeDump) { @@ -598,6 +625,9 @@ func (s *Server) filterACL(token string, subj interface{}) error { case *structs.IndexedHealthChecks: filt.filterHealthChecks(&v.HealthChecks) + case *structs.IndexedIntentions: + filt.filterIntentions(&v.Intentions) + case *structs.IndexedNodeDump: filt.filterNodeDump(&v.Dump) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 2a409dcbe..568446d73 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -168,7 +168,16 @@ func (s *Intention) Get( reply.Index = index reply.Intentions = structs.Intentions{ixn} - // TODO: acl filtering + // Filter + if err := s.srv.filterACL(args.Token, reply); err != nil { + return err + } + + // If ACLs prevented any responses, error + if len(reply.Intentions) == 0 { + s.srv.logger.Printf("[WARN] consul.intention: Request to get intention '%s' denied due to ACLs", args.IntentionID) + return acl.ErrPermissionDenied + } return nil 
}, diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index fd76bbb78..67c2a07d0 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -654,6 +654,96 @@ service "foo" { } } +// Test reading with ACLs +func TestIntentionGet_acl(t *testing.T) { + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create an ACL with service write permissions. This will grant + // intentions read. + var token string + { + var rules = ` +service "foo" { + policy = "write" +}` + + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: rules, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Setup a basic record to create + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + ixn.Intention.DestinationName = "foobar" + ixn.WriteRequest.Token = "root" + + // Create + var reply string + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + ixn.Intention.ID = reply + + // Read without token should be error + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: ixn.Intention.ID, + } + + var resp structs.IndexedIntentions + err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp) + if !acl.IsErrPermissionDenied(err) { + t.Fatalf("bad: %v", err) + } + if len(resp.Intentions) != 0 { + t.Fatalf("bad: %v", resp) + } + } + + // Read with token 
should work + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + IntentionID: ixn.Intention.ID, + QueryOptions: structs.QueryOptions{Token: token}, + } + + var resp structs.IndexedIntentions + if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + if len(resp.Intentions) != 1 { + t.Fatalf("bad: %v", resp) + } + } +} + func TestIntentionList(t *testing.T) { t.Parallel() From 3e10a1ae7a45fd54d5e7329c51c40d5b5b040dbf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 4 Mar 2018 18:32:28 -0800 Subject: [PATCH 054/627] agent/consul: Intention.Match ACLs --- agent/consul/intention_endpoint.go | 23 ++- agent/consul/intention_endpoint_test.go | 233 ++++++++++++++++++++++++ 2 files changed, 250 insertions(+), 6 deletions(-) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 568446d73..2458a8ee9 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -206,8 +206,7 @@ func (s *Intention) List( reply.Intentions = make(structs.Intentions, 0) } - // filterACL - return nil + return s.srv.filterACL(args.Token, reply) }, ) } @@ -221,7 +220,22 @@ func (s *Intention) Match( return err } - // TODO(mitchellh): validate + // Get the ACL token for the request for the checks below. + rule, err := s.srv.resolveToken(args.Token) + if err != nil { + return err + } + + if rule != nil { + // We go through each entry and test the destination to check if it + // matches. 
+ for _, entry := range args.Match.Entries { + if prefix := entry.Name; prefix != "" && !rule.IntentionRead(prefix) { + s.srv.logger.Printf("[WARN] consul.intention: Operation on intention prefix '%s' denied due to ACLs", prefix) + return acl.ErrPermissionDenied + } + } + } return s.srv.blockingQuery( &args.QueryOptions, @@ -234,9 +248,6 @@ func (s *Intention) Match( reply.Index = index reply.Matches = matches - - // TODO(mitchellh): acl filtering - return nil }, ) diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 67c2a07d0..5a0a8a723 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -768,6 +768,110 @@ func TestIntentionList(t *testing.T) { } } +// Test listing with ACLs +func TestIntentionList_acl(t *testing.T) { + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create an ACL with service write permissions. This will grant + // intentions read. 
+ var token string + { + var rules = ` +service "foo" { + policy = "write" +}` + + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: rules, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Create a few records + for _, name := range []string{"foobar", "bar", "baz"} { + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + ixn.Intention.DestinationName = name + ixn.WriteRequest.Token = "root" + + // Create + var reply string + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Test with no token + { + req := &structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var resp structs.IndexedIntentions + if err := msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + if len(resp.Intentions) != 0 { + t.Fatalf("bad: %v", resp) + } + } + + // Test with management token + { + req := &structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: "root"}, + } + var resp structs.IndexedIntentions + if err := msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + if len(resp.Intentions) != 3 { + t.Fatalf("bad: %v", resp) + } + } + + // Test with user token + { + req := &structs.DCSpecificRequest{ + Datacenter: "dc1", + QueryOptions: structs.QueryOptions{Token: token}, + } + var resp structs.IndexedIntentions + if err := msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + if len(resp.Intentions) != 1 { + t.Fatalf("bad: %v", resp) + } + } +} + // Test basic matching. 
We don't need to exhaustively test inputs since this // is tested in the agent/consul/state package. func TestIntentionMatch_good(t *testing.T) { @@ -836,3 +940,132 @@ func TestIntentionMatch_good(t *testing.T) { } assert.Equal(expected, actual) } + +// Test matching with ACLs +func TestIntentionMatch_acl(t *testing.T) { + t.Parallel() + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create an ACL with service write permissions. This will grant + // intentions read. + var token string + { + var rules = ` +service "bar" { + policy = "write" +}` + + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: rules, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Create some records + { + insert := [][]string{ + {"foo", "*"}, + {"foo", "bar"}, + {"foo", "baz"}, // shouldn't match + {"bar", "bar"}, // shouldn't match + {"bar", "*"}, // shouldn't match + {"*", "*"}, + } + + for _, v := range insert { + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + ixn.Intention.DestinationNS = v[0] + ixn.Intention.DestinationName = v[1] + ixn.WriteRequest.Token = "root" + + // Create + var reply string + if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { + t.Fatalf("err: %v", err) + } + } + } + + // Test with no token + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Match: &structs.IntentionQueryMatch{ + Type: structs.IntentionMatchDestination, + Entries: 
[]structs.IntentionMatchEntry{ + { + Namespace: "foo", + Name: "bar", + }, + }, + }, + } + var resp structs.IndexedIntentionMatches + err := msgpackrpc.CallWithCodec(codec, "Intention.Match", req, &resp) + if !acl.IsErrPermissionDenied(err) { + t.Fatalf("err: %v", err) + } + + if len(resp.Matches) != 0 { + t.Fatalf("bad: %#v", resp.Matches) + } + } + + // Test with proper token + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Match: &structs.IntentionQueryMatch{ + Type: structs.IntentionMatchDestination, + Entries: []structs.IntentionMatchEntry{ + { + Namespace: "foo", + Name: "bar", + }, + }, + }, + QueryOptions: structs.QueryOptions{Token: token}, + } + var resp structs.IndexedIntentionMatches + if err := msgpackrpc.CallWithCodec(codec, "Intention.Match", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + if len(resp.Matches) != 1 { + t.Fatalf("bad: %#v", resp.Matches) + } + + expected := [][]string{{"foo", "bar"}, {"foo", "*"}, {"*", "*"}} + var actual [][]string + for _, ixn := range resp.Matches[0] { + actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) + } + + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", actual, expected) + } + } +} From 6a8bba7d487bac6e62a374ac9d601af8e357cd8c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 4 Mar 2018 18:46:33 -0800 Subject: [PATCH 055/627] agent/consul,structs: add tests for ACL filter and prefix for intentions --- agent/consul/acl_test.go | 60 +++++++++++++++++++++++++++++++++ agent/structs/intention_test.go | 37 ++++++++++++++++++++ 2 files changed, 97 insertions(+) diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index 9a1eaba6c..a37bbf101 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -847,6 +847,66 @@ node "node1" { } } +func TestACL_filterIntentions(t *testing.T) { + t.Parallel() + fill := func() structs.Intentions { + return structs.Intentions{ + &structs.Intention{ + 
ID: "f004177f-2c28-83b7-4229-eacc25fe55d1", + DestinationName: "bar", + }, + &structs.Intention{ + ID: "f004177f-2c28-83b7-4229-eacc25fe55d2", + DestinationName: "foo", + }, + } + } + + // Try permissive filtering. + { + ixns := fill() + filt := newACLFilter(acl.AllowAll(), nil, false) + filt.filterIntentions(&ixns) + if len(ixns) != 2 { + t.Fatalf("bad: %#v", ixns) + } + } + + // Try restrictive filtering. + { + ixns := fill() + filt := newACLFilter(acl.DenyAll(), nil, false) + filt.filterIntentions(&ixns) + if len(ixns) != 0 { + t.Fatalf("bad: %#v", ixns) + } + } + + // Policy to see one + policy, err := acl.Parse(` +service "foo" { + policy = "read" +} +`, nil) + if err != nil { + t.Fatalf("err %v", err) + } + perms, err := acl.New(acl.DenyAll(), policy, nil) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Filter + { + ixns := fill() + filt := newACLFilter(perms, nil, false) + filt.filterIntentions(&ixns) + if len(ixns) != 1 { + t.Fatalf("bad: %#v", ixns) + } + } +} + func TestACL_filterServices(t *testing.T) { t.Parallel() // Create some services diff --git a/agent/structs/intention_test.go b/agent/structs/intention_test.go index 9db4ff255..948ae920e 100644 --- a/agent/structs/intention_test.go +++ b/agent/structs/intention_test.go @@ -8,6 +8,43 @@ import ( "github.com/stretchr/testify/assert" ) +func TestIntentionGetACLPrefix(t *testing.T) { + cases := []struct { + Name string + Input *Intention + Expected string + }{ + { + "unset name", + &Intention{DestinationName: ""}, + "", + }, + + { + "set name", + &Intention{DestinationName: "fo"}, + "fo", + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + actual, ok := tc.Input.GetACLPrefix() + if tc.Expected == "" { + if !ok { + return + } + + t.Fatal("should not be ok") + } + + if actual != tc.Expected { + t.Fatalf("bad: %q", actual) + } + }) + } +} + func TestIntentionValidate(t *testing.T) { cases := []struct { Name string From 23ee0888ecf83dd0cb8e94e7481ee4df90db103c Mon Sep 17 
00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 6 Mar 2018 10:51:26 -0800 Subject: [PATCH 056/627] agent/consul: convert intention ACLs to testify/assert --- acl/policy_test.go | 17 +- agent/consul/acl_test.go | 23 +-- agent/consul/intention_endpoint_test.go | 199 ++++++++---------------- 3 files changed, 77 insertions(+), 162 deletions(-) diff --git a/acl/policy_test.go b/acl/policy_test.go index 9d3ae8f69..19468e38b 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -4,6 +4,8 @@ import ( "reflect" "strings" "testing" + + "github.com/stretchr/testify/assert" ) func TestParse_table(t *testing.T) { @@ -69,21 +71,14 @@ service "foo" { for _, tc := range cases { t.Run(tc.Name, func(t *testing.T) { + assert := assert.New(t) actual, err := Parse(tc.Input, nil) - if (err != nil) != (tc.Err != "") { - t.Fatalf("err: %s", err) - } + assert.Equal(tc.Err != "", err != nil, err) if err != nil { - if !strings.Contains(err.Error(), tc.Err) { - t.Fatalf("err: %s", err) - } - + assert.Contains(err.Error(), tc.Err) return } - - if !reflect.DeepEqual(actual, tc.Expected) { - t.Fatalf("bad: %#v", actual) - } + assert.Equal(tc.Expected, actual) }) } } diff --git a/agent/consul/acl_test.go b/agent/consul/acl_test.go index a37bbf101..ace1284a8 100644 --- a/agent/consul/acl_test.go +++ b/agent/consul/acl_test.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/consul/testutil/retry" + "github.com/stretchr/testify/assert" ) var testACLPolicy = ` @@ -849,6 +850,8 @@ node "node1" { func TestACL_filterIntentions(t *testing.T) { t.Parallel() + assert := assert.New(t) + fill := func() structs.Intentions { return structs.Intentions{ &structs.Intention{ @@ -867,9 +870,7 @@ func TestACL_filterIntentions(t *testing.T) { ixns := fill() filt := newACLFilter(acl.AllowAll(), nil, false) filt.filterIntentions(&ixns) - if len(ixns) != 2 { - t.Fatalf("bad: %#v", ixns) - } + assert.Len(ixns, 2) } // Try 
restrictive filtering. @@ -877,9 +878,7 @@ func TestACL_filterIntentions(t *testing.T) { ixns := fill() filt := newACLFilter(acl.DenyAll(), nil, false) filt.filterIntentions(&ixns) - if len(ixns) != 0 { - t.Fatalf("bad: %#v", ixns) - } + assert.Len(ixns, 0) } // Policy to see one @@ -888,22 +887,16 @@ service "foo" { policy = "read" } `, nil) - if err != nil { - t.Fatalf("err %v", err) - } + assert.Nil(err) perms, err := acl.New(acl.DenyAll(), policy, nil) - if err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(err) // Filter { ixns := fill() filt := newACLFilter(perms, nil, false) filt.filterIntentions(&ixns) - if len(ixns) != 1 { - t.Fatalf("bad: %#v", ixns) - } + assert.Len(ixns, 1) } } diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index 5a0a8a723..a1e1ae751 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -307,6 +307,8 @@ func TestIntentionApply_deleteGood(t *testing.T) { // Test apply with a deny ACL func TestIntentionApply_aclDeny(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" @@ -338,9 +340,7 @@ service "foo" { }, WriteRequest: structs.WriteRequest{Token: "root"}, } - if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token)) } // Setup a basic record to create @@ -354,47 +354,38 @@ service "foo" { // Create without a token should error since default deny var reply string err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) - if !acl.IsErrPermissionDenied(err) { - t.Fatalf("bad: %v", err) - } + assert.True(acl.IsErrPermissionDenied(err)) // Now add the token and try again. 
ixn.WriteRequest.Token = token - if err = msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) // Read ixn.Intention.ID = reply { req := &structs.IntentionQueryRequest{ - Datacenter: "dc1", - IntentionID: ixn.Intention.ID, + Datacenter: "dc1", + IntentionID: ixn.Intention.ID, + QueryOptions: structs.QueryOptions{Token: "root"}, } var resp structs.IndexedIntentions - if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if len(resp.Intentions) != 1 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp)) + assert.Len(resp.Intentions, 1) actual := resp.Intentions[0] - if resp.Index != actual.ModifyIndex { - t.Fatalf("bad index: %d", resp.Index) - } + assert.Equal(resp.Index, actual.ModifyIndex) actual.CreateIndex, actual.ModifyIndex = 0, 0 actual.CreatedAt = ixn.Intention.CreatedAt actual.UpdatedAt = ixn.Intention.UpdatedAt - if !reflect.DeepEqual(actual, ixn.Intention) { - t.Fatalf("bad:\n\n%#v\n\n%#v", actual, ixn.Intention) - } + assert.Equal(ixn.Intention, actual) } } // Test apply with delete and a default deny ACL func TestIntentionApply_aclDelete(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" @@ -426,9 +417,7 @@ service "foo" { }, WriteRequest: structs.WriteRequest{Token: "root"}, } - if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token)) } // Setup a basic record to create @@ -442,24 +431,18 @@ service "foo" { // Create var reply string - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("bad: %v", err) - } + 
assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) // Try to do a delete with no token; this should get rejected. ixn.Op = structs.IntentionOpDelete ixn.Intention.ID = reply ixn.WriteRequest.Token = "" err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) - if !acl.IsErrPermissionDenied(err) { - t.Fatalf("bad: %v", err) - } + assert.True(acl.IsErrPermissionDenied(err)) // Try again with the original token. This should go through. ixn.WriteRequest.Token = token - if err = msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) // Verify it is gone { @@ -469,15 +452,16 @@ service "foo" { } var resp structs.IndexedIntentions err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp) - if err == nil || err.Error() != ErrIntentionNotFound.Error() { - t.Fatalf("err: %v", err) - } + assert.NotNil(err) + assert.Contains(err.Error(), ErrIntentionNotFound.Error()) } } // Test apply with update and a default deny ACL func TestIntentionApply_aclUpdate(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" @@ -509,9 +493,7 @@ service "foo" { }, WriteRequest: structs.WriteRequest{Token: "root"}, } - if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token)) } // Setup a basic record to create @@ -525,29 +507,25 @@ service "foo" { // Create var reply string - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("bad: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) // Try to do an update without a token; this should get rejected. 
ixn.Op = structs.IntentionOpUpdate ixn.Intention.ID = reply ixn.WriteRequest.Token = "" err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) - if !acl.IsErrPermissionDenied(err) { - t.Fatalf("bad: %v", err) - } + assert.True(acl.IsErrPermissionDenied(err)) // Try again with the original token; this should go through. ixn.WriteRequest.Token = token - if err = msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) } // Test apply with a management token func TestIntentionApply_aclManagement(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" @@ -571,27 +549,23 @@ func TestIntentionApply_aclManagement(t *testing.T) { // Create var reply string - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("bad: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) ixn.Intention.ID = reply // Update ixn.Op = structs.IntentionOpUpdate - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) // Delete ixn.Op = structs.IntentionOpDelete - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) } // Test update changing the name where an ACL won't allow it func TestIntentionApply_aclUpdateChange(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" @@ -623,9 +597,7 @@ service "foo" { }, WriteRequest: structs.WriteRequest{Token: "root"}, } - if err := 
msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token)) } // Setup a basic record to create @@ -639,9 +611,7 @@ service "foo" { // Create var reply string - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("bad: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) // Try to do an update without a token; this should get rejected. ixn.Op = structs.IntentionOpUpdate @@ -649,14 +619,14 @@ service "foo" { ixn.Intention.DestinationName = "foo" ixn.WriteRequest.Token = token err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply) - if !acl.IsErrPermissionDenied(err) { - t.Fatalf("bad: %v", err) - } + assert.True(acl.IsErrPermissionDenied(err)) } // Test reading with ACLs func TestIntentionGet_acl(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" @@ -688,9 +658,7 @@ service "foo" { }, WriteRequest: structs.WriteRequest{Token: "root"}, } - if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token)) } // Setup a basic record to create @@ -704,9 +672,7 @@ service "foo" { // Create var reply string - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) ixn.Intention.ID = reply // Read without token should be error @@ -718,12 +684,8 @@ service "foo" { var resp structs.IndexedIntentions err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp) - if !acl.IsErrPermissionDenied(err) { - t.Fatalf("bad: %v", err) - } - if len(resp.Intentions) != 0 { - t.Fatalf("bad: %v", 
resp) - } + assert.True(acl.IsErrPermissionDenied(err)) + assert.Len(resp.Intentions, 0) } // Read with token should work @@ -735,12 +697,8 @@ service "foo" { } var resp structs.IndexedIntentions - if err := msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - if len(resp.Intentions) != 1 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Get", req, &resp)) + assert.Len(resp.Intentions, 1) } } @@ -771,6 +729,8 @@ func TestIntentionList(t *testing.T) { // Test listing with ACLs func TestIntentionList_acl(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" @@ -802,9 +762,7 @@ service "foo" { }, WriteRequest: structs.WriteRequest{Token: "root"}, } - if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token)) } // Create a few records @@ -819,9 +777,7 @@ service "foo" { // Create var reply string - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) } // Test with no token @@ -830,13 +786,8 @@ service "foo" { Datacenter: "dc1", } var resp structs.IndexedIntentions - if err := msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - - if len(resp.Intentions) != 0 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) + assert.Len(resp.Intentions, 0) } // Test with management token @@ -846,13 +797,8 @@ service "foo" { QueryOptions: structs.QueryOptions{Token: "root"}, } var resp structs.IndexedIntentions - if err := msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp); err != nil { - 
t.Fatalf("err: %v", err) - } - - if len(resp.Intentions) != 3 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) + assert.Len(resp.Intentions, 3) } // Test with user token @@ -862,13 +808,8 @@ service "foo" { QueryOptions: structs.QueryOptions{Token: token}, } var resp structs.IndexedIntentions - if err := msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - - if len(resp.Intentions) != 1 { - t.Fatalf("bad: %v", resp) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.List", req, &resp)) + assert.Len(resp.Intentions, 1) } } @@ -944,6 +885,8 @@ func TestIntentionMatch_good(t *testing.T) { // Test matching with ACLs func TestIntentionMatch_acl(t *testing.T) { t.Parallel() + + assert := assert.New(t) dir1, s1 := testServerWithConfig(t, func(c *Config) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" @@ -975,9 +918,7 @@ service "bar" { }, WriteRequest: structs.WriteRequest{Token: "root"}, } - if err := msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token)) } // Create some records @@ -1003,9 +944,7 @@ service "bar" { // Create var reply string - if err := msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply); err != nil { - t.Fatalf("err: %v", err) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) } } @@ -1025,13 +964,8 @@ service "bar" { } var resp structs.IndexedIntentionMatches err := msgpackrpc.CallWithCodec(codec, "Intention.Match", req, &resp) - if !acl.IsErrPermissionDenied(err) { - t.Fatalf("err: %v", err) - } - - if len(resp.Matches) != 0 { - t.Fatalf("bad: %#v", resp.Matches) - } + assert.True(acl.IsErrPermissionDenied(err)) + assert.Len(resp.Matches, 0) } // Test with proper token @@ -1050,13 +984,8 @@ service "bar" { QueryOptions: structs.QueryOptions{Token: token}, } 
var resp structs.IndexedIntentionMatches - if err := msgpackrpc.CallWithCodec(codec, "Intention.Match", req, &resp); err != nil { - t.Fatalf("err: %v", err) - } - - if len(resp.Matches) != 1 { - t.Fatalf("bad: %#v", resp.Matches) - } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Match", req, &resp)) + assert.Len(resp.Matches, 1) expected := [][]string{{"foo", "bar"}, {"foo", "*"}, {"*", "*"}} var actual [][]string @@ -1064,8 +993,6 @@ service "bar" { actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) } - if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad (got, wanted):\n\n%#v\n\n%#v", actual, expected) - } + assert.Equal(expected, actual) } } From 09568ce7b5d5436e8cf504bb2af506821aeff1c9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 5 Mar 2018 19:56:52 -0800 Subject: [PATCH 057/627] agent/consul/state: service registration with proxy works --- agent/consul/state/catalog_test.go | 34 ++++++++++++++++++++++++++++++ agent/structs/structs.go | 28 ++++++++++++++++++++++++ 2 files changed, 62 insertions(+) diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index a2b56cad8..dcd1d505a 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -981,6 +981,40 @@ func TestStateStore_EnsureService(t *testing.T) { } } +func TestStateStore_EnsureService_connectProxy(t *testing.T) { + s := testStateStore(t) + + // Create the service registration. + ns1 := &structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: "connect-proxy", + Service: "connect-proxy", + Address: "1.1.1.1", + Port: 1111, + ProxyDestination: "foo", + } + + // Service successfully registers into the state store. 
+ testRegisterNode(t, s, 0, "node1") + if err := s.EnsureService(10, "node1", ns1); err != nil { + t.Fatalf("err: %s", err) + } + + // Retrieve and verify + _, out, err := s.NodeServices(nil, "node1") + if err != nil { + t.Fatalf("err: %s", err) + } + if out == nil || len(out.Services) != 1 { + t.Fatalf("bad: %#v", out) + } + expect1 := *ns1 + expect1.CreateIndex, expect1.ModifyIndex = 10, 10 + if svc := out.Services["connect-proxy"]; !reflect.DeepEqual(&expect1, svc) { + t.Fatalf("bad: %#v", svc) + } +} + func TestStateStore_Services(t *testing.T) { s := testStateStore(t) diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 8a1860912..23cd41acc 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -388,6 +388,7 @@ type ServiceNode struct { Datacenter string TaggedAddresses map[string]string NodeMeta map[string]string + ServiceKind ServiceKind ServiceID string ServiceName string ServiceTags []string @@ -395,6 +396,7 @@ type ServiceNode struct { ServiceMeta map[string]string ServicePort int ServiceEnableTagOverride bool + ServiceProxyDestination string RaftIndex } @@ -431,6 +433,7 @@ func (s *ServiceNode) PartialClone() *ServiceNode { // ToNodeService converts the given service node to a node service. func (s *ServiceNode) ToNodeService() *NodeService { return &NodeService{ + Kind: s.ServiceKind, ID: s.ServiceID, Service: s.ServiceName, Tags: s.ServiceTags, @@ -438,6 +441,7 @@ func (s *ServiceNode) ToNodeService() *NodeService { Port: s.ServicePort, Meta: s.ServiceMeta, EnableTagOverride: s.ServiceEnableTagOverride, + ProxyDestination: s.ServiceProxyDestination, RaftIndex: RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, @@ -447,8 +451,26 @@ func (s *ServiceNode) ToNodeService() *NodeService { type ServiceNodes []*ServiceNode +// ServiceKind is the kind of service being registered. +type ServiceKind string + +const ( + // ServiceKindTypical is a typical, classic Consul service. 
+ ServiceKindTypical ServiceKind = "typical" + + // ServiceKindConnectProxy is a proxy for the Connect feature. This + // service proxies another service within Consul and speaks the connect + // protocol. + ServiceKindConnectProxy ServiceKind = "connect-proxy" +) + // NodeService is a service provided by a node type NodeService struct { + // Kind is the kind of service this is. Different kinds of services may + // have differing validation, DNS behavior, etc. An empty kind will default + // to the Default kind. See ServiceKind for the full list of kinds. + Kind ServiceKind + ID string Service string Tags []string @@ -457,6 +479,10 @@ type NodeService struct { Port int EnableTagOverride bool + // ProxyDestination is the name of the service that this service is + // a Connect proxy for. This is only valid if Kind is "connect-proxy". + ProxyDestination string + RaftIndex } @@ -485,6 +511,7 @@ func (s *NodeService) ToServiceNode(node string) *ServiceNode { Node: node, // Skip Address, see ServiceNode definition. // Skip TaggedAddresses, see ServiceNode definition. 
+ ServiceKind: s.Kind, ServiceID: s.ID, ServiceName: s.Service, ServiceTags: s.Tags, @@ -492,6 +519,7 @@ func (s *NodeService) ToServiceNode(node string) *ServiceNode { ServicePort: s.Port, ServiceMeta: s.Meta, ServiceEnableTagOverride: s.EnableTagOverride, + ServiceProxyDestination: s.ProxyDestination, RaftIndex: RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, From 58bff8dd05c7c25b9bb339ee2727c817ae1b5636 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 6 Mar 2018 17:13:52 -0800 Subject: [PATCH 058/627] agent/consul/state: convert proxy test to testify/assert --- agent/consul/state/catalog_test.go | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index dcd1d505a..c057ebea6 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/go-memdb" uuid "github.com/hashicorp/go-uuid" "github.com/pascaldekloe/goe/verify" + "github.com/stretchr/testify/assert" ) func makeRandomNodeID(t *testing.T) types.NodeID { @@ -982,6 +983,7 @@ func TestStateStore_EnsureService(t *testing.T) { } func TestStateStore_EnsureService_connectProxy(t *testing.T) { + assert := assert.New(t) s := testStateStore(t) // Create the service registration. @@ -996,23 +998,17 @@ func TestStateStore_EnsureService_connectProxy(t *testing.T) { // Service successfully registers into the state store. 
testRegisterNode(t, s, 0, "node1") - if err := s.EnsureService(10, "node1", ns1); err != nil { - t.Fatalf("err: %s", err) - } + assert.Nil(s.EnsureService(10, "node1", ns1)) // Retrieve and verify _, out, err := s.NodeServices(nil, "node1") - if err != nil { - t.Fatalf("err: %s", err) - } - if out == nil || len(out.Services) != 1 { - t.Fatalf("bad: %#v", out) - } + assert.Nil(err) + assert.NotNil(out) + assert.Len(out.Services, 1) + expect1 := *ns1 expect1.CreateIndex, expect1.ModifyIndex = 10, 10 - if svc := out.Services["connect-proxy"]; !reflect.DeepEqual(&expect1, svc) { - t.Fatalf("bad: %#v", svc) - } + assert.Equal(&expect1, out.Services["connect-proxy"]) } func TestStateStore_Services(t *testing.T) { From 761b561946201ac40c2e8d1b69ef85f69f9fccaf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 6 Mar 2018 17:32:41 -0800 Subject: [PATCH 059/627] agent: /v1/catalog/service/:service works with proxies --- agent/catalog_endpoint_test.go | 25 +++++++++++++++++++++ agent/consul/catalog_endpoint_test.go | 32 +++++++++++++++++++++++++++ agent/structs/catalog.go | 7 ++++-- agent/structs/structs.go | 2 ++ agent/structs/testing_catalog.go | 22 ++++++++++++++++++ 5 files changed, 86 insertions(+), 2 deletions(-) create mode 100644 agent/structs/testing_catalog.go diff --git a/agent/catalog_endpoint_test.go b/agent/catalog_endpoint_test.go index 845929117..d3af9bf6d 100644 --- a/agent/catalog_endpoint_test.go +++ b/agent/catalog_endpoint_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testutil/retry" "github.com/hashicorp/serf/coordinate" + "github.com/stretchr/testify/assert" ) func TestCatalogRegister_Service_InvalidAddress(t *testing.T) { @@ -750,6 +751,30 @@ func TestCatalogServiceNodes_DistanceSort(t *testing.T) { } } +func TestCatalogServiceNodes_ConnectProxy(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Register + args 
:= structs.TestRegisterRequestProxy(t) + var out struct{} + assert.Nil(a.RPC("Catalog.Register", args, &out)) + + req, _ := http.NewRequest("GET", fmt.Sprintf( + "/v1/catalog/service/%s", args.Service.Service), nil) + resp := httptest.NewRecorder() + obj, err := a.srv.CatalogServiceNodes(resp, req) + assert.Nil(err) + assertIndex(t, resp) + + nodes := obj.(structs.ServiceNodes) + assert.Len(nodes, 1) + assert.Equal(structs.ServiceKindConnectProxy, nodes[0].ServiceKind) +} + func TestCatalogNodeServices(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index f6825f990..db49875cb 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/consul/testutil/retry" "github.com/hashicorp/consul/types" "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/stretchr/testify/assert" ) func TestCatalog_Register(t *testing.T) { @@ -1599,6 +1600,37 @@ func TestCatalog_ListServiceNodes_DistanceSort(t *testing.T) { } } +func TestCatalog_ListServiceNodes_ConnectProxy(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Register the service + args := structs.TestRegisterRequestProxy(t) + var out struct{} + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out)) + + // List + req := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: args.Service.Service, + TagFilter: false, + } + var resp structs.IndexedServiceNodes + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) + assert.Len(resp.ServiceNodes, 1) + v := resp.ServiceNodes[0] + assert.Equal(structs.ServiceKindConnectProxy, v.ServiceKind) + assert.Equal(args.Service.ProxyDestination, 
v.ServiceProxyDestination) +} + func TestCatalog_NodeServices(t *testing.T) { t.Parallel() dir1, s1 := testServer(t) diff --git a/agent/structs/catalog.go b/agent/structs/catalog.go index b6b443f6f..3f68f43a1 100644 --- a/agent/structs/catalog.go +++ b/agent/structs/catalog.go @@ -13,9 +13,12 @@ const ( SerfCheckFailedOutput = "Agent not live or unreachable" ) -// These are used to manage the "consul" service that's attached to every Consul -// server node in the catalog. const ( + // These are used to manage the "consul" service that's attached to every + // Consul server node in the catalog. ConsulServiceID = "consul" ConsulServiceName = "consul" + + // ConnectProxyServiceName is the name of the proxy services. + ConnectProxyServiceName = "connect-proxy" ) diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 23cd41acc..65ec87024 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -416,6 +416,7 @@ func (s *ServiceNode) PartialClone() *ServiceNode { Node: s.Node, // Skip Address, see above. // Skip TaggedAddresses, see above. + ServiceKind: s.ServiceKind, ServiceID: s.ServiceID, ServiceName: s.ServiceName, ServiceTags: tags, @@ -423,6 +424,7 @@ func (s *ServiceNode) PartialClone() *ServiceNode { ServicePort: s.ServicePort, ServiceMeta: nsmeta, ServiceEnableTagOverride: s.ServiceEnableTagOverride, + ServiceProxyDestination: s.ServiceProxyDestination, RaftIndex: RaftIndex{ CreateIndex: s.CreateIndex, ModifyIndex: s.ModifyIndex, diff --git a/agent/structs/testing_catalog.go b/agent/structs/testing_catalog.go new file mode 100644 index 000000000..8a002d380 --- /dev/null +++ b/agent/structs/testing_catalog.go @@ -0,0 +1,22 @@ +package structs + +import ( + "github.com/mitchellh/go-testing-interface" +) + +// TestRegisterRequestProxy returns a RegisterRequest for registering a +// Connect proxy. 
+func TestRegisterRequestProxy(t testing.T) *RegisterRequest { + return &RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &NodeService{ + Kind: ServiceKindConnectProxy, + Service: ConnectProxyServiceName, + Address: "127.0.0.2", + Port: 2222, + ProxyDestination: "web", + }, + } +} From 8777ff139c4340e9961259975d1a3f4cc347977e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 6 Mar 2018 17:41:39 -0800 Subject: [PATCH 060/627] agent: test /v1/catalog/node/:node to list connect proxies --- agent/catalog_endpoint_test.go | 25 +++++++++++++++++++++ agent/consul/catalog_endpoint_test.go | 31 +++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/agent/catalog_endpoint_test.go b/agent/catalog_endpoint_test.go index d3af9bf6d..4df9d4275 100644 --- a/agent/catalog_endpoint_test.go +++ b/agent/catalog_endpoint_test.go @@ -810,6 +810,31 @@ func TestCatalogNodeServices(t *testing.T) { } } +func TestCatalogNodeServices_ConnectProxy(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Register + args := structs.TestRegisterRequestProxy(t) + var out struct{} + assert.Nil(a.RPC("Catalog.Register", args, &out)) + + req, _ := http.NewRequest("GET", fmt.Sprintf( + "/v1/catalog/node/%s", args.Node), nil) + resp := httptest.NewRecorder() + obj, err := a.srv.CatalogNodeServices(resp, req) + assert.Nil(err) + assertIndex(t, resp) + + ns := obj.(*structs.NodeServices) + assert.Len(ns.Services, 1) + v := ns.Services[args.Service.Service] + assert.Equal(structs.ServiceKindConnectProxy, v.Kind) +} + func TestCatalogNodeServices_WanTranslation(t *testing.T) { t.Parallel() a1 := NewTestAgent(t.Name(), ` diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index db49875cb..572ff86bb 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -1681,6 +1681,37 @@ func 
TestCatalog_NodeServices(t *testing.T) { } } +func TestCatalog_NodeServices_ConnectProxy(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Register the service + args := structs.TestRegisterRequestProxy(t) + var out struct{} + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out)) + + // List + req := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: args.Node, + } + var resp structs.IndexedNodeServices + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.NodeServices", &req, &resp)) + + assert.Len(resp.NodeServices.Services, 1) + v := resp.NodeServices.Services[args.Service.Service] + assert.Equal(structs.ServiceKindConnectProxy, v.Kind) + assert.Equal(args.Service.ProxyDestination, v.ProxyDestination) +} + // Used to check for a regression against a known bug func TestCatalog_Register_FailedCase1(t *testing.T) { t.Parallel() From 6cd9e0e37c450f6451d62ea28826cad3da0f6d7a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 8 Mar 2018 10:54:05 -0800 Subject: [PATCH 061/627] agent: /v1/agent/services test with connect proxies (works w/ no change) --- agent/agent_endpoint_test.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 1a62a8427..126994196 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/consul/types" "github.com/hashicorp/serf/serf" "github.com/pascaldekloe/goe/verify" + "github.com/stretchr/testify/assert" ) func makeReadOnlyAgentACL(t *testing.T, srv *HTTPServer) string { @@ -68,6 +69,32 @@ func TestAgent_Services(t *testing.T) { } } +func TestAgent_Services_ConnectProxy(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + 
defer a.Shutdown() + + srv1 := &structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: structs.ConnectProxyServiceName, + Service: structs.ConnectProxyServiceName, + Port: 5000, + ProxyDestination: "db", + } + a.State.AddService(srv1, "") + + req, _ := http.NewRequest("GET", "/v1/agent/services", nil) + obj, err := a.srv.AgentServices(nil, req) + assert.Nil(err) + val := obj.(map[string]*structs.NodeService) + assert.Len(val, 1) + actual := val[structs.ConnectProxyServiceName] + assert.Equal(structs.ServiceKindConnectProxy, actual.Kind) + assert.Equal("db", actual.ProxyDestination) +} + func TestAgent_Services_ACLFilter(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), TestACLConfig()) From 8a728264830a06a7f31d6f1da316ac316fc1c4dc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 8 Mar 2018 22:13:35 -0800 Subject: [PATCH 062/627] agent/consul: proxy registration and tests --- agent/consul/catalog_endpoint.go | 18 ++++++ agent/consul/catalog_endpoint_test.go | 81 +++++++++++++++++++++++++++ agent/structs/structs.go | 26 +++++++++ agent/structs/structs_test.go | 55 ++++++++++++++++++ agent/structs/testing_catalog.go | 20 ++++--- 5 files changed, 193 insertions(+), 7 deletions(-) diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 0c1cbe3de..5cb30b9c3 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -47,6 +47,24 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error // Handle a service registration. if args.Service != nil { + // Connect proxy specific logic + if args.Service.Kind == structs.ServiceKindConnectProxy { + // Name is optional, if it isn't set, we default to the + // proxy name. It actually MUST be this, but the validation + // below this will verify. + if args.Service.Service == "" { + args.Service.Service = fmt.Sprintf( + "%s-connect-proxy", args.Service.ProxyDestination) + } + } + + // Validate the service. 
This is in addition to the below since + // the above just hasn't been moved over yet. We should move it over + // in time. + if err := args.Service.Validate(); err != nil { + return err + } + // If no service id, but service name, use default if args.Service.ID == "" && args.Service.Service != "" { args.Service.ID = args.Service.Service diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index 572ff86bb..2399e9b2f 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -333,6 +333,87 @@ func TestCatalog_Register_ForwardDC(t *testing.T) { } } +func TestCatalog_Register_ConnectProxy(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + args := structs.TestRegisterRequestProxy(t) + + // Register + var out struct{} + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + + // List + req := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: args.Service.Service, + } + var resp structs.IndexedServiceNodes + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) + assert.Len(resp.ServiceNodes, 1) + v := resp.ServiceNodes[0] + assert.Equal(structs.ServiceKindConnectProxy, v.ServiceKind) + assert.Equal(args.Service.ProxyDestination, v.ServiceProxyDestination) +} + +// Test an invalid ConnectProxy. We don't need to exhaustively test because +// this is all tested in structs on the Validate method. 
+func TestCatalog_Register_ConnectProxy_invalid(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + args := structs.TestRegisterRequestProxy(t) + args.Service.ProxyDestination = "" + + // Register + var out struct{} + err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out) + assert.NotNil(err) + assert.Contains(err.Error(), "ProxyDestination") +} + +// Test registering a proxy with no name set, which should work. +func TestCatalog_Register_ConnectProxy_noName(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + args := structs.TestRegisterRequestProxy(t) + args.Service.Service = "" + + // Register + var out struct{} + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + + // List + req := structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: fmt.Sprintf("%s-connect-proxy", args.Service.ProxyDestination), + } + var resp structs.IndexedServiceNodes + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) + assert.Len(resp.ServiceNodes, 1) + v := resp.ServiceNodes[0] + assert.Equal(structs.ServiceKindConnectProxy, v.ServiceKind) +} + func TestCatalog_Deregister(t *testing.T) { t.Parallel() dir1, s1 := testServer(t) diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 65ec87024..e1ab91ab5 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/types" "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/serf/coordinate" ) @@ -488,6 +489,31 @@ type NodeService struct { RaftIndex } +// Validate validates the node service configuration. 
+// +// NOTE(mitchellh): This currently only validates fields for a ConnectProxy. +// Historically validation has been directly in the Catalog.Register RPC. +// ConnectProxy validation was moved here for easier table testing, but +// other validation still exists in Catalog.Register. +func (s *NodeService) Validate() error { + var result error + + // ConnectProxy validation + if s.Kind == ServiceKindConnectProxy { + if strings.TrimSpace(s.ProxyDestination) == "" { + result = multierror.Append(result, fmt.Errorf( + "ProxyDestination must be non-empty for Connect proxy services")) + } + + if s.Port == 0 { + result = multierror.Append(result, fmt.Errorf( + "Port must be set for a Connect proxy")) + } + } + + return result +} + // IsSame checks if one NodeService is the same as another, without looking // at the Raft information (that's why we didn't call it IsEqual). This is // useful for seeing if an update would be idempotent for all the functional diff --git a/agent/structs/structs_test.go b/agent/structs/structs_test.go index dcb8e0c4e..972146d93 100644 --- a/agent/structs/structs_test.go +++ b/agent/structs/structs_test.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/types" + "github.com/stretchr/testify/assert" ) func TestEncodeDecode(t *testing.T) { @@ -208,6 +209,60 @@ func TestStructs_ServiceNode_Conversions(t *testing.T) { } } +func TestStructs_NodeService_ValidateConnectProxy(t *testing.T) { + cases := []struct { + Name string + Modify func(*NodeService) + Err string + }{ + { + "valid", + func(x *NodeService) {}, + "", + }, + + { + "connect-proxy: no ProxyDestination", + func(x *NodeService) { x.ProxyDestination = "" }, + "ProxyDestination must be", + }, + + { + "connect-proxy: whitespace ProxyDestination", + func(x *NodeService) { x.ProxyDestination = " " }, + "ProxyDestination must be", + }, + + { + "connect-proxy: valid ProxyDestination", + func(x *NodeService) { x.ProxyDestination = "hello" }, + "", + }, + + { 
+ "connect-proxy: no port set", + func(x *NodeService) { x.Port = 0 }, + "Port must", + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + assert := assert.New(t) + ns := TestNodeServiceProxy(t) + tc.Modify(ns) + + err := ns.Validate() + assert.Equal(err != nil, tc.Err != "", err) + if err == nil { + return + } + + assert.Contains(strings.ToLower(err.Error()), strings.ToLower(tc.Err)) + }) + } +} + func TestStructs_NodeService_IsSame(t *testing.T) { ns := &NodeService{ ID: "node1", diff --git a/agent/structs/testing_catalog.go b/agent/structs/testing_catalog.go index 8a002d380..4a55f1e3d 100644 --- a/agent/structs/testing_catalog.go +++ b/agent/structs/testing_catalog.go @@ -11,12 +11,18 @@ func TestRegisterRequestProxy(t testing.T) *RegisterRequest { Datacenter: "dc1", Node: "foo", Address: "127.0.0.1", - Service: &NodeService{ - Kind: ServiceKindConnectProxy, - Service: ConnectProxyServiceName, - Address: "127.0.0.2", - Port: 2222, - ProxyDestination: "web", - }, + Service: TestNodeServiceProxy(t), + } +} + +// TestNodeServiceProxy returns a *NodeService representing a valid +// Connect proxy. 
+func TestNodeServiceProxy(t testing.T) *NodeService { + return &NodeService{ + Kind: ServiceKindConnectProxy, + Service: ConnectProxyServiceName, + Address: "127.0.0.2", + Port: 2222, + ProxyDestination: "web", } } From 200100d3f401d1a94954f8b1127e605a0c5680f5 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 8 Mar 2018 22:31:44 -0800 Subject: [PATCH 063/627] agent/consul: enforce ACL on ProxyDestination --- agent/consul/catalog_endpoint.go | 7 +++ agent/consul/catalog_endpoint_test.go | 61 +++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 5cb30b9c3..f6fb9a91d 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -91,6 +91,13 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error return acl.ErrPermissionDenied } } + + // Proxies must have write permission on their destination + if args.Service.Kind == structs.ServiceKindConnectProxy { + if rule != nil && !rule.ServiceWrite(args.Service.ProxyDestination, nil) { + return acl.ErrPermissionDenied + } + } } // Move the old format single check into the slice, and fixup IDs. diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index 2399e9b2f..e810f3f63 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -414,6 +414,67 @@ func TestCatalog_Register_ConnectProxy_noName(t *testing.T) { assert.Equal(structs.ServiceKindConnectProxy, v.ServiceKind) } +// Test that write is required for the proxy destination to register a proxy. 
+func TestCatalog_Register_ConnectProxy_ACLProxyDestination(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + c.ACLEnforceVersion8 = false + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create the ACL. + arg := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: ` +service "foo" { + policy = "write" +} +`, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var token string + assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &token)) + + // Register should fail because we don't have permission on the destination + args := structs.TestRegisterRequestProxy(t) + args.Service.Service = "foo" + args.Service.ProxyDestination = "bar" + args.WriteRequest.Token = token + var out struct{} + err := msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out) + assert.True(acl.IsErrPermissionDenied(err)) + + // Register should fail with the right destination but wrong name + args = structs.TestRegisterRequestProxy(t) + args.Service.Service = "bar" + args.Service.ProxyDestination = "foo" + args.WriteRequest.Token = token + err = msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out) + assert.True(acl.IsErrPermissionDenied(err)) + + // Register should work with the right destination + args = structs.TestRegisterRequestProxy(t) + args.Service.Service = "foo" + args.Service.ProxyDestination = "foo" + args.WriteRequest.Token = token + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) +} + func TestCatalog_Deregister(t *testing.T) { t.Parallel() dir1, s1 := testServer(t) From 06957f6d7ff67890cc8749bdaf4f7135651bc6c8 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: 
Fri, 9 Mar 2018 08:11:39 -0800 Subject: [PATCH 064/627] agent/consul/state: ConnectServiceNodes --- agent/consul/state/catalog.go | 46 ++++++++++++++++++++++++++++++ agent/consul/state/catalog_test.go | 42 +++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 2a81c1071..3eb733bbe 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -10,6 +10,10 @@ import ( "github.com/hashicorp/go-memdb" ) +const ( + servicesTableName = "services" +) + // nodesTableSchema returns a new table schema used for storing node // information. func nodesTableSchema() *memdb.TableSchema { @@ -87,6 +91,15 @@ func servicesTableSchema() *memdb.TableSchema { Lowercase: true, }, }, + "proxy_destination": &memdb.IndexSchema{ + Name: "proxy_destination", + AllowMissing: true, + Unique: false, + Indexer: &memdb.StringFieldIndex{ + Field: "ServiceProxyDestination", + Lowercase: true, + }, + }, }, } } @@ -839,6 +852,39 @@ func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tag string) ( return idx, results, nil } +// ConnectServiceNodes returns the nodes associated with a Connect +// compatible destination for the given service name. This will include +// both proxies and native integrations. +func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.ServiceNodes, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + // Get the table index. + idx := maxIndexForService(tx, serviceName, false) + + // Find all the proxies. When we support native integrations we'll have + // to perform another table lookup here. 
+ services, err := tx.Get(servicesTableName, "proxy_destination", serviceName) + if err != nil { + return 0, nil, fmt.Errorf("failed service lookup: %s", err) + } + ws.Add(services.WatchCh()) + + // Store them + var results structs.ServiceNodes + for service := services.Next(); service != nil; service = services.Next() { + results = append(results, service.(*structs.ServiceNode)) + } + + // Fill in the node details. + results, err = s.parseServiceNodes(tx, ws, results) + if err != nil { + return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err) + } + + return idx, results, nil +} + // serviceTagFilter returns true (should filter) if the given service node // doesn't contain the given tag. func serviceTagFilter(sn *structs.ServiceNode, tag string) bool { diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index c057ebea6..1f20fb9b8 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -1572,6 +1572,48 @@ func TestStateStore_DeleteService(t *testing.T) { } } +func TestStateStore_ConnectServiceNodes(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + // Listing with no results returns an empty list. + ws := memdb.NewWatchSet() + idx, nodes, err := s.ConnectServiceNodes(ws, "db") + assert.Nil(err) + assert.Equal(idx, uint64(0)) + assert.Len(nodes, 0) + + // Create some nodes and services. 
+ assert.Nil(s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"})) + assert.Nil(s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"})) + assert.Nil(s.EnsureService(12, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000})) + assert.Nil(s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000})) + assert.Nil(s.EnsureService(14, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", ProxyDestination: "db", Port: 8000})) + assert.Nil(s.EnsureService(15, "bar", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", ProxyDestination: "db", Port: 8000})) + assert.Nil(s.EnsureService(16, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"slave"}, Address: "", Port: 8001})) + assert.True(watchFired(ws)) + + // Read everything back. + ws = memdb.NewWatchSet() + idx, nodes, err = s.ConnectServiceNodes(ws, "db") + assert.Nil(err) + assert.Equal(idx, uint64(idx)) + assert.Len(nodes, 2) + + for _, n := range nodes { + assert.Equal(structs.ServiceKindConnectProxy, n.ServiceKind) + assert.Equal("db", n.ServiceProxyDestination) + } + + // Registering some unrelated node should not fire the watch. + testRegisterNode(t, s, 17, "nope") + assert.False(watchFired(ws)) + + // But removing a node with the "db" service should fire the watch. 
+ assert.Nil(s.DeleteNode(18, "bar")) + assert.True(watchFired(ws)) +} + func TestStateStore_Service_Snapshot(t *testing.T) { s := testStateStore(t) From 253256352cc99b825726de6096d3a116949f059c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 9 Mar 2018 08:34:55 -0800 Subject: [PATCH 065/627] agent/consul: Catalog.ServiceNodes supports Connect filtering --- agent/consul/catalog_endpoint.go | 48 +++++++++++++++++-------- agent/consul/catalog_endpoint_test.go | 51 +++++++++++++++++++++++++++ agent/structs/structs.go | 4 +++ agent/structs/testing_catalog.go | 14 ++++++++ 4 files changed, 103 insertions(+), 14 deletions(-) diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index f6fb9a91d..840b97fa6 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -269,24 +269,37 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru return fmt.Errorf("Must provide service name") } + // Determine the function we'll call + var f func(memdb.WatchSet, *state.Store) (uint64, structs.ServiceNodes, error) + switch { + case args.Connect: + f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) { + return s.ConnectServiceNodes(ws, args.ServiceName) + } + + default: + f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.ServiceNodes, error) { + if args.ServiceAddress != "" { + return s.ServiceAddressNodes(ws, args.ServiceAddress) + } + + if args.TagFilter { + return s.ServiceTagNodes(ws, args.ServiceName, args.ServiceTag) + } + + return s.ServiceNodes(ws, args.ServiceName) + } + } + err := c.srv.blockingQuery( &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - var index uint64 - var services structs.ServiceNodes - var err error - if args.TagFilter { - index, services, err = state.ServiceTagNodes(ws, args.ServiceName, args.ServiceTag) - } else { - index, services, err = state.ServiceNodes(ws, args.ServiceName) - 
} - if args.ServiceAddress != "" { - index, services, err = state.ServiceAddressNodes(ws, args.ServiceAddress) - } + index, services, err := f(ws, state) if err != nil { return err } + reply.Index, reply.ServiceNodes = index, services if len(args.NodeMetaFilters) > 0 { var filtered structs.ServiceNodes @@ -305,17 +318,24 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru // Provide some metrics if err == nil { - metrics.IncrCounterWithLabels([]string{"catalog", "service", "query"}, 1, + // For metrics, we separate Connect-based lookups from non-Connect + key := "service" + if args.Connect { + key = "connect" + } + + metrics.IncrCounterWithLabels([]string{"catalog", key, "query"}, 1, []metrics.Label{{Name: "service", Value: args.ServiceName}}) if args.ServiceTag != "" { - metrics.IncrCounterWithLabels([]string{"catalog", "service", "query-tag"}, 1, + metrics.IncrCounterWithLabels([]string{"catalog", key, "query-tag"}, 1, []metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}}) } if len(reply.ServiceNodes) == 0 { - metrics.IncrCounterWithLabels([]string{"catalog", "service", "not-found"}, 1, + metrics.IncrCounterWithLabels([]string{"catalog", key, "not-found"}, 1, []metrics.Label{{Name: "service", Value: args.ServiceName}}) } } + return err } diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index e810f3f63..b095c3f3a 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -1773,6 +1773,57 @@ func TestCatalog_ListServiceNodes_ConnectProxy(t *testing.T) { assert.Equal(args.Service.ProxyDestination, v.ServiceProxyDestination) } +func TestCatalog_ListServiceNodes_ConnectDestination(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // 
Register the proxy service + args := structs.TestRegisterRequestProxy(t) + var out struct{} + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out)) + + // Register the service + { + dst := args.Service.ProxyDestination + args := structs.TestRegisterRequest(t) + args.Service.Service = dst + var out struct{} + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", args, &out)) + } + + // List + req := structs.ServiceSpecificRequest{ + Connect: true, + Datacenter: "dc1", + ServiceName: args.Service.ProxyDestination, + } + var resp structs.IndexedServiceNodes + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) + assert.Len(resp.ServiceNodes, 1) + v := resp.ServiceNodes[0] + assert.Equal(structs.ServiceKindConnectProxy, v.ServiceKind) + assert.Equal(args.Service.ProxyDestination, v.ServiceProxyDestination) + + // List by non-Connect + req = structs.ServiceSpecificRequest{ + Datacenter: "dc1", + ServiceName: args.Service.ProxyDestination, + } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) + assert.Len(resp.ServiceNodes, 1) + v = resp.ServiceNodes[0] + assert.Equal(args.Service.ProxyDestination, v.ServiceName) + assert.Equal("", v.ServiceProxyDestination) +} + func TestCatalog_NodeServices(t *testing.T) { t.Parallel() dir1, s1 := testServer(t) diff --git a/agent/structs/structs.go b/agent/structs/structs.go index e1ab91ab5..4301c7e93 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -284,6 +284,10 @@ type ServiceSpecificRequest struct { ServiceAddress string TagFilter bool // Controls tag filtering Source QuerySource + + // Connect if true will only search for Connect-compatible services. 
+ Connect bool + QueryOptions } diff --git a/agent/structs/testing_catalog.go b/agent/structs/testing_catalog.go index 4a55f1e3d..d61266ad5 100644 --- a/agent/structs/testing_catalog.go +++ b/agent/structs/testing_catalog.go @@ -4,6 +4,20 @@ import ( "github.com/mitchellh/go-testing-interface" ) +// TestRegisterRequest returns a RegisterRequest for registering a typical service. +func TestRegisterRequest(t testing.T) *RegisterRequest { + return &RegisterRequest{ + Datacenter: "dc1", + Node: "foo", + Address: "127.0.0.1", + Service: &NodeService{ + Service: "web", + Address: "", + Port: 80, + }, + } +} + // TestRegisterRequestProxy returns a RegisterRequest for registering a // Connect proxy. func TestRegisterRequestProxy(t testing.T) *RegisterRequest { From fa4f0d353b6b229d76b1636b6fe8dbf976450076 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 9 Mar 2018 08:43:17 -0800 Subject: [PATCH 066/627] agent: /v1/catalog/connect/:service --- agent/catalog_endpoint.go | 49 ++++++++++++++++++++++++++++++++++ agent/catalog_endpoint_test.go | 24 +++++++++++++++++ agent/http_oss.go | 1 + 3 files changed, 74 insertions(+) diff --git a/agent/catalog_endpoint.go b/agent/catalog_endpoint.go index 0088741e1..86e4e95ee 100644 --- a/agent/catalog_endpoint.go +++ b/agent/catalog_endpoint.go @@ -217,6 +217,55 @@ RETRY_ONCE: return out.ServiceNodes, nil } +func (s *HTTPServer) CatalogConnectServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_connect_service_nodes"}, 1, + []metrics.Label{{Name: "node", Value: s.nodeName()}}) + if req.Method != "GET" { + return nil, MethodNotAllowedError{req.Method, []string{"GET"}} + } + + // Set default DC + args := structs.ServiceSpecificRequest{Connect: true} + s.parseSource(req, &args.Source) + args.NodeMetaFilters = s.parseMetaFilter(req) + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { + return nil, nil + } + + // 
Pull out the service name + args.ServiceName = strings.TrimPrefix(req.URL.Path, "/v1/catalog/connect/") + if args.ServiceName == "" { + resp.WriteHeader(http.StatusBadRequest) + fmt.Fprint(resp, "Missing service name") + return nil, nil + } + + // Make the RPC request + var out structs.IndexedServiceNodes + defer setMeta(resp, &out.QueryMeta) + if err := s.agent.RPC("Catalog.ServiceNodes", &args, &out); err != nil { + metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_connect_service_nodes"}, 1, + []metrics.Label{{Name: "node", Value: s.nodeName()}}) + return nil, err + } + s.agent.TranslateAddresses(args.Datacenter, out.ServiceNodes) + + // Use empty list instead of nil + if out.ServiceNodes == nil { + out.ServiceNodes = make(structs.ServiceNodes, 0) + } + for i, s := range out.ServiceNodes { + if s.ServiceTags == nil { + clone := *s + clone.ServiceTags = make([]string, 0) + out.ServiceNodes[i] = &clone + } + } + metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_connect_service_nodes"}, 1, + []metrics.Label{{Name: "node", Value: s.nodeName()}}) + return out.ServiceNodes, nil +} + func (s *HTTPServer) CatalogNodeServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) { metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_node_services"}, 1, []metrics.Label{{Name: "node", Value: s.nodeName()}}) diff --git a/agent/catalog_endpoint_test.go b/agent/catalog_endpoint_test.go index 4df9d4275..71c848ede 100644 --- a/agent/catalog_endpoint_test.go +++ b/agent/catalog_endpoint_test.go @@ -775,6 +775,30 @@ func TestCatalogServiceNodes_ConnectProxy(t *testing.T) { assert.Equal(structs.ServiceKindConnectProxy, nodes[0].ServiceKind) } +func TestCatalogConnectServiceNodes_good(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Register + args := structs.TestRegisterRequestProxy(t) + var out struct{} + 
assert.Nil(a.RPC("Catalog.Register", args, &out)) + + req, _ := http.NewRequest("GET", fmt.Sprintf( + "/v1/catalog/connect/%s", args.Service.ProxyDestination), nil) + resp := httptest.NewRecorder() + obj, err := a.srv.CatalogConnectServiceNodes(resp, req) + assert.Nil(err) + assertIndex(t, resp) + + nodes := obj.(structs.ServiceNodes) + assert.Len(nodes, 1) + assert.Equal(structs.ServiceKindConnectProxy, nodes[0].ServiceKind) +} + func TestCatalogNodeServices(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") diff --git a/agent/http_oss.go b/agent/http_oss.go index d3bb7adc4..185c8c1e0 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -33,6 +33,7 @@ func init() { registerEndpoint("/v1/agent/service/deregister/", []string{"PUT"}, (*HTTPServer).AgentDeregisterService) registerEndpoint("/v1/agent/service/maintenance/", []string{"PUT"}, (*HTTPServer).AgentServiceMaintenance) registerEndpoint("/v1/catalog/register", []string{"PUT"}, (*HTTPServer).CatalogRegister) + registerEndpoint("/v1/catalog/connect/", []string{"GET"}, (*HTTPServer).CatalogConnectServiceNodes) registerEndpoint("/v1/catalog/deregister", []string{"PUT"}, (*HTTPServer).CatalogDeregister) registerEndpoint("/v1/catalog/datacenters", []string{"GET"}, (*HTTPServer).CatalogDatacenters) registerEndpoint("/v1/catalog/nodes", []string{"GET"}, (*HTTPServer).CatalogNodes) From a5fe6204d5b986d3ba35d61eb55087fc2dc6518c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 9 Mar 2018 09:09:21 -0800 Subject: [PATCH 067/627] agent: working DNS for Connect queries, I think, but have to implement Health endpoints to be sure --- agent/dns.go | 23 ++++++++++++++++------- agent/dns_test.go | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 7 deletions(-) diff --git a/agent/dns.go b/agent/dns.go index 1d3c46d97..e014c1330 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -337,7 +337,7 @@ func (d *DNSServer) addSOA(msg *dns.Msg) { // nameservers returns the names and ip 
addresses of up to three random servers // in the current cluster which serve as authoritative name servers for zone. func (d *DNSServer) nameservers(edns bool) (ns []dns.RR, extra []dns.RR) { - out, err := d.lookupServiceNodes(d.agent.config.Datacenter, structs.ConsulServiceName, "") + out, err := d.lookupServiceNodes(d.agent.config.Datacenter, structs.ConsulServiceName, "", false) if err != nil { d.logger.Printf("[WARN] dns: Unable to get list of servers: %s", err) return nil, nil @@ -415,7 +415,7 @@ PARSE: n = n + 1 } - switch labels[n-1] { + switch kind := labels[n-1]; kind { case "service": if n == 1 { goto INVALID @@ -433,7 +433,7 @@ PARSE: } // _name._tag.service.consul - d.serviceLookup(network, datacenter, labels[n-3][1:], tag, req, resp) + d.serviceLookup(network, datacenter, labels[n-3][1:], tag, false, req, resp) // Consul 0.3 and prior format for SRV queries } else { @@ -445,9 +445,17 @@ PARSE: } // tag[.tag].name.service.consul - d.serviceLookup(network, datacenter, labels[n-2], tag, req, resp) + d.serviceLookup(network, datacenter, labels[n-2], tag, false, req, resp) } + case "connect": + if n == 1 { + goto INVALID + } + + // name.connect.consul + d.serviceLookup(network, datacenter, labels[n-2], "", true, req, resp) + case "node": if n == 1 { goto INVALID @@ -898,8 +906,9 @@ func (d *DNSServer) trimDNSResponse(network string, req, resp *dns.Msg) (trimmed } // lookupServiceNodes returns nodes with a given service. 
-func (d *DNSServer) lookupServiceNodes(datacenter, service, tag string) (structs.IndexedCheckServiceNodes, error) { +func (d *DNSServer) lookupServiceNodes(datacenter, service, tag string, connect bool) (structs.IndexedCheckServiceNodes, error) { args := structs.ServiceSpecificRequest{ + Connect: connect, Datacenter: datacenter, ServiceName: service, ServiceTag: tag, @@ -935,8 +944,8 @@ func (d *DNSServer) lookupServiceNodes(datacenter, service, tag string) (structs } // serviceLookup is used to handle a service query -func (d *DNSServer) serviceLookup(network, datacenter, service, tag string, req, resp *dns.Msg) { - out, err := d.lookupServiceNodes(datacenter, service, tag) +func (d *DNSServer) serviceLookup(network, datacenter, service, tag string, connect bool, req, resp *dns.Msg) { + out, err := d.lookupServiceNodes(datacenter, service, tag, connect) if err != nil { d.logger.Printf("[ERR] dns: rpc error: %v", err) resp.SetRcode(req, dns.RcodeServerFailure) diff --git a/agent/dns_test.go b/agent/dns_test.go index 41aca8e0e..5d1082888 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/serf/coordinate" "github.com/miekg/dns" "github.com/pascaldekloe/goe/verify" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -1041,6 +1042,48 @@ func TestDNS_ServiceLookupWithInternalServiceAddress(t *testing.T) { verify.Values(t, "extra", in.Extra, wantExtra) } +func TestDNS_ConnectServiceLookup(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Register a node with an external service. 
+ { + args := structs.TestRegisterRequestProxy(t) + args.Service.ProxyDestination = "db" + args.Service.Port = 12345 + var out struct{} + assert.Nil(a.RPC("Catalog.Register", args, &out)) + } + + // Look up the service + questions := []string{ + "db.connect.consul.", + } + for _, question := range questions { + m := new(dns.Msg) + m.SetQuestion(question, dns.TypeSRV) + + c := new(dns.Client) + in, _, err := c.Exchange(m, a.DNSAddr()) + assert.Nil(err) + assert.Len(in.Answer, 1) + + srvRec, ok := in.Answer[0].(*dns.SRV) + assert.True(ok) + assert.Equal(12345, srvRec.Port) + assert.Equal("foo.node.dc1.consul.", srvRec.Target) + assert.Equal(0, srvRec.Hdr.Ttl) + + cnameRec, ok := in.Extra[0].(*dns.CNAME) + assert.True(ok) + assert.Equal("foo.node.dc1.consul.", cnameRec.Hdr.Name) + assert.Equal(0, srvRec.Hdr.Ttl) + } +} + func TestDNS_ExternalServiceLookup(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") From 119ffe3ed91c748bd3dd317eb9f76516835521d4 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 9 Mar 2018 09:32:22 -0800 Subject: [PATCH 068/627] agent/consul: implement Health.ServiceNodes for Connect, DNS works --- agent/consul/health_endpoint.go | 40 ++++++++++++++++++++-------- agent/consul/state/catalog.go | 24 ++++++++++++++++- agent/consul/state/catalog_test.go | 42 ++++++++++++++++++++++++++++++ agent/dns_test.go | 9 ++++--- 4 files changed, 99 insertions(+), 16 deletions(-) diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index db59356c8..70cc2e37d 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -111,18 +111,30 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc return fmt.Errorf("Must provide service name") } + // Determine the function we'll call + var f func(memdb.WatchSet, *state.Store) (uint64, structs.CheckServiceNodes, error) + switch { + case args.Connect: + f = func(ws memdb.WatchSet, s *state.Store) (uint64, 
structs.CheckServiceNodes, error) { + return s.CheckConnectServiceNodes(ws, args.ServiceName) + } + + case args.TagFilter: + f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.CheckServiceNodes, error) { + return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTag) + } + + default: + f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.CheckServiceNodes, error) { + return s.CheckServiceNodes(ws, args.ServiceName) + } + } + err := h.srv.blockingQuery( &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - var index uint64 - var nodes structs.CheckServiceNodes - var err error - if args.TagFilter { - index, nodes, err = state.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTag) - } else { - index, nodes, err = state.CheckServiceNodes(ws, args.ServiceName) - } + index, nodes, err := f(ws, state) if err != nil { return err } @@ -139,14 +151,20 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc // Provide some metrics if err == nil { - metrics.IncrCounterWithLabels([]string{"health", "service", "query"}, 1, + // For metrics, we separate Connect-based lookups from non-Connect + key := "service" + if args.Connect { + key = "connect" + } + + metrics.IncrCounterWithLabels([]string{"health", key, "query"}, 1, []metrics.Label{{Name: "service", Value: args.ServiceName}}) if args.ServiceTag != "" { - metrics.IncrCounterWithLabels([]string{"health", "service", "query-tag"}, 1, + metrics.IncrCounterWithLabels([]string{"health", key, "query-tag"}, 1, []metrics.Label{{Name: "service", Value: args.ServiceName}, {Name: "tag", Value: args.ServiceTag}}) } if len(reply.Nodes) == 0 { - metrics.IncrCounterWithLabels([]string{"health", "service", "not-found"}, 1, + metrics.IncrCounterWithLabels([]string{"health", key, "not-found"}, 1, []metrics.Label{{Name: "service", Value: args.ServiceName}}) } } diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 
3eb733bbe..2ce2da36b 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -1525,14 +1525,36 @@ func (s *Store) deleteCheckTxn(tx *memdb.Txn, idx uint64, node string, checkID t // CheckServiceNodes is used to query all nodes and checks for a given service. func (s *Store) CheckServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.CheckServiceNodes, error) { + return s.checkServiceNodes(ws, serviceName, false) +} + +// CheckConnectServiceNodes is used to query all nodes and checks for Connect +// compatible endpoints for a given service. +func (s *Store) CheckConnectServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.CheckServiceNodes, error) { + return s.checkServiceNodes(ws, serviceName, true) +} + +func (s *Store) checkServiceNodes(ws memdb.WatchSet, serviceName string, connect bool) (uint64, structs.CheckServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. idx := maxIndexForService(tx, serviceName, true) + // Function for lookup + var f func() (memdb.ResultIterator, error) + if !connect { + f = func() (memdb.ResultIterator, error) { + return tx.Get("services", "service", serviceName) + } + } else { + f = func() (memdb.ResultIterator, error) { + return tx.Get("services", "proxy_destination", serviceName) + } + } + // Query the state store for the service. - iter, err := tx.Get("services", "service", serviceName) + iter, err := f() if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } diff --git a/agent/consul/state/catalog_test.go b/agent/consul/state/catalog_test.go index 1f20fb9b8..9d771ca48 100644 --- a/agent/consul/state/catalog_test.go +++ b/agent/consul/state/catalog_test.go @@ -2529,6 +2529,48 @@ func TestStateStore_CheckServiceNodes(t *testing.T) { } } +func TestStateStore_CheckConnectServiceNodes(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + // Listing with no results returns an empty list. 
+ ws := memdb.NewWatchSet() + idx, nodes, err := s.CheckConnectServiceNodes(ws, "db") + assert.Nil(err) + assert.Equal(idx, uint64(0)) + assert.Len(nodes, 0) + + // Create some nodes and services. + assert.Nil(s.EnsureNode(10, &structs.Node{Node: "foo", Address: "127.0.0.1"})) + assert.Nil(s.EnsureNode(11, &structs.Node{Node: "bar", Address: "127.0.0.2"})) + assert.Nil(s.EnsureService(12, "foo", &structs.NodeService{ID: "db", Service: "db", Tags: nil, Address: "", Port: 5000})) + assert.Nil(s.EnsureService(13, "bar", &structs.NodeService{ID: "api", Service: "api", Tags: nil, Address: "", Port: 5000})) + assert.Nil(s.EnsureService(14, "foo", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", ProxyDestination: "db", Port: 8000})) + assert.Nil(s.EnsureService(15, "bar", &structs.NodeService{Kind: structs.ServiceKindConnectProxy, ID: "proxy", Service: "proxy", ProxyDestination: "db", Port: 8000})) + assert.Nil(s.EnsureService(16, "bar", &structs.NodeService{ID: "db2", Service: "db", Tags: []string{"slave"}, Address: "", Port: 8001})) + assert.True(watchFired(ws)) + + // Register node checks + testRegisterCheck(t, s, 17, "foo", "", "check1", api.HealthPassing) + testRegisterCheck(t, s, 18, "bar", "", "check2", api.HealthPassing) + + // Register checks against the services. + testRegisterCheck(t, s, 19, "foo", "db", "check3", api.HealthPassing) + testRegisterCheck(t, s, 20, "bar", "proxy", "check4", api.HealthPassing) + + // Read everything back. 
+ ws = memdb.NewWatchSet() + idx, nodes, err = s.CheckConnectServiceNodes(ws, "db") + assert.Nil(err) + assert.Equal(idx, uint64(idx)) + assert.Len(nodes, 2) + + for _, n := range nodes { + assert.Equal(structs.ServiceKindConnectProxy, n.Service.Kind) + assert.Equal("db", n.Service.ProxyDestination) + } +} + func BenchmarkCheckServiceNodes(b *testing.B) { s, err := NewStateStore(nil) if err != nil { diff --git a/agent/dns_test.go b/agent/dns_test.go index 5d1082888..a501a9c9f 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -1053,6 +1053,7 @@ func TestDNS_ConnectServiceLookup(t *testing.T) { { args := structs.TestRegisterRequestProxy(t) args.Service.ProxyDestination = "db" + args.Service.Address = "" args.Service.Port = 12345 var out struct{} assert.Nil(a.RPC("Catalog.Register", args, &out)) @@ -1073,14 +1074,14 @@ func TestDNS_ConnectServiceLookup(t *testing.T) { srvRec, ok := in.Answer[0].(*dns.SRV) assert.True(ok) - assert.Equal(12345, srvRec.Port) + assert.Equal(uint16(12345), srvRec.Port) assert.Equal("foo.node.dc1.consul.", srvRec.Target) - assert.Equal(0, srvRec.Hdr.Ttl) + assert.Equal(uint32(0), srvRec.Hdr.Ttl) - cnameRec, ok := in.Extra[0].(*dns.CNAME) + cnameRec, ok := in.Extra[0].(*dns.A) assert.True(ok) assert.Equal("foo.node.dc1.consul.", cnameRec.Hdr.Name) - assert.Equal(0, srvRec.Hdr.Ttl) + assert.Equal(uint32(0), srvRec.Hdr.Ttl) } } From 3d82d261bd65e3a21cad3e1be92f9b69f2c53967 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 9 Mar 2018 09:52:32 -0800 Subject: [PATCH 069/627] agent: /v1/health/connect/:service --- agent/health_endpoint.go | 22 +++++++- agent/health_endpoint_test.go | 100 ++++++++++++++++++++++++++++++++++ agent/http_oss.go | 1 + 3 files changed, 121 insertions(+), 2 deletions(-) diff --git a/agent/health_endpoint.go b/agent/health_endpoint.go index 9c0aac2b6..e57b5f48b 100644 --- a/agent/health_endpoint.go +++ b/agent/health_endpoint.go @@ -143,9 +143,21 @@ RETRY_ONCE: return out.HealthChecks, nil } +func (s 
*HTTPServer) HealthConnectServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + return s.healthServiceNodes(resp, req, true) +} + func (s *HTTPServer) HealthServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + return s.healthServiceNodes(resp, req, false) +} + +func (s *HTTPServer) healthServiceNodes(resp http.ResponseWriter, req *http.Request, connect bool) (interface{}, error) { + if req.Method != "GET" { + return nil, MethodNotAllowedError{req.Method, []string{"GET"}} + } + // Set default DC - args := structs.ServiceSpecificRequest{} + args := structs.ServiceSpecificRequest{Connect: connect} s.parseSource(req, &args.Source) args.NodeMetaFilters = s.parseMetaFilter(req) if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { @@ -159,8 +171,14 @@ func (s *HTTPServer) HealthServiceNodes(resp http.ResponseWriter, req *http.Requ args.TagFilter = true } + // Determine the prefix + prefix := "/v1/health/service/" + if connect { + prefix = "/v1/health/connect/" + } + // Pull out the service name - args.ServiceName = strings.TrimPrefix(req.URL.Path, "/v1/health/service/") + args.ServiceName = strings.TrimPrefix(req.URL.Path, prefix) if args.ServiceName == "" { resp.WriteHeader(http.StatusBadRequest) fmt.Fprint(resp, "Missing service name") diff --git a/agent/health_endpoint_test.go b/agent/health_endpoint_test.go index 5d2ae1445..688924df1 100644 --- a/agent/health_endpoint_test.go +++ b/agent/health_endpoint_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/testutil/retry" "github.com/hashicorp/serf/coordinate" + "github.com/stretchr/testify/assert" ) func TestHealthChecksInState(t *testing.T) { @@ -770,6 +771,105 @@ func TestHealthServiceNodes_WanTranslation(t *testing.T) { } } +func TestHealthConnectServiceNodes(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Register + 
args := structs.TestRegisterRequestProxy(t) + var out struct{} + assert.Nil(a.RPC("Catalog.Register", args, &out)) + + // Request + req, _ := http.NewRequest("GET", fmt.Sprintf( + "/v1/health/connect/%s?dc=dc1", args.Service.ProxyDestination), nil) + resp := httptest.NewRecorder() + obj, err := a.srv.HealthConnectServiceNodes(resp, req) + assert.Nil(err) + assertIndex(t, resp) + + // Should be a non-nil empty list for checks + nodes := obj.(structs.CheckServiceNodes) + assert.Len(nodes, 1) + assert.Len(nodes[0].Checks, 0) +} + +func TestHealthConnectServiceNodes_PassingFilter(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Register + args := structs.TestRegisterRequestProxy(t) + args.Check = &structs.HealthCheck{ + Node: args.Node, + Name: "check", + ServiceID: args.Service.Service, + Status: api.HealthCritical, + } + var out struct{} + assert.Nil(t, a.RPC("Catalog.Register", args, &out)) + + t.Run("bc_no_query_value", func(t *testing.T) { + assert := assert.New(t) + req, _ := http.NewRequest("GET", fmt.Sprintf( + "/v1/health/connect/%s?passing", args.Service.ProxyDestination), nil) + resp := httptest.NewRecorder() + obj, err := a.srv.HealthConnectServiceNodes(resp, req) + assert.Nil(err) + assertIndex(t, resp) + + // Should be 0 health check for consul + nodes := obj.(structs.CheckServiceNodes) + assert.Len(nodes, 0) + }) + + t.Run("passing_true", func(t *testing.T) { + assert := assert.New(t) + req, _ := http.NewRequest("GET", fmt.Sprintf( + "/v1/health/connect/%s?passing=true", args.Service.ProxyDestination), nil) + resp := httptest.NewRecorder() + obj, err := a.srv.HealthConnectServiceNodes(resp, req) + assert.Nil(err) + assertIndex(t, resp) + + // Should be 0 health check for consul + nodes := obj.(structs.CheckServiceNodes) + assert.Len(nodes, 0) + }) + + t.Run("passing_false", func(t *testing.T) { + assert := assert.New(t) + req, _ := http.NewRequest("GET", fmt.Sprintf( + "/v1/health/connect/%s?passing=false", 
args.Service.ProxyDestination), nil) + resp := httptest.NewRecorder() + obj, err := a.srv.HealthConnectServiceNodes(resp, req) + assert.Nil(err) + assertIndex(t, resp) + + // Should be 0 health check for consul + nodes := obj.(structs.CheckServiceNodes) + assert.Len(nodes, 1) + }) + + t.Run("passing_bad", func(t *testing.T) { + assert := assert.New(t) + req, _ := http.NewRequest("GET", fmt.Sprintf( + "/v1/health/connect/%s?passing=nope-nope", args.Service.ProxyDestination), nil) + resp := httptest.NewRecorder() + a.srv.HealthConnectServiceNodes(resp, req) + assert.Equal(400, resp.Code) + + body, err := ioutil.ReadAll(resp.Body) + assert.Nil(err) + assert.True(bytes.Contains(body, []byte("Invalid value for ?passing"))) + }) +} + func TestFilterNonPassing(t *testing.T) { t.Parallel() nodes := structs.CheckServiceNodes{ diff --git a/agent/http_oss.go b/agent/http_oss.go index 185c8c1e0..2e2c9751a 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -53,6 +53,7 @@ func init() { registerEndpoint("/v1/health/checks/", []string{"GET"}, (*HTTPServer).HealthServiceChecks) registerEndpoint("/v1/health/state/", []string{"GET"}, (*HTTPServer).HealthChecksInState) registerEndpoint("/v1/health/service/", []string{"GET"}, (*HTTPServer).HealthServiceNodes) + registerEndpoint("/v1/health/connect/", []string{"GET"}, (*HTTPServer).HealthConnectServiceNodes) registerEndpoint("/v1/internal/ui/nodes", []string{"GET"}, (*HTTPServer).UINodes) registerEndpoint("/v1/internal/ui/node/", []string{"GET"}, (*HTTPServer).UINodeInfo) registerEndpoint("/v1/internal/ui/services", []string{"GET"}, (*HTTPServer).UIServices) From daaa6e2403da3f475520d8dfe65c35e1c31d88b7 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 9 Mar 2018 10:01:42 -0800 Subject: [PATCH 070/627] agent: clean up connect/non-connect duplication by using shared methods --- agent/catalog_endpoint.go | 70 +++++++++-------------------------- agent/consul/state/catalog.go | 59 +++++++++++++---------------- 2 files 
changed, 43 insertions(+), 86 deletions(-) diff --git a/agent/catalog_endpoint.go b/agent/catalog_endpoint.go index 86e4e95ee..4c0fd8f52 100644 --- a/agent/catalog_endpoint.go +++ b/agent/catalog_endpoint.go @@ -157,12 +157,27 @@ RETRY_ONCE: return out.Services, nil } +func (s *HTTPServer) CatalogConnectServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + return s.catalogServiceNodes(resp, req, true) +} + func (s *HTTPServer) CatalogServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_service_nodes"}, 1, + return s.catalogServiceNodes(resp, req, false) +} + +func (s *HTTPServer) catalogServiceNodes(resp http.ResponseWriter, req *http.Request, connect bool) (interface{}, error) { + metricsKey := "catalog_service_nodes" + pathPrefix := "/v1/catalog/service/" + if connect { + metricsKey = "catalog_connect_service_nodes" + pathPrefix = "/v1/catalog/connect/" + } + + metrics.IncrCounterWithLabels([]string{"client", "api", metricsKey}, 1, []metrics.Label{{Name: "node", Value: s.nodeName()}}) // Set default DC - args := structs.ServiceSpecificRequest{} + args := structs.ServiceSpecificRequest{Connect: connect} s.parseSource(req, &args.Source) args.NodeMetaFilters = s.parseMetaFilter(req) if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { @@ -177,7 +192,7 @@ func (s *HTTPServer) CatalogServiceNodes(resp http.ResponseWriter, req *http.Req } // Pull out the service name - args.ServiceName = strings.TrimPrefix(req.URL.Path, "/v1/catalog/service/") + args.ServiceName = strings.TrimPrefix(req.URL.Path, pathPrefix) if args.ServiceName == "" { resp.WriteHeader(http.StatusBadRequest) fmt.Fprint(resp, "Missing service name") @@ -217,55 +232,6 @@ RETRY_ONCE: return out.ServiceNodes, nil } -func (s *HTTPServer) CatalogConnectServiceNodes(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - 
metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_connect_service_nodes"}, 1, - []metrics.Label{{Name: "node", Value: s.nodeName()}}) - if req.Method != "GET" { - return nil, MethodNotAllowedError{req.Method, []string{"GET"}} - } - - // Set default DC - args := structs.ServiceSpecificRequest{Connect: true} - s.parseSource(req, &args.Source) - args.NodeMetaFilters = s.parseMetaFilter(req) - if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { - return nil, nil - } - - // Pull out the service name - args.ServiceName = strings.TrimPrefix(req.URL.Path, "/v1/catalog/connect/") - if args.ServiceName == "" { - resp.WriteHeader(http.StatusBadRequest) - fmt.Fprint(resp, "Missing service name") - return nil, nil - } - - // Make the RPC request - var out structs.IndexedServiceNodes - defer setMeta(resp, &out.QueryMeta) - if err := s.agent.RPC("Catalog.ServiceNodes", &args, &out); err != nil { - metrics.IncrCounterWithLabels([]string{"client", "rpc", "error", "catalog_connect_service_nodes"}, 1, - []metrics.Label{{Name: "node", Value: s.nodeName()}}) - return nil, err - } - s.agent.TranslateAddresses(args.Datacenter, out.ServiceNodes) - - // Use empty list instead of nil - if out.ServiceNodes == nil { - out.ServiceNodes = make(structs.ServiceNodes, 0) - } - for i, s := range out.ServiceNodes { - if s.ServiceTags == nil { - clone := *s - clone.ServiceTags = make([]string, 0) - out.ServiceNodes[i] = &clone - } - } - metrics.IncrCounterWithLabels([]string{"client", "api", "success", "catalog_connect_service_nodes"}, 1, - []metrics.Label{{Name: "node", Value: s.nodeName()}}) - return out.ServiceNodes, nil -} - func (s *HTTPServer) CatalogNodeServices(resp http.ResponseWriter, req *http.Request) (interface{}, error) { metrics.IncrCounterWithLabels([]string{"client", "api", "catalog_node_services"}, 1, []metrics.Label{{Name: "node", Value: s.nodeName()}}) diff --git a/agent/consul/state/catalog.go b/agent/consul/state/catalog.go index 
2ce2da36b..90a3dc5eb 100644 --- a/agent/consul/state/catalog.go +++ b/agent/consul/state/catalog.go @@ -792,15 +792,39 @@ func maxIndexForService(tx *memdb.Txn, serviceName string, checks bool) uint64 { return maxIndexTxn(tx, "nodes", "services") } +// ConnectServiceNodes returns the nodes associated with a Connect +// compatible destination for the given service name. This will include +// both proxies and native integrations. +func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.ServiceNodes, error) { + return s.serviceNodes(ws, serviceName, true) +} + // ServiceNodes returns the nodes associated with a given service name. func (s *Store) ServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.ServiceNodes, error) { + return s.serviceNodes(ws, serviceName, false) +} + +func (s *Store) serviceNodes(ws memdb.WatchSet, serviceName string, connect bool) (uint64, structs.ServiceNodes, error) { tx := s.db.Txn(false) defer tx.Abort() // Get the table index. idx := maxIndexForService(tx, serviceName, false) + + // Function for lookup + var f func() (memdb.ResultIterator, error) + if !connect { + f = func() (memdb.ResultIterator, error) { + return tx.Get("services", "service", serviceName) + } + } else { + f = func() (memdb.ResultIterator, error) { + return tx.Get("services", "proxy_destination", serviceName) + } + } + // List all the services. - services, err := tx.Get("services", "service", serviceName) + services, err := f() if err != nil { return 0, nil, fmt.Errorf("failed service lookup: %s", err) } @@ -852,39 +876,6 @@ func (s *Store) ServiceTagNodes(ws memdb.WatchSet, service string, tag string) ( return idx, results, nil } -// ConnectServiceNodes returns the nodes associated with a Connect -// compatible destination for the given service name. This will include -// both proxies and native integrations. 
-func (s *Store) ConnectServiceNodes(ws memdb.WatchSet, serviceName string) (uint64, structs.ServiceNodes, error) { - tx := s.db.Txn(false) - defer tx.Abort() - - // Get the table index. - idx := maxIndexForService(tx, serviceName, false) - - // Find all the proxies. When we support native integrations we'll have - // to perform another table lookup here. - services, err := tx.Get(servicesTableName, "proxy_destination", serviceName) - if err != nil { - return 0, nil, fmt.Errorf("failed service lookup: %s", err) - } - ws.Add(services.WatchCh()) - - // Store them - var results structs.ServiceNodes - for service := services.Next(); service != nil; service = services.Next() { - results = append(results, service.(*structs.ServiceNode)) - } - - // Fill in the node details. - results, err = s.parseServiceNodes(tx, ws, results) - if err != nil { - return 0, nil, fmt.Errorf("failed parsing service nodes: %s", err) - } - - return idx, results, nil -} - // serviceTagFilter returns true (should filter) if the given service node // doesn't contain the given tag. 
func serviceTagFilter(sn *structs.ServiceNode, tag string) bool { From c43ccd024ad3662f6791a1ac6e4b62deb5e857b2 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 9 Mar 2018 17:16:12 -0800 Subject: [PATCH 071/627] agent/local: anti-entropy for connect proxy services --- agent/agent_endpoint_test.go | 33 +++++ agent/local/state_test.go | 140 ++++++++++++++++++++ agent/structs/service_definition.go | 4 + agent/structs/testing_service_definition.go | 13 ++ 4 files changed, 190 insertions(+) create mode 100644 agent/structs/testing_service_definition.go diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 126994196..e30323007 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -1365,6 +1365,39 @@ func TestAgent_RegisterService_InvalidAddress(t *testing.T) { } } +func TestAgent_RegisterService_ConnectProxy(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + args := &structs.ServiceDefinition{ + Kind: structs.ServiceKindConnectProxy, + Name: "connect-proxy", + Port: 8000, + ProxyDestination: "db", + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentRegisterService(resp, req) + assert.Nil(err) + assert.Nil(obj) + + // Ensure the servie + svc, ok := a.State.Services()["connect-proxy"] + assert.True(ok, "has service") + assert.Equal(structs.ServiceKindConnectProxy, svc.Kind) + assert.Equal("db", svc.ProxyDestination) + + // Ensure the token was configured + assert.Equal("abc123", a.State.ServiceToken("connect-proxy")) +} + func TestAgent_DeregisterService(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") diff --git a/agent/local/state_test.go b/agent/local/state_test.go index a6e9e1738..d0c006a95 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ 
-16,6 +16,7 @@ import ( "github.com/hashicorp/consul/testutil/retry" "github.com/hashicorp/consul/types" "github.com/pascaldekloe/goe/verify" + "github.com/stretchr/testify/assert" ) func TestAgentAntiEntropy_Services(t *testing.T) { @@ -224,6 +225,145 @@ func TestAgentAntiEntropy_Services(t *testing.T) { } } +func TestAgentAntiEntropy_Services_ConnectProxy(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := &agent.TestAgent{Name: t.Name()} + a.Start() + defer a.Shutdown() + + // Register node info + var out struct{} + args := &structs.RegisterRequest{ + Datacenter: "dc1", + Node: a.Config.NodeName, + Address: "127.0.0.1", + } + + // Exists both same (noop) + srv1 := &structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: "mysql-proxy", + Service: "mysql-proxy", + Port: 5000, + ProxyDestination: "db", + } + a.State.AddService(srv1, "") + args.Service = srv1 + assert.Nil(a.RPC("Catalog.Register", args, &out)) + + // Exists both, different (update) + srv2 := &structs.NodeService{ + ID: "redis-proxy", + Service: "redis-proxy", + Port: 8000, + Kind: structs.ServiceKindConnectProxy, + ProxyDestination: "redis", + } + a.State.AddService(srv2, "") + + srv2_mod := new(structs.NodeService) + *srv2_mod = *srv2 + srv2_mod.Port = 9000 + args.Service = srv2_mod + assert.Nil(a.RPC("Catalog.Register", args, &out)) + + // Exists local (create) + srv3 := &structs.NodeService{ + ID: "web-proxy", + Service: "web-proxy", + Port: 80, + Kind: structs.ServiceKindConnectProxy, + ProxyDestination: "web", + } + a.State.AddService(srv3, "") + + // Exists remote (delete) + srv4 := &structs.NodeService{ + ID: "lb-proxy", + Service: "lb-proxy", + Port: 443, + Kind: structs.ServiceKindConnectProxy, + ProxyDestination: "lb", + } + args.Service = srv4 + assert.Nil(a.RPC("Catalog.Register", args, &out)) + + // Exists local, in sync, remote missing (create) + srv5 := &structs.NodeService{ + ID: "cache-proxy", + Service: "cache-proxy", + Port: 11211, + Kind: 
structs.ServiceKindConnectProxy, + ProxyDestination: "cache-proxy", + } + a.State.SetServiceState(&local.ServiceState{ + Service: srv5, + InSync: true, + }) + + assert.Nil(a.State.SyncFull()) + + var services structs.IndexedNodeServices + req := structs.NodeSpecificRequest{ + Datacenter: "dc1", + Node: a.Config.NodeName, + } + assert.Nil(a.RPC("Catalog.NodeServices", &req, &services)) + + // We should have 5 services (consul included) + assert.Len(services.NodeServices.Services, 5) + + // All the services should match + for id, serv := range services.NodeServices.Services { + serv.CreateIndex, serv.ModifyIndex = 0, 0 + switch id { + case "mysql-proxy": + assert.Equal(srv1, serv) + case "redis-proxy": + assert.Equal(srv2, serv) + case "web-proxy": + assert.Equal(srv3, serv) + case "cache-proxy": + assert.Equal(srv5, serv) + case structs.ConsulServiceID: + // ignore + default: + t.Fatalf("unexpected service: %v", id) + } + } + + assert.Nil(servicesInSync(a.State, 4)) + + // Remove one of the services + a.State.RemoveService("cache-proxy") + assert.Nil(a.State.SyncFull()) + assert.Nil(a.RPC("Catalog.NodeServices", &req, &services)) + + // We should have 4 services (consul included) + assert.Len(services.NodeServices.Services, 4) + + // All the services should match + for id, serv := range services.NodeServices.Services { + serv.CreateIndex, serv.ModifyIndex = 0, 0 + switch id { + case "mysql-proxy": + assert.Equal(srv1, serv) + case "redis-proxy": + assert.Equal(srv2, serv) + case "web-proxy": + assert.Equal(srv3, serv) + case structs.ConsulServiceID: + // ignore + default: + t.Fatalf("unexpected service: %v", id) + } + } + + assert.Nil(servicesInSync(a.State, 3)) +} + func TestAgentAntiEntropy_EnableTagOverride(t *testing.T) { t.Parallel() a := &agent.TestAgent{Name: t.Name()} diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index 4dc8ccfca..d469ed5d7 100644 --- a/agent/structs/service_definition.go +++ 
b/agent/structs/service_definition.go @@ -2,6 +2,7 @@ package structs // ServiceDefinition is used to JSON decode the Service definitions type ServiceDefinition struct { + Kind ServiceKind ID string Name string Tags []string @@ -12,10 +13,12 @@ type ServiceDefinition struct { Checks CheckTypes Token string EnableTagOverride bool + ProxyDestination string } func (s *ServiceDefinition) NodeService() *NodeService { ns := &NodeService{ + Kind: s.Kind, ID: s.ID, Service: s.Name, Tags: s.Tags, @@ -23,6 +26,7 @@ func (s *ServiceDefinition) NodeService() *NodeService { Meta: s.Meta, Port: s.Port, EnableTagOverride: s.EnableTagOverride, + ProxyDestination: s.ProxyDestination, } if ns.ID == "" && ns.Service != "" { ns.ID = ns.Service diff --git a/agent/structs/testing_service_definition.go b/agent/structs/testing_service_definition.go new file mode 100644 index 000000000..b14e1e2ff --- /dev/null +++ b/agent/structs/testing_service_definition.go @@ -0,0 +1,13 @@ +package structs + +import ( + "github.com/mitchellh/go-testing-interface" +) + +// TestServiceDefinitionProxy returns a ServiceDefinition for a proxy. 
+func TestServiceDefinitionProxy(t testing.T) *ServiceDefinition { + return &ServiceDefinition{ + Kind: ServiceKindConnectProxy, + ProxyDestination: "db", + } +} From b5fd3017bb1947d90cdf4911910ac9d394179545 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 9 Mar 2018 17:21:26 -0800 Subject: [PATCH 072/627] agent/structs: tests for PartialClone and IsSame for proxy fields --- agent/structs/structs.go | 4 +++- agent/structs/structs_test.go | 6 ++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 4301c7e93..f8f339a5b 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -529,7 +529,9 @@ func (s *NodeService) IsSame(other *NodeService) bool { s.Address != other.Address || s.Port != other.Port || !reflect.DeepEqual(s.Meta, other.Meta) || - s.EnableTagOverride != other.EnableTagOverride { + s.EnableTagOverride != other.EnableTagOverride || + s.Kind != other.Kind || + s.ProxyDestination != other.ProxyDestination { return false } diff --git a/agent/structs/structs_test.go b/agent/structs/structs_test.go index 972146d93..077636be0 100644 --- a/agent/structs/structs_test.go +++ b/agent/structs/structs_test.go @@ -134,6 +134,7 @@ func testServiceNode() *ServiceNode { NodeMeta: map[string]string{ "tag": "value", }, + ServiceKind: ServiceKindTypical, ServiceID: "service1", ServiceName: "dogs", ServiceTags: []string{"prod", "v1"}, @@ -143,6 +144,7 @@ func testServiceNode() *ServiceNode { "service": "metadata", }, ServiceEnableTagOverride: true, + ServiceProxyDestination: "cats", RaftIndex: RaftIndex{ CreateIndex: 1, ModifyIndex: 2, @@ -275,6 +277,7 @@ func TestStructs_NodeService_IsSame(t *testing.T) { }, Port: 1234, EnableTagOverride: true, + ProxyDestination: "db", } if !ns.IsSame(ns) { t.Fatalf("should be equal to itself") @@ -292,6 +295,7 @@ func TestStructs_NodeService_IsSame(t *testing.T) { "meta2": "value2", "meta1": "value1", }, + ProxyDestination: "db", RaftIndex: 
RaftIndex{ CreateIndex: 1, ModifyIndex: 2, @@ -325,6 +329,8 @@ func TestStructs_NodeService_IsSame(t *testing.T) { check(func() { other.Port = 9999 }, func() { other.Port = 1234 }) check(func() { other.Meta["meta2"] = "wrongValue" }, func() { other.Meta["meta2"] = "value2" }) check(func() { other.EnableTagOverride = false }, func() { other.EnableTagOverride = true }) + check(func() { other.Kind = ServiceKindConnectProxy }, func() { other.Kind = "" }) + check(func() { other.ProxyDestination = "" }, func() { other.ProxyDestination = "db" }) } func TestStructs_HealthCheck_IsSame(t *testing.T) { From 4207bb42c077ba83db2aa21aa36f2adfc99ab9ea Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 10 Mar 2018 17:42:30 -0800 Subject: [PATCH 073/627] agent: validate service entry on register --- agent/agent_endpoint.go | 8 ++++++++ agent/agent_endpoint_test.go | 29 +++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 8d7728f1c..75d5807c0 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -554,6 +554,14 @@ func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Re return nil, nil } + // Run validation. This is the same validation that would happen on + // the catalog endpoint so it helps ensure the sync will work properly. + if err := ns.Validate(); err != nil { + resp.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(resp, err.Error()) + return nil, nil + } + // Verify the check type. 
chkTypes, err := args.CheckTypes() if err != nil { diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index e30323007..05e8b6ca3 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -1398,6 +1398,35 @@ func TestAgent_RegisterService_ConnectProxy(t *testing.T) { assert.Equal("abc123", a.State.ServiceToken("connect-proxy")) } +func TestAgent_RegisterService_ConnectProxyInvalid(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + args := &structs.ServiceDefinition{ + Kind: structs.ServiceKindConnectProxy, + Name: "connect-proxy", + ProxyDestination: "db", + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentRegisterService(resp, req) + assert.Nil(err) + assert.Nil(obj) + assert.Equal(http.StatusBadRequest, resp.Code) + assert.Contains(resp.Body.String(), "Port") + + // Ensure the service doesn't exist + _, ok := a.State.Services()["connect-proxy"] + assert.False(ok) +} + func TestAgent_DeregisterService(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") From 566c98b2fcb7bc0f6877e5078785cb57e5e5d30f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 11 Mar 2018 09:11:10 -0700 Subject: [PATCH 074/627] agent/consul: require name for proxies --- agent/consul/catalog_endpoint.go | 11 ---------- agent/consul/catalog_endpoint_test.go | 30 --------------------------- agent/structs/structs.go | 7 +++++-- 3 files changed, 5 insertions(+), 43 deletions(-) diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index 840b97fa6..adde8e52e 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -47,17 +47,6 @@ func (c *Catalog) Register(args *structs.RegisterRequest, reply *struct{}) error // Handle a service registration. 
if args.Service != nil { - // Connect proxy specific logic - if args.Service.Kind == structs.ServiceKindConnectProxy { - // Name is optional, if it isn't set, we default to the - // proxy name. It actually MUST be this, but the validation - // below this will verify. - if args.Service.Service == "" { - args.Service.Service = fmt.Sprintf( - "%s-connect-proxy", args.Service.ProxyDestination) - } - } - // Validate the service. This is in addition to the below since // the above just hasn't been moved over yet. We should move it over // in time. diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index b095c3f3a..fd437c978 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -384,36 +384,6 @@ func TestCatalog_Register_ConnectProxy_invalid(t *testing.T) { assert.Contains(err.Error(), "ProxyDestination") } -// Test registering a proxy with no name set, which should work. -func TestCatalog_Register_ConnectProxy_noName(t *testing.T) { - t.Parallel() - - assert := assert.New(t) - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - codec := rpcClient(t, s1) - defer codec.Close() - - args := structs.TestRegisterRequestProxy(t) - args.Service.Service = "" - - // Register - var out struct{} - assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) - - // List - req := structs.ServiceSpecificRequest{ - Datacenter: "dc1", - ServiceName: fmt.Sprintf("%s-connect-proxy", args.Service.ProxyDestination), - } - var resp structs.IndexedServiceNodes - assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) - assert.Len(resp.ServiceNodes, 1) - v := resp.ServiceNodes[0] - assert.Equal(structs.ServiceKindConnectProxy, v.ServiceKind) -} - // Test that write is required for the proxy destination to register a proxy. 
func TestCatalog_Register_ConnectProxy_ACLProxyDestination(t *testing.T) { t.Parallel() diff --git a/agent/structs/structs.go b/agent/structs/structs.go index f8f339a5b..40b606d17 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -462,8 +462,11 @@ type ServiceNodes []*ServiceNode type ServiceKind string const ( - // ServiceKindTypical is a typical, classic Consul service. - ServiceKindTypical ServiceKind = "typical" + // ServiceKindTypical is a typical, classic Consul service. This is + // represented by the absense of a value. This was chosen for ease of + // backwards compatibility: existing services in the catalog would + // default to the typical service. + ServiceKindTypical ServiceKind = "" // ServiceKindConnectProxy is a proxy for the Connect feature. This // service proxies another service within Consul and speaks the connect From 4cc4de1ff68d18ecbff45370743d9adb2630e929 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 11 Mar 2018 09:31:31 -0700 Subject: [PATCH 075/627] agent: remove ConnectProxyServiceName --- agent/agent_endpoint_test.go | 6 +++--- agent/structs/catalog.go | 3 --- agent/structs/testing_catalog.go | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 05e8b6ca3..167a23377 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -78,8 +78,8 @@ func TestAgent_Services_ConnectProxy(t *testing.T) { srv1 := &structs.NodeService{ Kind: structs.ServiceKindConnectProxy, - ID: structs.ConnectProxyServiceName, - Service: structs.ConnectProxyServiceName, + ID: "db-proxy", + Service: "db-proxy", Port: 5000, ProxyDestination: "db", } @@ -90,7 +90,7 @@ func TestAgent_Services_ConnectProxy(t *testing.T) { assert.Nil(err) val := obj.(map[string]*structs.NodeService) assert.Len(val, 1) - actual := val[structs.ConnectProxyServiceName] + actual := val["db-proxy"] assert.Equal(structs.ServiceKindConnectProxy, actual.Kind) 
assert.Equal("db", actual.ProxyDestination) } diff --git a/agent/structs/catalog.go b/agent/structs/catalog.go index 3f68f43a1..b118b9935 100644 --- a/agent/structs/catalog.go +++ b/agent/structs/catalog.go @@ -18,7 +18,4 @@ const ( // Consul server node in the catalog. ConsulServiceID = "consul" ConsulServiceName = "consul" - - // ConnectProxyServiceName is the name of the proxy services. - ConnectProxyServiceName = "connect-proxy" ) diff --git a/agent/structs/testing_catalog.go b/agent/structs/testing_catalog.go index d61266ad5..1394b7081 100644 --- a/agent/structs/testing_catalog.go +++ b/agent/structs/testing_catalog.go @@ -34,7 +34,7 @@ func TestRegisterRequestProxy(t testing.T) *RegisterRequest { func TestNodeServiceProxy(t testing.T) *NodeService { return &NodeService{ Kind: ServiceKindConnectProxy, - Service: ConnectProxyServiceName, + Service: "connect-proxy", Address: "127.0.0.2", Port: 2222, ProxyDestination: "web", From 641c982480d8673e968a1126bdd813756a65edd7 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 11 Mar 2018 09:31:39 -0700 Subject: [PATCH 076/627] agent/consul: Catalog endpoint ACL requirements for Connect proxies --- agent/consul/catalog_endpoint.go | 15 +++++ agent/consul/catalog_endpoint_test.go | 85 +++++++++++++++++++++++++++ 2 files changed, 100 insertions(+) diff --git a/agent/consul/catalog_endpoint.go b/agent/consul/catalog_endpoint.go index adde8e52e..a31ca59eb 100644 --- a/agent/consul/catalog_endpoint.go +++ b/agent/consul/catalog_endpoint.go @@ -280,6 +280,21 @@ func (c *Catalog) ServiceNodes(args *structs.ServiceSpecificRequest, reply *stru } } + // If we're doing a connect query, we need read access to the service + // we're trying to find proxies for, so check that. + if args.Connect { + // Fetch the ACL token, if any. 
+ rule, err := c.srv.resolveToken(args.Token) + if err != nil { + return err + } + + if rule != nil && !rule.ServiceRead(args.ServiceName) { + // Just return nil, which will return an empty response (tested) + return nil + } + } + err := c.srv.blockingQuery( &args.QueryOptions, &reply.QueryMeta, diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index fd437c978..d08438f9d 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -1794,6 +1794,91 @@ func TestCatalog_ListServiceNodes_ConnectDestination(t *testing.T) { assert.Equal("", v.ServiceProxyDestination) } +func TestCatalog_ListServiceNodes_ConnectProxy_ACL(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + c.ACLEnforceVersion8 = false + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create the ACL. 
+ arg := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: ` +service "foo" { + policy = "write" +} +`, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var token string + assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &token)) + + { + // Register a proxy + args := structs.TestRegisterRequestProxy(t) + args.Service.Service = "foo-proxy" + args.Service.ProxyDestination = "bar" + args.WriteRequest.Token = "root" + var out struct{} + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + + // Register a proxy + args = structs.TestRegisterRequestProxy(t) + args.Service.Service = "foo-proxy" + args.Service.ProxyDestination = "foo" + args.WriteRequest.Token = "root" + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + + // Register a proxy + args = structs.TestRegisterRequestProxy(t) + args.Service.Service = "another-proxy" + args.Service.ProxyDestination = "foo" + args.WriteRequest.Token = "root" + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + } + + // List w/ token. This should disallow because we don't have permission + // to read "bar" + req := structs.ServiceSpecificRequest{ + Connect: true, + Datacenter: "dc1", + ServiceName: "bar", + QueryOptions: structs.QueryOptions{Token: token}, + } + var resp structs.IndexedServiceNodes + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) + assert.Len(resp.ServiceNodes, 0) + + // List w/ token. This should work since we're requesting "foo", but should + // also only contain the proxies with names that adhere to our ACL. 
+ req = structs.ServiceSpecificRequest{ + Connect: true, + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{Token: token}, + } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.ServiceNodes", &req, &resp)) + assert.Len(resp.ServiceNodes, 1) + v := resp.ServiceNodes[0] + assert.Equal("foo-proxy", v.ServiceName) +} + func TestCatalog_NodeServices(t *testing.T) { t.Parallel() dir1, s1 := testServer(t) From 62cbb892e38b8cf357cbc23e69274a3636bfc8ac Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 11 Mar 2018 11:49:12 -0700 Subject: [PATCH 077/627] agent/consul: Health.ServiceNodes ACL check for Connect --- agent/consul/health_endpoint.go | 15 ++++ agent/consul/health_endpoint_test.go | 101 +++++++++++++++++++++++++++ 2 files changed, 116 insertions(+) diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index 70cc2e37d..214de777d 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -130,6 +130,21 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc } } + // If we're doing a connect query, we need read access to the service + // we're trying to find proxies for, so check that. + if args.Connect { + // Fetch the ACL token, if any. 
+ rule, err := h.srv.resolveToken(args.Token) + if err != nil { + return err + } + + if rule != nil && !rule.ServiceRead(args.ServiceName) { + // Just return nil, which will return an empty response (tested) + return nil + } + } + err := h.srv.blockingQuery( &args.QueryOptions, &reply.QueryMeta, diff --git a/agent/consul/health_endpoint_test.go b/agent/consul/health_endpoint_test.go index c9581e3a7..117afd646 100644 --- a/agent/consul/health_endpoint_test.go +++ b/agent/consul/health_endpoint_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/stretchr/testify/assert" ) func TestHealth_ChecksInState(t *testing.T) { @@ -821,6 +822,106 @@ func TestHealth_ServiceNodes_DistanceSort(t *testing.T) { } } +func TestHealth_ServiceNodes_ConnectProxy_ACL(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + c.ACLEnforceVersion8 = false + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create the ACL. 
+ arg := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: ` +service "foo" { + policy = "write" +} +`, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + var token string + assert.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", arg, &token)) + + { + var out struct{} + + // Register a service + args := structs.TestRegisterRequestProxy(t) + args.WriteRequest.Token = "root" + args.Service.ID = "foo-proxy-0" + args.Service.Service = "foo-proxy" + args.Service.ProxyDestination = "bar" + args.Check = &structs.HealthCheck{ + Name: "proxy", + Status: api.HealthPassing, + ServiceID: args.Service.ID, + } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + + // Register a service + args = structs.TestRegisterRequestProxy(t) + args.WriteRequest.Token = "root" + args.Service.Service = "foo-proxy" + args.Service.ProxyDestination = "foo" + args.Check = &structs.HealthCheck{ + Name: "proxy", + Status: api.HealthPassing, + ServiceID: args.Service.Service, + } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + + // Register a service + args = structs.TestRegisterRequestProxy(t) + args.WriteRequest.Token = "root" + args.Service.Service = "another-proxy" + args.Service.ProxyDestination = "foo" + args.Check = &structs.HealthCheck{ + Name: "proxy", + Status: api.HealthPassing, + ServiceID: args.Service.Service, + } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Catalog.Register", &args, &out)) + } + + // List w/ token. This should disallow because we don't have permission + // to read "bar" + req := structs.ServiceSpecificRequest{ + Connect: true, + Datacenter: "dc1", + ServiceName: "bar", + QueryOptions: structs.QueryOptions{Token: token}, + } + var resp structs.IndexedCheckServiceNodes + assert.Nil(msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp)) + assert.Len(resp.Nodes, 0) + + // List w/ token. 
This should work since we're requesting "foo", but should + // also only contain the proxies with names that adhere to our ACL. + req = structs.ServiceSpecificRequest{ + Connect: true, + Datacenter: "dc1", + ServiceName: "foo", + QueryOptions: structs.QueryOptions{Token: token}, + } + assert.Nil(msgpackrpc.CallWithCodec(codec, "Health.ServiceNodes", &req, &resp)) + assert.Len(resp.Nodes, 1) +} + func TestHealth_NodeChecks_FilterACL(t *testing.T) { t.Parallel() dir, token, srv, codec := testACLFilterServer(t) From f9a55aa7e093191c96ba45d411a08f622b99f357 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 12 Mar 2018 10:13:44 -0700 Subject: [PATCH 078/627] agent: clarified a number of comments per PR feedback --- agent/agent_endpoint_test.go | 5 ++++- agent/consul/catalog_endpoint_test.go | 2 -- agent/dns_test.go | 2 +- agent/structs/service_definition.go | 3 ++- agent/structs/structs.go | 3 +++ 5 files changed, 10 insertions(+), 5 deletions(-) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 167a23377..d59c804ea 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -1372,6 +1372,9 @@ func TestAgent_RegisterService_ConnectProxy(t *testing.T) { a := NewTestAgent(t.Name(), "") defer a.Shutdown() + // Register a proxy. Note that the destination doesn't exist here on + // this agent or in the catalog at all. This is intended and part + // of the design. 
args := &structs.ServiceDefinition{ Kind: structs.ServiceKindConnectProxy, Name: "connect-proxy", @@ -1388,7 +1391,7 @@ func TestAgent_RegisterService_ConnectProxy(t *testing.T) { assert.Nil(err) assert.Nil(obj) - // Ensure the servie + // Ensure the service svc, ok := a.State.Services()["connect-proxy"] assert.True(ok, "has service") assert.Equal(structs.ServiceKindConnectProxy, svc.Kind) diff --git a/agent/consul/catalog_endpoint_test.go b/agent/consul/catalog_endpoint_test.go index d08438f9d..7b2247af7 100644 --- a/agent/consul/catalog_endpoint_test.go +++ b/agent/consul/catalog_endpoint_test.go @@ -393,7 +393,6 @@ func TestCatalog_Register_ConnectProxy_ACLProxyDestination(t *testing.T) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -1802,7 +1801,6 @@ func TestCatalog_ListServiceNodes_ConnectProxy_ACL(t *testing.T) { c.ACLDatacenter = "dc1" c.ACLMasterToken = "root" c.ACLDefaultPolicy = "deny" - c.ACLEnforceVersion8 = false }) defer os.RemoveAll(dir1) defer s1.Shutdown() diff --git a/agent/dns_test.go b/agent/dns_test.go index a501a9c9f..d897a921e 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -1049,7 +1049,7 @@ func TestDNS_ConnectServiceLookup(t *testing.T) { a := NewTestAgent(t.Name(), "") defer a.Shutdown() - // Register a node with an external service. + // Register { args := structs.TestRegisterRequestProxy(t) args.Service.ProxyDestination = "db" diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index d469ed5d7..a10f1527f 100644 --- a/agent/structs/service_definition.go +++ b/agent/structs/service_definition.go @@ -1,6 +1,7 @@ package structs -// ServiceDefinition is used to JSON decode the Service definitions +// ServiceDefinition is used to JSON decode the Service definitions. For +// documentation on specific fields see NodeService which is better documented. 
type ServiceDefinition struct { Kind ServiceKind ID string diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 40b606d17..95c0ba069 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -491,6 +491,9 @@ type NodeService struct { // ProxyDestination is the name of the service that this service is // a Connect proxy for. This is only valid if Kind is "connect-proxy". + // The destination may be a service that isn't present in the catalog. + // This is expected and allowed to allow for proxies to come up + // earlier than their target services. ProxyDestination string RaftIndex From 767d2eaef6217a1d713db220d3d6a59e2b582a95 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 12 Mar 2018 13:05:06 -0700 Subject: [PATCH 079/627] agent: commenting some tests --- agent/agent_endpoint_test.go | 8 ++++++++ agent/catalog_endpoint_test.go | 6 ++++++ 2 files changed, 14 insertions(+) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index d59c804ea..566d397cf 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -69,6 +69,8 @@ func TestAgent_Services(t *testing.T) { } } +// This tests that the agent services endpoint (/v1/agent/services) returns +// Connect proxies. func TestAgent_Services_ConnectProxy(t *testing.T) { t.Parallel() @@ -1365,6 +1367,9 @@ func TestAgent_RegisterService_InvalidAddress(t *testing.T) { } } +// This tests local agent service registration of a connect proxy. This +// verifies that it is put in the local state store properly for syncing +// later. func TestAgent_RegisterService_ConnectProxy(t *testing.T) { t.Parallel() @@ -1401,6 +1406,9 @@ func TestAgent_RegisterService_ConnectProxy(t *testing.T) { assert.Equal("abc123", a.State.ServiceToken("connect-proxy")) } +// This tests that connect proxy validation is done for local agent +// registration. This doesn't need to test validation exhaustively since +// that is done via a table test in the structs package. 
func TestAgent_RegisterService_ConnectProxyInvalid(t *testing.T) { t.Parallel() diff --git a/agent/catalog_endpoint_test.go b/agent/catalog_endpoint_test.go index 71c848ede..64e6c3dbe 100644 --- a/agent/catalog_endpoint_test.go +++ b/agent/catalog_endpoint_test.go @@ -751,6 +751,8 @@ func TestCatalogServiceNodes_DistanceSort(t *testing.T) { } } +// Test that connect proxies can be queried via /v1/catalog/service/:service +// directly and that their results contain the proxy fields. func TestCatalogServiceNodes_ConnectProxy(t *testing.T) { t.Parallel() @@ -775,6 +777,8 @@ func TestCatalogServiceNodes_ConnectProxy(t *testing.T) { assert.Equal(structs.ServiceKindConnectProxy, nodes[0].ServiceKind) } +// Test that the Connect-compatible endpoints can be queried for a +// service via /v1/catalog/connect/:service. func TestCatalogConnectServiceNodes_good(t *testing.T) { t.Parallel() @@ -834,6 +838,8 @@ func TestCatalogNodeServices(t *testing.T) { } } +// Test that the services on a node contain all the Connect proxies on +// the node as well with their fields properly populated. 
func TestCatalogNodeServices_ConnectProxy(t *testing.T) { t.Parallel() From 7e8d6067178c647df8996f8cb9db95dd23b9ef0a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 16:54:44 -1000 Subject: [PATCH 080/627] agent: address PR feedback --- agent/catalog_endpoint_test.go | 2 ++ agent/consul/health_endpoint.go | 33 ++++++++++++++++++++------------- agent/dns_test.go | 2 ++ agent/health_endpoint_test.go | 2 +- 4 files changed, 25 insertions(+), 14 deletions(-) diff --git a/agent/catalog_endpoint_test.go b/agent/catalog_endpoint_test.go index 64e6c3dbe..f97b22dbc 100644 --- a/agent/catalog_endpoint_test.go +++ b/agent/catalog_endpoint_test.go @@ -788,6 +788,7 @@ func TestCatalogConnectServiceNodes_good(t *testing.T) { // Register args := structs.TestRegisterRequestProxy(t) + args.Service.Address = "127.0.0.55" var out struct{} assert.Nil(a.RPC("Catalog.Register", args, &out)) @@ -801,6 +802,7 @@ func TestCatalogConnectServiceNodes_good(t *testing.T) { nodes := obj.(structs.ServiceNodes) assert.Len(nodes, 1) assert.Equal(structs.ServiceKindConnectProxy, nodes[0].ServiceKind) + assert.Equal(args.Service.Address, nodes[0].ServiceAddress) } func TestCatalogNodeServices(t *testing.T) { diff --git a/agent/consul/health_endpoint.go b/agent/consul/health_endpoint.go index 214de777d..38b7a9c0a 100644 --- a/agent/consul/health_endpoint.go +++ b/agent/consul/health_endpoint.go @@ -112,22 +112,14 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc } // Determine the function we'll call - var f func(memdb.WatchSet, *state.Store) (uint64, structs.CheckServiceNodes, error) + var f func(memdb.WatchSet, *state.Store, *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) switch { case args.Connect: - f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.CheckServiceNodes, error) { - return s.CheckConnectServiceNodes(ws, args.ServiceName) - } - + f = h.serviceNodesConnect case args.TagFilter: - f = func(ws 
memdb.WatchSet, s *state.Store) (uint64, structs.CheckServiceNodes, error) { - return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTag) - } - + f = h.serviceNodesTagFilter default: - f = func(ws memdb.WatchSet, s *state.Store) (uint64, structs.CheckServiceNodes, error) { - return s.CheckServiceNodes(ws, args.ServiceName) - } + f = h.serviceNodesDefault } // If we're doing a connect query, we need read access to the service @@ -149,7 +141,7 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { - index, nodes, err := f(ws, state) + index, nodes, err := f(ws, state, args) if err != nil { return err } @@ -185,3 +177,18 @@ func (h *Health) ServiceNodes(args *structs.ServiceSpecificRequest, reply *struc } return err } + +// The serviceNodes* functions below are the various lookup methods that +// can be used by the ServiceNodes endpoint. + +func (h *Health) serviceNodesConnect(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { + return s.CheckConnectServiceNodes(ws, args.ServiceName) +} + +func (h *Health) serviceNodesTagFilter(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { + return s.CheckServiceTagNodes(ws, args.ServiceName, args.ServiceTag) +} + +func (h *Health) serviceNodesDefault(ws memdb.WatchSet, s *state.Store, args *structs.ServiceSpecificRequest) (uint64, structs.CheckServiceNodes, error) { + return s.CheckServiceNodes(ws, args.ServiceName) +} diff --git a/agent/dns_test.go b/agent/dns_test.go index d897a921e..d7bb2102d 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -1052,6 +1052,7 @@ func TestDNS_ConnectServiceLookup(t *testing.T) { // Register { args := structs.TestRegisterRequestProxy(t) + args.Address = "127.0.0.55" args.Service.ProxyDestination = "db" args.Service.Address = "" 
args.Service.Port = 12345 @@ -1082,6 +1083,7 @@ func TestDNS_ConnectServiceLookup(t *testing.T) { assert.True(ok) assert.Equal("foo.node.dc1.consul.", cnameRec.Hdr.Name) assert.Equal(uint32(0), srvRec.Hdr.Ttl) + assert.Equal("127.0.0.55", cnameRec.A.String()) } } diff --git a/agent/health_endpoint_test.go b/agent/health_endpoint_test.go index 688924df1..8164be477 100644 --- a/agent/health_endpoint_test.go +++ b/agent/health_endpoint_test.go @@ -851,7 +851,7 @@ func TestHealthConnectServiceNodes_PassingFilter(t *testing.T) { assert.Nil(err) assertIndex(t, resp) - // Should be 0 health check for consul + // Should be 1 nodes := obj.(structs.CheckServiceNodes) assert.Len(nodes, 1) }) From cfb62677c05dffe8ec10ab4d71d89a49d2b6f80b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 16 Mar 2018 21:20:54 -0700 Subject: [PATCH 081/627] agent/consul/state: CARoot structs and initial state store --- agent/consul/state/connect_ca.go | 106 ++++++++++++++++++++++++++++++ agent/consul/state/state_store.go | 4 ++ agent/structs/connect_ca.go | 37 +++++++++++ 3 files changed, 147 insertions(+) create mode 100644 agent/consul/state/connect_ca.go create mode 100644 agent/structs/connect_ca.go diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go new file mode 100644 index 000000000..9e3195918 --- /dev/null +++ b/agent/consul/state/connect_ca.go @@ -0,0 +1,106 @@ +package state + +import ( + "fmt" + + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/go-memdb" +) + +const ( + caRootTableName = "connect-ca-roots" +) + +// caRootTableSchema returns a new table schema used for storing +// CA roots for Connect. 
+func caRootTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: caRootTableName, + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + AllowMissing: false, + Unique: true, + Indexer: &memdb.UUIDFieldIndex{ + Field: "ID", + }, + }, + }, + } +} + +func init() { + registerSchema(caRootTableSchema) +} + +// CARoots returns the list of all CA roots. +func (s *Store) CARoots(ws memdb.WatchSet) (uint64, structs.CARoots, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + // Get the index + idx := maxIndexTxn(tx, caRootTableName) + + // Get all + iter, err := tx.Get(caRootTableName, "id") + if err != nil { + return 0, nil, fmt.Errorf("failed CA root lookup: %s", err) + } + ws.Add(iter.WatchCh()) + + var results structs.CARoots + for v := iter.Next(); v != nil; v = iter.Next() { + results = append(results, v.(*structs.CARoot)) + } + return idx, results, nil +} + +// CARootSet creates or updates a CA root. +// +// NOTE(mitchellh): I have a feeling we'll want a CARootMultiSetCAS to +// perform a check-and-set on the entire set of CARoots versus an individual +// set, since we'll want to modify them atomically during events such as +// rotation. +func (s *Store) CARootSet(idx uint64, v *structs.CARoot) error { + tx := s.db.Txn(true) + defer tx.Abort() + + if err := s.caRootSetTxn(tx, idx, v); err != nil { + return err + } + + tx.Commit() + return nil +} + +// caRootSetTxn is the inner method used to insert or update a CA root with +// the proper indexes into the state store. 
+func (s *Store) caRootSetTxn(tx *memdb.Txn, idx uint64, v *structs.CARoot) error { + // ID is required + if v.ID == "" { + return ErrMissingCARootID + } + + // Check for an existing value + existing, err := tx.First(caRootTableName, "id", v.ID) + if err != nil { + return fmt.Errorf("failed CA root lookup: %s", err) + } + if existing != nil { + old := existing.(*structs.CARoot) + v.CreateIndex = old.CreateIndex + } else { + v.CreateIndex = idx + } + v.ModifyIndex = idx + + // Insert + if err := tx.Insert(caRootTableName, v); err != nil { + return err + } + if err := tx.Insert("index", &IndexEntry{caRootTableName, idx}); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + + return nil +} diff --git a/agent/consul/state/state_store.go b/agent/consul/state/state_store.go index 62b6a8bff..c59e09e93 100644 --- a/agent/consul/state/state_store.go +++ b/agent/consul/state/state_store.go @@ -29,6 +29,10 @@ var ( // a Query with an empty ID. ErrMissingQueryID = errors.New("Missing Query ID") + // ErrMissingCARootID is returned when an CARoot set is called + // with an CARoot with an empty ID. + ErrMissingCARootID = errors.New("Missing CA Root ID") + // ErrMissingIntentionID is returned when an Intention set is called // with an Intention with an empty ID. ErrMissingIntentionID = errors.New("Missing Intention ID") diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go new file mode 100644 index 000000000..87211e09f --- /dev/null +++ b/agent/structs/connect_ca.go @@ -0,0 +1,37 @@ +package structs + +// IndexedCARoots is the list of currently trusted CA Roots. +type IndexedCARoots struct { + // ActiveRootID is the ID of a root in Roots that is the active CA root. + // Other roots are still valid if they're in the Roots list but are in + // the process of being rotated out. + ActiveRootID string + + // Roots is a list of root CA certs to trust. + Roots []*CARoot + + QueryMeta +} + +// CARoot represents a root CA certificate that is trusted. 
+type CARoot struct { + // ID is a globally unique ID (UUID) representing this CA root. + ID string + + // Name is a human-friendly name for this CA root. This value is + // opaque to Consul and is not used for anything internally. + Name string + + // RootCert is the PEM-encoded public certificate. + RootCert string + + // SigningCert is the PEM-encoded signing certificate and SigningKey + // is the PEM-encoded private key for the signing certificate. + SigningCert string + SigningKey string + + RaftIndex +} + +// CARoots is a list of CARoot structures. +type CARoots []*CARoot From 24830f4cfad3c935fce306dcdb6bd65d473af1af Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 16 Mar 2018 21:28:27 -0700 Subject: [PATCH 082/627] agent/consul: RPC endpoints to list roots --- agent/consul/connect_ca_endpoint.go | 55 +++++++++++++++++++++++++++++ agent/consul/server_oss.go | 1 + 2 files changed, 56 insertions(+) create mode 100644 agent/consul/connect_ca_endpoint.go diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go new file mode 100644 index 000000000..3f35ad79f --- /dev/null +++ b/agent/consul/connect_ca_endpoint.go @@ -0,0 +1,55 @@ +package consul + +import ( + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/go-memdb" +) + +// ConnectCA manages the Connect CA. +type ConnectCA struct { + // srv is a pointer back to the server. + srv *Server +} + +// Roots returns the currently trusted root certificates. 
+func (s *ConnectCA) Roots( + args *structs.DCSpecificRequest, + reply *structs.IndexedCARoots) error { + // Forward if necessary + if done, err := s.srv.forward("ConnectCA.Roots", args, args, reply); done { + return err + } + + return s.srv.blockingQuery( + &args.QueryOptions, &reply.QueryMeta, + func(ws memdb.WatchSet, state *state.Store) error { + index, roots, err := state.CARoots(ws) + if err != nil { + return err + } + + reply.Index, reply.Roots = index, roots + if reply.Roots == nil { + reply.Roots = make(structs.CARoots, 0) + } + + // The API response must NEVER contain the secret information + // such as keys and so on. We use a whitelist below to copy the + // specific fields we want to expose. + for i, r := range reply.Roots { + // IMPORTANT: r must NEVER be modified, since it is a pointer + // directly to the structure in the memdb store. + + reply.Roots[i] = &structs.CARoot{ + ID: r.ID, + Name: r.Name, + RootCert: r.RootCert, + RaftIndex: r.RaftIndex, + } + } + + return nil + }, + ) +} diff --git a/agent/consul/server_oss.go b/agent/consul/server_oss.go index e633c2699..016420476 100644 --- a/agent/consul/server_oss.go +++ b/agent/consul/server_oss.go @@ -4,6 +4,7 @@ func init() { registerEndpoint(func(s *Server) interface{} { return &ACL{s} }) registerEndpoint(func(s *Server) interface{} { return &Catalog{s} }) registerEndpoint(func(s *Server) interface{} { return NewCoordinate(s) }) + registerEndpoint(func(s *Server) interface{} { return &ConnectCA{s} }) registerEndpoint(func(s *Server) interface{} { return &Health{s} }) registerEndpoint(func(s *Server) interface{} { return &Intention{s} }) registerEndpoint(func(s *Server) interface{} { return &Internal{s} }) From 9ad2a12441a288bf95ed8768f45d8c7089210a4a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 16 Mar 2018 21:39:26 -0700 Subject: [PATCH 083/627] agent: /v1/connect/ca/roots --- agent/agent_endpoint.go | 9 +++++++++ agent/connect_ca_endpoint.go | 28 ++++++++++++++++++++++++++++ 
agent/http_oss.go | 2 ++ 3 files changed, 39 insertions(+) create mode 100644 agent/connect_ca_endpoint.go diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 75d5807c0..e3e8fcd51 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -836,3 +836,12 @@ func (s *HTTPServer) AgentToken(resp http.ResponseWriter, req *http.Request) (in s.agent.logger.Printf("[INFO] agent: Updated agent's ACL token %q", target) return nil, nil } + +// AgentConnectCARoots returns the trusted CA roots. +func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "GET" { + return nil, MethodNotAllowedError{req.Method, []string{"GET"}} + } + + return nil, nil +} diff --git a/agent/connect_ca_endpoint.go b/agent/connect_ca_endpoint.go new file mode 100644 index 000000000..8e92417bc --- /dev/null +++ b/agent/connect_ca_endpoint.go @@ -0,0 +1,28 @@ +package agent + +import ( + "net/http" + + "github.com/hashicorp/consul/agent/structs" +) + +// GET /v1/connect/ca/roots +func (s *HTTPServer) ConnectCARoots(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Test the method + if req.Method != "GET" { + return nil, MethodNotAllowedError{req.Method, []string{"GET"}} + } + + var args structs.DCSpecificRequest + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { + return nil, nil + } + + var reply structs.IndexedCARoots + defer setMeta(resp, &reply.QueryMeta) + if err := s.agent.RPC("ConnectCA.Roots", &args, &reply); err != nil { + return nil, err + } + + return reply.Roots, nil +} diff --git a/agent/http_oss.go b/agent/http_oss.go index 2e2c9751a..3cb18b2e1 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -29,6 +29,7 @@ func init() { registerEndpoint("/v1/agent/check/warn/", []string{"PUT"}, (*HTTPServer).AgentCheckWarn) registerEndpoint("/v1/agent/check/fail/", []string{"PUT"}, (*HTTPServer).AgentCheckFail) 
registerEndpoint("/v1/agent/check/update/", []string{"PUT"}, (*HTTPServer).AgentCheckUpdate) + registerEndpoint("/v1/agent/connect/ca/roots", []string{"GET"}, (*HTTPServer).AgentConnectCARoots) registerEndpoint("/v1/agent/service/register", []string{"PUT"}, (*HTTPServer).AgentRegisterService) registerEndpoint("/v1/agent/service/deregister/", []string{"PUT"}, (*HTTPServer).AgentDeregisterService) registerEndpoint("/v1/agent/service/maintenance/", []string{"PUT"}, (*HTTPServer).AgentServiceMaintenance) @@ -40,6 +41,7 @@ func init() { registerEndpoint("/v1/catalog/services", []string{"GET"}, (*HTTPServer).CatalogServices) registerEndpoint("/v1/catalog/service/", []string{"GET"}, (*HTTPServer).CatalogServiceNodes) registerEndpoint("/v1/catalog/node/", []string{"GET"}, (*HTTPServer).CatalogNodeServices) + registerEndpoint("/v1/connect/ca/roots", []string{"GET"}, (*HTTPServer).ConnectCARoots) registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionEndpoint) registerEndpoint("/v1/connect/intentions/match", []string{"GET"}, (*HTTPServer).IntentionMatch) registerEndpoint("/v1/connect/intentions/", []string{"GET"}, (*HTTPServer).IntentionSpecific) From f433f61fdfbc8b7d76334b65bf8d671519e2f9da Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 18 Mar 2018 22:07:52 -0700 Subject: [PATCH 084/627] agent/structs: json omit QueryMeta --- agent/connect_ca_endpoint.go | 2 +- agent/structs/connect_ca.go | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/agent/connect_ca_endpoint.go b/agent/connect_ca_endpoint.go index 8e92417bc..1c7871015 100644 --- a/agent/connect_ca_endpoint.go +++ b/agent/connect_ca_endpoint.go @@ -24,5 +24,5 @@ func (s *HTTPServer) ConnectCARoots(resp http.ResponseWriter, req *http.Request) return nil, err } - return reply.Roots, nil + return reply, nil } diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 87211e09f..46725dcc7 100644 --- a/agent/structs/connect_ca.go +++ 
b/agent/structs/connect_ca.go @@ -10,7 +10,9 @@ type IndexedCARoots struct { // Roots is a list of root CA certs to trust. Roots []*CARoot - QueryMeta + // QueryMeta contains the meta sent via a header. We ignore for JSON + // so this whole structure can be returned. + QueryMeta `json:"-"` } // CARoot represents a root CA certificate that is trusted. From d4e232f69b332105e42caa8c711391b8df9b4ce6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 19 Mar 2018 10:48:38 -0700 Subject: [PATCH 085/627] connect: create connect package for helpers --- connect/ca.go | 48 +++++++++ connect/connect.go | 3 + connect/testing.go | 230 ++++++++++++++++++++++++++++++++++++++++ connect/testing_test.go | 109 +++++++++++++++++++ 4 files changed, 390 insertions(+) create mode 100644 connect/ca.go create mode 100644 connect/connect.go create mode 100644 connect/testing.go create mode 100644 connect/testing_test.go diff --git a/connect/ca.go b/connect/ca.go new file mode 100644 index 000000000..e9ada4953 --- /dev/null +++ b/connect/ca.go @@ -0,0 +1,48 @@ +package connect + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "fmt" + "math/big" +) + +// ParseCert parses the x509 certificate from a PEM-encoded value. +func ParseCert(pemValue string) (*x509.Certificate, error) { + block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return nil, fmt.Errorf("no PEM-encoded data found") + } + + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("first PEM-block should be CERTIFICATE type") + } + + return x509.ParseCertificate(block.Bytes) +} + +// ParseSigner parses a crypto.Signer from a PEM-encoded key. The private key +// is expected to be the first block in the PEM value. 
+func ParseSigner(pemValue string) (crypto.Signer, error) { + block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return nil, fmt.Errorf("no PEM-encoded data found") + } + + switch block.Type { + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + + default: + return nil, fmt.Errorf("unknown PEM block type for signing key: %s", block.Type) + } +} + +// SerialNumber generates a serial number suitable for a certificate. +// +// This function is taken directly from the Vault implementation. +func SerialNumber() (*big.Int, error) { + return rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) +} diff --git a/connect/connect.go b/connect/connect.go new file mode 100644 index 000000000..b2ad85f71 --- /dev/null +++ b/connect/connect.go @@ -0,0 +1,3 @@ +// Package connect contains utilities and helpers for working with the +// Connect feature of Consul. +package connect diff --git a/connect/testing.go b/connect/testing.go new file mode 100644 index 000000000..78008270a --- /dev/null +++ b/connect/testing.go @@ -0,0 +1,230 @@ +package connect + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "net/url" + "sync/atomic" + "time" + + "github.com/hashicorp/consul/agent/structs" + "github.com/mitchellh/go-testing-interface" +) + +// testClusterID is the Consul cluster ID for testing. +// +// NOTE(mitchellh): This might have to change some other constant for +// real testing once we integrate the Cluster ID into the core. For now it +// is unchecked. +const testClusterID = "11111111-2222-3333-4444-555555555555" + +// testCACounter is just an atomically incremented counter for creating +// unique names for the CA certs. +var testCACounter uint64 = 0 + +// TestCA creates a test CA certificate and signing key and returns it +// in the CARoot structure format. The CARoot returned will NOT have an ID +// set. 
+// +// If xc is non-nil, then the returned certificate will have a signing cert +// that is cross-signed with the previous cert, and this will be set as +// SigningCert. +func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { + var result structs.CARoot + result.Name = fmt.Sprintf("Test CA %d", atomic.AddUint64(&testCACounter, 1)) + + // Create the private key we'll use for this CA cert. + signer := testPrivateKey(t, &result) + + // The serial number for the cert + sn, err := SerialNumber() + if err != nil { + t.Fatalf("error generating serial number: %s", err) + } + + // The URI (SPIFFE compatible) for the cert + uri, err := url.Parse(fmt.Sprintf("spiffe://%s.consul", testClusterID)) + if err != nil { + t.Fatalf("error parsing CA URI: %s", err) + } + + // Create the CA cert + template := x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{CommonName: result.Name}, + URIs: []*url.URL{uri}, + PermittedDNSDomainsCritical: true, + PermittedDNSDomains: []string{uri.Hostname()}, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + IsCA: true, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: testKeyID(t, signer.Public()), + SubjectKeyId: testKeyID(t, signer.Public()), + } + + bs, err := x509.CreateCertificate( + rand.Reader, &template, &template, signer.Public(), signer) + if err != nil { + t.Fatalf("error generating CA certificate: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + t.Fatalf("error encoding private key: %s", err) + } + result.RootCert = buf.String() + + // If there is a prior CA to cross-sign with, then we need to create that + // and set it as the signing cert. 
+ if xc != nil { + xccert, err := ParseCert(xc.RootCert) + if err != nil { + t.Fatalf("error parsing CA cert: %s", err) + } + xcsigner, err := ParseSigner(xc.SigningKey) + if err != nil { + t.Fatalf("error parsing signing key: %s", err) + } + + // Set the authority key to be the previous one + template.AuthorityKeyId = testKeyID(t, xcsigner.Public()) + + // Create the new certificate where the parent is the previous + // CA, the public key is the new public key, and the signing private + // key is the old private key. + bs, err := x509.CreateCertificate( + rand.Reader, &template, xccert, signer.Public(), xcsigner) + if err != nil { + t.Fatalf("error generating CA certificate: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + t.Fatalf("error encoding private key: %s", err) + } + result.SigningCert = buf.String() + } + + return &result +} + +// TestLeaf returns a valid leaf certificate for the named service with +// the given CA Root. 
+func TestLeaf(t testing.T, service string, root *structs.CARoot) string { + // Parse the CA cert and signing key from the root + caCert, err := ParseCert(root.RootCert) + if err != nil { + t.Fatalf("error parsing CA cert: %s", err) + } + signer, err := ParseSigner(root.SigningKey) + if err != nil { + t.Fatalf("error parsing signing key: %s", err) + } + + // The serial number for the cert + sn, err := SerialNumber() + if err != nil { + t.Fatalf("error generating serial number: %s", err) + } + + // Cert template for generation + template := x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{CommonName: service}, + SignatureAlgorithm: x509.ECDSAWithSHA256, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageDataEncipherment | x509.KeyUsageKeyAgreement, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: testKeyID(t, signer.Public()), + SubjectKeyId: testKeyID(t, signer.Public()), + } + + // Create the certificate, PEM encode it and return that value. + var buf bytes.Buffer + bs, err := x509.CreateCertificate( + rand.Reader, &template, caCert, signer.Public(), signer) + if err != nil { + t.Fatalf("error generating certificate: %s", err) + } + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + t.Fatalf("error encoding private key: %s", err) + } + + return buf.String() +} + +// testKeyID returns a KeyID from the given public key. The "raw" must be +// an *ecdsa.PublicKey, but is an interface type to suppot crypto.Signer.Public +// values. 
+func testKeyID(t testing.T, raw interface{}) []byte { + pub, ok := raw.(*ecdsa.PublicKey) + if !ok { + t.Fatalf("raw is type %T, expected *ecdsa.PublicKey", raw) + } + + // This is not standard; RFC allows any unique identifier as long as they + // match in subject/authority chains but suggests specific hashing of DER + // bytes of public key including DER tags. I can't be bothered to do esp. + // since ECDSA keys don't have a handy way to marshal the publick key alone. + h := sha256.New() + h.Write(pub.X.Bytes()) + h.Write(pub.Y.Bytes()) + return h.Sum([]byte{}) +} + +// testMemoizePK is the private key that we memoize once we generate it +// once so that our tests don't rely on too much system entropy. +var testMemoizePK atomic.Value + +// testPrivateKey creates an ECDSA based private key. +func testPrivateKey(t testing.T, ca *structs.CARoot) crypto.Signer { + // If we already generated a private key, use that + var pk *ecdsa.PrivateKey + if v := testMemoizePK.Load(); v != nil { + pk = v.(*ecdsa.PrivateKey) + } + + // If we have no key, then create a new one. 
+ if pk == nil { + var err error + pk, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("error generating private key: %s", err) + } + } + + bs, err := x509.MarshalECPrivateKey(pk) + if err != nil { + t.Fatalf("error generating private key: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) + if err != nil { + t.Fatalf("error encoding private key: %s", err) + } + ca.SigningKey = buf.String() + + // Memoize the key + testMemoizePK.Store(pk) + + return pk +} diff --git a/connect/testing_test.go b/connect/testing_test.go new file mode 100644 index 000000000..d07aac201 --- /dev/null +++ b/connect/testing_test.go @@ -0,0 +1,109 @@ +package connect + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +// hasOpenSSL is used to determine if the openssl CLI exists for unit tests. +var hasOpenSSL bool + +func init() { + _, err := exec.LookPath("openssl") + hasOpenSSL = err == nil +} + +// Test that the TestCA and TestLeaf functions generate valid certificates. +func TestTestCAAndLeaf(t *testing.T) { + if !hasOpenSSL { + t.Skip("openssl not found") + return + } + + assert := assert.New(t) + + // Create the certs + ca := TestCA(t, nil) + leaf := TestLeaf(t, "web", ca) + + // Create a temporary directory for storing the certs + td, err := ioutil.TempDir("", "consul") + assert.Nil(err) + defer os.RemoveAll(td) + + // Write the cert + assert.Nil(ioutil.WriteFile(filepath.Join(td, "ca.pem"), []byte(ca.RootCert), 0644)) + assert.Nil(ioutil.WriteFile(filepath.Join(td, "leaf.pem"), []byte(leaf), 0644)) + + // Use OpenSSL to verify so we have an external, known-working process + // that can verify this outside of our own implementations. 
+ cmd := exec.Command( + "openssl", "verify", "-verbose", "-CAfile", "ca.pem", "leaf.pem") + cmd.Dir = td + output, err := cmd.Output() + t.Log(string(output)) + assert.Nil(err) +} + +// Test cross-signing. +func TestTestCAAndLeaf_xc(t *testing.T) { + if !hasOpenSSL { + t.Skip("openssl not found") + return + } + + assert := assert.New(t) + + // Create the certs + ca1 := TestCA(t, nil) + ca2 := TestCA(t, ca1) + leaf1 := TestLeaf(t, "web", ca1) + leaf2 := TestLeaf(t, "web", ca2) + + // Create a temporary directory for storing the certs + td, err := ioutil.TempDir("", "consul") + assert.Nil(err) + defer os.RemoveAll(td) + + // Write the cert + xcbundle := []byte(ca1.RootCert) + xcbundle = append(xcbundle, '\n') + xcbundle = append(xcbundle, []byte(ca2.SigningCert)...) + assert.Nil(ioutil.WriteFile(filepath.Join(td, "ca.pem"), xcbundle, 0644)) + assert.Nil(ioutil.WriteFile(filepath.Join(td, "leaf1.pem"), []byte(leaf1), 0644)) + assert.Nil(ioutil.WriteFile(filepath.Join(td, "leaf2.pem"), []byte(leaf2), 0644)) + + // OpenSSL verify the cross-signed leaf (leaf2) + { + cmd := exec.Command( + "openssl", "verify", "-verbose", "-CAfile", "ca.pem", "leaf2.pem") + cmd.Dir = td + output, err := cmd.Output() + t.Log(string(output)) + assert.Nil(err) + } + + // OpenSSL verify the old leaf (leaf1) + { + cmd := exec.Command( + "openssl", "verify", "-verbose", "-CAfile", "ca.pem", "leaf1.pem") + cmd.Dir = td + output, err := cmd.Output() + t.Log(string(output)) + assert.Nil(err) + } +} + +// Test that the private key is memoized to preseve system entropy. 
+func TestTestPrivateKey_memoize(t *testing.T) { + ca1 := TestCA(t, nil) + ca2 := TestCA(t, nil) + if ca1.SigningKey != ca2.SigningKey { + t.Fatal("should have the same signing keys for tests") + } +} From 6550ff949248502fcea2ba9a605a5eb57227a730 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 19 Mar 2018 13:53:57 -0700 Subject: [PATCH 086/627] agent/connect: package for agent-related Connect, parse SPIFFE IDs --- agent/connect/ca.go | 59 ++++++++++++++++++++++++++++++++++++ agent/connect/spiffe.go | 55 +++++++++++++++++++++++++++++++++ agent/connect/spiffe_test.go | 57 ++++++++++++++++++++++++++++++++++ 3 files changed, 171 insertions(+) create mode 100644 agent/connect/ca.go create mode 100644 agent/connect/spiffe.go create mode 100644 agent/connect/spiffe_test.go diff --git a/agent/connect/ca.go b/agent/connect/ca.go new file mode 100644 index 000000000..90c484529 --- /dev/null +++ b/agent/connect/ca.go @@ -0,0 +1,59 @@ +package connect + +import ( + "crypto" + "crypto/ecdsa" + "crypto/sha256" + "crypto/x509" + "encoding/pem" + "fmt" +) + +// ParseCert parses the x509 certificate from a PEM-encoded value. +func ParseCert(pemValue string) (*x509.Certificate, error) { + block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return nil, fmt.Errorf("no PEM-encoded data found") + } + + if block.Type != "CERTIFICATE" { + return nil, fmt.Errorf("first PEM-block should be CERTIFICATE type") + } + + return x509.ParseCertificate(block.Bytes) +} + +// ParseSigner parses a crypto.Signer from a PEM-encoded key. The private key +// is expected to be the first block in the PEM value. 
+func ParseSigner(pemValue string) (crypto.Signer, error) { + block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return nil, fmt.Errorf("no PEM-encoded data found") + } + + switch block.Type { + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + + default: + return nil, fmt.Errorf("unknown PEM block type for signing key: %s", block.Type) + } +} + +// KeyId returns a x509 KeyId from the given signing key. The key must be +// an *ecdsa.PublicKey, but is an interface type to support crypto.Signer. +func KeyId(raw interface{}) ([]byte, error) { + pub, ok := raw.(*ecdsa.PublicKey) + if !ok { + return nil, fmt.Errorf("invalid key type: %T", raw) + } + + // This is not standard; RFC allows any unique identifier as long as they + // match in subject/authority chains but suggests specific hashing of DER + // bytes of public key including DER tags. I can't be bothered to do esp. + // since ECDSA keys don't have a handy way to marshal the publick key alone. + h := sha256.New() + h.Write(pub.X.Bytes()) + h.Write(pub.Y.Bytes()) + return h.Sum([]byte{}), nil +} diff --git a/agent/connect/spiffe.go b/agent/connect/spiffe.go new file mode 100644 index 000000000..58a6b83e3 --- /dev/null +++ b/agent/connect/spiffe.go @@ -0,0 +1,55 @@ +package connect + +import ( + "fmt" + "net/url" + "regexp" +) + +// SpiffeID represents a Connect-valid SPIFFE ID. The user should type switch +// on the various implementations in this package to determine the type of ID. +type SpiffeID interface { + URI() *url.URL +} + +var ( + spiffeIDServiceRegexp = regexp.MustCompile( + `^/ns/(\w+)/dc/(\w+)/svc/(\w+)$`) +) + +// ParseSpiffeID parses a SPIFFE ID from the input URI. 
+func ParseSpiffeID(input *url.URL) (SpiffeID, error) { + if input.Scheme != "spiffe" { + return nil, fmt.Errorf("SPIFFE ID must have 'spiffe' scheme") + } + + // Test for service IDs + if v := spiffeIDServiceRegexp.FindStringSubmatch(input.Path); v != nil { + return &SpiffeIDService{ + Host: input.Host, + Namespace: v[1], + Datacenter: v[2], + Service: v[3], + }, nil + } + + return nil, fmt.Errorf("SPIFFE ID is not in the expected format") +} + +// SpiffeIDService is the structure to represent the SPIFFE ID for a service. +type SpiffeIDService struct { + Host string + Namespace string + Datacenter string + Service string +} + +// URI returns the *url.URL for this SPIFFE ID. +func (id *SpiffeIDService) URI() *url.URL { + var result url.URL + result.Scheme = "spiffe" + result.Host = id.Host + result.Path = fmt.Sprintf("/ns/%s/dc/%s/svc/%s", + id.Namespace, id.Datacenter, id.Service) + return &result +} diff --git a/agent/connect/spiffe_test.go b/agent/connect/spiffe_test.go new file mode 100644 index 000000000..861a4fa63 --- /dev/null +++ b/agent/connect/spiffe_test.go @@ -0,0 +1,57 @@ +package connect + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/assert" +) + +// testSpiffeIDCases contains the test cases for parsing and encoding +// the SPIFFE IDs. This is a global since it is used in multiple test functions. 
+var testSpiffeIDCases = []struct { + Name string + URI string + Struct interface{} + ParseError string +}{ + { + "invalid scheme", + "http://google.com/", + nil, + "scheme", + }, + + { + "basic service ID", + "spiffe://1234.consul/ns/default/dc/dc01/svc/web", + &SpiffeIDService{ + Host: "1234.consul", + Namespace: "default", + Datacenter: "dc01", + Service: "web", + }, + "", + }, +} + +func TestParseSpiffeID(t *testing.T) { + for _, tc := range testSpiffeIDCases { + t.Run(tc.Name, func(t *testing.T) { + assert := assert.New(t) + + // Parse the URI, should always be valid + uri, err := url.Parse(tc.URI) + assert.Nil(err) + + // Parse the ID and check the error/return value + actual, err := ParseSpiffeID(uri) + assert.Equal(tc.ParseError != "", err != nil, "error value") + if err != nil { + assert.Contains(err.Error(), tc.ParseError) + return + } + assert.Equal(tc.Struct, actual) + }) + } +} From a360c5cca41e7561ff2d3d6ad07835cc8e090e32 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 19 Mar 2018 14:36:17 -0700 Subject: [PATCH 087/627] agent/consul: basic sign endpoint not tested yet --- agent/connect/ca.go | 15 +++ {connect => agent/connect}/testing.go | 56 +++++++++++- {connect => agent/connect}/testing_test.go | 0 agent/consul/connect_ca_endpoint.go | 101 +++++++++++++++++++++ agent/structs/connect_ca.go | 21 ++++- connect/ca.go | 48 ---------- connect/connect.go | 3 - 7 files changed, 188 insertions(+), 56 deletions(-) rename {connect => agent/connect}/testing.go (83%) rename {connect => agent/connect}/testing_test.go (100%) delete mode 100644 connect/ca.go delete mode 100644 connect/connect.go diff --git a/agent/connect/ca.go b/agent/connect/ca.go index 90c484529..a0a65ece6 100644 --- a/agent/connect/ca.go +++ b/agent/connect/ca.go @@ -40,6 +40,21 @@ func ParseSigner(pemValue string) (crypto.Signer, error) { } } +// ParseCSR parses a CSR from a PEM-encoded value. The certificate request +// must be the the first block in the PEM value. 
+func ParseCSR(pemValue string) (*x509.CertificateRequest, error) { + block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return nil, fmt.Errorf("no PEM-encoded data found") + } + + if block.Type != "CERTIFICATE REQUEST" { + return nil, fmt.Errorf("first PEM-block should be CERTIFICATE REQUEST type") + } + + return x509.ParseCertificateRequest(block.Bytes) +} + // KeyId returns a x509 KeyId from the given signing key. The key must be // an *ecdsa.PublicKey, but is an interface type to support crypto.Signer. func KeyId(raw interface{}) ([]byte, error) { diff --git a/connect/testing.go b/agent/connect/testing.go similarity index 83% rename from connect/testing.go rename to agent/connect/testing.go index 78008270a..96b13dcf5 100644 --- a/connect/testing.go +++ b/agent/connect/testing.go @@ -11,6 +11,7 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" + "math/big" "net/url" "sync/atomic" "time" @@ -45,7 +46,7 @@ func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { signer := testPrivateKey(t, &result) // The serial number for the cert - sn, err := SerialNumber() + sn, err := testSerialNumber() if err != nil { t.Fatalf("error generating serial number: %s", err) } @@ -124,7 +125,11 @@ func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { // the given CA Root. 
func TestLeaf(t testing.T, service string, root *structs.CARoot) string { // Parse the CA cert and signing key from the root - caCert, err := ParseCert(root.RootCert) + cert := root.SigningCert + if cert == "" { + cert = root.RootCert + } + caCert, err := ParseCert(cert) if err != nil { t.Fatalf("error parsing CA cert: %s", err) } @@ -133,8 +138,16 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { t.Fatalf("error parsing signing key: %s", err) } + // Build the SPIFFE ID + spiffeId := &SpiffeIDService{ + Host: fmt.Sprintf("%s.consul", testClusterID), + Namespace: "default", + Datacenter: "dc01", + Service: service, + } + // The serial number for the cert - sn, err := SerialNumber() + sn, err := testSerialNumber() if err != nil { t.Fatalf("error generating serial number: %s", err) } @@ -143,6 +156,7 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { template := x509.Certificate{ SerialNumber: sn, Subject: pkix.Name{CommonName: service}, + URIs: []*url.URL{spiffeId.URI()}, SignatureAlgorithm: x509.ECDSAWithSHA256, BasicConstraintsValid: true, KeyUsage: x509.KeyUsageDataEncipherment | x509.KeyUsageKeyAgreement, @@ -171,6 +185,30 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { return buf.String() } +// TestCSR returns a CSR to sign the given service. +func TestCSR(t testing.T, id SpiffeID) string { + template := &x509.CertificateRequest{ + URIs: []*url.URL{id.URI()}, + } + + // Create the private key we'll use + signer := testPrivateKey(t, nil) + + // Create the CSR itself + bs, err := x509.CreateCertificateRequest(rand.Reader, template, signer) + if err != nil { + t.Fatalf("error creating CSR: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: bs}) + if err != nil { + t.Fatalf("error encoding CSR: %s", err) + } + + return buf.String() +} + // testKeyID returns a KeyID from the given public key. 
The "raw" must be // an *ecdsa.PublicKey, but is an interface type to suppot crypto.Signer.Public // values. @@ -221,10 +259,20 @@ func testPrivateKey(t testing.T, ca *structs.CARoot) crypto.Signer { if err != nil { t.Fatalf("error encoding private key: %s", err) } - ca.SigningKey = buf.String() + if ca != nil { + ca.SigningKey = buf.String() + } // Memoize the key testMemoizePK.Store(pk) return pk } + +// testSerialNumber generates a serial number suitable for a certificate. +// For testing, this just sets it to a random number. +// +// This function is taken directly from the Vault implementation. +func testSerialNumber() (*big.Int, error) { + return rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) +} diff --git a/connect/testing_test.go b/agent/connect/testing_test.go similarity index 100% rename from connect/testing_test.go rename to agent/connect/testing_test.go diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 3f35ad79f..f07fbd90f 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -1,6 +1,16 @@ package consul import ( + "bytes" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "time" + + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" @@ -53,3 +63,94 @@ func (s *ConnectCA) Roots( }, ) } + +// Sign signs a certificate for a service. +// +// NOTE(mitchellh): There is a LOT missing from this. I do next to zero +// validation of the incoming CSR, the way the cert is signed probably +// isn't right, we're not using enough of the CSR fields, etc. 
+func (s *ConnectCA) Sign( + args *structs.CASignRequest, + reply *structs.IndexedCARoots) error { + // Parse the CSR + csr, err := connect.ParseCSR(args.CSR) + if err != nil { + return err + } + + // Parse the SPIFFE ID + spiffeId, err := connect.ParseSpiffeID(csr.URIs[0]) + if err != nil { + return err + } + serviceId, ok := spiffeId.(*connect.SpiffeIDService) + if !ok { + return fmt.Errorf("SPIFFE ID in CSR must be a service ID") + } + + var root *structs.CARoot + + // Determine the signing certificate. It is the set signing cert + // unless that is empty, in which case it is identically to the public + // cert. + certPem := root.SigningCert + if certPem == "" { + certPem = root.RootCert + } + + // Parse the CA cert and signing key from the root + caCert, err := connect.ParseCert(certPem) + if err != nil { + return fmt.Errorf("error parsing CA cert: %s", err) + } + signer, err := connect.ParseSigner(root.SigningKey) + if err != nil { + return fmt.Errorf("error parsing signing key: %s", err) + } + + // The serial number for the cert. NOTE(mitchellh): in the final + // implementation this should be monotonically increasing based on + // some raft state. + sn, err := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) + if err != nil { + return fmt.Errorf("error generating serial number: %s", err) + } + + // Create the keyId for the cert from the signing public key. 
+ keyId, err := connect.KeyId(signer.Public()) + if err != nil { + return err + } + + // Cert template for generation + template := x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{CommonName: serviceId.Service}, + URIs: csr.URIs, + SignatureAlgorithm: x509.ECDSAWithSHA256, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageDataEncipherment | x509.KeyUsageKeyAgreement, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: keyId, + SubjectKeyId: keyId, + } + + // Create the certificate, PEM encode it and return that value. + var buf bytes.Buffer + bs, err := x509.CreateCertificate( + rand.Reader, &template, caCert, signer.Public(), signer) + if err != nil { + return fmt.Errorf("error generating certificate: %s", err) + } + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return fmt.Errorf("error encoding private key: %s", err) + } + + return nil +} diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 46725dcc7..992fce85a 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -28,7 +28,8 @@ type CARoot struct { RootCert string // SigningCert is the PEM-encoded signing certificate and SigningKey - // is the PEM-encoded private key for the signing certificate. + // is the PEM-encoded private key for the signing certificate. These + // may actually be empty if the CA plugin in use manages these for us. SigningCert string SigningKey string @@ -37,3 +38,21 @@ type CARoot struct { // CARoots is a list of CARoot structures. type CARoots []*CARoot + +// CASignRequest is the request for signing a service certificate. +type CASignRequest struct { + // Datacenter is the target for this request. + Datacenter string + + // CSR is the PEM-encoded CSR. 
+ CSR string + + // WriteRequest is a common struct containing ACL tokens and other + // write-related common elements for requests. + WriteRequest +} + +// RequestDatacenter returns the datacenter for a given request. +func (q *CASignRequest) RequestDatacenter() string { + return q.Datacenter +} diff --git a/connect/ca.go b/connect/ca.go deleted file mode 100644 index e9ada4953..000000000 --- a/connect/ca.go +++ /dev/null @@ -1,48 +0,0 @@ -package connect - -import ( - "crypto" - "crypto/rand" - "crypto/x509" - "encoding/pem" - "fmt" - "math/big" -) - -// ParseCert parses the x509 certificate from a PEM-encoded value. -func ParseCert(pemValue string) (*x509.Certificate, error) { - block, _ := pem.Decode([]byte(pemValue)) - if block == nil { - return nil, fmt.Errorf("no PEM-encoded data found") - } - - if block.Type != "CERTIFICATE" { - return nil, fmt.Errorf("first PEM-block should be CERTIFICATE type") - } - - return x509.ParseCertificate(block.Bytes) -} - -// ParseSigner parses a crypto.Signer from a PEM-encoded key. The private key -// is expected to be the first block in the PEM value. -func ParseSigner(pemValue string) (crypto.Signer, error) { - block, _ := pem.Decode([]byte(pemValue)) - if block == nil { - return nil, fmt.Errorf("no PEM-encoded data found") - } - - switch block.Type { - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - - default: - return nil, fmt.Errorf("unknown PEM block type for signing key: %s", block.Type) - } -} - -// SerialNumber generates a serial number suitable for a certificate. -// -// This function is taken directly from the Vault implementation. 
-func SerialNumber() (*big.Int, error) { - return rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) -} diff --git a/connect/connect.go b/connect/connect.go deleted file mode 100644 index b2ad85f71..000000000 --- a/connect/connect.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package connect contains utilities and helpers for working with the -// Connect feature of Consul. -package connect From 9a8653f45e1b003bf1ac93cfe2419f7c1f913f94 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 19 Mar 2018 20:29:14 -0700 Subject: [PATCH 088/627] agent/consul: test for ConnectCA.Sign --- agent/connect/{testing.go => testing_ca.go} | 16 ++++++- .../{testing_test.go => testing_ca_test.go} | 0 agent/connect/testing_spiffe.go | 15 +++++++ agent/consul/connect_ca_endpoint.go | 7 +++- agent/consul/connect_ca_endpoint_test.go | 42 +++++++++++++++++++ agent/consul/state/connect_ca.go | 18 ++++++++ agent/structs/connect_ca.go | 6 +++ 7 files changed, 101 insertions(+), 3 deletions(-) rename agent/connect/{testing.go => testing_ca.go} (95%) rename agent/connect/{testing_test.go => testing_ca_test.go} (100%) create mode 100644 agent/connect/testing_spiffe.go create mode 100644 agent/consul/connect_ca_endpoint_test.go diff --git a/agent/connect/testing.go b/agent/connect/testing_ca.go similarity index 95% rename from agent/connect/testing.go rename to agent/connect/testing_ca.go index 96b13dcf5..b6140bb04 100644 --- a/agent/connect/testing.go +++ b/agent/connect/testing_ca.go @@ -17,6 +17,7 @@ import ( "time" "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/go-uuid" "github.com/mitchellh/go-testing-interface" ) @@ -32,14 +33,15 @@ const testClusterID = "11111111-2222-3333-4444-555555555555" var testCACounter uint64 = 0 // TestCA creates a test CA certificate and signing key and returns it -// in the CARoot structure format. The CARoot returned will NOT have an ID -// set. +// in the CARoot structure format. 
The returned CA will be set as Active = true. // // If xc is non-nil, then the returned certificate will have a signing cert // that is cross-signed with the previous cert, and this will be set as // SigningCert. func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { var result structs.CARoot + result.ID = testUUID(t) + result.Active = true result.Name = fmt.Sprintf("Test CA %d", atomic.AddUint64(&testCACounter, 1)) // Create the private key we'll use for this CA cert. @@ -276,3 +278,13 @@ func testPrivateKey(t testing.T, ca *structs.CARoot) crypto.Signer { func testSerialNumber() (*big.Int, error) { return rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) } + +// testUUID generates a UUID for testing. +func testUUID(t testing.T) string { + ret, err := uuid.GenerateUUID() + if err != nil { + t.Fatalf("Unable to generate a UUID, %s", err) + } + + return ret +} diff --git a/agent/connect/testing_test.go b/agent/connect/testing_ca_test.go similarity index 100% rename from agent/connect/testing_test.go rename to agent/connect/testing_ca_test.go diff --git a/agent/connect/testing_spiffe.go b/agent/connect/testing_spiffe.go new file mode 100644 index 000000000..e2e7a470f --- /dev/null +++ b/agent/connect/testing_spiffe.go @@ -0,0 +1,15 @@ +package connect + +import ( + "github.com/mitchellh/go-testing-interface" +) + +// TestSpiffeIDService returns a SPIFFE ID representing a service. 
+func TestSpiffeIDService(t testing.T, service string) *SpiffeIDService { + return &SpiffeIDService{ + Host: testClusterID + ".consul", + Namespace: "default", + Datacenter: "dc01", + Service: service, + } +} diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index f07fbd90f..2e26c4e2b 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -88,7 +88,12 @@ func (s *ConnectCA) Sign( return fmt.Errorf("SPIFFE ID in CSR must be a service ID") } - var root *structs.CARoot + // Get the currently active root + state := s.srv.fsm.State() + _, root, err := state.CARootActive(nil) + if err != nil { + return err + } // Determine the signing certificate. It is the set signing cert // unless that is empty, in which case it is identically to the public diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go new file mode 100644 index 000000000..a08e31e04 --- /dev/null +++ b/agent/consul/connect_ca_endpoint_test.go @@ -0,0 +1,42 @@ +package consul + +import ( + "os" + "testing" + + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/testrpc" + "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/stretchr/testify/assert" +) + +// Test CA signing +// +// NOTE(mitchellh): Just testing the happy path and not all the other validation +// issues because the internals of this method will probably be gutted for the +// CA plugins then we can just test mocks. 
+func TestConnectCASign(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Insert a CA + state := s1.fsm.State() + assert.Nil(state.CARootSet(1, connect.TestCA(t, nil))) + + // Generate a CSR and request signing + args := &structs.CASignRequest{ + Datacenter: "dc01", + CSR: connect.TestCSR(t, connect.TestSpiffeIDService(t, "web")), + } + var reply interface{} + assert.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply)) +} diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index 9e3195918..9c19b65c2 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -55,6 +55,24 @@ func (s *Store) CARoots(ws memdb.WatchSet) (uint64, structs.CARoots, error) { return idx, results, nil } +// CARootActive returns the currently active CARoot. +func (s *Store) CARootActive(ws memdb.WatchSet) (uint64, *structs.CARoot, error) { + // Get all the roots since there should never be that many and just + // do the filtering in this method. + var result *structs.CARoot + idx, roots, err := s.CARoots(ws) + if err == nil { + for _, r := range roots { + if r.Active { + result = r + break + } + } + } + + return idx, result, err +} + // CARootSet creates or updates a CA root. // // NOTE(mitchellh): I have a feeling we'll want a CARootMultiSetCAS to diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 992fce85a..045ebea90 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -33,6 +33,12 @@ type CARoot struct { SigningCert string SigningKey string + // Active is true if this is the current active CA. This must only + // be true for exactly one CA. For any method that modifies roots in the + // state store, tests should be written to verify that multiple roots + // cannot be active. 
+ Active bool + RaftIndex } From 1928c07d0c4faaa64a24105557a25edc05e64323 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 19 Mar 2018 21:00:01 -0700 Subject: [PATCH 089/627] agent/consul: key the public key of the CSR, verify in test --- agent/connect/testing_ca.go | 3 ++- agent/consul/connect_ca_endpoint.go | 15 ++++++++++++--- agent/consul/connect_ca_endpoint_test.go | 16 ++++++++++++++-- agent/structs/connect_ca.go | 14 ++++++++++++++ 4 files changed, 42 insertions(+), 6 deletions(-) diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index b6140bb04..b7f436834 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -190,7 +190,8 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { // TestCSR returns a CSR to sign the given service. func TestCSR(t testing.T, id SpiffeID) string { template := &x509.CertificateRequest{ - URIs: []*url.URL{id.URI()}, + URIs: []*url.URL{id.URI()}, + SignatureAlgorithm: x509.ECDSAWithSHA256, } // Create the private key we'll use diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 2e26c4e2b..9e6b8a4b1 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -71,7 +71,7 @@ func (s *ConnectCA) Roots( // isn't right, we're not using enough of the CSR fields, etc. 
func (s *ConnectCA) Sign( args *structs.CASignRequest, - reply *structs.IndexedCARoots) error { + reply *structs.IssuedCert) error { // Parse the CSR csr, err := connect.ParseCSR(args.CSR) if err != nil { @@ -132,14 +132,17 @@ func (s *ConnectCA) Sign( SerialNumber: sn, Subject: pkix.Name{CommonName: serviceId.Service}, URIs: csr.URIs, - SignatureAlgorithm: x509.ECDSAWithSHA256, + Signature: csr.Signature, + SignatureAlgorithm: csr.SignatureAlgorithm, + PublicKeyAlgorithm: csr.PublicKeyAlgorithm, + PublicKey: csr.PublicKey, BasicConstraintsValid: true, KeyUsage: x509.KeyUsageDataEncipherment | x509.KeyUsageKeyAgreement, ExtKeyUsage: []x509.ExtKeyUsage{ x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, }, - NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotAfter: time.Now().Add(3 * 24 * time.Hour), NotBefore: time.Now(), AuthorityKeyId: keyId, SubjectKeyId: keyId, @@ -157,5 +160,11 @@ func (s *ConnectCA) Sign( return fmt.Errorf("error encoding private key: %s", err) } + // Set the response + *reply = structs.IssuedCert{ + SerialNumber: template.SerialNumber, + Cert: buf.String(), + } + return nil } diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index a08e31e04..d658c7ade 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -1,6 +1,7 @@ package consul import ( + "crypto/x509" "os" "testing" @@ -30,13 +31,24 @@ func TestConnectCASign(t *testing.T) { // Insert a CA state := s1.fsm.State() - assert.Nil(state.CARootSet(1, connect.TestCA(t, nil))) + ca := connect.TestCA(t, nil) + assert.Nil(state.CARootSet(1, ca)) // Generate a CSR and request signing args := &structs.CASignRequest{ Datacenter: "dc01", CSR: connect.TestCSR(t, connect.TestSpiffeIDService(t, "web")), } - var reply interface{} + var reply structs.IssuedCert assert.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply)) + + // Verify that the cert is signed by the CA + roots := 
x509.NewCertPool() + assert.True(roots.AppendCertsFromPEM([]byte(ca.RootCert))) + leaf, err := connect.ParseCert(reply.Cert) + assert.Nil(err) + _, err = leaf.Verify(x509.VerifyOptions{ + Roots: roots, + }) + assert.Nil(err) } diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 045ebea90..6723d9b98 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -1,5 +1,9 @@ package structs +import ( + "math/big" +) + // IndexedCARoots is the list of currently trusted CA Roots. type IndexedCARoots struct { // ActiveRootID is the ID of a root in Roots that is the active CA root. @@ -62,3 +66,13 @@ type CASignRequest struct { func (q *CASignRequest) RequestDatacenter() string { return q.Datacenter } + +// IssuedCert is a certificate that has been issued by a Connect CA. +type IssuedCert struct { + // SerialNumber is the unique serial number for this certificate. + SerialNumber *big.Int + + // Cert is the PEM-encoded certificate. This should not be stored in the + // state store, but is present in the sign API response. + Cert string `json:",omitempty"` +} From 712888258b659d1f08ed9e90fe7efbe69adb6f74 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 20 Mar 2018 10:36:05 -0700 Subject: [PATCH 090/627] agent/consul: tests for CA endpoints --- agent/agent_endpoint.go | 9 +++--- agent/connect_ca_endpoint_test.go | 27 +++++++++++++++++ agent/consul/connect_ca_endpoint.go | 5 ++++ agent/consul/connect_ca_endpoint_test.go | 38 ++++++++++++++++++++++++ 4 files changed, 74 insertions(+), 5 deletions(-) create mode 100644 agent/connect_ca_endpoint_test.go diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index e3e8fcd51..97c52512a 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -839,9 +839,8 @@ func (s *HTTPServer) AgentToken(resp http.ResponseWriter, req *http.Request) (in // AgentConnectCARoots returns the trusted CA roots. 
func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - if req.Method != "GET" { - return nil, MethodNotAllowedError{req.Method, []string{"GET"}} - } - - return nil, nil + // NOTE(mitchellh): for now this is identical to /v1/connect/ca/roots. + // In the future, we're going to do some agent-local caching and the + // behavior will differ. + return s.ConnectCARoots(resp, req) } diff --git a/agent/connect_ca_endpoint_test.go b/agent/connect_ca_endpoint_test.go new file mode 100644 index 000000000..ee30f57a9 --- /dev/null +++ b/agent/connect_ca_endpoint_test.go @@ -0,0 +1,27 @@ +package agent + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/assert" +) + +func TestConnectCARoots_empty(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + req, _ := http.NewRequest("GET", "/v1/connect/ca/roots", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.ConnectCARoots(resp, req) + assert.Nil(err) + + value := obj.(structs.IndexedCARoots) + assert.Equal(value.ActiveRootID, "") + assert.Len(value.Roots, 0) +} diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 9e6b8a4b1..1702c8740 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -56,6 +56,11 @@ func (s *ConnectCA) Roots( Name: r.Name, RootCert: r.RootCert, RaftIndex: r.RaftIndex, + Active: r.Active, + } + + if r.Active { + reply.ActiveRootID = r.ID } } diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index d658c7ade..375d75115 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -12,6 +12,44 @@ import ( "github.com/stretchr/testify/assert" ) +// Test listing root CAs. 
+func TestConnectCARoots(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Insert some CAs + state := s1.fsm.State() + ca1 := connect.TestCA(t, nil) + ca2 := connect.TestCA(t, nil) + ca2.Active = false + assert.Nil(state.CARootSet(1, ca1)) + assert.Nil(state.CARootSet(2, ca2)) + + // Request + args := &structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var reply structs.IndexedCARoots + assert.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply)) + + // Verify + assert.Equal(ca1.ID, reply.ActiveRootID) + assert.Len(reply.Roots, 2) + for _, r := range reply.Roots { + // These must never be set, for security + assert.Equal("", r.SigningCert) + assert.Equal("", r.SigningKey) + } +} + // Test CA signing // // NOTE(mitchellh): Just testing the happy path and not all the other validation From 80a058a573ee992123dd8292d2b0ce97023dfcbf Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 10:10:53 -0700 Subject: [PATCH 091/627] agent/consul: CAS operations for setting the CA root --- agent/agent_test.go | 9 +++ agent/connect_ca_endpoint_test.go | 21 ++++++ agent/consul/connect_ca_endpoint.go | 3 + agent/consul/connect_ca_endpoint_test.go | 9 ++- agent/consul/fsm/commands_oss.go | 26 +++++++ agent/consul/state/connect_ca.go | 88 +++++++++++++----------- agent/consul/testing.go | 26 +++++++ agent/consul/testing_endpoint.go | 43 ++++++++++++ agent/consul/testing_endpoint_test.go | 42 +++++++++++ agent/consul/testing_test.go | 13 ++++ agent/structs/connect_ca.go | 22 ++++++ agent/structs/structs.go | 1 + 12 files changed, 259 insertions(+), 44 deletions(-) create mode 100644 agent/consul/testing.go create mode 100644 agent/consul/testing_endpoint.go create mode 100644 agent/consul/testing_endpoint_test.go create mode 100644 agent/consul/testing_test.go diff 
--git a/agent/agent_test.go b/agent/agent_test.go index 58ada5561..df1593bd9 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -16,6 +16,7 @@ import ( "time" "github.com/hashicorp/consul/agent/checks" + "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/testutil" @@ -25,6 +26,14 @@ import ( "github.com/pascaldekloe/goe/verify" ) +// TestMain is the main entrypoint for `go test`. +func TestMain(m *testing.M) { + // Enable the test RPC endpoints + consul.TestEndpoint() + + os.Exit(m.Run()) +} + func externalIP() (string, error) { addrs, err := net.InterfaceAddrs() if err != nil { diff --git a/agent/connect_ca_endpoint_test.go b/agent/connect_ca_endpoint_test.go index ee30f57a9..cec8382c0 100644 --- a/agent/connect_ca_endpoint_test.go +++ b/agent/connect_ca_endpoint_test.go @@ -5,6 +5,7 @@ import ( "net/http/httptest" "testing" + "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" "github.com/stretchr/testify/assert" ) @@ -25,3 +26,23 @@ func TestConnectCARoots_empty(t *testing.T) { assert.Equal(value.ActiveRootID, "") assert.Len(value.Roots, 0) } + +func TestConnectCARoots_list(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + state := consul.TestServerState(a.Agent.delegate.(*consul.Server)) + t.Log(state.CARoots(nil)) + + req, _ := http.NewRequest("GET", "/v1/connect/ca/roots", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.ConnectCARoots(resp, req) + assert.Nil(err) + + value := obj.(structs.IndexedCARoots) + assert.Equal(value.ActiveRootID, "") + assert.Len(value.Roots, 0) +} diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 1702c8740..d6ddaef58 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -99,6 +99,9 @@ func (s *ConnectCA) Sign( if err != nil { return 
err } + if root == nil { + return fmt.Errorf("no active CA found") + } // Determine the signing certificate. It is the set signing cert // unless that is empty, in which case it is identically to the public diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index 375d75115..8a3f1b4f2 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -30,8 +30,9 @@ func TestConnectCARoots(t *testing.T) { ca1 := connect.TestCA(t, nil) ca2 := connect.TestCA(t, nil) ca2.Active = false - assert.Nil(state.CARootSet(1, ca1)) - assert.Nil(state.CARootSet(2, ca2)) + ok, err := state.CARootSetCAS(1, 0, []*structs.CARoot{ca1, ca2}) + assert.True(ok) + assert.Nil(err) // Request args := &structs.DCSpecificRequest{ @@ -70,7 +71,9 @@ func TestConnectCASign(t *testing.T) { // Insert a CA state := s1.fsm.State() ca := connect.TestCA(t, nil) - assert.Nil(state.CARootSet(1, ca)) + ok, err := state.CARootSetCAS(1, 0, []*structs.CARoot{ca}) + assert.True(ok) + assert.Nil(err) // Generate a CSR and request signing args := &structs.CASignRequest{ diff --git a/agent/consul/fsm/commands_oss.go b/agent/consul/fsm/commands_oss.go index 51f127899..2d2627748 100644 --- a/agent/consul/fsm/commands_oss.go +++ b/agent/consul/fsm/commands_oss.go @@ -21,6 +21,7 @@ func init() { registerCommand(structs.TxnRequestType, (*FSM).applyTxn) registerCommand(structs.AutopilotRequestType, (*FSM).applyAutopilotUpdate) registerCommand(structs.IntentionRequestType, (*FSM).applyIntentionOperation) + registerCommand(structs.ConnectCARequestType, (*FSM).applyConnectCAOperation) } func (c *FSM) applyRegister(buf []byte, index uint64) interface{} { @@ -269,3 +270,28 @@ func (c *FSM) applyIntentionOperation(buf []byte, index uint64) interface{} { return fmt.Errorf("Invalid Intention operation '%s'", req.Op) } } + +// applyConnectCAOperation applies the given CA operation to the state store. 
+func (c *FSM) applyConnectCAOperation(buf []byte, index uint64) interface{} { + var req structs.CARequest + if err := structs.Decode(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode request: %v", err)) + } + + defer metrics.MeasureSinceWithLabels([]string{"consul", "fsm", "ca"}, time.Now(), + []metrics.Label{{Name: "op", Value: string(req.Op)}}) + defer metrics.MeasureSinceWithLabels([]string{"fsm", "ca"}, time.Now(), + []metrics.Label{{Name: "op", Value: string(req.Op)}}) + switch req.Op { + case structs.CAOpSet: + act, err := c.state.CARootSetCAS(index, req.Index, req.Roots) + if err != nil { + return err + } + + return act + default: + c.logger.Printf("[WARN] consul.fsm: Invalid CA operation '%s'", req.Op) + return fmt.Errorf("Invalid CA operation '%s'", req.Op) + } +} diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index 9c19b65c2..3b66a07c6 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -73,52 +73,58 @@ func (s *Store) CARootActive(ws memdb.WatchSet) (uint64, *structs.CARoot, error) return idx, result, err } -// CARootSet creates or updates a CA root. +// CARootSetCAS sets the current CA root state using a check-and-set operation. +// On success, this will replace the previous set of CARoots completely with +// the given set of roots. // -// NOTE(mitchellh): I have a feeling we'll want a CARootMultiSetCAS to -// perform a check-and-set on the entire set of CARoots versus an individual -// set, since we'll want to modify them atomically during events such as -// rotation. -func (s *Store) CARootSet(idx uint64, v *structs.CARoot) error { +// The first boolean result returns whether the transaction succeeded or not. 
+func (s *Store) CARootSetCAS(idx, cidx uint64, rs []*structs.CARoot) (bool, error) { tx := s.db.Txn(true) defer tx.Abort() - if err := s.caRootSetTxn(tx, idx, v); err != nil { - return err + // Get the current max index + if midx := maxIndexTxn(tx, caRootTableName); midx != cidx { + return false, nil + } + + // Go through and find any existing matching CAs so we can preserve and + // update their Create/ModifyIndex values. + for _, r := range rs { + if r.ID == "" { + return false, ErrMissingCARootID + } + + existing, err := tx.First(caRootTableName, "id", r.ID) + if err != nil { + return false, fmt.Errorf("failed CA root lookup: %s", err) + } + + if existing != nil { + r.CreateIndex = existing.(*structs.CARoot).CreateIndex + } else { + r.CreateIndex = idx + } + r.ModifyIndex = idx + } + + // Delete all + _, err := tx.DeleteAll(caRootTableName, "id") + if err != nil { + return false, err + } + + // Insert all + for _, r := range rs { + if err := tx.Insert(caRootTableName, r); err != nil { + return false, err + } + } + + // Update the index + if err := tx.Insert("index", &IndexEntry{caRootTableName, idx}); err != nil { + return false, fmt.Errorf("failed updating index: %s", err) } tx.Commit() - return nil -} - -// caRootSetTxn is the inner method used to insert or update a CA root with -// the proper indexes into the state store. 
-func (s *Store) caRootSetTxn(tx *memdb.Txn, idx uint64, v *structs.CARoot) error { - // ID is required - if v.ID == "" { - return ErrMissingCARootID - } - - // Check for an existing value - existing, err := tx.First(caRootTableName, "id", v.ID) - if err != nil { - return fmt.Errorf("failed CA root lookup: %s", err) - } - if existing != nil { - old := existing.(*structs.CARoot) - v.CreateIndex = old.CreateIndex - } else { - v.CreateIndex = idx - } - v.ModifyIndex = idx - - // Insert - if err := tx.Insert(caRootTableName, v); err != nil { - return err - } - if err := tx.Insert("index", &IndexEntry{caRootTableName, idx}); err != nil { - return fmt.Errorf("failed updating index: %s", err) - } - - return nil + return true, nil } diff --git a/agent/consul/testing.go b/agent/consul/testing.go new file mode 100644 index 000000000..afae7c1a1 --- /dev/null +++ b/agent/consul/testing.go @@ -0,0 +1,26 @@ +package consul + +import ( + "sync" +) + +// testEndpointsOnce ensures that endpoints for testing are registered once. +var testEndpointsOnce sync.Once + +// TestEndpoints registers RPC endpoints specifically for testing. These +// endpoints enable some internal data access that we normally disallow, but +// are useful for modifying server state. +// +// To use this, modify TestMain to call this function prior to running tests. +// +// These should NEVER be registered outside of tests. +// +// NOTE(mitchellh): This was created so that the downstream agent tests can +// modify internal Connect CA state. When the CA plugin work comes in with +// a more complete CA API, this may no longer be necessary and we can remove it. +// That would be ideal. 
+func TestEndpoint() { + testEndpointsOnce.Do(func() { + registerEndpoint(func(s *Server) interface{} { return &Test{s} }) + }) +} diff --git a/agent/consul/testing_endpoint.go b/agent/consul/testing_endpoint.go new file mode 100644 index 000000000..e47e0e737 --- /dev/null +++ b/agent/consul/testing_endpoint.go @@ -0,0 +1,43 @@ +package consul + +import ( + "github.com/hashicorp/consul/agent/structs" +) + +// Test is an RPC endpoint that is only available during `go test` when +// `TestEndpoint` is called. This is not and must not ever be available +// during a real running Consul agent, since it this endpoint bypasses +// critical ACL checks. +type Test struct { + // srv is a pointer back to the server. + srv *Server +} + +// ConnectCASetRoots sets the current CA roots state. +func (s *Test) ConnectCASetRoots( + args []*structs.CARoot, + reply *interface{}) error { + + // Get the highest index + state := s.srv.fsm.State() + idx, _, err := state.CARoots(nil) + if err != nil { + return err + } + + // Commit + resp, err := s.srv.raftApply(structs.ConnectCARequestType, &structs.CARequest{ + Op: structs.CAOpSet, + Index: idx, + Roots: args, + }) + if err != nil { + s.srv.logger.Printf("[ERR] consul.test: Apply failed %v", err) + return err + } + if respErr, ok := resp.(error); ok { + return respErr + } + + return nil +} diff --git a/agent/consul/testing_endpoint_test.go b/agent/consul/testing_endpoint_test.go new file mode 100644 index 000000000..e20213695 --- /dev/null +++ b/agent/consul/testing_endpoint_test.go @@ -0,0 +1,42 @@ +package consul + +import ( + "os" + "testing" + + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/testrpc" + "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/stretchr/testify/assert" +) + +// Test setting the CAs +func TestTestConnectCASetRoots(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer 
s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Prepare + ca1 := connect.TestCA(t, nil) + ca2 := connect.TestCA(t, nil) + ca2.Active = false + + // Request + args := []*structs.CARoot{ca1, ca2} + var reply interface{} + assert.Nil(msgpackrpc.CallWithCodec(codec, "Test.ConnectCASetRoots", args, &reply)) + + // Verify they're there + state := s1.fsm.State() + _, actual, err := state.CARoots(nil) + assert.Nil(err) + assert.Len(actual, 2) +} diff --git a/agent/consul/testing_test.go b/agent/consul/testing_test.go new file mode 100644 index 000000000..98e8dd743 --- /dev/null +++ b/agent/consul/testing_test.go @@ -0,0 +1,13 @@ +package consul + +import ( + "os" + "testing" +) + +func TestMain(m *testing.M) { + // Register the test RPC endpoint + TestEndpoint() + + os.Exit(m.Run()) +} diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 6723d9b98..0437b27cf 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -76,3 +76,25 @@ type IssuedCert struct { // state store, but is present in the sign API response. Cert string `json:",omitempty"` } + +// CAOp is the operation for a request related to intentions. +type CAOp string + +const ( + CAOpSet CAOp = "set" +) + +// CARequest is used to modify connect CA data. This is used by the +// FSM (agent/consul/fsm) to apply changes. +type CARequest struct { + // Op is the type of operation being requested. This determines what + // other fields are required. + Op CAOp + + // Index is used by CAOpSet for a CAS operation. + Index uint64 + + // Roots is a list of roots. This is used for CAOpSet. One root must + // always be active. + Roots []*CARoot +} diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 95c0ba069..a4e942230 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -41,6 +41,7 @@ const ( AreaRequestType = 10 ACLBootstrapRequestType = 11 // FSM snapshots only. 
IntentionRequestType = 12 + ConnectCARequestType = 13 ) const ( From 748a0bb82440919a6d122831f74c5acc048ac5f3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 10:20:35 -0700 Subject: [PATCH 092/627] agent: CA root HTTP endpoints --- agent/agent_endpoint_test.go | 50 +++++++++++++++++++++++++++++++ agent/connect_ca_endpoint_test.go | 22 ++++++++++---- 2 files changed, 67 insertions(+), 5 deletions(-) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 566d397cf..4c2f9f1d6 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/config" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/logger" @@ -2024,3 +2025,52 @@ func TestAgent_Token(t *testing.T) { } }) } + +func TestAgentConnectCARoots_empty(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentConnectCARoots(resp, req) + assert.Nil(err) + + value := obj.(structs.IndexedCARoots) + assert.Equal(value.ActiveRootID, "") + assert.Len(value.Roots, 0) +} + +func TestAgentConnectCARoots_list(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Set some CAs + var reply interface{} + ca1 := connect.TestCA(t, nil) + ca1.Active = false + ca2 := connect.TestCA(t, nil) + assert.Nil(a.RPC("Test.ConnectCASetRoots", + []*structs.CARoot{ca1, ca2}, &reply)) + + // List + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentConnectCARoots(resp, req) + assert.Nil(err) + + value := 
obj.(structs.IndexedCARoots) + assert.Equal(value.ActiveRootID, ca2.ID) + assert.Len(value.Roots, 2) + + // We should never have the secret information + for _, r := range value.Roots { + assert.Equal("", r.SigningCert) + assert.Equal("", r.SigningKey) + } +} diff --git a/agent/connect_ca_endpoint_test.go b/agent/connect_ca_endpoint_test.go index cec8382c0..bcf209ffe 100644 --- a/agent/connect_ca_endpoint_test.go +++ b/agent/connect_ca_endpoint_test.go @@ -5,7 +5,7 @@ import ( "net/http/httptest" "testing" - "github.com/hashicorp/consul/agent/consul" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" "github.com/stretchr/testify/assert" ) @@ -34,15 +34,27 @@ func TestConnectCARoots_list(t *testing.T) { a := NewTestAgent(t.Name(), "") defer a.Shutdown() - state := consul.TestServerState(a.Agent.delegate.(*consul.Server)) - t.Log(state.CARoots(nil)) + // Set some CAs + var reply interface{} + ca1 := connect.TestCA(t, nil) + ca1.Active = false + ca2 := connect.TestCA(t, nil) + assert.Nil(a.RPC("Test.ConnectCASetRoots", + []*structs.CARoot{ca1, ca2}, &reply)) + // List req, _ := http.NewRequest("GET", "/v1/connect/ca/roots", nil) resp := httptest.NewRecorder() obj, err := a.srv.ConnectCARoots(resp, req) assert.Nil(err) value := obj.(structs.IndexedCARoots) - assert.Equal(value.ActiveRootID, "") - assert.Len(value.Roots, 0) + assert.Equal(value.ActiveRootID, ca2.ID) + assert.Len(value.Roots, 2) + + // We should never have the secret information + for _, r := range value.Roots { + assert.Equal("", r.SigningCert) + assert.Equal("", r.SigningKey) + } } From 58b6f476e877cafd62ef8f56955fd8ad7280d4b1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 10:55:39 -0700 Subject: [PATCH 093/627] agent: /v1/connect/ca/leaf/:service_id --- agent/agent_endpoint.go | 41 +++++++++++++++++++++ agent/agent_endpoint_test.go | 56 +++++++++++++++++++++++++++++ agent/connect/testing_ca.go | 40 +++++++++++++++------ 
agent/consul/connect_ca_endpoint.go | 2 +- agent/http_oss.go | 1 + agent/structs/connect_ca.go | 19 ++++++++-- 6 files changed, 145 insertions(+), 14 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 97c52512a..bc684f115 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/config" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/ipaddr" @@ -21,6 +22,9 @@ import ( "github.com/hashicorp/serf/serf" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + + // NOTE(mitcehllh): This is temporary while certs are stubbed out. + "github.com/mitchellh/go-testing-interface" ) type Self struct { @@ -844,3 +848,40 @@ func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Req // behavior will differ. return s.ConnectCARoots(resp, req) } + +// AgentConnectCALeafCert returns the certificate bundle for a service +// instance. This supports blocking queries to update the returned bundle. +func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Test the method + if req.Method != "GET" { + return nil, MethodNotAllowedError{req.Method, []string{"GET"}} + } + + // Get the service ID. Note that this is the ID of a service instance. + id := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/") + + // Retrieve the service specified + service := s.agent.State.Service(id) + if service == nil { + return nil, fmt.Errorf("unknown service ID: %s", id) + } + + // Create a CSR. + // TODO(mitchellh): This is obviously not production ready! 
+ csr, pk := connect.TestCSR(&testing.RuntimeT{}, &connect.SpiffeIDService{ + Host: "1234.consul", + Namespace: "default", + Datacenter: s.agent.config.Datacenter, + Service: service.Service, + }) + + // Request signing + var reply structs.IssuedCert + args := structs.CASignRequest{CSR: csr} + if err := s.agent.RPC("ConnectCA.Sign", &args, &reply); err != nil { + return nil, err + } + reply.PrivateKeyPEM = pk + + return &reply, nil +} diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 4c2f9f1d6..15267107a 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2,6 +2,7 @@ package agent import ( "bytes" + "crypto/x509" "fmt" "io" "io/ioutil" @@ -2074,3 +2075,58 @@ func TestAgentConnectCARoots_list(t *testing.T) { assert.Equal("", r.SigningKey) } } + +func TestAgentConnectCALeafCert_good(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Set CAs + var reply interface{} + ca1 := connect.TestCA(t, nil) + assert.Nil(a.RPC("Test.ConnectCASetRoots", []*structs.CARoot{ca1}, &reply)) + + { + // Register a local service + args := &structs.ServiceDefinition{ + ID: "foo", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + } + req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + assert.Nil(err) + if !assert.Equal(200, resp.Code) { + t.Log("Body: ", resp.Body.String()) + } + } + + // List + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/foo", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentConnectCALeafCert(resp, req) + assert.Nil(err) + + // Get the issued cert + issued, ok := obj.(*structs.IssuedCert) + assert.True(ok) + + // Verify that the cert is signed by the CA + roots := x509.NewCertPool() + assert.True(roots.AppendCertsFromPEM([]byte(ca1.RootCert))) 
+ leaf, err := connect.ParseCert(issued.CertPEM) + assert.Nil(err) + _, err = leaf.Verify(x509.VerifyOptions{ + Roots: roots, + }) + assert.Nil(err) + + // TODO(mitchellh): verify the private key matches the cert +} diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index b7f436834..f79849016 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -187,29 +187,47 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { return buf.String() } -// TestCSR returns a CSR to sign the given service. -func TestCSR(t testing.T, id SpiffeID) string { +// TestCSR returns a CSR to sign the given service along with the PEM-encoded +// private key for this certificate. +func TestCSR(t testing.T, id SpiffeID) (string, string) { template := &x509.CertificateRequest{ URIs: []*url.URL{id.URI()}, SignatureAlgorithm: x509.ECDSAWithSHA256, } + // Result buffers + var csrBuf, pkBuf bytes.Buffer + // Create the private key we'll use signer := testPrivateKey(t, nil) - // Create the CSR itself - bs, err := x509.CreateCertificateRequest(rand.Reader, template, signer) - if err != nil { - t.Fatalf("error creating CSR: %s", err) + { + // Create the private key PEM + bs, err := x509.MarshalECPrivateKey(signer.(*ecdsa.PrivateKey)) + if err != nil { + t.Fatalf("error marshalling PK: %s", err) + } + + err = pem.Encode(&pkBuf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) + if err != nil { + t.Fatalf("error encoding PK: %s", err) + } } - var buf bytes.Buffer - err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: bs}) - if err != nil { - t.Fatalf("error encoding CSR: %s", err) + { + // Create the CSR itself + bs, err := x509.CreateCertificateRequest(rand.Reader, template, signer) + if err != nil { + t.Fatalf("error creating CSR: %s", err) + } + + err = pem.Encode(&csrBuf, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: bs}) + if err != nil { + t.Fatalf("error encoding CSR: %s", err) + } } - return buf.String() + return 
csrBuf.String(), pkBuf.String() } // testKeyID returns a KeyID from the given public key. The "raw" must be diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index d6ddaef58..1f732490b 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -171,7 +171,7 @@ func (s *ConnectCA) Sign( // Set the response *reply = structs.IssuedCert{ SerialNumber: template.SerialNumber, - Cert: buf.String(), + CertPEM: buf.String(), } return nil diff --git a/agent/http_oss.go b/agent/http_oss.go index 3cb18b2e1..d2e86622f 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -30,6 +30,7 @@ func init() { registerEndpoint("/v1/agent/check/fail/", []string{"PUT"}, (*HTTPServer).AgentCheckFail) registerEndpoint("/v1/agent/check/update/", []string{"PUT"}, (*HTTPServer).AgentCheckUpdate) registerEndpoint("/v1/agent/connect/ca/roots", []string{"GET"}, (*HTTPServer).AgentConnectCARoots) + registerEndpoint("/v1/agent/connect/ca/leaf/", []string{"GET"}, (*HTTPServer).AgentConnectCALeafCert) registerEndpoint("/v1/agent/service/register", []string{"PUT"}, (*HTTPServer).AgentRegisterService) registerEndpoint("/v1/agent/service/deregister/", []string{"PUT"}, (*HTTPServer).AgentDeregisterService) registerEndpoint("/v1/agent/service/maintenance/", []string{"PUT"}, (*HTTPServer).AgentServiceMaintenance) diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 0437b27cf..6dc2dbf30 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -2,6 +2,7 @@ package structs import ( "math/big" + "time" ) // IndexedCARoots is the list of currently trusted CA Roots. @@ -72,9 +73,23 @@ type IssuedCert struct { // SerialNumber is the unique serial number for this certificate. SerialNumber *big.Int - // Cert is the PEM-encoded certificate. This should not be stored in the + // CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private + // key for that cert, respectively. 
This should not be stored in the // state store, but is present in the sign API response. - Cert string `json:",omitempty"` + CertPEM string `json:",omitempty"` + PrivateKeyPEM string + + // Service is the name of the service for which the cert was issued. + // ServiceURI is the cert URI value. + Service string + ServiceURI string + + // ValidAfter and ValidBefore are the validity periods for the + // certificate. + ValidAfter time.Time + ValidBefore time.Time + + RaftIndex } // CAOp is the operation for a request related to intentions. From a8510f8224d9998312cfa540e31d7a9974518f8a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 11:00:46 -0700 Subject: [PATCH 094/627] agent/consul: set more fields on the issued cert --- agent/consul/connect_ca_endpoint.go | 4 ++++ agent/consul/connect_ca_endpoint_test.go | 10 ++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 1f732490b..60e631cef 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -172,6 +172,10 @@ func (s *ConnectCA) Sign( *reply = structs.IssuedCert{ SerialNumber: template.SerialNumber, CertPEM: buf.String(), + Service: serviceId.Service, + ServiceURI: template.URIs[0].String(), + ValidAfter: template.NotBefore, + ValidBefore: template.NotAfter, } return nil diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index 8a3f1b4f2..f2404eb4c 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -76,9 +76,11 @@ func TestConnectCASign(t *testing.T) { assert.Nil(err) // Generate a CSR and request signing + spiffeId := connect.TestSpiffeIDService(t, "web") + csr, _ := connect.TestCSR(t, spiffeId) args := &structs.CASignRequest{ Datacenter: "dc01", - CSR: connect.TestCSR(t, connect.TestSpiffeIDService(t, "web")), + CSR: csr, } var reply structs.IssuedCert 
assert.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply)) @@ -86,10 +88,14 @@ func TestConnectCASign(t *testing.T) { // Verify that the cert is signed by the CA roots := x509.NewCertPool() assert.True(roots.AppendCertsFromPEM([]byte(ca.RootCert))) - leaf, err := connect.ParseCert(reply.Cert) + leaf, err := connect.ParseCert(reply.CertPEM) assert.Nil(err) _, err = leaf.Verify(x509.VerifyOptions{ Roots: roots, }) assert.Nil(err) + + // Verify other fields + assert.Equal("web", reply.Service) + assert.Equal(spiffeId.URI().String(), reply.ServiceURI) } From 17d6b437d2401ce63c59280c54271551825e0ebc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 11:20:17 -0700 Subject: [PATCH 095/627] agent/consul/fsm,state: tests for CA root related changes --- agent/consul/fsm/commands_oss_test.go | 33 +++++ agent/consul/state/connect_ca_test.go | 204 ++++++++++++++++++++++++++ 2 files changed, 237 insertions(+) create mode 100644 agent/consul/state/connect_ca_test.go diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index acf67c5fb..81852a9c4 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" @@ -1217,3 +1218,35 @@ func TestFSM_Intention_CRUD(t *testing.T) { assert.Nil(actual) } } + +func TestFSM_CARoots(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + fsm, err := New(nil, os.Stderr) + assert.Nil(err) + + // Roots + ca1 := connect.TestCA(t, nil) + ca2 := connect.TestCA(t, nil) + ca2.Active = false + + // Create a new request. 
+ req := structs.CARequest{ + Op: structs.CAOpSet, + Roots: []*structs.CARoot{ca1, ca2}, + } + + { + buf, err := structs.Encode(structs.ConnectCARequestType, req) + assert.Nil(err) + assert.True(fsm.Apply(makeLog(buf)).(bool)) + } + + // Verify it's in the state store. + { + _, roots, err := fsm.state.CARoots(nil) + assert.Nil(err) + assert.Len(roots, 2) + } +} diff --git a/agent/consul/state/connect_ca_test.go b/agent/consul/state/connect_ca_test.go new file mode 100644 index 000000000..14b5caf54 --- /dev/null +++ b/agent/consul/state/connect_ca_test.go @@ -0,0 +1,204 @@ +package state + +import ( + "testing" + + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/go-memdb" + "github.com/stretchr/testify/assert" +) + +func TestStore_CARootSetList(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + // Call list to populate the watch set + ws := memdb.NewWatchSet() + _, _, err := s.CARoots(ws) + assert.Nil(err) + + // Build a valid value + ca1 := connect.TestCA(t, nil) + + // Set + ok, err := s.CARootSetCAS(1, 0, []*structs.CARoot{ca1}) + assert.Nil(err) + assert.True(ok) + + // Make sure the index got updated. + assert.Equal(s.maxIndex(caRootTableName), uint64(1)) + assert.True(watchFired(ws), "watch fired") + + // Read it back out and verify it. 
+ expected := *ca1 + expected.RaftIndex = structs.RaftIndex{ + CreateIndex: 1, + ModifyIndex: 1, + } + + ws = memdb.NewWatchSet() + _, roots, err := s.CARoots(ws) + assert.Nil(err) + assert.Len(roots, 1) + actual := roots[0] + assert.Equal(&expected, actual) +} + +func TestStore_CARootSet_emptyID(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + // Call list to populate the watch set + ws := memdb.NewWatchSet() + _, _, err := s.CARoots(ws) + assert.Nil(err) + + // Build a valid value + ca1 := connect.TestCA(t, nil) + ca1.ID = "" + + // Set + ok, err := s.CARootSetCAS(1, 0, []*structs.CARoot{ca1}) + assert.NotNil(err) + assert.Contains(err.Error(), ErrMissingCARootID.Error()) + assert.False(ok) + + // Make sure the index got updated. + assert.Equal(s.maxIndex(caRootTableName), uint64(0)) + assert.False(watchFired(ws), "watch fired") + + // Read it back out and verify it. + ws = memdb.NewWatchSet() + _, roots, err := s.CARoots(ws) + assert.Nil(err) + assert.Len(roots, 0) +} + +func TestStore_CARootActive_valid(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + // Build a valid value + ca1 := connect.TestCA(t, nil) + ca1.Active = false + ca2 := connect.TestCA(t, nil) + ca3 := connect.TestCA(t, nil) + ca3.Active = false + + // Set + ok, err := s.CARootSetCAS(1, 0, []*structs.CARoot{ca1, ca2, ca3}) + assert.Nil(err) + assert.True(ok) + + // Query + ws := memdb.NewWatchSet() + idx, res, err := s.CARootActive(ws) + assert.Equal(idx, uint64(1)) + assert.Nil(err) + assert.NotNil(res) + assert.Equal(ca2.ID, res.ID) +} + +// Test that querying the active CA returns the correct value. +func TestStore_CARootActive_none(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + // Querying with no results returns nil. 
+ ws := memdb.NewWatchSet() + idx, res, err := s.CARootActive(ws) + assert.Equal(idx, uint64(0)) + assert.Nil(res) + assert.Nil(err) +} + +/* +func TestStore_Intention_Snapshot_Restore(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + // Create some intentions. + ixns := structs.Intentions{ + &structs.Intention{ + DestinationName: "foo", + }, + &structs.Intention{ + DestinationName: "bar", + }, + &structs.Intention{ + DestinationName: "baz", + }, + } + + // Force the sort order of the UUIDs before we create them so the + // order is deterministic. + id := testUUID() + ixns[0].ID = "a" + id[1:] + ixns[1].ID = "b" + id[1:] + ixns[2].ID = "c" + id[1:] + + // Now create + for i, ixn := range ixns { + assert.Nil(s.IntentionSet(uint64(4+i), ixn)) + } + + // Snapshot the queries. + snap := s.Snapshot() + defer snap.Close() + + // Alter the real state store. + assert.Nil(s.IntentionDelete(7, ixns[0].ID)) + + // Verify the snapshot. + assert.Equal(snap.LastIndex(), uint64(6)) + expected := structs.Intentions{ + &structs.Intention{ + ID: ixns[0].ID, + DestinationName: "foo", + Meta: map[string]string{}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 4, + ModifyIndex: 4, + }, + }, + &structs.Intention{ + ID: ixns[1].ID, + DestinationName: "bar", + Meta: map[string]string{}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 5, + ModifyIndex: 5, + }, + }, + &structs.Intention{ + ID: ixns[2].ID, + DestinationName: "baz", + Meta: map[string]string{}, + RaftIndex: structs.RaftIndex{ + CreateIndex: 6, + ModifyIndex: 6, + }, + }, + } + dump, err := snap.Intentions() + assert.Nil(err) + assert.Equal(expected, dump) + + // Restore the values into a new state store. + func() { + s := testStateStore(t) + restore := s.Restore() + for _, ixn := range dump { + assert.Nil(restore.Intention(ixn)) + } + restore.Commit() + + // Read the restored values back out and verify that they match. 
+ idx, actual, err := s.Intentions(nil) + assert.Nil(err) + assert.Equal(idx, uint64(6)) + assert.Equal(expected, actual) + }() +} +*/ From 2dfca5dbc281b8df0574e10b585852b218dd00b0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 11:33:19 -0700 Subject: [PATCH 096/627] agent/consul/fsm,state: snapshot/restore for CA roots --- agent/consul/fsm/snapshot_oss.go | 33 +++++++++++ agent/consul/fsm/snapshot_oss_test.go | 18 ++++++ agent/consul/state/connect_ca.go | 28 +++++++++ agent/consul/state/connect_ca_test.go | 82 ++++++++------------------- 4 files changed, 104 insertions(+), 57 deletions(-) diff --git a/agent/consul/fsm/snapshot_oss.go b/agent/consul/fsm/snapshot_oss.go index 1dde3ab0b..b042c7831 100644 --- a/agent/consul/fsm/snapshot_oss.go +++ b/agent/consul/fsm/snapshot_oss.go @@ -21,6 +21,7 @@ func init() { registerRestorer(structs.PreparedQueryRequestType, restorePreparedQuery) registerRestorer(structs.AutopilotRequestType, restoreAutopilot) registerRestorer(structs.IntentionRequestType, restoreIntention) + registerRestorer(structs.ConnectCARequestType, restoreConnectCA) } func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) error { @@ -48,6 +49,9 @@ func persistOSS(s *snapshot, sink raft.SnapshotSink, encoder *codec.Encoder) err if err := s.persistIntentions(sink, encoder); err != nil { return err } + if err := s.persistConnectCA(sink, encoder); err != nil { + return err + } return nil } @@ -262,6 +266,24 @@ func (s *snapshot) persistAutopilot(sink raft.SnapshotSink, return nil } +func (s *snapshot) persistConnectCA(sink raft.SnapshotSink, + encoder *codec.Encoder) error { + roots, err := s.state.CARoots() + if err != nil { + return err + } + + for _, r := range roots { + if _, err := sink.Write([]byte{byte(structs.ConnectCARequestType)}); err != nil { + return err + } + if err := encoder.Encode(r); err != nil { + return err + } + } + return nil +} + func (s *snapshot) persistIntentions(sink raft.SnapshotSink, 
encoder *codec.Encoder) error { ixns, err := s.state.Intentions() @@ -397,3 +419,14 @@ func restoreIntention(header *snapshotHeader, restore *state.Restore, decoder *c } return nil } + +func restoreConnectCA(header *snapshotHeader, restore *state.Restore, decoder *codec.Decoder) error { + var req structs.CARoot + if err := decoder.Decode(&req); err != nil { + return err + } + if err := restore.CARoot(&req); err != nil { + return err + } + return nil +} diff --git a/agent/consul/fsm/snapshot_oss_test.go b/agent/consul/fsm/snapshot_oss_test.go index 63f1ab1d3..971e6bbf5 100644 --- a/agent/consul/fsm/snapshot_oss_test.go +++ b/agent/consul/fsm/snapshot_oss_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" @@ -110,6 +111,18 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { } assert.Nil(fsm.state.IntentionSet(14, ixn)) + // CA Roots + roots := []*structs.CARoot{ + connect.TestCA(t, nil), + connect.TestCA(t, nil), + } + for _, r := range roots[1:] { + r.Active = false + } + ok, err := fsm.state.CARootSetCAS(15, 0, roots) + assert.Nil(err) + assert.True(ok) + // Snapshot snap, err := fsm.Snapshot() if err != nil { @@ -278,6 +291,11 @@ func TestFSM_SnapshotRestore_OSS(t *testing.T) { assert.Len(ixns, 1) assert.Equal(ixn, ixns[0]) + // Verify CA roots are restored. + _, roots, err = fsm2.state.CARoots(nil) + assert.Nil(err) + assert.Len(roots, 2) + // Snapshot snap, err = fsm2.Snapshot() if err != nil { diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index 3b66a07c6..05313ce2e 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -33,6 +33,34 @@ func init() { registerSchema(caRootTableSchema) } +// CARoots is used to pull all the CA roots for the snapshot. 
+func (s *Snapshot) CARoots() (structs.CARoots, error) { + ixns, err := s.tx.Get(caRootTableName, "id") + if err != nil { + return nil, err + } + + var ret structs.CARoots + for wrapped := ixns.Next(); wrapped != nil; wrapped = ixns.Next() { + ret = append(ret, wrapped.(*structs.CARoot)) + } + + return ret, nil +} + +// CARoots is used when restoring from a snapshot. +func (s *Restore) CARoot(r *structs.CARoot) error { + // Insert + if err := s.tx.Insert(caRootTableName, r); err != nil { + return fmt.Errorf("failed restoring CA root: %s", err) + } + if err := indexUpdateMaxTxn(s.tx, r.ModifyIndex, caRootTableName); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + + return nil +} + // CARoots returns the list of all CA roots. func (s *Store) CARoots(ws memdb.WatchSet) (uint64, structs.CARoots, error) { tx := s.db.Txn(false) diff --git a/agent/consul/state/connect_ca_test.go b/agent/consul/state/connect_ca_test.go index 14b5caf54..bbbac0f0f 100644 --- a/agent/consul/state/connect_ca_test.go +++ b/agent/consul/state/connect_ca_test.go @@ -113,92 +113,60 @@ func TestStore_CARootActive_none(t *testing.T) { assert.Nil(err) } -/* -func TestStore_Intention_Snapshot_Restore(t *testing.T) { +func TestStore_CARoot_Snapshot_Restore(t *testing.T) { assert := assert.New(t) s := testStateStore(t) // Create some intentions. - ixns := structs.Intentions{ - &structs.Intention{ - DestinationName: "foo", - }, - &structs.Intention{ - DestinationName: "bar", - }, - &structs.Intention{ - DestinationName: "baz", - }, + roots := structs.CARoots{ + connect.TestCA(t, nil), + connect.TestCA(t, nil), + connect.TestCA(t, nil), + } + for _, r := range roots[1:] { + r.Active = false } // Force the sort order of the UUIDs before we create them so the // order is deterministic. 
id := testUUID() - ixns[0].ID = "a" + id[1:] - ixns[1].ID = "b" + id[1:] - ixns[2].ID = "c" + id[1:] + roots[0].ID = "a" + id[1:] + roots[1].ID = "b" + id[1:] + roots[2].ID = "c" + id[1:] // Now create - for i, ixn := range ixns { - assert.Nil(s.IntentionSet(uint64(4+i), ixn)) - } + ok, err := s.CARootSetCAS(1, 0, roots) + assert.Nil(err) + assert.True(ok) // Snapshot the queries. snap := s.Snapshot() defer snap.Close() // Alter the real state store. - assert.Nil(s.IntentionDelete(7, ixns[0].ID)) + ok, err = s.CARootSetCAS(2, 1, roots[:1]) + assert.Nil(err) + assert.True(ok) // Verify the snapshot. - assert.Equal(snap.LastIndex(), uint64(6)) - expected := structs.Intentions{ - &structs.Intention{ - ID: ixns[0].ID, - DestinationName: "foo", - Meta: map[string]string{}, - RaftIndex: structs.RaftIndex{ - CreateIndex: 4, - ModifyIndex: 4, - }, - }, - &structs.Intention{ - ID: ixns[1].ID, - DestinationName: "bar", - Meta: map[string]string{}, - RaftIndex: structs.RaftIndex{ - CreateIndex: 5, - ModifyIndex: 5, - }, - }, - &structs.Intention{ - ID: ixns[2].ID, - DestinationName: "baz", - Meta: map[string]string{}, - RaftIndex: structs.RaftIndex{ - CreateIndex: 6, - ModifyIndex: 6, - }, - }, - } - dump, err := snap.Intentions() + assert.Equal(snap.LastIndex(), uint64(1)) + dump, err := snap.CARoots() assert.Nil(err) - assert.Equal(expected, dump) + assert.Equal(roots, dump) // Restore the values into a new state store. func() { s := testStateStore(t) restore := s.Restore() - for _, ixn := range dump { - assert.Nil(restore.Intention(ixn)) + for _, r := range dump { + assert.Nil(restore.CARoot(r)) } restore.Commit() // Read the restored values back out and verify that they match. 
- idx, actual, err := s.Intentions(nil) + idx, actual, err := s.CARoots(nil) assert.Nil(err) - assert.Equal(idx, uint64(6)) - assert.Equal(expected, actual) + assert.Equal(idx, uint64(2)) + assert.Equal(roots, actual) }() } -*/ From 746f80639adaab67004af1225349ea269f4b4369 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 12:42:42 -0700 Subject: [PATCH 097/627] agent: /v1/connect/ca/configuration PUT for setting configuration --- agent/connect_ca_endpoint.go | 28 ++++++++++ agent/consul/connect_ca_endpoint.go | 87 +++++++++++++++++++++++++++++ agent/http_oss.go | 1 + agent/structs/connect_ca.go | 11 ++++ 4 files changed, 127 insertions(+) diff --git a/agent/connect_ca_endpoint.go b/agent/connect_ca_endpoint.go index 1c7871015..7832ba36f 100644 --- a/agent/connect_ca_endpoint.go +++ b/agent/connect_ca_endpoint.go @@ -1,6 +1,7 @@ package agent import ( + "fmt" "net/http" "github.com/hashicorp/consul/agent/structs" @@ -26,3 +27,30 @@ func (s *HTTPServer) ConnectCARoots(resp http.ResponseWriter, req *http.Request) return reply, nil } + +// /v1/connect/ca/configuration +func (s *HTTPServer) ConnectCAConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + switch req.Method { + case "PUT": + return s.ConnectCAConfigurationSet(resp, req) + + default: + return nil, MethodNotAllowedError{req.Method, []string{"GET", "POST"}} + } +} + +// PUT /v1/connect/ca/configuration +func (s *HTTPServer) ConnectCAConfigurationSet(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Method is tested in ConnectCAConfiguration + + var args structs.CAConfiguration + if err := decodeBody(req, &args, nil); err != nil { + resp.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(resp, "Request decode failed: %v", err) + return nil, nil + } + + var reply interface{} + err := s.agent.RPC("ConnectCA.ConfigurationSet", &args, &reply) + return nil, err +} diff --git a/agent/consul/connect_ca_endpoint.go 
b/agent/consul/connect_ca_endpoint.go index 60e631cef..a4cb569d8 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -14,6 +14,9 @@ import ( "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" + "github.com/hashicorp/go-uuid" + "github.com/mitchellh/go-testing-interface" + "github.com/mitchellh/mapstructure" ) // ConnectCA manages the Connect CA. @@ -22,6 +25,90 @@ type ConnectCA struct { srv *Server } +// ConfigurationSet updates the configuration for the CA. +// +// NOTE(mitchellh): This whole implementation is temporary until the real +// CA plugin work comes in. For now, this is only used to configure a single +// static CA root. +func (s *ConnectCA) ConfigurationSet( + args *structs.CAConfiguration, + reply *interface{}) error { + // NOTE(mitchellh): This is the temporary hardcoding of a static CA + // provider. This will allow us to test agent implementations and so on + // with an incomplete CA for now. 
+ if args.Provider != "static" { + return fmt.Errorf("The CA provider can only be 'static' for now") + } + + // Config is the configuration allowed for our static provider + var config struct { + Name string + CertPEM string + PrivateKeyPEM string + Generate bool + } + if err := mapstructure.Decode(args.Config, &config); err != nil { + return fmt.Errorf("error decoding config: %s", err) + } + + // Basic validation so demos aren't super jank + if config.Name == "" { + return fmt.Errorf("Name must be set") + } + if config.CertPEM == "" || config.PrivateKeyPEM == "" { + if !config.Generate { + return fmt.Errorf( + "CertPEM and PrivateKeyPEM must be set, or Generate must be true") + } + } + + // Convenience to auto-generate the cert + if config.Generate { + ca := connect.TestCA(&testing.RuntimeT{}, nil) + config.CertPEM = ca.RootCert + config.PrivateKeyPEM = ca.SigningKey + } + + // TODO(mitchellh): verify that the private key is valid for the cert + + // Generate an ID for this + id, err := uuid.GenerateUUID() + if err != nil { + return err + } + + // Get the highest index + state := s.srv.fsm.State() + idx, _, err := state.CARoots(nil) + if err != nil { + return err + } + + // Commit + resp, err := s.srv.raftApply(structs.ConnectCARequestType, &structs.CARequest{ + Op: structs.CAOpSet, + Index: idx, + Roots: []*structs.CARoot{ + &structs.CARoot{ + ID: id, + Name: config.Name, + RootCert: config.CertPEM, + SigningKey: config.PrivateKeyPEM, + Active: true, + }, + }, + }) + if err != nil { + s.srv.logger.Printf("[ERR] consul.test: Apply failed %v", err) + return err + } + if respErr, ok := resp.(error); ok { + return respErr + } + + return nil +} + // Roots returns the currently trusted root certificates. 
func (s *ConnectCA) Roots( args *structs.DCSpecificRequest, diff --git a/agent/http_oss.go b/agent/http_oss.go index d2e86622f..6c2e697ea 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -42,6 +42,7 @@ func init() { registerEndpoint("/v1/catalog/services", []string{"GET"}, (*HTTPServer).CatalogServices) registerEndpoint("/v1/catalog/service/", []string{"GET"}, (*HTTPServer).CatalogServiceNodes) registerEndpoint("/v1/catalog/node/", []string{"GET"}, (*HTTPServer).CatalogNodeServices) + registerEndpoint("/v1/connect/ca/configuration", []string{"PUT"}, (*HTTPServer).ConnectCAConfiguration) registerEndpoint("/v1/connect/ca/roots", []string{"GET"}, (*HTTPServer).ConnectCARoots) registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionEndpoint) registerEndpoint("/v1/connect/intentions/match", []string{"GET"}, (*HTTPServer).IntentionMatch) diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 6dc2dbf30..8576a1b41 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -113,3 +113,14 @@ type CARequest struct { // always be active. Roots []*CARoot } + +// CAConfiguration is the configuration for the current CA plugin. +type CAConfiguration struct { + // Provider is the CA provider implementation to use. + Provider string + + // Configuration is arbitrary configuration for the provider. This + // should only contain primitive values and containers (such as lists + // and maps). 
+ Config map[string]interface{} +} From deb55c436d356f75a44cc62e88da82f74066ab48 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 12:55:43 -0700 Subject: [PATCH 098/627] agent/structs: hide some fields from JSON --- agent/structs/connect_ca.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 8576a1b41..f75efed5c 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -35,8 +35,8 @@ type CARoot struct { // SigningCert is the PEM-encoded signing certificate and SigningKey // is the PEM-encoded private key for the signing certificate. These // may actually be empty if the CA plugin in use manages these for us. - SigningCert string - SigningKey string + SigningCert string `json:",omitempty"` + SigningKey string `json:",omitempty"` // Active is true if this is the current active CA. This must only // be true for exactly one CA. For any method that modifies roots in the @@ -77,7 +77,7 @@ type IssuedCert struct { // key for that cert, respectively. This should not be stored in the // state store, but is present in the sign API response. CertPEM string `json:",omitempty"` - PrivateKeyPEM string + PrivateKeyPEM string `json:",omitempty"` // Service is the name of the service for which the cert was issued. // ServiceURI is the cert URI value. 
From 2026cf3753364a2145f32de225001e3ecb507a48 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 12:54:51 -1000 Subject: [PATCH 099/627] agent/consul: encode issued cert serial number as hex encoded --- agent/connect/ca.go | 7 +++++++ agent/consul/connect_ca_endpoint.go | 2 +- agent/structs/connect_ca.go | 4 ++-- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/agent/connect/ca.go b/agent/connect/ca.go index a0a65ece6..efe7c14f3 100644 --- a/agent/connect/ca.go +++ b/agent/connect/ca.go @@ -7,6 +7,7 @@ import ( "crypto/x509" "encoding/pem" "fmt" + "strings" ) // ParseCert parses the x509 certificate from a PEM-encoded value. @@ -72,3 +73,9 @@ func KeyId(raw interface{}) ([]byte, error) { h.Write(pub.Y.Bytes()) return h.Sum([]byte{}), nil } + +// HexString returns a standard colon-separated hex value for the input +// byte slice. This should be used with cert serial numbers and so on. +func HexString(input []byte) string { + return strings.Replace(fmt.Sprintf("% x", input), " ", ":", -1) +} diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index a4cb569d8..f7557578c 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -257,7 +257,7 @@ func (s *ConnectCA) Sign( // Set the response *reply = structs.IssuedCert{ - SerialNumber: template.SerialNumber, + SerialNumber: connect.HexString(template.SerialNumber.Bytes()), CertPEM: buf.String(), Service: serviceId.Service, ServiceURI: template.URIs[0].String(), diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index f75efed5c..5ac8a0fc2 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -1,7 +1,6 @@ package structs import ( - "math/big" "time" ) @@ -71,7 +70,8 @@ func (q *CASignRequest) RequestDatacenter() string { // IssuedCert is a certificate that has been issued by a Connect CA. 
type IssuedCert struct { // SerialNumber is the unique serial number for this certificate. - SerialNumber *big.Int + // This is encoded in standard hex separated by :. + SerialNumber string // CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private // key for that cert, respectively. This should not be stored in the From e0562f1c21774b149fd39a6e760384173df3a565 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 21 Mar 2018 13:02:46 -1000 Subject: [PATCH 100/627] agent: implement an always-200 authorize endpoint --- agent/agent_endpoint.go | 13 +++++++++++++ agent/http_oss.go | 1 + 2 files changed, 14 insertions(+) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index bc684f115..1cbb4b1da 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -885,3 +885,16 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. return &reply, nil } + +// AgentConnectAuthorize +// +// POST /v1/agent/connect/authorize +func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Test the method + if req.Method != "POST" { + return nil, MethodNotAllowedError{req.Method, []string{"POST"}} + } + + // NOTE(mitchellh): return 200 for now. To be implemented later. 
+ return nil, nil +} diff --git a/agent/http_oss.go b/agent/http_oss.go index 6c2e697ea..774388ad3 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -29,6 +29,7 @@ func init() { registerEndpoint("/v1/agent/check/warn/", []string{"PUT"}, (*HTTPServer).AgentCheckWarn) registerEndpoint("/v1/agent/check/fail/", []string{"PUT"}, (*HTTPServer).AgentCheckFail) registerEndpoint("/v1/agent/check/update/", []string{"PUT"}, (*HTTPServer).AgentCheckUpdate) + registerEndpoint("/v1/agent/connect/authorize", []string{"POST"}, (*HTTPServer).AgentConnectAuthorize) registerEndpoint("/v1/agent/connect/ca/roots", []string{"GET"}, (*HTTPServer).AgentConnectCARoots) registerEndpoint("/v1/agent/connect/ca/leaf/", []string{"GET"}, (*HTTPServer).AgentConnectCALeafCert) registerEndpoint("/v1/agent/service/register", []string{"PUT"}, (*HTTPServer).AgentRegisterService) From 434d8750ae25939a9928d00f35dcfcc61208ff38 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 24 Mar 2018 08:27:44 -1000 Subject: [PATCH 101/627] agent/connect: address PR feedback for the CA.go file --- agent/connect/ca.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/agent/connect/ca.go b/agent/connect/ca.go index efe7c14f3..bca9392d3 100644 --- a/agent/connect/ca.go +++ b/agent/connect/ca.go @@ -12,6 +12,7 @@ import ( // ParseCert parses the x509 certificate from a PEM-encoded value. func ParseCert(pemValue string) (*x509.Certificate, error) { + // The _ result below is not an error but the remaining PEM bytes. block, _ := pem.Decode([]byte(pemValue)) if block == nil { return nil, fmt.Errorf("no PEM-encoded data found") @@ -27,6 +28,7 @@ func ParseCert(pemValue string) (*x509.Certificate, error) { // ParseSigner parses a crypto.Signer from a PEM-encoded key. The private key // is expected to be the first block in the PEM value. func ParseSigner(pemValue string) (crypto.Signer, error) { + // The _ result below is not an error but the remaining PEM bytes. 
block, _ := pem.Decode([]byte(pemValue)) if block == nil { return nil, fmt.Errorf("no PEM-encoded data found") @@ -44,6 +46,7 @@ func ParseSigner(pemValue string) (crypto.Signer, error) { // ParseCSR parses a CSR from a PEM-encoded value. The certificate request // must be the the first block in the PEM value. func ParseCSR(pemValue string) (*x509.CertificateRequest, error) { + // The _ result below is not an error but the remaining PEM bytes. block, _ := pem.Decode([]byte(pemValue)) if block == nil { return nil, fmt.Errorf("no PEM-encoded data found") @@ -57,7 +60,7 @@ func ParseCSR(pemValue string) (*x509.CertificateRequest, error) { } // KeyId returns a x509 KeyId from the given signing key. The key must be -// an *ecdsa.PublicKey, but is an interface type to support crypto.Signer. +// an *ecdsa.PublicKey currently, but may support more types in the future. func KeyId(raw interface{}) ([]byte, error) { pub, ok := raw.(*ecdsa.PublicKey) if !ok { @@ -66,12 +69,15 @@ func KeyId(raw interface{}) ([]byte, error) { // This is not standard; RFC allows any unique identifier as long as they // match in subject/authority chains but suggests specific hashing of DER - // bytes of public key including DER tags. I can't be bothered to do esp. - // since ECDSA keys don't have a handy way to marshal the publick key alone. - h := sha256.New() - h.Write(pub.X.Bytes()) - h.Write(pub.Y.Bytes()) - return h.Sum([]byte{}), nil + // bytes of public key including DER tags. 
+ bs, err := x509.MarshalPKIXPublicKey(pub) + if err != nil { + return nil, err + } + + // String formatted + kID := sha256.Sum256(bs) + return []byte(strings.Replace(fmt.Sprintf("% x", kID), " ", ":", -1)), nil } // HexString returns a standard colon-separated hex value for the input From b0315811b9547217abb4de382f2982c0e8414947 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 24 Mar 2018 08:32:42 -1000 Subject: [PATCH 102/627] agent/connect: use proper keyusage fields for CA and leaf --- agent/connect/testing_ca.go | 46 ++++++++++++++--------------- agent/consul/connect_ca_endpoint.go | 5 +++- 2 files changed, 27 insertions(+), 24 deletions(-) diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index f79849016..95115536e 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -6,7 +6,6 @@ import ( "crypto/ecdsa" "crypto/elliptic" "crypto/rand" - "crypto/sha256" "crypto/x509" "crypto/x509/pkix" "encoding/pem" @@ -67,12 +66,14 @@ func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { PermittedDNSDomainsCritical: true, PermittedDNSDomains: []string{uri.Hostname()}, BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, - IsCA: true, - NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), - NotBefore: time.Now(), - AuthorityKeyId: testKeyID(t, signer.Public()), - SubjectKeyId: testKeyID(t, signer.Public()), + KeyUsage: x509.KeyUsageCertSign | + x509.KeyUsageCRLSign | + x509.KeyUsageDigitalSignature, + IsCA: true, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: testKeyID(t, signer.Public()), + SubjectKeyId: testKeyID(t, signer.Public()), } bs, err := x509.CreateCertificate( @@ -100,7 +101,11 @@ func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { t.Fatalf("error parsing signing key: %s", err) } - // Set the authority key to be the previous one + // Set the authority key to be the previous one. 
+ // NOTE(mitchellh): From Paul Banks: if we have to cross-sign a cert + // that came from outside (e.g. vault) we can't rely on them using the + // same KeyID hashing algo we do so we'd need to actually copy this + // from the xc cert's subjectKeyIdentifier extension. template.AuthorityKeyId = testKeyID(t, xcsigner.Public()) // Create the new certificate where the parent is the previous @@ -161,7 +166,10 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { URIs: []*url.URL{spiffeId.URI()}, SignatureAlgorithm: x509.ECDSAWithSHA256, BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageDataEncipherment | x509.KeyUsageKeyAgreement, + KeyUsage: x509.KeyUsageDataEncipherment | + x509.KeyUsageKeyAgreement | + x509.KeyUsageDigitalSignature | + x509.KeyUsageKeyEncipherment, ExtKeyUsage: []x509.ExtKeyUsage{ x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, @@ -230,23 +238,15 @@ func TestCSR(t testing.T, id SpiffeID) (string, string) { return csrBuf.String(), pkBuf.String() } -// testKeyID returns a KeyID from the given public key. The "raw" must be -// an *ecdsa.PublicKey, but is an interface type to suppot crypto.Signer.Public -// values. +// testKeyID returns a KeyID from the given public key. This just calls +// KeyId but handles errors for tests. func testKeyID(t testing.T, raw interface{}) []byte { - pub, ok := raw.(*ecdsa.PublicKey) - if !ok { - t.Fatalf("raw is type %T, expected *ecdsa.PublicKey", raw) + result, err := KeyId(raw) + if err != nil { + t.Fatalf("KeyId error: %s", err) } - // This is not standard; RFC allows any unique identifier as long as they - // match in subject/authority chains but suggests specific hashing of DER - // bytes of public key including DER tags. I can't be bothered to do esp. - // since ECDSA keys don't have a handy way to marshal the publick key alone. 
- h := sha256.New() - h.Write(pub.X.Bytes()) - h.Write(pub.Y.Bytes()) - return h.Sum([]byte{}) + return result } // testMemoizePK is the private key that we memoize once we generate it diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index f7557578c..b3aca757e 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -232,7 +232,10 @@ func (s *ConnectCA) Sign( PublicKeyAlgorithm: csr.PublicKeyAlgorithm, PublicKey: csr.PublicKey, BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageDataEncipherment | x509.KeyUsageKeyAgreement, + KeyUsage: x509.KeyUsageDataEncipherment | + x509.KeyUsageKeyAgreement | + x509.KeyUsageDigitalSignature | + x509.KeyUsageKeyEncipherment, ExtKeyUsage: []x509.ExtKeyUsage{ x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, From da1bc48372ab0b5da86eb7492a504e4d2331945b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 24 Mar 2018 08:39:43 -1000 Subject: [PATCH 103/627] agent/connect: rename SpiffeID to CertURI --- agent/connect/testing_ca.go | 4 ++-- agent/connect/{spiffe.go => uri.go} | 15 ++++++++++----- agent/connect/{spiffe_test.go => uri_test.go} | 10 +++++----- agent/consul/connect_ca_endpoint.go | 2 +- 4 files changed, 18 insertions(+), 13 deletions(-) rename agent/connect/{spiffe.go => uri.go} (64%) rename agent/connect/{spiffe_test.go => uri_test.go} (81%) diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index 95115536e..6ce5362ac 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -197,9 +197,9 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { // TestCSR returns a CSR to sign the given service along with the PEM-encoded // private key for this certificate. 
-func TestCSR(t testing.T, id SpiffeID) (string, string) { +func TestCSR(t testing.T, uri CertURI) (string, string) { template := &x509.CertificateRequest{ - URIs: []*url.URL{id.URI()}, + URIs: []*url.URL{uri.URI()}, SignatureAlgorithm: x509.ECDSAWithSHA256, } diff --git a/agent/connect/spiffe.go b/agent/connect/uri.go similarity index 64% rename from agent/connect/spiffe.go rename to agent/connect/uri.go index 58a6b83e3..b33fb10ef 100644 --- a/agent/connect/spiffe.go +++ b/agent/connect/uri.go @@ -6,9 +6,14 @@ import ( "regexp" ) -// SpiffeID represents a Connect-valid SPIFFE ID. The user should type switch -// on the various implementations in this package to determine the type of ID. -type SpiffeID interface { +// CertURI represents a Connect-valid URI value for a TLS certificate. +// The user should type switch on the various implementations in this +// package to determine the type of URI and the data encoded within it. +// +// Note that the current implementations of this are all also SPIFFE IDs. +// However, we anticipate that we may accept URIs that are also not SPIFFE +// compliant and therefore the interface is named as such. +type CertURI interface { URI() *url.URL } @@ -17,8 +22,8 @@ var ( `^/ns/(\w+)/dc/(\w+)/svc/(\w+)$`) ) -// ParseSpiffeID parses a SPIFFE ID from the input URI. -func ParseSpiffeID(input *url.URL) (SpiffeID, error) { +// ParseCertURI parses a the URI value from a TLS certificate. 
+func ParseCertURI(input *url.URL) (CertURI, error) { if input.Scheme != "spiffe" { return nil, fmt.Errorf("SPIFFE ID must have 'spiffe' scheme") } diff --git a/agent/connect/spiffe_test.go b/agent/connect/uri_test.go similarity index 81% rename from agent/connect/spiffe_test.go rename to agent/connect/uri_test.go index 861a4fa63..370e3c420 100644 --- a/agent/connect/spiffe_test.go +++ b/agent/connect/uri_test.go @@ -7,9 +7,9 @@ import ( "github.com/stretchr/testify/assert" ) -// testSpiffeIDCases contains the test cases for parsing and encoding +// testCertURICases contains the test cases for parsing and encoding // the SPIFFE IDs. This is a global since it is used in multiple test functions. -var testSpiffeIDCases = []struct { +var testCertURICases = []struct { Name string URI string Struct interface{} @@ -35,8 +35,8 @@ var testSpiffeIDCases = []struct { }, } -func TestParseSpiffeID(t *testing.T) { - for _, tc := range testSpiffeIDCases { +func TestParseCertURI(t *testing.T) { + for _, tc := range testCertURICases { t.Run(tc.Name, func(t *testing.T) { assert := assert.New(t) @@ -45,7 +45,7 @@ func TestParseSpiffeID(t *testing.T) { assert.Nil(err) // Parse the ID and check the error/return value - actual, err := ParseSpiffeID(uri) + actual, err := ParseCertURI(uri) assert.Equal(tc.ParseError != "", err != nil, "error value") if err != nil { assert.Contains(err.Error(), tc.ParseError) diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index b3aca757e..4efdafc06 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -171,7 +171,7 @@ func (s *ConnectCA) Sign( } // Parse the SPIFFE ID - spiffeId, err := connect.ParseSpiffeID(csr.URIs[0]) + spiffeId, err := connect.ParseCertURI(csr.URIs[0]) if err != nil { return err } From 8934f00d03746c3eeaa0d08f42e7c97e46bc16e1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 24 Mar 2018 08:46:12 -1000 Subject: [PATCH 104/627] agent/connect: 
support SpiffeIDSigning --- agent/connect/testing_ca.go | 9 +++------ agent/connect/uri.go | 27 +++++++++++++++++++++++++++ agent/connect/uri_test.go | 10 ++++++++++ 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index 6ce5362ac..a2f711763 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -53,18 +53,15 @@ func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { } // The URI (SPIFFE compatible) for the cert - uri, err := url.Parse(fmt.Sprintf("spiffe://%s.consul", testClusterID)) - if err != nil { - t.Fatalf("error parsing CA URI: %s", err) - } + id := &SpiffeIDSigning{ClusterID: testClusterID, Domain: "consul"} // Create the CA cert template := x509.Certificate{ SerialNumber: sn, Subject: pkix.Name{CommonName: result.Name}, - URIs: []*url.URL{uri}, + URIs: []*url.URL{id.URI()}, PermittedDNSDomainsCritical: true, - PermittedDNSDomains: []string{uri.Hostname()}, + PermittedDNSDomains: []string{id.URI().Hostname()}, BasicConstraintsValid: true, KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign | diff --git a/agent/connect/uri.go b/agent/connect/uri.go index b33fb10ef..3b56ec4ae 100644 --- a/agent/connect/uri.go +++ b/agent/connect/uri.go @@ -4,6 +4,7 @@ import ( "fmt" "net/url" "regexp" + "strings" ) // CertURI represents a Connect-valid URI value for a TLS certificate. @@ -38,6 +39,17 @@ func ParseCertURI(input *url.URL) (CertURI, error) { }, nil } + // Test for signing ID + if input.Path == "" { + idx := strings.Index(input.Host, ".") + if idx > 0 { + return &SpiffeIDSigning{ + ClusterID: input.Host[:idx], + Domain: input.Host[idx+1:], + }, nil + } + } + return nil, fmt.Errorf("SPIFFE ID is not in the expected format") } @@ -58,3 +70,18 @@ func (id *SpiffeIDService) URI() *url.URL { id.Namespace, id.Datacenter, id.Service) return &result } + +// SpiffeIDSigning is the structure to represent the SPIFFE ID for a +// signing certificate (not a leaf service). 
+type SpiffeIDSigning struct { + ClusterID string // Unique cluster ID + Domain string // The domain, usually "consul" +} + +// URI returns the *url.URL for this SPIFFE ID. +func (id *SpiffeIDSigning) URI() *url.URL { + var result url.URL + result.Scheme = "spiffe" + result.Host = fmt.Sprintf("%s.%s", id.ClusterID, id.Domain) + return &result +} diff --git a/agent/connect/uri_test.go b/agent/connect/uri_test.go index 370e3c420..247170f53 100644 --- a/agent/connect/uri_test.go +++ b/agent/connect/uri_test.go @@ -33,6 +33,16 @@ var testCertURICases = []struct { }, "", }, + + { + "signing ID", + "spiffe://1234.consul", + &SpiffeIDSigning{ + ClusterID: "1234", + Domain: "consul", + }, + "", + }, } func TestParseCertURI(t *testing.T) { From 9d93c520984b7c92cc748e4f346f1971b3b5f372 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 26 Mar 2018 20:31:17 -0700 Subject: [PATCH 105/627] agent/connect: support any values in the URL --- agent/connect/uri.go | 37 ++++++++++++++++++++++++++++++++----- agent/connect/uri_test.go | 15 +++++++++++++++ 2 files changed, 47 insertions(+), 5 deletions(-) diff --git a/agent/connect/uri.go b/agent/connect/uri.go index 3b56ec4ae..3562f2d6c 100644 --- a/agent/connect/uri.go +++ b/agent/connect/uri.go @@ -20,7 +20,7 @@ type CertURI interface { var ( spiffeIDServiceRegexp = regexp.MustCompile( - `^/ns/(\w+)/dc/(\w+)/svc/(\w+)$`) + `^/ns/([^/]+)/dc/([^/]+)/svc/([^/]+)$`) ) // ParseCertURI parses a the URI value from a TLS certificate. @@ -29,13 +29,40 @@ func ParseCertURI(input *url.URL) (CertURI, error) { return nil, fmt.Errorf("SPIFFE ID must have 'spiffe' scheme") } + // Path is the raw value of the path without url decoding values. + // RawPath is empty if there were no encoded values so we must + // check both. 
+ path := input.Path + if input.RawPath != "" { + path = input.RawPath + } + // Test for service IDs - if v := spiffeIDServiceRegexp.FindStringSubmatch(input.Path); v != nil { + if v := spiffeIDServiceRegexp.FindStringSubmatch(path); v != nil { + // Determine the values. We assume they're sane to save cycles, + // but if the raw path is not empty that means that something is + // URL encoded so we go to the slow path. + ns := v[1] + dc := v[2] + service := v[3] + if input.RawPath != "" { + var err error + if ns, err = url.PathUnescape(v[1]); err != nil { + return nil, fmt.Errorf("Invalid namespace: %s", err) + } + if dc, err = url.PathUnescape(v[2]); err != nil { + return nil, fmt.Errorf("Invalid datacenter: %s", err) + } + if service, err = url.PathUnescape(v[3]); err != nil { + return nil, fmt.Errorf("Invalid service: %s", err) + } + } + return &SpiffeIDService{ Host: input.Host, - Namespace: v[1], - Datacenter: v[2], - Service: v[3], + Namespace: ns, + Datacenter: dc, + Service: service, }, nil } diff --git a/agent/connect/uri_test.go b/agent/connect/uri_test.go index 247170f53..2f28c940d 100644 --- a/agent/connect/uri_test.go +++ b/agent/connect/uri_test.go @@ -34,6 +34,18 @@ var testCertURICases = []struct { "", }, + { + "service with URL-encoded values", + "spiffe://1234.consul/ns/foo%2Fbar/dc/bar%2Fbaz/svc/baz%2Fqux", + &SpiffeIDService{ + Host: "1234.consul", + Namespace: "foo/bar", + Datacenter: "bar/baz", + Service: "baz/qux", + }, + "", + }, + { "signing ID", "spiffe://1234.consul", @@ -56,6 +68,9 @@ func TestParseCertURI(t *testing.T) { // Parse the ID and check the error/return value actual, err := ParseCertURI(uri) + if err != nil { + t.Logf("parse error: %s", err.Error()) + } assert.Equal(tc.ParseError != "", err != nil, "error value") if err != nil { assert.Contains(err.Error(), tc.ParseError) From 1985655dffd68bca5f16c816837a87a5532decc0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 26 Mar 2018 20:38:39 -0700 Subject: [PATCH 106/627] 
agent/consul/state: ensure exactly one active CA exists when setting --- agent/consul/state/connect_ca.go | 11 +++++++ agent/consul/state/connect_ca_test.go | 42 +++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index 05313ce2e..95e763b8b 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -110,6 +110,17 @@ func (s *Store) CARootSetCAS(idx, cidx uint64, rs []*structs.CARoot) (bool, erro tx := s.db.Txn(true) defer tx.Abort() + // There must be exactly one active CA root. + activeCount := 0 + for _, r := range rs { + if r.Active { + activeCount++ + } + } + if activeCount != 1 { + return false, fmt.Errorf("there must be exactly one active CA") + } + // Get the current max index if midx := maxIndexTxn(tx, caRootTableName); midx != cidx { return false, nil diff --git a/agent/consul/state/connect_ca_test.go b/agent/consul/state/connect_ca_test.go index bbbac0f0f..cd77eac7c 100644 --- a/agent/consul/state/connect_ca_test.go +++ b/agent/consul/state/connect_ca_test.go @@ -75,6 +75,48 @@ func TestStore_CARootSet_emptyID(t *testing.T) { assert.Len(roots, 0) } +func TestStore_CARootSet_noActive(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + // Call list to populate the watch set + ws := memdb.NewWatchSet() + _, _, err := s.CARoots(ws) + assert.Nil(err) + + // Build a valid value + ca1 := connect.TestCA(t, nil) + ca1.Active = false + ca2 := connect.TestCA(t, nil) + ca2.Active = false + + // Set + ok, err := s.CARootSetCAS(1, 0, []*structs.CARoot{ca1, ca2}) + assert.NotNil(err) + assert.Contains(err.Error(), "exactly one active") + assert.False(ok) +} + +func TestStore_CARootSet_multipleActive(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + // Call list to populate the watch set + ws := memdb.NewWatchSet() + _, _, err := s.CARoots(ws) + assert.Nil(err) + + // Build a valid value + ca1 := connect.TestCA(t, 
nil) + ca2 := connect.TestCA(t, nil) + + // Set + ok, err := s.CARootSetCAS(1, 0, []*structs.CARoot{ca1, ca2}) + assert.NotNil(err) + assert.Contains(err.Error(), "exactly one active") + assert.False(ok) +} + func TestStore_CARootActive_valid(t *testing.T) { assert := assert.New(t) s := testStateStore(t) From 894ee3c5b043cf9ec9ea8c0ade0afa2b2e9e5753 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Mon, 26 Mar 2018 16:51:43 +0100 Subject: [PATCH 107/627] Add Connect agent, catalog and health endpoints to api Client --- agent/structs/structs.go | 2 +- api/agent.go | 20 +++++++ api/agent_test.go | 45 ++++++++++++++++ api/catalog.go | 15 +++++- api/catalog_test.go | 113 +++++++++++++++++++++++++++++++++++++++ api/health.go | 19 ++++++- api/health_test.go | 51 ++++++++++++++++++ 7 files changed, 262 insertions(+), 3 deletions(-) diff --git a/agent/structs/structs.go b/agent/structs/structs.go index a4e942230..4f25e50f0 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -464,7 +464,7 @@ type ServiceKind string const ( // ServiceKindTypical is a typical, classic Consul service. This is - // represented by the absense of a value. This was chosen for ease of + // represented by the absence of a value. This was chosen for ease of // backwards compatibility: existing services in the catalog would // default to the typical service. ServiceKindTypical ServiceKind = "" diff --git a/api/agent.go b/api/agent.go index 23690d48a..359206c54 100644 --- a/api/agent.go +++ b/api/agent.go @@ -5,6 +5,22 @@ import ( "fmt" ) +// ServiceKind is the kind of service being registered. +type ServiceKind string + +const ( + // ServiceKindTypical is a typical, classic Consul service. This is + // represented by the absence of a value. This was chosen for ease of + // backwards compatibility: existing services in the catalog would + // default to the typical service. + ServiceKindTypical ServiceKind = "" + + // ServiceKindConnectProxy is a proxy for the Connect feature. 
This + // service proxies another service within Consul and speaks the connect + // protocol. + ServiceKindConnectProxy ServiceKind = "connect-proxy" +) + // AgentCheck represents a check known to the agent type AgentCheck struct { Node string @@ -20,6 +36,7 @@ type AgentCheck struct { // AgentService represents a service known to the agent type AgentService struct { + Kind ServiceKind ID string Service string Tags []string @@ -29,6 +46,7 @@ type AgentService struct { EnableTagOverride bool CreateIndex uint64 ModifyIndex uint64 + ProxyDestination string } // AgentMember represents a cluster member known to the agent @@ -61,6 +79,7 @@ type MembersOpts struct { // AgentServiceRegistration is used to register a new service type AgentServiceRegistration struct { + Kind ServiceKind `json:",omitempty"` ID string `json:",omitempty"` Name string `json:",omitempty"` Tags []string `json:",omitempty"` @@ -70,6 +89,7 @@ type AgentServiceRegistration struct { Meta map[string]string `json:",omitempty"` Check *AgentServiceCheck Checks AgentServiceChecks + ProxyDestination string `json:",omitempty"` } // AgentCheckRegistration is used to register a new check diff --git a/api/agent_test.go b/api/agent_test.go index b195fed29..d45a9a131 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -185,6 +185,51 @@ func TestAPI_AgentServices(t *testing.T) { } } +func TestAPI_AgentServices_ConnectProxy(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + // Register service + reg := &AgentServiceRegistration{ + Name: "foo", + Port: 8000, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + // Register proxy + reg = &AgentServiceRegistration{ + Kind: ServiceKindConnectProxy, + Name: "foo-proxy", + Port: 8001, + ProxyDestination: "foo", + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } 
+ if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + if _, ok := services["foo-proxy"]; !ok { + t.Fatalf("missing proxy service: %v", services) + } + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } + if err := agent.ServiceDeregister("foo-proxy"); err != nil { + t.Fatalf("err: %v", err) + } +} + func TestAPI_AgentServices_CheckPassing(t *testing.T) { t.Parallel() c, s := makeClient(t) diff --git a/api/catalog.go b/api/catalog.go index 80ce1bc81..1a6bbc3b3 100644 --- a/api/catalog.go +++ b/api/catalog.go @@ -156,7 +156,20 @@ func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, er // Service is used to query catalog entries for a given service func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/service/"+service) + return c.service(service, tag, q, false) +} + +// Connect is used to query catalog entries for a given Connect-enabled service +func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + return c.service(service, tag, q, true) +} + +func (c *Catalog) service(service, tag string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) { + path := "/v1/catalog/service/" + service + if connect { + path = "/v1/catalog/connect/" + service + } + r := c.c.newRequest("GET", path) r.setQueryOptions(q) if tag != "" { r.params.Set("tag", tag) diff --git a/api/catalog_test.go b/api/catalog_test.go index 11f50a919..9db640b9d 100644 --- a/api/catalog_test.go +++ b/api/catalog_test.go @@ -186,6 +186,7 @@ func TestAPI_CatalogService(t *testing.T) { defer s.Stop() catalog := c.Catalog() + retry.Run(t, func(r *retry.R) { services, meta, err := catalog.Service("consul", "", nil) if err != nil { @@ -235,6 +236,80 @@ func TestAPI_CatalogService_NodeMetaFilter(t *testing.T) { }) } +func TestAPI_CatalogConnect(t *testing.T) { + 
t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + catalog := c.Catalog() + + // Register service and proxy instances to test against. + service := &AgentService{ + ID: "redis1", + Service: "redis", + Port: 8000, + } + proxy := &AgentService{ + Kind: ServiceKindConnectProxy, + ProxyDestination: "redis", + ID: "redis-proxy1", + Service: "redis-proxy", + Port: 8001, + } + check := &AgentCheck{ + Node: "foobar", + CheckID: "service:redis1", + Name: "Redis health check", + Notes: "Script based health check", + Status: HealthPassing, + ServiceID: "redis1", + } + + reg := &CatalogRegistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + Service: service, + Check: check, + } + proxyReg := &CatalogRegistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + Service: proxy, + } + + retry.Run(t, func(r *retry.R) { + if _, err := catalog.Register(reg, nil); err != nil { + r.Fatal(err) + } + if _, err := catalog.Register(proxyReg, nil); err != nil { + r.Fatal(err) + } + + services, meta, err := catalog.Connect("redis", "", nil) + if err != nil { + r.Fatal(err) + } + + if meta.LastIndex == 0 { + r.Fatalf("Bad: %v", meta) + } + + if len(services) == 0 { + r.Fatalf("Bad: %v", services) + } + + if services[0].Datacenter != "dc1" { + r.Fatalf("Bad datacenter: %v", services[0]) + } + + if services[0].ServicePort != proxy.Port { + r.Fatalf("Returned port should be for proxy: %v", services[0]) + } + }) +} + func TestAPI_CatalogNode(t *testing.T) { t.Parallel() c, s := makeClient(t) @@ -297,10 +372,28 @@ func TestAPI_CatalogRegistration(t *testing.T) { Service: service, Check: check, } + // Register a connect proxy for that service too + proxy := &AgentService{ + ID: "redis-proxy1", + Service: "redis-proxy", + Port: 8001, + Kind: ServiceKindConnectProxy, + ProxyDestination: service.ID, + } + proxyReg := &CatalogRegistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + NodeMeta: map[string]string{"somekey": 
"somevalue"}, + Service: proxy, + } retry.Run(t, func(r *retry.R) { if _, err := catalog.Register(reg, nil); err != nil { r.Fatal(err) } + if _, err := catalog.Register(proxyReg, nil); err != nil { + r.Fatal(err) + } node, _, err := catalog.Node("foobar", nil) if err != nil { @@ -311,6 +404,10 @@ func TestAPI_CatalogRegistration(t *testing.T) { r.Fatal("missing service: redis1") } + if _, ok := node.Services["redis-proxy1"]; !ok { + r.Fatal("missing service: redis-proxy1") + } + health, _, err := c.Health().Node("foobar", nil) if err != nil { r.Fatal(err) @@ -333,10 +430,22 @@ func TestAPI_CatalogRegistration(t *testing.T) { ServiceID: "redis1", } + // ... and proxy + deregProxy := &CatalogDeregistration{ + Datacenter: "dc1", + Node: "foobar", + Address: "192.168.10.10", + ServiceID: "redis-proxy1", + } + if _, err := catalog.Deregister(dereg, nil); err != nil { t.Fatalf("err: %v", err) } + if _, err := catalog.Deregister(deregProxy, nil); err != nil { + t.Fatalf("err: %v", err) + } + retry.Run(t, func(r *retry.R) { node, _, err := catalog.Node("foobar", nil) if err != nil { @@ -346,6 +455,10 @@ func TestAPI_CatalogRegistration(t *testing.T) { if _, ok := node.Services["redis1"]; ok { r.Fatal("ServiceID:redis1 is not deregistered") } + + if _, ok := node.Services["redis-proxy1"]; ok { + r.Fatal("ServiceID:redis-proxy1 is not deregistered") + } }) // Test deregistration of the previously registered check diff --git a/api/health.go b/api/health.go index 53f3de4f7..5fcb39b5c 100644 --- a/api/health.go +++ b/api/health.go @@ -159,7 +159,24 @@ func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMe // for a given service. It can optionally do server-side filtering on a tag // or nodes with passing health checks only. 
func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/service/"+service) + return h.service(service, tag, passingOnly, q, false) +} + +// Connect is equivalent to Service except that it will only return services +// which are Connect-enabled and will returns the connection address for Connect +// client's to use which may be a proxy in front of the named service. TODO: If +// passingOnly is true only instances where both the service and any proxy are +// healthy will be returned. +func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + return h.service(service, tag, passingOnly, q, true) +} + +func (h *Health) service(service, tag string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) { + path := "/v1/health/service/" + service + if connect { + path = "/v1/health/connect/" + service + } + r := h.c.newRequest("GET", path) r.setQueryOptions(q) if tag != "" { r.params.Set("tag", tag) diff --git a/api/health_test.go b/api/health_test.go index c4ef11651..5c3c2b6a2 100644 --- a/api/health_test.go +++ b/api/health_test.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/consul/testutil" "github.com/hashicorp/consul/testutil/retry" "github.com/pascaldekloe/goe/verify" + "github.com/stretchr/testify/require" ) func TestAPI_HealthNode(t *testing.T) { @@ -282,6 +283,56 @@ func TestAPI_HealthService(t *testing.T) { }) } +func TestAPI_HealthConnect(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + health := c.Health() + + // Make a service with a proxy + reg := &AgentServiceRegistration{ + Name: "foo", + Port: 8000, + } + err := agent.ServiceRegister(reg) + require.Nil(t, err) + defer agent.ServiceDeregister("foo") + + // Register the proxy + proxyReg := &AgentServiceRegistration{ + Name: "foo-proxy", + Port: 8001, + Kind: 
ServiceKindConnectProxy, + ProxyDestination: "foo", + } + err = agent.ServiceRegister(proxyReg) + require.Nil(t, err) + defer agent.ServiceDeregister("foo-proxy") + + retry.Run(t, func(r *retry.R) { + services, meta, err := health.Connect("foo", "", true, nil) + if err != nil { + r.Fatal(err) + } + if meta.LastIndex == 0 { + r.Fatalf("bad: %v", meta) + } + // Should be exactly 1 service - the original shouldn't show up as a connect + // endpoint, only it's proxy. + if len(services) != 1 { + r.Fatalf("Bad: %v", services) + } + if services[0].Node.Datacenter != "dc1" { + r.Fatalf("Bad datacenter: %v", services[0].Node) + } + if services[0].Service.Port != proxyReg.Port { + r.Fatalf("Bad port: %v", services[0]) + } + }) +} + func TestAPI_HealthService_NodeMetaFilter(t *testing.T) { t.Parallel() meta := map[string]string{"somekey": "somevalue"} From 3efe3f8affd03fde42433353144ca30bb5aa2dfa Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Tue, 27 Mar 2018 10:49:27 +0100 Subject: [PATCH 108/627] require -> assert until rebase --- api/health_test.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/api/health_test.go b/api/health_test.go index 5c3c2b6a2..78867909f 100644 --- a/api/health_test.go +++ b/api/health_test.go @@ -7,7 +7,7 @@ import ( "github.com/hashicorp/consul/testutil" "github.com/hashicorp/consul/testutil/retry" "github.com/pascaldekloe/goe/verify" - "github.com/stretchr/testify/require" + "github.com/stretchr/testify/assert" ) func TestAPI_HealthNode(t *testing.T) { @@ -297,7 +297,10 @@ func TestAPI_HealthConnect(t *testing.T) { Port: 8000, } err := agent.ServiceRegister(reg) - require.Nil(t, err) + // TODO replace with require.Nil when we have it vendored in OSS and rebased + if !assert.Nil(t, err) { + return + } defer agent.ServiceDeregister("foo") // Register the proxy @@ -308,7 +311,10 @@ func TestAPI_HealthConnect(t *testing.T) { ProxyDestination: "foo", } err = agent.ServiceRegister(proxyReg) - require.Nil(t, err) + // 
TODO replace with require.Nil when we have it vendored in OSS and rebased + if !assert.Nil(t, err) { + return + } defer agent.ServiceDeregister("foo-proxy") retry.Run(t, func(r *retry.R) { From 68fa4a83b1c84340fd845dcc9d4318b431661787 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 27 Mar 2018 09:52:06 -0700 Subject: [PATCH 109/627] agent: get rid of method checks since they're done in the http layer --- agent/agent_endpoint.go | 10 ---------- agent/connect_ca_endpoint.go | 5 ----- agent/health_endpoint.go | 4 ---- agent/intentions_endpoint.go | 5 ----- 4 files changed, 24 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 1cbb4b1da..89bf16b62 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -852,11 +852,6 @@ func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Req // AgentConnectCALeafCert returns the certificate bundle for a service // instance. This supports blocking queries to update the returned bundle. func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - // Test the method - if req.Method != "GET" { - return nil, MethodNotAllowedError{req.Method, []string{"GET"}} - } - // Get the service ID. Note that this is the ID of a service instance. id := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/") @@ -890,11 +885,6 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. // // POST /v1/agent/connect/authorize func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - // Test the method - if req.Method != "POST" { - return nil, MethodNotAllowedError{req.Method, []string{"POST"}} - } - // NOTE(mitchellh): return 200 for now. To be implemented later. 
return nil, nil } diff --git a/agent/connect_ca_endpoint.go b/agent/connect_ca_endpoint.go index 7832ba36f..43eeb8644 100644 --- a/agent/connect_ca_endpoint.go +++ b/agent/connect_ca_endpoint.go @@ -9,11 +9,6 @@ import ( // GET /v1/connect/ca/roots func (s *HTTPServer) ConnectCARoots(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - // Test the method - if req.Method != "GET" { - return nil, MethodNotAllowedError{req.Method, []string{"GET"}} - } - var args structs.DCSpecificRequest if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil diff --git a/agent/health_endpoint.go b/agent/health_endpoint.go index e57b5f48b..a0fb177b6 100644 --- a/agent/health_endpoint.go +++ b/agent/health_endpoint.go @@ -152,10 +152,6 @@ func (s *HTTPServer) HealthServiceNodes(resp http.ResponseWriter, req *http.Requ } func (s *HTTPServer) healthServiceNodes(resp http.ResponseWriter, req *http.Request, connect bool) (interface{}, error) { - if req.Method != "GET" { - return nil, MethodNotAllowedError{req.Method, []string{"GET"}} - } - // Set default DC args := structs.ServiceSpecificRequest{Connect: connect} s.parseSource(req, &args.Source) diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 5196f06c5..5a2e0e809 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -65,11 +65,6 @@ func (s *HTTPServer) IntentionCreate(resp http.ResponseWriter, req *http.Request // GET /v1/connect/intentions/match func (s *HTTPServer) IntentionMatch(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - // Test the method - if req.Method != "GET" { - return nil, MethodNotAllowedError{req.Method, []string{"GET"}} - } - // Prepare args args := &structs.IntentionQueryRequest{Match: &structs.IntentionQueryMatch{}} if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { From 7af99667b64331c4a32b9a29170c7b28c005e7ac Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: 
Sun, 25 Mar 2018 14:39:18 -1000 Subject: [PATCH 110/627] agent/connect: Authorize for CertURI --- agent/connect/uri.go | 43 +++--------- agent/connect/uri_service.go | 42 ++++++++++++ agent/connect/uri_service_test.go | 104 ++++++++++++++++++++++++++++++ agent/connect/uri_signing.go | 29 +++++++++ agent/connect/uri_signing_test.go | 15 +++++ 5 files changed, 200 insertions(+), 33 deletions(-) create mode 100644 agent/connect/uri_service.go create mode 100644 agent/connect/uri_service_test.go create mode 100644 agent/connect/uri_signing.go create mode 100644 agent/connect/uri_signing_test.go diff --git a/agent/connect/uri.go b/agent/connect/uri.go index 3562f2d6c..48bfd3686 100644 --- a/agent/connect/uri.go +++ b/agent/connect/uri.go @@ -5,6 +5,8 @@ import ( "net/url" "regexp" "strings" + + "github.com/hashicorp/consul/agent/structs" ) // CertURI represents a Connect-valid URI value for a TLS certificate. @@ -15,6 +17,14 @@ import ( // However, we anticipate that we may accept URIs that are also not SPIFFE // compliant and therefore the interface is named as such. type CertURI interface { + // Authorize tests the authorization for this URI as a client + // for the given intention. The return value `auth` is only valid if + // the second value `match` is true. If the second value `match` is + // false, then the intention doesn't match this client and any + // result should be ignored. + Authorize(*structs.Intention) (auth bool, match bool) + + // URI is the valid URI value used in the cert. URI() *url.URL } @@ -79,36 +89,3 @@ func ParseCertURI(input *url.URL) (CertURI, error) { return nil, fmt.Errorf("SPIFFE ID is not in the expected format") } - -// SpiffeIDService is the structure to represent the SPIFFE ID for a service. -type SpiffeIDService struct { - Host string - Namespace string - Datacenter string - Service string -} - -// URI returns the *url.URL for this SPIFFE ID. 
-func (id *SpiffeIDService) URI() *url.URL { - var result url.URL - result.Scheme = "spiffe" - result.Host = id.Host - result.Path = fmt.Sprintf("/ns/%s/dc/%s/svc/%s", - id.Namespace, id.Datacenter, id.Service) - return &result -} - -// SpiffeIDSigning is the structure to represent the SPIFFE ID for a -// signing certificate (not a leaf service). -type SpiffeIDSigning struct { - ClusterID string // Unique cluster ID - Domain string // The domain, usually "consul" -} - -// URI returns the *url.URL for this SPIFFE ID. -func (id *SpiffeIDSigning) URI() *url.URL { - var result url.URL - result.Scheme = "spiffe" - result.Host = fmt.Sprintf("%s.%s", id.ClusterID, id.Domain) - return &result -} diff --git a/agent/connect/uri_service.go b/agent/connect/uri_service.go new file mode 100644 index 000000000..3e53e8e36 --- /dev/null +++ b/agent/connect/uri_service.go @@ -0,0 +1,42 @@ +package connect + +import ( + "fmt" + "net/url" + + "github.com/hashicorp/consul/agent/structs" +) + +// SpiffeIDService is the structure to represent the SPIFFE ID for a service. +type SpiffeIDService struct { + Host string + Namespace string + Datacenter string + Service string +} + +// URI returns the *url.URL for this SPIFFE ID. +func (id *SpiffeIDService) URI() *url.URL { + var result url.URL + result.Scheme = "spiffe" + result.Host = id.Host + result.Path = fmt.Sprintf("/ns/%s/dc/%s/svc/%s", + id.Namespace, id.Datacenter, id.Service) + return &result +} + +// CertURI impl. 
+func (id *SpiffeIDService) Authorize(ixn *structs.Intention) (bool, bool) { + if ixn.SourceNS != structs.IntentionWildcard && ixn.SourceNS != id.Namespace { + // Non-matching namespace + return false, false + } + + if ixn.SourceName != structs.IntentionWildcard && ixn.SourceName != id.Service { + // Non-matching name + return false, false + } + + // Match, return allow value + return ixn.Action == structs.IntentionActionAllow, true +} diff --git a/agent/connect/uri_service_test.go b/agent/connect/uri_service_test.go new file mode 100644 index 000000000..ac21bca28 --- /dev/null +++ b/agent/connect/uri_service_test.go @@ -0,0 +1,104 @@ +package connect + +import ( + "testing" + + "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/assert" +) + +func TestSpiffeIDServiceAuthorize(t *testing.T) { + ns := structs.IntentionDefaultNamespace + serviceWeb := &SpiffeIDService{ + Host: "1234.consul", + Namespace: structs.IntentionDefaultNamespace, + Datacenter: "dc01", + Service: "web", + } + + cases := []struct { + Name string + URI *SpiffeIDService + Ixn *structs.Intention + Auth bool + Match bool + }{ + { + "exact source, not matching namespace", + serviceWeb, + &structs.Intention{ + SourceNS: "different", + SourceName: "db", + }, + false, + false, + }, + + { + "exact source, not matching name", + serviceWeb, + &structs.Intention{ + SourceNS: ns, + SourceName: "db", + }, + false, + false, + }, + + { + "exact source, allow", + serviceWeb, + &structs.Intention{ + SourceNS: serviceWeb.Namespace, + SourceName: serviceWeb.Service, + Action: structs.IntentionActionAllow, + }, + true, + true, + }, + + { + "exact source, deny", + serviceWeb, + &structs.Intention{ + SourceNS: serviceWeb.Namespace, + SourceName: serviceWeb.Service, + Action: structs.IntentionActionDeny, + }, + false, + true, + }, + + { + "exact namespace, wildcard service, deny", + serviceWeb, + &structs.Intention{ + SourceNS: serviceWeb.Namespace, + SourceName: structs.IntentionWildcard, + 
Action: structs.IntentionActionDeny, + }, + false, + true, + }, + + { + "exact namespace, wildcard service, allow", + serviceWeb, + &structs.Intention{ + SourceNS: serviceWeb.Namespace, + SourceName: structs.IntentionWildcard, + Action: structs.IntentionActionAllow, + }, + true, + true, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + auth, match := tc.URI.Authorize(tc.Ixn) + assert.Equal(t, tc.Auth, auth) + assert.Equal(t, tc.Match, match) + }) + } +} diff --git a/agent/connect/uri_signing.go b/agent/connect/uri_signing.go new file mode 100644 index 000000000..213f744d1 --- /dev/null +++ b/agent/connect/uri_signing.go @@ -0,0 +1,29 @@ +package connect + +import ( + "fmt" + "net/url" + + "github.com/hashicorp/consul/agent/structs" +) + +// SpiffeIDSigning is the structure to represent the SPIFFE ID for a +// signing certificate (not a leaf service). +type SpiffeIDSigning struct { + ClusterID string // Unique cluster ID + Domain string // The domain, usually "consul" +} + +// URI returns the *url.URL for this SPIFFE ID. +func (id *SpiffeIDSigning) URI() *url.URL { + var result url.URL + result.Scheme = "spiffe" + result.Host = fmt.Sprintf("%s.%s", id.ClusterID, id.Domain) + return &result +} + +// CertURI impl. +func (id *SpiffeIDSigning) Authorize(ixn *structs.Intention) (bool, bool) { + // Never authorize as a client. 
+ return false, true +} diff --git a/agent/connect/uri_signing_test.go b/agent/connect/uri_signing_test.go new file mode 100644 index 000000000..a9be3c5e2 --- /dev/null +++ b/agent/connect/uri_signing_test.go @@ -0,0 +1,15 @@ +package connect + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// Signing ID should never authorize +func TestSpiffeIDSigningAuthorize(t *testing.T) { + var id SpiffeIDSigning + auth, ok := id.Authorize(nil) + assert.False(t, auth) + assert.True(t, ok) +} From 5364a8cd90cb482f3cab58cef4fcadd381ea1e94 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 25 Mar 2018 14:52:26 -1000 Subject: [PATCH 111/627] agent: /v1/agent/connect/authorize is functional, with tests --- agent/agent_endpoint.go | 84 +++++++++++++++++- agent/agent_endpoint_test.go | 162 +++++++++++++++++++++++++++++++++++ agent/structs/connect.go | 17 ++++ 3 files changed, 261 insertions(+), 2 deletions(-) create mode 100644 agent/structs/connect.go diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 89bf16b62..cb4d06c59 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "net/http" + "net/url" "strconv" "strings" @@ -885,6 +886,85 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. // // POST /v1/agent/connect/authorize func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - // NOTE(mitchellh): return 200 for now. To be implemented later. 
- return nil, nil + // Decode the request from the request body + var authReq structs.ConnectAuthorizeRequest + if err := decodeBody(req, &authReq, nil); err != nil { + resp.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(resp, "Request decode failed: %v", err) + return nil, nil + } + + // We need to have a target to check intentions + if authReq.Target == "" { + resp.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(resp, "Target service must be specified") + return nil, nil + } + + // Parse the certificate URI from the client ID + uriRaw, err := url.Parse(authReq.ClientID) + if err != nil { + return &connectAuthorizeResp{ + Authorized: false, + Reason: fmt.Sprintf("Client ID must be a URI: %s", err), + }, nil + } + uri, err := connect.ParseCertURI(uriRaw) + if err != nil { + return &connectAuthorizeResp{ + Authorized: false, + Reason: fmt.Sprintf("Invalid client ID: %s", err), + }, nil + } + + uriService, ok := uri.(*connect.SpiffeIDService) + if !ok { + return &connectAuthorizeResp{ + Authorized: false, + Reason: fmt.Sprintf("Client ID must be a valid SPIFFE service URI"), + }, nil + } + + // Get the intentions for this target service + args := &structs.IntentionQueryRequest{ + Datacenter: s.agent.config.Datacenter, + Match: &structs.IntentionQueryMatch{ + Type: structs.IntentionMatchDestination, + Entries: []structs.IntentionMatchEntry{ + { + Namespace: structs.IntentionDefaultNamespace, + Name: authReq.Target, + }, + }, + }, + } + var reply structs.IndexedIntentionMatches + if err := s.agent.RPC("Intention.Match", args, &reply); err != nil { + return nil, err + } + if len(reply.Matches) != 1 { + return nil, fmt.Errorf("Internal error loading matches") + } + + // Test the authorization for each match + for _, ixn := range reply.Matches[0] { + if auth, ok := uriService.Authorize(ixn); ok { + return &connectAuthorizeResp{ + Authorized: auth, + Reason: fmt.Sprintf("Matched intention %s", ixn.ID), + }, nil + } + } + + // TODO(mitchellh): default behavior here for 
now is "deny" but we + // should consider how this is determined. + return &connectAuthorizeResp{ + Authorized: false, + Reason: "No matching intention, using default behavior", + }, nil +} + +type connectAuthorizeResp struct { + Authorized bool + Reason string } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 15267107a..cae7a4ccc 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2130,3 +2130,165 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { // TODO(mitchellh): verify the private key matches the cert } + +func TestAgentConnectAuthorize_badBody(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + args := []string{} + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) + assert.Equal(400, resp.Code) + assert.Contains(resp.Body.String(), "decode") +} + +func TestAgentConnectAuthorize_noTarget(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + args := &structs.ConnectAuthorizeRequest{} + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) + assert.Equal(400, resp.Code) + assert.Contains(resp.Body.String(), "Target service") +} + +// Client ID is not in the valid URI format +func TestAgentConnectAuthorize_idInvalidFormat(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + args := &structs.ConnectAuthorizeRequest{ + Target: "web", + ClientID: "tubes", + } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) 
+ assert.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + assert.False(obj.Authorized) + assert.Contains(obj.Reason, "Invalid client") +} + +// Client ID is a valid URI but its not a service URI +func TestAgentConnectAuthorize_idNotService(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + args := &structs.ConnectAuthorizeRequest{ + Target: "web", + ClientID: "spiffe://1234.consul", + } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) + assert.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + assert.False(obj.Authorized) + assert.Contains(obj.Reason, "must be a valid") +} + +// Test when there is an intention allowing the connection +func TestAgentConnectAuthorize_allow(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + target := "db" + + // Create some intentions + { + req := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + req.Intention.SourceNS = structs.IntentionDefaultNamespace + req.Intention.SourceName = "web" + req.Intention.DestinationNS = structs.IntentionDefaultNamespace + req.Intention.DestinationName = target + req.Intention.Action = structs.IntentionActionAllow + + var reply string + assert.Nil(a.RPC("Intention.Apply", &req, &reply)) + } + + args := &structs.ConnectAuthorizeRequest{ + Target: target, + ClientID: connect.TestSpiffeIDService(t, "web").URI().String(), + } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) + assert.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + assert.True(obj.Authorized) + 
assert.Contains(obj.Reason, "Matched") +} + +// Test when there is an intention denying the connection +func TestAgentConnectAuthorize_deny(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + target := "db" + + // Create some intentions + { + req := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + req.Intention.SourceNS = structs.IntentionDefaultNamespace + req.Intention.SourceName = "web" + req.Intention.DestinationNS = structs.IntentionDefaultNamespace + req.Intention.DestinationName = target + req.Intention.Action = structs.IntentionActionDeny + + var reply string + assert.Nil(a.RPC("Intention.Apply", &req, &reply)) + } + + args := &structs.ConnectAuthorizeRequest{ + Target: target, + ClientID: connect.TestSpiffeIDService(t, "web").URI().String(), + } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) + assert.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + assert.False(obj.Authorized) + assert.Contains(obj.Reason, "Matched") +} diff --git a/agent/structs/connect.go b/agent/structs/connect.go new file mode 100644 index 000000000..1a2e03da8 --- /dev/null +++ b/agent/structs/connect.go @@ -0,0 +1,17 @@ +package structs + +// ConnectAuthorizeRequest is the structure of a request to authorize +// a connection. +type ConnectAuthorizeRequest struct { + // Target is the name of the service that is being requested. + Target string + + // ClientID is a unique identifier for the requesting client. This + // is currently the URI SAN from the TLS client certificate. + // + // ClientCertSerial is a colon-hex-encoded of the serial number for + // the requesting client cert. This is used to check against revocation + // lists. 
+ ClientID string + ClientCertSerial string +} From c6269cda371c876163e0198e76bbafe48d38b1ae Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 25 Mar 2018 15:00:59 -1000 Subject: [PATCH 112/627] agent: default deny on connect authorize endpoint --- agent/agent_endpoint.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index cb4d06c59..0f9ccb852 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -956,11 +956,15 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R } } - // TODO(mitchellh): default behavior here for now is "deny" but we - // should consider how this is determined. + // If there was no matching intention, we always deny. Connect does + // support a blacklist (default allow) mode, but this works by appending + // */* => */* ALLOW intention to all Match requests. This means that + // the above should've matched. Therefore, if we reached here, something + // strange has happened and we should just deny the connection and err + // on the side of safety. return &connectAuthorizeResp{ Authorized: false, - Reason: "No matching intention, using default behavior", + Reason: "No matching intention, denying", }, nil } From 3f80808379a8ce4bba5e54e49d65d95bfbe73d58 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 25 Mar 2018 15:02:25 -1000 Subject: [PATCH 113/627] agent: bolster commenting for clearer understandability --- agent/agent_endpoint.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 0f9ccb852..7d6a19470 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -968,7 +968,9 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R }, nil } +// connectAuthorizeResp is the response format/structure for the +// /v1/agent/connect/authorize endpoint. 
type connectAuthorizeResp struct { - Authorized bool - Reason string + Authorized bool // True if authorized, false if not + Reason string // Reason for the Authorized value (whether true or false) } From 3e0e0a94a7f6903654ef98a7c1584be8bcf3e07a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 25 Mar 2018 15:06:10 -1000 Subject: [PATCH 114/627] agent/structs: String format for Intention, used for logging --- agent/agent_endpoint.go | 4 ++-- agent/structs/intention.go | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 7d6a19470..02682e592 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -921,7 +921,7 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R if !ok { return &connectAuthorizeResp{ Authorized: false, - Reason: fmt.Sprintf("Client ID must be a valid SPIFFE service URI"), + Reason: "Client ID must be a valid SPIFFE service URI", }, nil } @@ -951,7 +951,7 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R if auth, ok := uriService.Authorize(ixn); ok { return &connectAuthorizeResp{ Authorized: auth, - Reason: fmt.Sprintf("Matched intention %s", ixn.ID), + Reason: fmt.Sprintf("Matched intention: %s", ixn.String()), }, nil } } diff --git a/agent/structs/intention.go b/agent/structs/intention.go index fb83f85da..d801635c9 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -164,6 +164,15 @@ func (x *Intention) GetACLPrefix() (string, bool) { return x.DestinationName, x.DestinationName != "" } +// String returns a human-friendly string for this intention. +func (x *Intention) String() string { + return fmt.Sprintf("%s %s/%s => %s/%s (ID: %s", + strings.ToUpper(string(x.Action)), + x.SourceNS, x.SourceName, + x.DestinationNS, x.DestinationName, + x.ID) +} + // IntentionAction is the action that the intention represents. 
This // can be "allow" or "deny" to whitelist or blacklist intentions. type IntentionAction string From b3584b63555248d5c3817ccb72ccf655da47bb1d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 25 Mar 2018 15:50:05 -1000 Subject: [PATCH 115/627] agent: ACL checks for authorize, default behavior --- acl/acl.go | 15 ++++++ acl/acl_test.go | 19 ++++++++ agent/agent_endpoint.go | 44 ++++++++++++++---- agent/agent_endpoint_test.go | 90 ++++++++++++++++++++++++++++++++++++ 4 files changed, 159 insertions(+), 9 deletions(-) diff --git a/acl/acl.go b/acl/acl.go index 4ac88f01c..49dc569b9 100644 --- a/acl/acl.go +++ b/acl/acl.go @@ -60,6 +60,10 @@ type ACL interface { // EventWrite determines if a specific event may be fired. EventWrite(string) bool + // IntentionDefault determines the default authorized behavior + // when no intentions match a Connect request. + IntentionDefault() bool + // IntentionRead determines if a specific intention can be read. IntentionRead(string) bool @@ -161,6 +165,10 @@ func (s *StaticACL) EventWrite(string) bool { return s.defaultAllow } +func (s *StaticACL) IntentionDefault() bool { + return s.defaultAllow +} + func (s *StaticACL) IntentionRead(string) bool { return s.defaultAllow } @@ -493,6 +501,13 @@ func (p *PolicyACL) EventWrite(name string) bool { return p.parent.EventWrite(name) } +// IntentionDefault returns whether the default behavior when there are +// no matching intentions is to allow or deny. +func (p *PolicyACL) IntentionDefault() bool { + // We always go up, this can't be determined by a policy. + return p.parent.IntentionDefault() +} + // IntentionRead checks if writing (creating, updating, or deleting) of an // intention is allowed. 
func (p *PolicyACL) IntentionRead(prefix string) bool { diff --git a/acl/acl_test.go b/acl/acl_test.go index 85f35f606..263af0656 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -53,6 +53,9 @@ func TestStaticACL(t *testing.T) { if !all.EventWrite("foobar") { t.Fatalf("should allow") } + if !all.IntentionDefault() { + t.Fatalf("should allow") + } if !all.IntentionWrite("foobar") { t.Fatalf("should allow") } @@ -126,6 +129,9 @@ func TestStaticACL(t *testing.T) { if none.EventWrite("") { t.Fatalf("should not allow") } + if none.IntentionDefault() { + t.Fatalf("should not allow") + } if none.IntentionWrite("foo") { t.Fatalf("should not allow") } @@ -193,6 +199,9 @@ func TestStaticACL(t *testing.T) { if !manage.EventWrite("foobar") { t.Fatalf("should allow") } + if !manage.IntentionDefault() { + t.Fatalf("should allow") + } if !manage.IntentionWrite("foobar") { t.Fatalf("should allow") } @@ -454,6 +463,11 @@ func TestPolicyACL(t *testing.T) { t.Fatalf("Prepared query fail: %#v", c) } } + + // Check default intentions bubble up + if !acl.IntentionDefault() { + t.Fatal("should allow") + } } func TestPolicyACL_Parent(t *testing.T) { @@ -607,6 +621,11 @@ func TestPolicyACL_Parent(t *testing.T) { if acl.Snapshot() { t.Fatalf("should not allow") } + + // Check default intentions + if acl.IntentionDefault() { + t.Fatal("should not allow") + } } func TestPolicyACL_Agent(t *testing.T) { diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 02682e592..5a9218c37 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -886,6 +886,10 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. 
// // POST /v1/agent/connect/authorize func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Fetch the token + var token string + s.parseToken(req, &token) + // Decode the request from the request body var authReq structs.ConnectAuthorizeRequest if err := decodeBody(req, &authReq, nil); err != nil { @@ -925,7 +929,18 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R }, nil } - // Get the intentions for this target service + // We need to verify service:write permissions for the given token. + // We do this manually here since the RPC request below only verifies + // service:read. + rule, err := s.agent.resolveToken(token) + if err != nil { + return nil, err + } + if rule != nil && !rule.ServiceWrite(authReq.Target, nil) { + return nil, acl.ErrPermissionDenied + } + + // Get the intentions for this target service. args := &structs.IntentionQueryRequest{ Datacenter: s.agent.config.Datacenter, Match: &structs.IntentionQueryMatch{ @@ -938,6 +953,7 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R }, }, } + args.Token = token var reply structs.IndexedIntentionMatches if err := s.agent.RPC("Intention.Match", args, &reply); err != nil { return nil, err @@ -956,15 +972,25 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R } } - // If there was no matching intention, we always deny. Connect does - // support a blacklist (default allow) mode, but this works by appending - // */* => */* ALLOW intention to all Match requests. This means that - // the above should've matched. Therefore, if we reached here, something - // strange has happened and we should just deny the connection and err - // on the side of safety. + // No match, we need to determine the default behavior. We do this by + // specifying the anonymous token token, which will get that behavior. 
+ // The default behavior if ACLs are disabled is to allow connections + // to mimic the behavior of Consul itself: everything is allowed if + // ACLs are disabled. + rule, err = s.agent.resolveToken("") + if err != nil { + return nil, err + } + authz := true + reason := "ACLs disabled, access is allowed by default" + if rule != nil { + authz = rule.IntentionDefault() + reason = "Default behavior configured by ACLs" + } + return &connectAuthorizeResp{ - Authorized: false, - Reason: "No matching intention, denying", + Authorized: authz, + Reason: reason, }, nil } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index cae7a4ccc..bc59f3700 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2292,3 +2292,93 @@ func TestAgentConnectAuthorize_deny(t *testing.T) { assert.False(obj.Authorized) assert.Contains(obj.Reason, "Matched") } + +// Test that authorize fails without service:write for the target service. +func TestAgentConnectAuthorize_serviceWrite(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()) + defer a.Shutdown() + + // Create an ACL + var token string + { + args := map[string]interface{}{ + "Name": "User Token", + "Type": "client", + "Rules": `service "foo" { policy = "read" }`, + } + req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args)) + resp := httptest.NewRecorder() + obj, err := a.srv.ACLCreate(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + aclResp := obj.(aclCreateResponse) + token = aclResp.ID + } + + args := &structs.ConnectAuthorizeRequest{ + Target: "foo", + ClientID: connect.TestSpiffeIDService(t, "web").URI().String(), + } + req, _ := http.NewRequest("POST", + "/v1/agent/connect/authorize?token="+token, jsonReader(args)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentConnectAuthorize(resp, req) + assert.True(acl.IsErrPermissionDenied(err)) +} + +// Test when no intentions match w/ a default 
deny policy +func TestAgentConnectAuthorize_defaultDeny(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()) + defer a.Shutdown() + + args := &structs.ConnectAuthorizeRequest{ + Target: "foo", + ClientID: connect.TestSpiffeIDService(t, "web").URI().String(), + } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args)) + resp := httptest.NewRecorder() + respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) + assert.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + assert.False(obj.Authorized) + assert.Contains(obj.Reason, "Default behavior") +} + +// Test when no intentions match w/ a default allow policy +func TestAgentConnectAuthorize_defaultAllow(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), ` + acl_datacenter = "dc1" + acl_default_policy = "allow" + acl_master_token = "root" + acl_agent_token = "root" + acl_agent_master_token = "towel" + acl_enforce_version_8 = true + `) + defer a.Shutdown() + + args := &structs.ConnectAuthorizeRequest{ + Target: "foo", + ClientID: connect.TestSpiffeIDService(t, "web").URI().String(), + } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args)) + resp := httptest.NewRecorder() + respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) + assert.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + assert.True(obj.Authorized) + assert.Contains(obj.Reason, "Default behavior") +} From f983978fb821fec7120814d30e85f8d6457f7fab Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 27 Mar 2018 10:08:20 -0700 Subject: [PATCH 116/627] acl: IntentionDefault => IntentionDefaultAllow --- acl/acl.go | 12 ++++++------ acl/acl_test.go | 10 +++++----- agent/agent_endpoint.go | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/acl/acl.go b/acl/acl.go index 49dc569b9..a8ad0de96 100644 
--- a/acl/acl.go +++ b/acl/acl.go @@ -60,9 +60,9 @@ type ACL interface { // EventWrite determines if a specific event may be fired. EventWrite(string) bool - // IntentionDefault determines the default authorized behavior + // IntentionDefaultAllow determines the default authorized behavior // when no intentions match a Connect request. - IntentionDefault() bool + IntentionDefaultAllow() bool // IntentionRead determines if a specific intention can be read. IntentionRead(string) bool @@ -165,7 +165,7 @@ func (s *StaticACL) EventWrite(string) bool { return s.defaultAllow } -func (s *StaticACL) IntentionDefault() bool { +func (s *StaticACL) IntentionDefaultAllow() bool { return s.defaultAllow } @@ -501,11 +501,11 @@ func (p *PolicyACL) EventWrite(name string) bool { return p.parent.EventWrite(name) } -// IntentionDefault returns whether the default behavior when there are +// IntentionDefaultAllow returns whether the default behavior when there are // no matching intentions is to allow or deny. -func (p *PolicyACL) IntentionDefault() bool { +func (p *PolicyACL) IntentionDefaultAllow() bool { // We always go up, this can't be determined by a policy. 
- return p.parent.IntentionDefault() + return p.parent.IntentionDefaultAllow() } // IntentionRead checks if writing (creating, updating, or deleting) of an diff --git a/acl/acl_test.go b/acl/acl_test.go index 263af0656..faf6f092f 100644 --- a/acl/acl_test.go +++ b/acl/acl_test.go @@ -53,7 +53,7 @@ func TestStaticACL(t *testing.T) { if !all.EventWrite("foobar") { t.Fatalf("should allow") } - if !all.IntentionDefault() { + if !all.IntentionDefaultAllow() { t.Fatalf("should allow") } if !all.IntentionWrite("foobar") { @@ -129,7 +129,7 @@ func TestStaticACL(t *testing.T) { if none.EventWrite("") { t.Fatalf("should not allow") } - if none.IntentionDefault() { + if none.IntentionDefaultAllow() { t.Fatalf("should not allow") } if none.IntentionWrite("foo") { @@ -199,7 +199,7 @@ func TestStaticACL(t *testing.T) { if !manage.EventWrite("foobar") { t.Fatalf("should allow") } - if !manage.IntentionDefault() { + if !manage.IntentionDefaultAllow() { t.Fatalf("should allow") } if !manage.IntentionWrite("foobar") { @@ -465,7 +465,7 @@ func TestPolicyACL(t *testing.T) { } // Check default intentions bubble up - if !acl.IntentionDefault() { + if !acl.IntentionDefaultAllow() { t.Fatal("should allow") } } @@ -623,7 +623,7 @@ func TestPolicyACL_Parent(t *testing.T) { } // Check default intentions - if acl.IntentionDefault() { + if acl.IntentionDefaultAllow() { t.Fatal("should not allow") } } diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 5a9218c37..20cb047b2 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -984,7 +984,7 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R authz := true reason := "ACLs disabled, access is allowed by default" if rule != nil { - authz = rule.IntentionDefault() + authz = rule.IntentionDefaultAllow() reason = "Default behavior configured by ACLs" } From 94e7a0a3c106b1ee1b90a0f45fee582096788d42 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 27 Mar 2018 10:09:13 -0700 
Subject: [PATCH 117/627] agent: add TODO for verification --- agent/agent_endpoint.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 20cb047b2..a6b67816d 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -940,6 +940,9 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R return nil, acl.ErrPermissionDenied } + // TODO(mitchellh): we need to verify more things here, such as the + // trust domain, blacklist lookup of the serial, etc. + // Get the intentions for this target service. args := &structs.IntentionQueryRequest{ Datacenter: s.agent.config.Datacenter, From b5b301aa2a698dec4280a14820e01c66ebcbbed3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 27 Mar 2018 16:50:17 -0700 Subject: [PATCH 118/627] api: endpoints for working with CA roots, agent authorize, etc. --- api/agent.go | 82 +++++++++++++++++++++++++++++++++++++++++++++ api/agent_test.go | 15 +++++++++ api/connect.go | 65 +++++++++++++++++++++++++++++++++++ api/connect_test.go | 26 ++++++++++++++ 4 files changed, 188 insertions(+) create mode 100644 api/connect.go create mode 100644 api/connect_test.go diff --git a/api/agent.go b/api/agent.go index 359206c54..860483671 100644 --- a/api/agent.go +++ b/api/agent.go @@ -172,6 +172,19 @@ type SampledValue struct { Labels map[string]string } +// AgentAuthorizeParams are the request parameters for authorizing a request. +type AgentAuthorizeParams struct { + Target string + ClientID string + ClientCertSerial string +} + +// AgentAuthorize is the response structure for Connect authorization. +type AgentAuthorize struct { + Authorized bool + Reason string +} + // Agent can be used to query the Agent endpoints type Agent struct { c *Client @@ -505,6 +518,75 @@ func (a *Agent) ForceLeave(node string) error { return nil } +// ConnectAuthorize is used to authorize an incoming connection +// to a natively integrated Connect service. 
+// +// TODO(mitchellh): we need to test this better once we have a way to +// configure CAs from the API package (when the CA work is done). +func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) { + r := a.c.newRequest("POST", "/v1/agent/connect/authorize") + r.obj = auth + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + var out AgentAuthorize + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return &out, nil +} + +// ConnectCARoots returns the list of roots. +// +// TODO(mitchellh): we need to test this better once we have a way to +// configure CAs from the API package (when the CA work is done). +func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CARootList + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// ConnectCALeaf gets the leaf certificate for the given service ID. +// +// TODO(mitchellh): we need to test this better once we have a way to +// configure CAs from the API package (when the CA work is done). 
+func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*IssuedCert, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out IssuedCert + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + // EnableServiceMaintenance toggles service maintenance mode on // for the given service ID. func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { diff --git a/api/agent_test.go b/api/agent_test.go index d45a9a131..653512be9 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/consul/testutil" "github.com/hashicorp/consul/testutil/retry" "github.com/hashicorp/serf/serf" + "github.com/stretchr/testify/require" ) func TestAPI_AgentSelf(t *testing.T) { @@ -981,3 +982,17 @@ func TestAPI_AgentUpdateToken(t *testing.T) { t.Fatalf("err: %v", err) } } + +func TestAPI_AgentConnectCARoots_empty(t *testing.T) { + t.Parallel() + + require := require.New(t) + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + list, meta, err := agent.ConnectCARoots(nil) + require.Nil(err) + require.Equal(uint64(0), meta.LastIndex) + require.Len(list.Roots, 0) +} diff --git a/api/connect.go b/api/connect.go new file mode 100644 index 000000000..0f75a45fa --- /dev/null +++ b/api/connect.go @@ -0,0 +1,65 @@ +package api + +import ( + "time" +) + +// CARootList is the structure for the results of listing roots. +type CARootList struct { + ActiveRootID string + Roots []*CARoot +} + +// CARoot is a single CA within Connect. 
+type CARoot struct { + ID string + Name string + RootCert string + Active bool + CreateIndex uint64 + ModifyIndex uint64 +} + +type IssuedCert struct { + SerialNumber string + CertPEM string + PrivateKeyPEM string + Service string + ServiceURI string + ValidAfter time.Time + ValidBefore time.Time + CreateIndex uint64 + ModifyIndex uint64 +} + +// Connect can be used to work with endpoints related to Connect, the +// feature for securely connecting services within Consul. +type Connect struct { + c *Client +} + +// Health returns a handle to the health endpoints +func (c *Client) Connect() *Connect { + return &Connect{c} +} + +// CARoots queries the list of available roots. +func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/ca/roots") + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CARootList + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} diff --git a/api/connect_test.go b/api/connect_test.go new file mode 100644 index 000000000..3ad7cb078 --- /dev/null +++ b/api/connect_test.go @@ -0,0 +1,26 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// NOTE(mitchellh): we don't have a way to test CA roots yet since there +// is no API public way to configure the root certs. This wll be resolved +// in the future and we can write tests then. This is tested in agent and +// agent/consul which do have internal access to manually create roots. 
+ +func TestAPI_ConnectCARoots_empty(t *testing.T) { + t.Parallel() + + require := require.New(t) + c, s := makeClient(t) + defer s.Stop() + + connect := c.Connect() + list, meta, err := connect.CARoots(nil) + require.Nil(err) + require.Equal(uint64(0), meta.LastIndex) + require.Len(list.Roots, 0) +} From 9c33068394bba45890cd440538154759cfb590e5 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 27 Mar 2018 21:33:05 -0700 Subject: [PATCH 119/627] api: starting intention endpoints, reorganize files slightly --- api/connect.go | 53 --------- api/connect_ca.go | 80 ++++++++++++++ api/{connect_test.go => connect_ca_test.go} | 0 api/connect_intention.go | 112 ++++++++++++++++++++ api/connect_intention_test.go | 48 +++++++++ 5 files changed, 240 insertions(+), 53 deletions(-) create mode 100644 api/connect_ca.go rename api/{connect_test.go => connect_ca_test.go} (100%) create mode 100644 api/connect_intention.go create mode 100644 api/connect_intention_test.go diff --git a/api/connect.go b/api/connect.go index 0f75a45fa..4b4e06900 100644 --- a/api/connect.go +++ b/api/connect.go @@ -1,37 +1,5 @@ package api -import ( - "time" -) - -// CARootList is the structure for the results of listing roots. -type CARootList struct { - ActiveRootID string - Roots []*CARoot -} - -// CARoot is a single CA within Connect. -type CARoot struct { - ID string - Name string - RootCert string - Active bool - CreateIndex uint64 - ModifyIndex uint64 -} - -type IssuedCert struct { - SerialNumber string - CertPEM string - PrivateKeyPEM string - Service string - ServiceURI string - ValidAfter time.Time - ValidBefore time.Time - CreateIndex uint64 - ModifyIndex uint64 -} - // Connect can be used to work with endpoints related to Connect, the // feature for securely connecting services within Consul. type Connect struct { @@ -42,24 +10,3 @@ type Connect struct { func (c *Client) Connect() *Connect { return &Connect{c} } - -// CARoots queries the list of available roots. 
-func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/connect/ca/roots") - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out CARootList - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return &out, qm, nil -} diff --git a/api/connect_ca.go b/api/connect_ca.go new file mode 100644 index 000000000..19046c2ab --- /dev/null +++ b/api/connect_ca.go @@ -0,0 +1,80 @@ +package api + +import ( + "time" +) + +// CARootList is the structure for the results of listing roots. +type CARootList struct { + ActiveRootID string + Roots []*CARoot +} + +// CARoot represents a root CA certificate that is trusted. +type CARoot struct { + // ID is a globally unique ID (UUID) representing this CA root. + ID string + + // Name is a human-friendly name for this CA root. This value is + // opaque to Consul and is not used for anything internally. + Name string + + // RootCert is the PEM-encoded public certificate. + RootCert string + + // Active is true if this is the current active CA. This must only + // be true for exactly one CA. For any method that modifies roots in the + // state store, tests should be written to verify that multiple roots + // cannot be active. + Active bool + + CreateIndex uint64 + ModifyIndex uint64 +} + +// IssuedCert is a certificate that has been issued by a Connect CA. +type IssuedCert struct { + // SerialNumber is the unique serial number for this certificate. + // This is encoded in standard hex separated by :. + SerialNumber string + + // CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private + // key for that cert, respectively. This should not be stored in the + // state store, but is present in the sign API response. 
+ CertPEM string `json:",omitempty"` + PrivateKeyPEM string `json:",omitempty"` + + // Service is the name of the service for which the cert was issued. + // ServiceURI is the cert URI value. + Service string + ServiceURI string + + // ValidAfter and ValidBefore are the validity periods for the + // certificate. + ValidAfter time.Time + ValidBefore time.Time + + CreateIndex uint64 + ModifyIndex uint64 +} + +// CARoots queries the list of available roots. +func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/ca/roots") + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CARootList + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} diff --git a/api/connect_test.go b/api/connect_ca_test.go similarity index 100% rename from api/connect_test.go rename to api/connect_ca_test.go diff --git a/api/connect_intention.go b/api/connect_intention.go new file mode 100644 index 000000000..b138dd4ae --- /dev/null +++ b/api/connect_intention.go @@ -0,0 +1,112 @@ +package api + +import ( + "time" +) + +// Intention defines an intention for the Connect Service Graph. This defines +// the allowed or denied behavior of a connection between two services using +// Connect. +type Intention struct { + // ID is the UUID-based ID for the intention, always generated by Consul. + ID string + + // Description is a human-friendly description of this intention. + // It is opaque to Consul and is only stored and transferred in API + // requests. + Description string + + // SourceNS, SourceName are the namespace and name, respectively, of + // the source service. Either of these may be the wildcard "*", but only + // the full value can be a wildcard. Partial wildcards are not allowed. 
+ // The source may also be a non-Consul service, as specified by SourceType. + // + // DestinationNS, DestinationName is the same, but for the destination + // service. The same rules apply. The destination is always a Consul + // service. + SourceNS, SourceName string + DestinationNS, DestinationName string + + // SourceType is the type of the value for the source. + SourceType IntentionSourceType + + // Action is whether this is a whitelist or blacklist intention. + Action IntentionAction + + // DefaultAddr, DefaultPort of the local listening proxy (if any) to + // make this connection. + DefaultAddr string + DefaultPort int + + // Meta is arbitrary metadata associated with the intention. This is + // opaque to Consul but is served in API responses. + Meta map[string]string + + // CreatedAt and UpdatedAt keep track of when this record was created + // or modified. + CreatedAt, UpdatedAt time.Time + + CreateIndex uint64 + ModifyIndex uint64 +} + +// IntentionAction is the action that the intention represents. This +// can be "allow" or "deny" to whitelist or blacklist intentions. +type IntentionAction string + +const ( + IntentionActionAllow IntentionAction = "allow" + IntentionActionDeny IntentionAction = "deny" +) + +// IntentionSourceType is the type of the source within an intention. +type IntentionSourceType string + +const ( + // IntentionSourceConsul is a service within the Consul catalog. + IntentionSourceConsul IntentionSourceType = "consul" +) + +// Intentions returns the list of intentions. 
+func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions") + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// IntentionCreate will create a new intention. The ID in the given +// structure must be empty and a generate ID will be returned on +// success. +func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) { + r := c.c.newRequest("POST", "/v1/connect/intentions") + r.setWriteOptions(q) + r.obj = ixn + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} diff --git a/api/connect_intention_test.go b/api/connect_intention_test.go new file mode 100644 index 000000000..2fc742602 --- /dev/null +++ b/api/connect_intention_test.go @@ -0,0 +1,48 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAPI_ConnectIntentionCreate(t *testing.T) { + t.Parallel() + + require := require.New(t) + c, s := makeClient(t) + defer s.Stop() + + connect := c.Connect() + + // Create + ixn := testIntention() + id, _, err := connect.IntentionCreate(ixn, nil) + require.Nil(err) + require.NotEmpty(id) + + // List it + list, _, err := connect.Intentions(nil) + require.Nil(err) + require.Len(list, 1) + + actual := list[0] + ixn.ID = id + ixn.CreatedAt = actual.CreatedAt + ixn.UpdatedAt = actual.UpdatedAt + ixn.CreateIndex = actual.CreateIndex + ixn.ModifyIndex = actual.ModifyIndex + require.Equal(ixn, 
actual) +} + +func testIntention() *Intention { + return &Intention{ + SourceNS: "eng", + SourceName: "api", + DestinationNS: "eng", + DestinationName: "db", + Action: IntentionActionAllow, + SourceType: IntentionSourceConsul, + Meta: map[string]string{}, + } +} From c0894f0f50a961ec0a0395ab0bb11d7c9dd4aee3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Mar 2018 10:14:32 -0700 Subject: [PATCH 120/627] api: IntentionMatch --- api/connect_intention.go | 68 +++++++++++++++++++++++++++++++++++ api/connect_intention_test.go | 54 +++++++++++++++++++++++++++- 2 files changed, 121 insertions(+), 1 deletion(-) diff --git a/api/connect_intention.go b/api/connect_intention.go index b138dd4ae..aa2f82d3d 100644 --- a/api/connect_intention.go +++ b/api/connect_intention.go @@ -67,6 +67,22 @@ const ( IntentionSourceConsul IntentionSourceType = "consul" ) +// IntentionMatch are the arguments for the intention match API. +type IntentionMatch struct { + By IntentionMatchType + Names []string +} + +// IntentionMatchType is the target for a match request. For example, +// matching by source will look for all intentions that match the given +// source value. +type IntentionMatchType string + +const ( + IntentionMatchSource IntentionMatchType = "source" + IntentionMatchDestination IntentionMatchType = "destination" +) + // Intentions returns the list of intentions. func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) { r := h.c.newRequest("GET", "/v1/connect/intentions") @@ -88,6 +104,58 @@ func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) return out, qm, nil } +// IntentionGet retrieves a single intention. 
+func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// IntentionMatch returns the list of intentions that match a given source +// or destination. The returned intentions are ordered by precedence where +// result[0] is the highest precedence (if that matches, then that rule overrides +// all other rules). +// +// Matching can be done for multiple names at the same time. The resulting +// map is keyed by the given names. Casing is preserved. +func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/match") + r.setQueryOptions(q) + r.params.Set("by", string(args.By)) + for _, name := range args.Names { + r.params.Add("name", name) + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]*Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + // IntentionCreate will create a new intention. The ID in the given // structure must be empty and a generate ID will be returned on // success. 
diff --git a/api/connect_intention_test.go b/api/connect_intention_test.go index 2fc742602..0edcf4c49 100644 --- a/api/connect_intention_test.go +++ b/api/connect_intention_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAPI_ConnectIntentionCreate(t *testing.T) { +func TestAPI_ConnectIntentionCreateListGet(t *testing.T) { t.Parallel() require := require.New(t) @@ -33,6 +33,58 @@ func TestAPI_ConnectIntentionCreate(t *testing.T) { ixn.CreateIndex = actual.CreateIndex ixn.ModifyIndex = actual.ModifyIndex require.Equal(ixn, actual) + + // Get it + actual, _, err = connect.IntentionGet(id, nil) + require.Nil(err) + require.Equal(ixn, actual) +} + +func TestAPI_ConnectIntentionMatch(t *testing.T) { + t.Parallel() + + require := require.New(t) + c, s := makeClient(t) + defer s.Stop() + + connect := c.Connect() + + // Create + { + insert := [][]string{ + {"foo", "*"}, + {"foo", "bar"}, + {"foo", "baz"}, // shouldn't match + {"bar", "bar"}, // shouldn't match + {"bar", "*"}, // shouldn't match + {"*", "*"}, + } + + for _, v := range insert { + ixn := testIntention() + ixn.DestinationNS = v[0] + ixn.DestinationName = v[1] + id, _, err := connect.IntentionCreate(ixn, nil) + require.Nil(err) + require.NotEmpty(id) + } + } + + // Match it + result, _, err := connect.IntentionMatch(&IntentionMatch{ + By: IntentionMatchDestination, + Names: []string{"foo/bar"}, + }, nil) + require.Nil(err) + require.Len(result, 1) + + var actual [][]string + expected := [][]string{{"foo", "bar"}, {"foo", "*"}, {"*", "*"}} + for _, ixn := range result["foo/bar"] { + actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) + } + + require.Equal(expected, actual) } func testIntention() *Intention { From 9de861d722e4b04feddbe51d10cafd289561a3e5 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Mar 2018 14:16:41 -0700 Subject: [PATCH 121/627] api: fix up some comments and rename IssuedCert to LeafCert --- api/agent.go | 4 ++-- 
api/connect.go | 2 +- api/connect_ca.go | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/api/agent.go b/api/agent.go index 860483671..7a810bcab 100644 --- a/api/agent.go +++ b/api/agent.go @@ -567,7 +567,7 @@ func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) // // TODO(mitchellh): we need to test this better once we have a way to // configure CAs from the API package (when the CA work is done). -func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*IssuedCert, *QueryMeta, error) { +func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) { r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID) r.setQueryOptions(q) rtt, resp, err := requireOK(a.c.doRequest(r)) @@ -580,7 +580,7 @@ func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*IssuedCert, * parseQueryMeta(resp, qm) qm.RequestTime = rtt - var out IssuedCert + var out LeafCert if err := decodeBody(resp, &out); err != nil { return nil, nil, err } diff --git a/api/connect.go b/api/connect.go index 4b4e06900..a40d1e232 100644 --- a/api/connect.go +++ b/api/connect.go @@ -6,7 +6,7 @@ type Connect struct { c *Client } -// Health returns a handle to the health endpoints +// Connect returns a handle to the connect-related endpoints func (c *Client) Connect() *Connect { return &Connect{c} } diff --git a/api/connect_ca.go b/api/connect_ca.go index 19046c2ab..00951c75d 100644 --- a/api/connect_ca.go +++ b/api/connect_ca.go @@ -19,8 +19,8 @@ type CARoot struct { // opaque to Consul and is not used for anything internally. Name string - // RootCert is the PEM-encoded public certificate. - RootCert string + // RootCertPEM is the PEM-encoded public certificate. + RootCertPEM string `json:"RootCert"` // Active is true if this is the current active CA. This must only // be true for exactly one CA. 
For any method that modifies roots in the @@ -32,8 +32,8 @@ type CARoot struct { ModifyIndex uint64 } -// IssuedCert is a certificate that has been issued by a Connect CA. -type IssuedCert struct { +// LeafCert is a certificate that has been issued by a Connect CA. +type LeafCert struct { // SerialNumber is the unique serial number for this certificate. // This is encoded in standard hex separated by :. SerialNumber string From 26f254fac0fbb1e22fb69b1fde710573498ed19a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Mar 2018 14:20:25 -0700 Subject: [PATCH 122/627] api: rename Authorize field to ClientCertURI --- api/agent.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/agent.go b/api/agent.go index 7a810bcab..50d334d71 100644 --- a/api/agent.go +++ b/api/agent.go @@ -175,7 +175,7 @@ type SampledValue struct { // AgentAuthorizeParams are the request parameters for authorizing a request. type AgentAuthorizeParams struct { Target string - ClientID string + ClientCertURI string ClientCertSerial string } From 62b746c380f7f94158610dc72b0baac52439d1f2 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 28 Mar 2018 14:29:35 -0700 Subject: [PATCH 123/627] agent: rename authorize param ClientID to ClientCertURI --- agent/agent_endpoint.go | 2 +- agent/agent_endpoint_test.go | 28 ++++++++++++++-------------- agent/structs/connect.go | 4 ++-- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index a6b67816d..722909467 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -906,7 +906,7 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R } // Parse the certificate URI from the client ID - uriRaw, err := url.Parse(authReq.ClientID) + uriRaw, err := url.Parse(authReq.ClientCertURI) if err != nil { return &connectAuthorizeResp{ Authorized: false, diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 
bc59f3700..1b017fa78 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2172,8 +2172,8 @@ func TestAgentConnectAuthorize_idInvalidFormat(t *testing.T) { defer a.Shutdown() args := &structs.ConnectAuthorizeRequest{ - Target: "web", - ClientID: "tubes", + Target: "web", + ClientCertURI: "tubes", } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -2195,8 +2195,8 @@ func TestAgentConnectAuthorize_idNotService(t *testing.T) { defer a.Shutdown() args := &structs.ConnectAuthorizeRequest{ - Target: "web", - ClientID: "spiffe://1234.consul", + Target: "web", + ClientCertURI: "spiffe://1234.consul", } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -2237,8 +2237,8 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { } args := &structs.ConnectAuthorizeRequest{ - Target: target, - ClientID: connect.TestSpiffeIDService(t, "web").URI().String(), + Target: target, + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -2279,8 +2279,8 @@ func TestAgentConnectAuthorize_deny(t *testing.T) { } args := &structs.ConnectAuthorizeRequest{ - Target: target, - ClientID: connect.TestSpiffeIDService(t, "web").URI().String(), + Target: target, + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -2320,8 +2320,8 @@ func TestAgentConnectAuthorize_serviceWrite(t *testing.T) { } args := &structs.ConnectAuthorizeRequest{ - Target: "foo", - ClientID: connect.TestSpiffeIDService(t, "web").URI().String(), + Target: "foo", + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), } req, _ := http.NewRequest("POST", 
"/v1/agent/connect/authorize?token="+token, jsonReader(args)) @@ -2339,8 +2339,8 @@ func TestAgentConnectAuthorize_defaultDeny(t *testing.T) { defer a.Shutdown() args := &structs.ConnectAuthorizeRequest{ - Target: "foo", - ClientID: connect.TestSpiffeIDService(t, "web").URI().String(), + Target: "foo", + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args)) resp := httptest.NewRecorder() @@ -2369,8 +2369,8 @@ func TestAgentConnectAuthorize_defaultAllow(t *testing.T) { defer a.Shutdown() args := &structs.ConnectAuthorizeRequest{ - Target: "foo", - ClientID: connect.TestSpiffeIDService(t, "web").URI().String(), + Target: "foo", + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize?token=root", jsonReader(args)) resp := httptest.NewRecorder() diff --git a/agent/structs/connect.go b/agent/structs/connect.go index 1a2e03da8..7f08615d3 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -6,12 +6,12 @@ type ConnectAuthorizeRequest struct { // Target is the name of the service that is being requested. Target string - // ClientID is a unique identifier for the requesting client. This + // ClientCertURI is a unique identifier for the requesting client. This // is currently the URI SAN from the TLS client certificate. // // ClientCertSerial is a colon-hex-encoded of the serial number for // the requesting client cert. This is used to check against revocation // lists. - ClientID string + ClientCertURI string ClientCertSerial string } From 800deb693c542af3413c0bba353a2f92b9b7b0f4 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 21 Mar 2018 22:35:00 +0000 Subject: [PATCH 124/627] Original proxy and connect.Client implementation. Working end to end. 
--- command/commands_oss.go | 4 + command/connect/connect.go | 40 +++ command/connect/connect_test.go | 13 + command/connect/proxy/proxy.go | 166 +++++++++++ command/connect/proxy/proxy_test.go | 1 + connect/auth.go | 43 +++ connect/client.go | 256 +++++++++++++++++ connect/client_test.go | 148 ++++++++++ .../testdata/ca1-ca-consul-internal.cert.pem | 14 + .../testdata/ca1-ca-consul-internal.key.pem | 5 + connect/testdata/ca1-svc-cache.cert.pem | 14 + connect/testdata/ca1-svc-cache.key.pem | 5 + connect/testdata/ca1-svc-db.cert.pem | 13 + connect/testdata/ca1-svc-db.key.pem | 5 + connect/testdata/ca1-svc-web.cert.pem | 13 + connect/testdata/ca1-svc-web.key.pem | 5 + connect/testdata/ca2-ca-vault.cert.pem | 14 + connect/testdata/ca2-ca-vault.key.pem | 5 + connect/testdata/ca2-svc-cache.cert.pem | 13 + connect/testdata/ca2-svc-cache.key.pem | 5 + connect/testdata/ca2-svc-db.cert.pem | 13 + connect/testdata/ca2-svc-db.key.pem | 5 + connect/testdata/ca2-svc-web.cert.pem | 13 + connect/testdata/ca2-svc-web.key.pem | 5 + connect/testdata/ca2-xc-by-ca1.cert.pem | 14 + connect/testdata/mkcerts.go | 243 ++++++++++++++++ connect/testing.go | 88 ++++++ connect/tls.go | 124 +++++++++ connect/tls_test.go | 45 +++ proxy/config.go | 111 ++++++++ proxy/config_test.go | 46 +++ proxy/conn.go | 48 ++++ proxy/conn_test.go | 119 ++++++++ proxy/manager.go | 140 ++++++++++ proxy/manager_test.go | 76 +++++ proxy/proxier.go | 32 +++ proxy/proxy.go | 112 ++++++++ proxy/public_listener.go | 119 ++++++++ proxy/public_listener_test.go | 38 +++ proxy/runner.go | 118 ++++++++ proxy/testdata/config-kitchensink.hcl | 36 +++ proxy/testing.go | 170 ++++++++++++ proxy/upstream.go | 261 ++++++++++++++++++ proxy/upstream_test.go | 75 +++++ 44 files changed, 2833 insertions(+) create mode 100644 command/connect/connect.go create mode 100644 command/connect/connect_test.go create mode 100644 command/connect/proxy/proxy.go create mode 100644 command/connect/proxy/proxy_test.go create mode 100644 
connect/auth.go create mode 100644 connect/client.go create mode 100644 connect/client_test.go create mode 100644 connect/testdata/ca1-ca-consul-internal.cert.pem create mode 100644 connect/testdata/ca1-ca-consul-internal.key.pem create mode 100644 connect/testdata/ca1-svc-cache.cert.pem create mode 100644 connect/testdata/ca1-svc-cache.key.pem create mode 100644 connect/testdata/ca1-svc-db.cert.pem create mode 100644 connect/testdata/ca1-svc-db.key.pem create mode 100644 connect/testdata/ca1-svc-web.cert.pem create mode 100644 connect/testdata/ca1-svc-web.key.pem create mode 100644 connect/testdata/ca2-ca-vault.cert.pem create mode 100644 connect/testdata/ca2-ca-vault.key.pem create mode 100644 connect/testdata/ca2-svc-cache.cert.pem create mode 100644 connect/testdata/ca2-svc-cache.key.pem create mode 100644 connect/testdata/ca2-svc-db.cert.pem create mode 100644 connect/testdata/ca2-svc-db.key.pem create mode 100644 connect/testdata/ca2-svc-web.cert.pem create mode 100644 connect/testdata/ca2-svc-web.key.pem create mode 100644 connect/testdata/ca2-xc-by-ca1.cert.pem create mode 100644 connect/testdata/mkcerts.go create mode 100644 connect/testing.go create mode 100644 connect/tls.go create mode 100644 connect/tls_test.go create mode 100644 proxy/config.go create mode 100644 proxy/config_test.go create mode 100644 proxy/conn.go create mode 100644 proxy/conn_test.go create mode 100644 proxy/manager.go create mode 100644 proxy/manager_test.go create mode 100644 proxy/proxier.go create mode 100644 proxy/proxy.go create mode 100644 proxy/public_listener.go create mode 100644 proxy/public_listener_test.go create mode 100644 proxy/runner.go create mode 100644 proxy/testdata/config-kitchensink.hcl create mode 100644 proxy/testing.go create mode 100644 proxy/upstream.go create mode 100644 proxy/upstream_test.go diff --git a/command/commands_oss.go b/command/commands_oss.go index 43fbeb29c..c1e3e794a 100644 --- a/command/commands_oss.go +++ b/command/commands_oss.go @@ 
-6,6 +6,8 @@ import ( catlistdc "github.com/hashicorp/consul/command/catalog/list/dc" catlistnodes "github.com/hashicorp/consul/command/catalog/list/nodes" catlistsvc "github.com/hashicorp/consul/command/catalog/list/services" + "github.com/hashicorp/consul/command/connect" + "github.com/hashicorp/consul/command/connect/proxy" "github.com/hashicorp/consul/command/event" "github.com/hashicorp/consul/command/exec" "github.com/hashicorp/consul/command/forceleave" @@ -58,6 +60,8 @@ func init() { Register("catalog datacenters", func(ui cli.Ui) (cli.Command, error) { return catlistdc.New(ui), nil }) Register("catalog nodes", func(ui cli.Ui) (cli.Command, error) { return catlistnodes.New(ui), nil }) Register("catalog services", func(ui cli.Ui) (cli.Command, error) { return catlistsvc.New(ui), nil }) + Register("connect", func(ui cli.Ui) (cli.Command, error) { return connect.New(), nil }) + Register("connect proxy", func(ui cli.Ui) (cli.Command, error) { return proxy.New(ui, MakeShutdownCh()), nil }) Register("event", func(ui cli.Ui) (cli.Command, error) { return event.New(ui), nil }) Register("exec", func(ui cli.Ui) (cli.Command, error) { return exec.New(ui, MakeShutdownCh()), nil }) Register("force-leave", func(ui cli.Ui) (cli.Command, error) { return forceleave.New(ui), nil }) diff --git a/command/connect/connect.go b/command/connect/connect.go new file mode 100644 index 000000000..60c238876 --- /dev/null +++ b/command/connect/connect.go @@ -0,0 +1,40 @@ +package connect + +import ( + "github.com/hashicorp/consul/command/flags" + "github.com/mitchellh/cli" +) + +func New() *cmd { + return &cmd{} +} + +type cmd struct{} + +func (c *cmd) Run(args []string) int { + return cli.RunResultHelp +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return flags.Usage(help, nil) +} + +const synopsis = "Interact with Consul Connect" +const help = ` +Usage: consul connect [options] [args] + + This command has subcommands for interacting 
with Consul Connect. + + Here are some simple examples, and more detailed examples are available + in the subcommands or the documentation. + + Run the built-in Connect mTLS proxy + + $ consul connect proxy + + For more examples, ask for subcommand help or view the documentation. +` diff --git a/command/connect/connect_test.go b/command/connect/connect_test.go new file mode 100644 index 000000000..95c8ebd58 --- /dev/null +++ b/command/connect/connect_test.go @@ -0,0 +1,13 @@ +package connect + +import ( + "strings" + "testing" +) + +func TestCatalogCommand_noTabs(t *testing.T) { + t.Parallel() + if strings.ContainsRune(New().Help(), '\t') { + t.Fatal("help has tabs") + } +} diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go new file mode 100644 index 000000000..237f4b7e2 --- /dev/null +++ b/command/connect/proxy/proxy.go @@ -0,0 +1,166 @@ +package proxy + +import ( + "context" + "flag" + "fmt" + "io" + "log" + "net/http" + // Expose pprof if configured + _ "net/http/pprof" + + "github.com/hashicorp/consul/command/flags" + proxyImpl "github.com/hashicorp/consul/proxy" + + "github.com/hashicorp/consul/logger" + "github.com/hashicorp/logutils" + "github.com/mitchellh/cli" +) + +func New(ui cli.Ui, shutdownCh <-chan struct{}) *cmd { + c := &cmd{UI: ui, shutdownCh: shutdownCh} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + shutdownCh <-chan struct{} + + logFilter *logutils.LevelFilter + logOutput io.Writer + logger *log.Logger + + // flags + logLevel string + cfgFile string + proxyID string + pprofAddr string +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + + c.flags.StringVar(&c.cfgFile, "insecure-dev-config", "", + "If set, proxy config is read on startup from this file (in HCL or JSON"+ + "format). If a config file is given, the proxy will use that instead of "+ + "querying the local agent for it's configuration. 
It will not reload it "+ + "except on startup. In this mode the proxy WILL NOT authorize incoming "+ + "connections with the local agent which is totally insecure. This is "+ + "ONLY for development and testing.") + + c.flags.StringVar(&c.proxyID, "proxy-id", "", + "The proxy's ID on the local agent.") + + c.flags.StringVar(&c.logLevel, "log-level", "INFO", + "Specifies the log level.") + + c.flags.StringVar(&c.pprofAddr, "pprof-addr", "", + "Enable debugging via pprof. Providing a host:port (or just ':port') "+ + "enables profiling HTTP endpoints on that address.") + + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.ServerFlags()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 1 + } + + // Setup the log outputs + logConfig := &logger.Config{ + LogLevel: c.logLevel, + } + logFilter, logGate, _, logOutput, ok := logger.Setup(logConfig, c.UI) + if !ok { + return 1 + } + c.logFilter = logFilter + c.logOutput = logOutput + c.logger = log.New(logOutput, "", log.LstdFlags) + + // Enable Pprof if needed + if c.pprofAddr != "" { + go func() { + c.UI.Output(fmt.Sprintf("Starting pprof HTTP endpoints on "+ + "http://%s/debug/pprof", c.pprofAddr)) + log.Fatal(http.ListenAndServe(c.pprofAddr, nil)) + }() + } + + // Setup Consul client + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 1 + } + + var p *proxyImpl.Proxy + if c.cfgFile != "" { + c.UI.Info("Configuring proxy locally from " + c.cfgFile) + + p, err = proxyImpl.NewFromConfigFile(client, c.cfgFile, c.logger) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed configuring from file: %s", err)) + return 1 + } + + } else { + p, err = proxyImpl.New(client, c.proxyID, c.logger) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed configuring from agent: %s", err)) + return 1 + } + } + + ctx, cancel := 
context.WithCancel(context.Background()) + go func() { + err := p.Run(ctx) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed running proxy: %s", err)) + } + // If we exited early due to a fatal error, need to unblock the main + // routine. But we can't close shutdownCh since it might already be closed + // by a signal and there is no way to tell. We also can't send on it to + // unblock main routine since it's typed as receive only. So the best thing + // we can do is cancel the context and have the main routine select on both. + cancel() + }() + + c.UI.Output("Consul Connect proxy running!") + + c.UI.Output("Log data will now stream in as it occurs:\n") + logGate.Flush() + + // Wait for shutdown or context cancel (see Run() goroutine above) + select { + case <-c.shutdownCh: + cancel() + case <-ctx.Done(): + } + c.UI.Output("Consul Connect proxy shutdown") + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return c.help +} + +const synopsis = "Runs a Consul Connect proxy" +const help = ` +Usage: consul proxy [options] + + Starts a Consul Connect proxy and runs until an interrupt is received. +` diff --git a/command/connect/proxy/proxy_test.go b/command/connect/proxy/proxy_test.go new file mode 100644 index 000000000..943b369ff --- /dev/null +++ b/command/connect/proxy/proxy_test.go @@ -0,0 +1 @@ +package proxy diff --git a/connect/auth.go b/connect/auth.go new file mode 100644 index 000000000..73c16f0bf --- /dev/null +++ b/connect/auth.go @@ -0,0 +1,43 @@ +package connect + +import "crypto/x509" + +// Auther is the interface that provides both Authentication and Authorization +// for an mTLS connection. It's only method is compatible with +// tls.Config.VerifyPeerCertificate. +type Auther interface { + // Auth is called during tls Connection establishment to Authenticate and + // Authorize the presented peer. 
Note that verifiedChains must not be relied + // upon as we typically have to skip Go's internal verification so the + // implementation takes full responsibility to validating the certificate + // against known roots. It is also up to the user of the interface to ensure + // appropriate validation is performed for client or server end by arranging + // for an appropriate implementation to be hooked into the tls.Config used. + Auth(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error +} + +// ClientAuther is used to auth Clients connecting to a Server. +type ClientAuther struct{} + +// Auth implements Auther +func (a *ClientAuther) Auth(rawCerts [][]byte, + verifiedChains [][]*x509.Certificate) error { + + // TODO(banks): implement path validation and AuthZ + return nil +} + +// ServerAuther is used to auth the Server identify from a connecting Client. +type ServerAuther struct { + // TODO(banks): We'll need a way to pass the expected service identity (name, + // namespace, dc, cluster) here based on discovery result. +} + +// Auth implements Auther +func (a *ServerAuther) Auth(rawCerts [][]byte, + verifiedChains [][]*x509.Certificate) error { + + // TODO(banks): implement path validation and verify URI matches the target + // service we intended to connect to. + return nil +} diff --git a/connect/client.go b/connect/client.go new file mode 100644 index 000000000..867bf0db5 --- /dev/null +++ b/connect/client.go @@ -0,0 +1,256 @@ +package connect + +import ( + "context" + "crypto/tls" + "fmt" + "math/rand" + "net" + + "github.com/hashicorp/consul/api" +) + +// CertStatus indicates whether the Client currently has valid certificates for +// incoming and outgoing connections. +type CertStatus int + +const ( + // CertStatusUnknown is the zero value for CertStatus which may be returned + // when a watch channel is closed on shutdown. It has no other meaning. 
+ CertStatusUnknown CertStatus = iota + + // CertStatusOK indicates the client has valid certificates and trust roots to + // Authenticate incoming and outgoing connections. + CertStatusOK + + // CertStatusPending indicates the client is waiting to be issued initial + // certificates, or that it's certificates have expired and it's waiting to be + // issued new ones. In this state all incoming and outgoing connections will + // fail. + CertStatusPending +) + +func (s CertStatus) String() string { + switch s { + case CertStatusOK: + return "OK" + case CertStatusPending: + return "pending" + case CertStatusUnknown: + fallthrough + default: + return "unknown" + } +} + +// Client is the interface a basic client implementation must support. +type Client interface { + // TODO(banks): build this and test it + // CertStatus returns the current status of the client's certificates. It can + // be used to determine if the Client is able to service requests at the + // current time. + //CertStatus() CertStatus + + // TODO(banks): build this and test it + // WatchCertStatus returns a channel that is notified on all status changes. + // Note that a message on the channel isn't guaranteed to be different so it's + // value should be inspected. During Client shutdown the channel will be + // closed returning a zero type which is equivalent to CertStatusUnknown. + //WatchCertStatus() <-chan CertStatus + + // ServerTLSConfig returns the *tls.Config to be used when creating a TCP + // listener that should accept Connect connections. It is likely that at + // startup the tlsCfg returned will not be immediately usable since + // certificates are typically fetched from the agent asynchronously. In this + // case it's still safe to listen with the provided config, but auth failures + // will occur until initial certificate discovery is complete. 
In general at + // any time it is possible for certificates to expire before new replacements + // have been issued due to local network errors so the server may not actually + // have a working certificate configuration at any time, however as soon as + // valid certs can be issued it will automatically start working again so + // should take no action. + ServerTLSConfig() (*tls.Config, error) + + // DialService opens a new connection to the named service registered in + // Consul. It will perform service discovery to find healthy instances. If + // there is an error during connection it is returned and the caller may call + // again. The client implementation makes a best effort to make consecutive + // Dials against different instances either by randomising the list and/or + // maintaining a local memory of which instances recently failed. If the + // context passed times out before connection is established and verified an + // error is returned. + DialService(ctx context.Context, namespace, name string) (net.Conn, error) + + // DialPreparedQuery opens a new connection by executing the named Prepared + // Query against the local Consul agent, and picking one of the returned + // instances to connect to. It will perform service discovery with the same + // semantics as DialService. + DialPreparedQuery(ctx context.Context, namespace, name string) (net.Conn, error) +} + +/* + +Maybe also convenience wrappers for: + - listening TLS conn with right config + - http.ListenAndServeTLS equivalent + +*/ + +// AgentClient is the primary implementation of a connect.Client which +// communicates with the local Consul agent. +type AgentClient struct { + agent *api.Client + tlsCfg *ReloadableTLSConfig +} + +// NewClient returns an AgentClient to allow consuming and providing +// Connect-enabled network services. +func NewClient(agent *api.Client) Client { + // TODO(banks): hook up fetching certs from Agent and updating tlsCfg on cert + // delivery/change. 
Perhaps need to make + return &AgentClient{ + agent: agent, + tlsCfg: NewReloadableTLSConfig(defaultTLSConfig()), + } +} + +// NewInsecureDevClientWithLocalCerts returns an AgentClient that will still do +// service discovery via the local agent but will use externally provided +// certificates and skip authorization. This is intended just for development +// and must not be used ever in production. +func NewInsecureDevClientWithLocalCerts(agent *api.Client, caFile, certFile, + keyFile string) (Client, error) { + + cfg, err := devTLSConfigFromFiles(caFile, certFile, keyFile) + if err != nil { + return nil, err + } + + return &AgentClient{ + agent: agent, + tlsCfg: NewReloadableTLSConfig(cfg), + }, nil +} + +// ServerTLSConfig implements Client +func (c *AgentClient) ServerTLSConfig() (*tls.Config, error) { + return c.tlsCfg.ServerTLSConfig(), nil +} + +// DialService implements Client +func (c *AgentClient) DialService(ctx context.Context, namespace, + name string) (net.Conn, error) { + return c.dial(ctx, "service", namespace, name) +} + +// DialPreparedQuery implements Client +func (c *AgentClient) DialPreparedQuery(ctx context.Context, namespace, + name string) (net.Conn, error) { + return c.dial(ctx, "prepared_query", namespace, name) +} + +func (c *AgentClient) dial(ctx context.Context, discoveryType, namespace, + name string) (net.Conn, error) { + + svcs, err := c.discoverInstances(ctx, discoveryType, namespace, name) + if err != nil { + return nil, err + } + + svc, err := c.pickInstance(svcs) + if err != nil { + return nil, err + } + if svc == nil { + return nil, fmt.Errorf("no healthy services discovered") + } + + // OK we have a service we can dial! We need a ClientAuther that will validate + // the connection is legit. + + // TODO(banks): implement ClientAuther properly to actually verify connected + // cert matches the expected service/cluster etc. based on svc. 
+ auther := &ClientAuther{} + tlsConfig := c.tlsCfg.TLSConfig(auther) + + // Resolve address TODO(banks): I expected this to happen magically in the + // agent at registration time if I register with no explicit address but + // apparently doesn't. This is a quick hack to make it work for now, need to + // see if there is a better shared code path for doing this. + addr := svc.Service.Address + if addr == "" { + addr = svc.Node.Address + } + var dialer net.Dialer + tcpConn, err := dialer.DialContext(ctx, "tcp", + fmt.Sprintf("%s:%d", addr, svc.Service.Port)) + if err != nil { + return nil, err + } + + tlsConn := tls.Client(tcpConn, tlsConfig) + err = tlsConn.Handshake() + if err != nil { + tlsConn.Close() + return nil, err + } + + return tlsConn, nil +} + +// pickInstance returns an instance from the given list to try to connect to. It +// may be made pluggable later, for now it just picks a random one regardless of +// whether the list is already shuffled. +func (c *AgentClient) pickInstance(svcs []*api.ServiceEntry) (*api.ServiceEntry, error) { + if len(svcs) < 1 { + return nil, nil + } + idx := rand.Intn(len(svcs)) + return svcs[idx], nil +} + +// discoverInstances returns all instances for the given discoveryType, +// namespace and name. The returned service entries may or may not be shuffled +func (c *AgentClient) discoverInstances(ctx context.Context, discoverType, + namespace, name string) ([]*api.ServiceEntry, error) { + + q := &api.QueryOptions{ + // TODO(banks): make this configurable? + AllowStale: true, + } + q = q.WithContext(ctx) + + switch discoverType { + case "service": + svcs, _, err := c.agent.Health().Connect(name, "", true, q) + if err != nil { + return nil, err + } + return svcs, err + + case "prepared_query": + // TODO(banks): it's not super clear to me how this should work eventually. + // How do we distinguise between a PreparedQuery for the actual services and + // one that should return the connect proxies where that differs? 
If we + // can't then we end up with a janky UX where user specifies a reasonable + // prepared query but we try to connect to non-connect services and fail + // with a confusing TLS error. Maybe just a way to filter PreparedQuery + // results by connect-enabled would be sufficient (or even metadata to do + // that ourselves in the response although less efficient). + resp, _, err := c.agent.PreparedQuery().Execute(name, q) + if err != nil { + return nil, err + } + + // Awkward, we have a slice of api.ServiceEntry here but want a slice of + // *api.ServiceEntry for compat with Connect/Service APIs. Have to convert + // them to keep things type-happy. + svcs := make([]*api.ServiceEntry, len(resp.Nodes)) + for idx, se := range resp.Nodes { + svcs[idx] = &se + } + return svcs, err + default: + return nil, fmt.Errorf("unsupported discovery type: %s", discoverType) + } +} diff --git a/connect/client_test.go b/connect/client_test.go new file mode 100644 index 000000000..fcb18e600 --- /dev/null +++ b/connect/client_test.go @@ -0,0 +1,148 @@ +package connect + +import ( + "context" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "testing" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/testutil" + "github.com/stretchr/testify/require" +) + +func TestNewInsecureDevClientWithLocalCerts(t *testing.T) { + + agent, err := api.NewClient(api.DefaultConfig()) + require.Nil(t, err) + + got, err := NewInsecureDevClientWithLocalCerts(agent, + "testdata/ca1-ca-consul-internal.cert.pem", + "testdata/ca1-svc-web.cert.pem", + "testdata/ca1-svc-web.key.pem", + ) + require.Nil(t, err) + + // Sanity check correct certs were loaded + serverCfg, err := got.ServerTLSConfig() + require.Nil(t, err) + caSubjects := serverCfg.RootCAs.Subjects() + require.Len(t, caSubjects, 1) + caSubject, err := testNameFromRawDN(caSubjects[0]) + require.Nil(t, err) + require.Equal(t, "Consul Internal", 
caSubject.CommonName) + + require.Len(t, serverCfg.Certificates, 1) + cert, err := x509.ParseCertificate(serverCfg.Certificates[0].Certificate[0]) + require.Nil(t, err) + require.Equal(t, "web", cert.Subject.CommonName) +} + +func testNameFromRawDN(raw []byte) (*pkix.Name, error) { + var seq pkix.RDNSequence + if _, err := asn1.Unmarshal(raw, &seq); err != nil { + return nil, err + } + + var name pkix.Name + name.FillFromRDNSequence(&seq) + return &name, nil +} + +func testAgent(t *testing.T) (*testutil.TestServer, *api.Client) { + t.Helper() + + // Make client config + conf := api.DefaultConfig() + + // Create server + server, err := testutil.NewTestServerConfigT(t, nil) + require.Nil(t, err) + + conf.Address = server.HTTPAddr + + // Create client + agent, err := api.NewClient(conf) + require.Nil(t, err) + + return server, agent +} + +func testService(t *testing.T, ca, name string, client *api.Client) *httptest.Server { + t.Helper() + + // Run a test service to discover + server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("svc: " + name)) + })) + server.TLS = TestTLSConfig(t, ca, name) + server.StartTLS() + + u, err := url.Parse(server.URL) + require.Nil(t, err) + + port, err := strconv.Atoi(u.Port()) + require.Nil(t, err) + + // If client is passed, register the test service instance + if client != nil { + svc := &api.AgentServiceRegistration{ + // TODO(banks): we don't really have a good way to represent + // connect-native apps yet so we have to pretend out little server is a + // proxy for now. 
+ Kind: api.ServiceKindConnectProxy, + ProxyDestination: name, + Name: name + "-proxy", + Address: u.Hostname(), + Port: port, + } + err := client.Agent().ServiceRegister(svc) + require.Nil(t, err) + } + + return server +} + +func TestDialService(t *testing.T) { + consulServer, agent := testAgent(t) + defer consulServer.Stop() + + svc := testService(t, "ca1", "web", agent) + defer svc.Close() + + c, err := NewInsecureDevClientWithLocalCerts(agent, + "testdata/ca1-ca-consul-internal.cert.pem", + "testdata/ca1-svc-web.cert.pem", + "testdata/ca1-svc-web.key.pem", + ) + require.Nil(t, err) + + conn, err := c.DialService(context.Background(), "default", "web") + require.Nilf(t, err, "err: %s", err) + + // Inject our conn into http.Transport + httpClient := &http.Client{ + Transport: &http.Transport{ + DialTLS: func(network, addr string) (net.Conn, error) { + return conn, nil + }, + }, + } + + // Don't be fooled the hostname here is ignored since we did the dialling + // ourselves + resp, err := httpClient.Get("https://web.connect.consul/") + require.Nil(t, err) + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + require.Nil(t, err) + + require.Equal(t, "svc: web", string(body)) +} diff --git a/connect/testdata/ca1-ca-consul-internal.cert.pem b/connect/testdata/ca1-ca-consul-internal.cert.pem new file mode 100644 index 000000000..6a557775f --- /dev/null +++ b/connect/testdata/ca1-ca-consul-internal.cert.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICIDCCAcagAwIBAgIBATAKBggqhkjOPQQDAjAaMRgwFgYDVQQDEw9Db25zdWwg +SW50ZXJuYWwwHhcNMTgwMzIzMjIwNDI1WhcNMjgwMzIwMjIwNDI1WjAaMRgwFgYD +VQQDEw9Db25zdWwgSW50ZXJuYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT3 +IPiDHugKYEVaSpIzBjqU5lQrmirC6N1XHyOAhF2psGGxcxezpf8Vgy5Iv6XbmeHr +cttyzUYtUKhrFBhxkPYRo4H8MIH5MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E +BTADAQH/MCkGA1UdDgQiBCCrnNQy2IQS73Co9WbrPXtq/YP9SvIBOJ8iYRWTOxjC +qTArBgNVHSMEJDAigCCrnNQy2IQS73Co9WbrPXtq/YP9SvIBOJ8iYRWTOxjCqTA/ 
+BgNVHREEODA2hjRzcGlmZmU6Ly8xMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1 +NTU1NTU1NTUuY29uc3VsMD0GA1UdHgEB/wQzMDGgLzAtgisxMTExMTExMS0yMjIy +LTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqGSM49BAMCA0gAMEUC +IQDwWL6ZuszKrZjSJwDzdhRQtj1ppezJrKaDTJx+4F/tyQIgEaQCR935ztIqZzgO +Ka6ozcH2Ubd4j4cDC1XswVMW6zs= +-----END CERTIFICATE----- diff --git a/connect/testdata/ca1-ca-consul-internal.key.pem b/connect/testdata/ca1-ca-consul-internal.key.pem new file mode 100644 index 000000000..8c40fd26b --- /dev/null +++ b/connect/testdata/ca1-ca-consul-internal.key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIDUDO3I7WKbLTTWkNKA4unB2RLq/RX+L+XIFssDE/AD7oAoGCCqGSM49 +AwEHoUQDQgAE9yD4gx7oCmBFWkqSMwY6lOZUK5oqwujdVx8jgIRdqbBhsXMXs6X/ +FYMuSL+l25nh63Lbcs1GLVCoaxQYcZD2EQ== +-----END EC PRIVATE KEY----- diff --git a/connect/testdata/ca1-svc-cache.cert.pem b/connect/testdata/ca1-svc-cache.cert.pem new file mode 100644 index 000000000..097a2b6a6 --- /dev/null +++ b/connect/testdata/ca1-svc-cache.cert.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICEDCCAbagAwIBAgIBBTAKBggqhkjOPQQDAjAaMRgwFgYDVQQDEw9Db25zdWwg +SW50ZXJuYWwwHhcNMTgwMzIzMjIwNDI1WhcNMjgwMzIwMjIwNDI1WjAQMQ4wDAYD +VQQDEwVjYWNoZTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABOWw8369v4DHJAI6 +k061hU8rxaQs87mZFQ52JfleJjRoDUuZIPLhZHMFbvbI8pDWi7YdjluNbzNNh6nu +fAivylujgfYwgfMwDgYDVR0PAQH/BAQDAgO4MB0GA1UdJQQWMBQGCCsGAQUFBwMC +BggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCCHhMqV2/R8meSsXtwh +OLC9hP7WQfuvwJ6V6uwKZdEofTArBgNVHSMEJDAigCCrnNQy2IQS73Co9WbrPXtq +/YP9SvIBOJ8iYRWTOxjCqTBcBgNVHREEVTBThlFzcGlmZmU6Ly8xMTExMTExMS0y +MjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsL25zL2RlZmF1bHQvZGMv +ZGMwMS9zdmMvY2FjaGUwCgYIKoZIzj0EAwIDSAAwRQIgPfekKBd/ltpVkdjnB0Hp +cV9HPwy12tXp4suR2nspSNkCIQD1Th/hvxuBKkRYy9Bl+jgTbrFdd4fLCWPeFbaM +sgLK7g== +-----END CERTIFICATE----- diff --git a/connect/testdata/ca1-svc-cache.key.pem b/connect/testdata/ca1-svc-cache.key.pem new file mode 100644 index 000000000..f780f63db --- /dev/null +++ 
b/connect/testdata/ca1-svc-cache.key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIPTSPV2cWNnO69f+vYyCg5frpoBtK6L+kZVLrGCv3TdnoAoGCCqGSM49 +AwEHoUQDQgAE5bDzfr2/gMckAjqTTrWFTyvFpCzzuZkVDnYl+V4mNGgNS5kg8uFk +cwVu9sjykNaLth2OW41vM02Hqe58CK/KWw== +-----END EC PRIVATE KEY----- diff --git a/connect/testdata/ca1-svc-db.cert.pem b/connect/testdata/ca1-svc-db.cert.pem new file mode 100644 index 000000000..d00a38ea0 --- /dev/null +++ b/connect/testdata/ca1-svc-db.cert.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICCjCCAbCgAwIBAgIBBDAKBggqhkjOPQQDAjAaMRgwFgYDVQQDEw9Db25zdWwg +SW50ZXJuYWwwHhcNMTgwMzIzMjIwNDI1WhcNMjgwMzIwMjIwNDI1WjANMQswCQYD +VQQDEwJkYjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABEcTyr2l7yYWZuh++02M +usR20QrZtHdd7goKmYrIpQ3ekmHuLLgJWgTTaIhCj8fzbryep+s8oM7EiPhRQ14l +uSujgfMwgfAwDgYDVR0PAQH/BAQDAgO4MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggr +BgEFBQcDATAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCAy6jHCBBT2bii+aMJCDJ33 +bFJtR72bxDBUi5b+YWyWwDArBgNVHSMEJDAigCCrnNQy2IQS73Co9WbrPXtq/YP9 +SvIBOJ8iYRWTOxjCqTBZBgNVHREEUjBQhk5zcGlmZmU6Ly8xMTExMTExMS0yMjIy +LTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsL25zL2RlZmF1bHQvZGMvZGMw +MS9zdmMvZGIwCgYIKoZIzj0EAwIDSAAwRQIhALCW4cOEpuYfLJ0NGwEmYG5Fko0N +WMccL0gEQzKUbIWrAiAIw8wkTSf1O8vTHeKdR1fCmdVoDRFRKB643PaofUzFxA== +-----END CERTIFICATE----- diff --git a/connect/testdata/ca1-svc-db.key.pem b/connect/testdata/ca1-svc-db.key.pem new file mode 100644 index 000000000..3ec23a33b --- /dev/null +++ b/connect/testdata/ca1-svc-db.key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIMHv1pjt75IjKXzl8l4rBtEFS1pEuOM4WNgeHg5Qn1RroAoGCCqGSM49 +AwEHoUQDQgAERxPKvaXvJhZm6H77TYy6xHbRCtm0d13uCgqZisilDd6SYe4suAla +BNNoiEKPx/NuvJ6n6zygzsSI+FFDXiW5Kw== +-----END EC PRIVATE KEY----- diff --git a/connect/testdata/ca1-svc-web.cert.pem b/connect/testdata/ca1-svc-web.cert.pem new file mode 100644 index 000000000..a786a2c06 --- /dev/null +++ b/connect/testdata/ca1-svc-web.cert.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- 
+MIICDDCCAbKgAwIBAgIBAzAKBggqhkjOPQQDAjAaMRgwFgYDVQQDEw9Db25zdWwg +SW50ZXJuYWwwHhcNMTgwMzIzMjIwNDI1WhcNMjgwMzIwMjIwNDI1WjAOMQwwCgYD +VQQDEwN3ZWIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARF47lERGXziNBC74Kh +U3W29/M7JO9LIUaJgK0LJbhgf0MuPxf7gX+PnxH5ImI5yfXRv0SSxeCq7377IkXP +XS6Fo4H0MIHxMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcDAgYI +KwYBBQUHAwEwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQg26hfNYiVwYRm7CQJvdOd +NIOmG3t8vNwXCtktC782cf8wKwYDVR0jBCQwIoAgq5zUMtiEEu9wqPVm6z17av2D +/UryATifImEVkzsYwqkwWgYDVR0RBFMwUYZPc3BpZmZlOi8vMTExMTExMTEtMjIy +Mi0zMzMzLTQ0NDQtNTU1NTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2Rj +MDEvc3ZjL3dlYjAKBggqhkjOPQQDAgNIADBFAiAzi8uBs+ApPfAZZm5eO/hhVZiv +E8p84VKCqPeF3tFfoAIhANVkdSnp2AKU5T7SlJHmieu3DFNWCVpajlHJvf286J94 +-----END CERTIFICATE----- diff --git a/connect/testdata/ca1-svc-web.key.pem b/connect/testdata/ca1-svc-web.key.pem new file mode 100644 index 000000000..8ed82c13c --- /dev/null +++ b/connect/testdata/ca1-svc-web.key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIPOIj4BFS0fknG+uAVKZIWRpnzp7O3OKpBDgEmuml7lcoAoGCCqGSM49 +AwEHoUQDQgAEReO5RERl84jQQu+CoVN1tvfzOyTvSyFGiYCtCyW4YH9DLj8X+4F/ +j58R+SJiOcn10b9EksXgqu9++yJFz10uhQ== +-----END EC PRIVATE KEY----- diff --git a/connect/testdata/ca2-ca-vault.cert.pem b/connect/testdata/ca2-ca-vault.cert.pem new file mode 100644 index 000000000..a7f617468 --- /dev/null +++ b/connect/testdata/ca2-ca-vault.cert.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICDDCCAbKgAwIBAgIBAjAKBggqhkjOPQQDAjAQMQ4wDAYDVQQDEwVWYXVsdDAe +Fw0xODAzMjMyMjA0MjVaFw0yODAzMjAyMjA0MjVaMBAxDjAMBgNVBAMTBVZhdWx0 +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEAjGVnRy/7Q2SU4ePbKbsurRAHKYA +CuA3r9QrowgZOr7yptF54shiobMIORpfKYkoYkhzL1lhWKI06BUJ4xuPd6OB/DCB ++TAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zApBgNVHQ4EIgQgqEc5 +ZrELD5ySxapbU+eRb+aEv1MEoCvjC0mCA1uJecMwKwYDVR0jBCQwIoAgqEc5ZrEL +D5ySxapbU+eRb+aEv1MEoCvjC0mCA1uJecMwPwYDVR0RBDgwNoY0c3BpZmZlOi8v +MTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1NTU1NTU1NTU1LmNvbnN1bDA9BgNV 
+HR4BAf8EMzAxoC8wLYIrMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1NTU1NTU1 +NTU1LmNvbnN1bDAKBggqhkjOPQQDAgNIADBFAiEA6pBdeglhq//A7sYnYk85XL+3 +4IDrXrGN3KjC9qo3J9ICIDS9pEoTPWAWDfn1ccPafKVBrJm6KrmljcvymQ2QUDIZ +-----END CERTIFICATE----- +---- diff --git a/connect/testdata/ca2-ca-vault.key.pem b/connect/testdata/ca2-ca-vault.key.pem new file mode 100644 index 000000000..43534b961 --- /dev/null +++ b/connect/testdata/ca2-ca-vault.key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIKnuCctuvtyzf+M6B8jGqejG4T5o7NMRYjO2M3dZITCboAoGCCqGSM49 +AwEHoUQDQgAEAjGVnRy/7Q2SU4ePbKbsurRAHKYACuA3r9QrowgZOr7yptF54shi +obMIORpfKYkoYkhzL1lhWKI06BUJ4xuPdw== +-----END EC PRIVATE KEY----- diff --git a/connect/testdata/ca2-svc-cache.cert.pem b/connect/testdata/ca2-svc-cache.cert.pem new file mode 100644 index 000000000..32110e232 --- /dev/null +++ b/connect/testdata/ca2-svc-cache.cert.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICBzCCAaygAwIBAgIBCDAKBggqhkjOPQQDAjAQMQ4wDAYDVQQDEwVWYXVsdDAe +Fw0xODAzMjMyMjA0MjVaFw0yODAzMjAyMjA0MjVaMBAxDjAMBgNVBAMTBWNhY2hl +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEyB6D+Eqi/71EhUrBWlcZOV2vjS9Y +xnUQ3jfH+QUZur7WOuGLnO7eArbAHcDbqKGyDWxlkZH04sGYOXaEW7UUd6OB9jCB +8zAOBgNVHQ8BAf8EBAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMB +MAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEIGapiHFxlbYbNKFlwdPMpKhIypvNZXo8 +k/OZLki/vurQMCsGA1UdIwQkMCKAIKhHOWaxCw+cksWqW1PnkW/mhL9TBKAr4wtJ +ggNbiXnDMFwGA1UdEQRVMFOGUXNwaWZmZTovLzExMTExMTExLTIyMjItMzMzMy00 +NDQ0LTU1NTU1NTU1NTU1NS5jb25zdWwvbnMvZGVmYXVsdC9kYy9kYzAxL3N2Yy9j +YWNoZTAKBggqhkjOPQQDAgNJADBGAiEA/vRLXbkigS6l89MxFk0RFE7Zo4vorv7s +E1juCOsVJBICIQDXlpmYH9fPon6DYMyOxQttNjkuWbJgnPv7rPg+CllRyA== +-----END CERTIFICATE----- diff --git a/connect/testdata/ca2-svc-cache.key.pem b/connect/testdata/ca2-svc-cache.key.pem new file mode 100644 index 000000000..cabad8179 --- /dev/null +++ b/connect/testdata/ca2-svc-cache.key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIEbQOv4odF2Tu8ZnJTJuytvOd2HOF9HxgGw5ei1pkP4moAoGCCqGSM49 
+AwEHoUQDQgAEyB6D+Eqi/71EhUrBWlcZOV2vjS9YxnUQ3jfH+QUZur7WOuGLnO7e +ArbAHcDbqKGyDWxlkZH04sGYOXaEW7UUdw== +-----END EC PRIVATE KEY----- diff --git a/connect/testdata/ca2-svc-db.cert.pem b/connect/testdata/ca2-svc-db.cert.pem new file mode 100644 index 000000000..33273058a --- /dev/null +++ b/connect/testdata/ca2-svc-db.cert.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIBBzAKBggqhkjOPQQDAjAQMQ4wDAYDVQQDEwVWYXVsdDAe +Fw0xODAzMjMyMjA0MjVaFw0yODAzMjAyMjA0MjVaMA0xCzAJBgNVBAMTAmRiMFkw +EwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFeB4DynO6IeKOE4zFLlBVFv+4HeWRvK3 +6cQ9L6v5uhLfdcYyqhT/QLbQ4R8ks1vUTTiq0XJsAGdkvkt71fiEl6OB8zCB8DAO +BgNVHQ8BAf8EBAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMAwG +A1UdEwEB/wQCMAAwKQYDVR0OBCIEIKjVz8n91cej8q6WpDNd0hwSMAE2ddY056PH +hMfaBM6GMCsGA1UdIwQkMCKAIKhHOWaxCw+cksWqW1PnkW/mhL9TBKAr4wtJggNb +iXnDMFkGA1UdEQRSMFCGTnNwaWZmZTovLzExMTExMTExLTIyMjItMzMzMy00NDQ0 +LTU1NTU1NTU1NTU1NS5jb25zdWwvbnMvZGVmYXVsdC9kYy9kYzAxL3N2Yy9kYjAK +BggqhkjOPQQDAgNIADBFAiAdYkokbeZr7W32NhjcNoTMNwpz9CqJpK6Yzu4N6EJc +pAIhALHpRM57zdiMouDOlhGPX5XKzbSl2AnBjFvbPqgFV09Z +-----END CERTIFICATE----- diff --git a/connect/testdata/ca2-svc-db.key.pem b/connect/testdata/ca2-svc-db.key.pem new file mode 100644 index 000000000..7f7ab9ff8 --- /dev/null +++ b/connect/testdata/ca2-svc-db.key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIHnzia+DNTFB7uYQEuWvLR2czGCuDfOTt1FfcBo1uBJioAoGCCqGSM49 +AwEHoUQDQgAEFeB4DynO6IeKOE4zFLlBVFv+4HeWRvK36cQ9L6v5uhLfdcYyqhT/ +QLbQ4R8ks1vUTTiq0XJsAGdkvkt71fiElw== +-----END EC PRIVATE KEY----- diff --git a/connect/testdata/ca2-svc-web.cert.pem b/connect/testdata/ca2-svc-web.cert.pem new file mode 100644 index 000000000..ae1e338f6 --- /dev/null +++ b/connect/testdata/ca2-svc-web.cert.pem @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICAjCCAaigAwIBAgIBBjAKBggqhkjOPQQDAjAQMQ4wDAYDVQQDEwVWYXVsdDAe +Fw0xODAzMjMyMjA0MjVaFw0yODAzMjAyMjA0MjVaMA4xDDAKBgNVBAMTA3dlYjBZ +MBMGByqGSM49AgEGCCqGSM49AwEHA0IABM9XzxWFCa80uQDfJEGboUC15Yr+FwDp 
+OemThalQxFpkL7gQSIgpzgGULIx+jCiu+clJ0QhbWT2dnS8vFUKq35qjgfQwgfEw +DgYDVR0PAQH/BAQDAgO4MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAM +BgNVHRMBAf8EAjAAMCkGA1UdDgQiBCCN+TKHPCOr48hxRCx4rqbWQg5QHkCSNzjZ +qi1JGs13njArBgNVHSMEJDAigCCoRzlmsQsPnJLFqltT55Fv5oS/UwSgK+MLSYID +W4l5wzBaBgNVHREEUzBRhk9zcGlmZmU6Ly8xMTExMTExMS0yMjIyLTMzMzMtNDQ0 +NC01NTU1NTU1NTU1NTUuY29uc3VsL25zL2RlZmF1bHQvZGMvZGMwMS9zdmMvd2Vi +MAoGCCqGSM49BAMCA0gAMEUCIBd6gpL6E8rms5BU+cJeeyv0Rjc18edn2g3q2wLN +r1zAAiEAv16whKwR0DeKkldGLDQIu9nCNvfDZrEWgywIBYbzLxY= +-----END CERTIFICATE----- diff --git a/connect/testdata/ca2-svc-web.key.pem b/connect/testdata/ca2-svc-web.key.pem new file mode 100644 index 000000000..65f0bc48e --- /dev/null +++ b/connect/testdata/ca2-svc-web.key.pem @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIOCMjjRexX3qHjixpRwLxggJd9yuskqUoPy8/MepafP+oAoGCCqGSM49 +AwEHoUQDQgAEz1fPFYUJrzS5AN8kQZuhQLXliv4XAOk56ZOFqVDEWmQvuBBIiCnO +AZQsjH6MKK75yUnRCFtZPZ2dLy8VQqrfmg== +-----END EC PRIVATE KEY----- diff --git a/connect/testdata/ca2-xc-by-ca1.cert.pem b/connect/testdata/ca2-xc-by-ca1.cert.pem new file mode 100644 index 000000000..e864f6c00 --- /dev/null +++ b/connect/testdata/ca2-xc-by-ca1.cert.pem @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICFjCCAbygAwIBAgIBAjAKBggqhkjOPQQDAjAaMRgwFgYDVQQDEw9Db25zdWwg +SW50ZXJuYWwwHhcNMTgwMzIzMjIwNDI1WhcNMjgwMzIwMjIwNDI1WjAQMQ4wDAYD +VQQDEwVWYXVsdDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAIxlZ0cv+0NklOH +j2ym7Lq0QBymAArgN6/UK6MIGTq+8qbReeLIYqGzCDkaXymJKGJIcy9ZYViiNOgV +CeMbj3ejgfwwgfkwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wKQYD +VR0OBCIEIKhHOWaxCw+cksWqW1PnkW/mhL9TBKAr4wtJggNbiXnDMCsGA1UdIwQk +MCKAIKuc1DLYhBLvcKj1Zus9e2r9g/1K8gE4nyJhFZM7GMKpMD8GA1UdEQQ4MDaG +NHNwaWZmZTovLzExMTExMTExLTIyMjItMzMzMy00NDQ0LTU1NTU1NTU1NTU1NS5j +b25zdWwwPQYDVR0eAQH/BDMwMaAvMC2CKzExMTExMTExLTIyMjItMzMzMy00NDQ0 +LTU1NTU1NTU1NTU1NS5jb25zdWwwCgYIKoZIzj0EAwIDSAAwRQIgWWWj8/6SaY2y +wzOtIphwZLewCuLMG6KG8uY4S7UsosgCIQDhCbT/LUKq/A21khQncBmM79ng9Gbx +/4Zw8zbVmnZJKg== +-----END 
CERTIFICATE----- diff --git a/connect/testdata/mkcerts.go b/connect/testdata/mkcerts.go new file mode 100644 index 000000000..7fe82f53a --- /dev/null +++ b/connect/testdata/mkcerts.go @@ -0,0 +1,243 @@ +package main + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "log" + "math/big" + "net/url" + "os" + "regexp" + "strings" + "time" +) + +// You can verify a given leaf with a given root using: +// +// $ openssl verify -verbose -CAfile ca2-ca-vault.cert.pem ca1-svc-db.cert.pem +// +// Note that to verify via the cross-signed intermediate, openssl requires it to +// be bundled with the _root_ CA bundle and will ignore the cert if it's passed +// with the subject. You can do that with: +// +// $ openssl verify -verbose -CAfile \ +// <(cat ca1-ca-consul-internal.cert.pem ca2-xc-by-ca1.cert.pem) \ +// ca2-svc-db.cert.pem +// ca2-svc-db.cert.pem: OK +// +// Note that the same leaf and root without the intermediate should fail: +// +// $ openssl verify -verbose -CAfile ca1-ca-consul-internal.cert.pem ca2-svc-db.cert.pem +// ca2-svc-db.cert.pem: CN = db +// error 20 at 0 depth lookup:unable to get local issuer certificate +// +// NOTE: THIS IS A QUIRK OF OPENSSL; in Connect we will distribute the roots +// alone and stable intermediates like the XC cert to the _leaf_. 
+ +var clusterID = "11111111-2222-3333-4444-555555555555" +var cAs = []string{"Consul Internal", "Vault"} +var services = []string{"web", "db", "cache"} +var slugRe = regexp.MustCompile("[^a-zA-Z0-9]+") +var serial int64 + +type caInfo struct { + id int + name string + slug string + uri *url.URL + pk *ecdsa.PrivateKey + cert *x509.Certificate +} + +func main() { + // Make CA certs + caInfos := make(map[string]caInfo) + var previousCA *caInfo + for idx, name := range cAs { + ca := caInfo{ + id: idx + 1, + name: name, + slug: strings.ToLower(slugRe.ReplaceAllString(name, "-")), + } + pk, err := makePK(fmt.Sprintf("ca%d-ca-%s.key.pem", ca.id, ca.slug)) + if err != nil { + log.Fatal(err) + } + ca.pk = pk + caURI, err := url.Parse(fmt.Sprintf("spiffe://%s.consul", clusterID)) + if err != nil { + log.Fatal(err) + } + ca.uri = caURI + cert, err := makeCACert(ca, previousCA) + if err != nil { + log.Fatal(err) + } + ca.cert = cert + caInfos[name] = ca + previousCA = &ca + } + + // For each CA, make a leaf cert for each service + for _, ca := range caInfos { + for _, svc := range services { + _, err := makeLeafCert(ca, svc) + if err != nil { + log.Fatal(err) + } + } + } +} + +func makePK(path string) (*ecdsa.PrivateKey, error) { + log.Printf("Writing PK file: %s", path) + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, err + } + + bs, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return nil, err + } + + err = writePEM(path, "EC PRIVATE KEY", bs) + return priv, nil +} + +func makeCACert(ca caInfo, previousCA *caInfo) (*x509.Certificate, error) { + path := fmt.Sprintf("ca%d-ca-%s.cert.pem", ca.id, ca.slug) + log.Printf("Writing CA cert file: %s", path) + serial++ + subj := pkix.Name{ + CommonName: ca.name, + } + template := x509.Certificate{ + SerialNumber: big.NewInt(serial), + Subject: subj, + // New in go 1.10 + URIs: []*url.URL{ca.uri}, + // Add DNS name constraint + PermittedDNSDomainsCritical: true, + 
PermittedDNSDomains: []string{ca.uri.Hostname()}, + SignatureAlgorithm: x509.ECDSAWithSHA256, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature, + IsCA: true, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: keyID(&ca.pk.PublicKey), + SubjectKeyId: keyID(&ca.pk.PublicKey), + } + bs, err := x509.CreateCertificate(rand.Reader, &template, &template, + &ca.pk.PublicKey, ca.pk) + if err != nil { + return nil, err + } + + err = writePEM(path, "CERTIFICATE", bs) + if err != nil { + return nil, err + } + + cert, err := x509.ParseCertificate(bs) + if err != nil { + return nil, err + } + + if previousCA != nil { + // Also create cross-signed cert as we would use during rotation between + // previous CA and this one. + template.AuthorityKeyId = keyID(&previousCA.pk.PublicKey) + bs, err := x509.CreateCertificate(rand.Reader, &template, + previousCA.cert, &ca.pk.PublicKey, previousCA.pk) + if err != nil { + return nil, err + } + + path := fmt.Sprintf("ca%d-xc-by-ca%d.cert.pem", ca.id, previousCA.id) + err = writePEM(path, "CERTIFICATE", bs) + if err != nil { + return nil, err + } + } + + return cert, err +} + +func keyID(pub *ecdsa.PublicKey) []byte { + // This is not standard; RFC allows any unique identifier as long as they + // match in subject/authority chains but suggests specific hashing of DER + // bytes of public key including DER tags. I can't be bothered to do esp. + // since ECDSA keys don't have a handy way to marshal the publick key alone. 
+ h := sha256.New() + h.Write(pub.X.Bytes()) + h.Write(pub.Y.Bytes()) + return h.Sum([]byte{}) +} + +func makeLeafCert(ca caInfo, svc string) (*x509.Certificate, error) { + svcURI := ca.uri + svcURI.Path = "/ns/default/dc/dc01/svc/" + svc + + keyPath := fmt.Sprintf("ca%d-svc-%s.key.pem", ca.id, svc) + cPath := fmt.Sprintf("ca%d-svc-%s.cert.pem", ca.id, svc) + + pk, err := makePK(keyPath) + if err != nil { + return nil, err + } + + log.Printf("Writing Service Cert: %s", cPath) + + serial++ + subj := pkix.Name{ + CommonName: svc, + } + template := x509.Certificate{ + SerialNumber: big.NewInt(serial), + Subject: subj, + // New in go 1.10 + URIs: []*url.URL{svcURI}, + SignatureAlgorithm: x509.ECDSAWithSHA256, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageDataEncipherment | + x509.KeyUsageKeyAgreement | x509.KeyUsageDigitalSignature | + x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: keyID(&ca.pk.PublicKey), + SubjectKeyId: keyID(&pk.PublicKey), + } + bs, err := x509.CreateCertificate(rand.Reader, &template, ca.cert, + &pk.PublicKey, ca.pk) + if err != nil { + return nil, err + } + + err = writePEM(cPath, "CERTIFICATE", bs) + if err != nil { + return nil, err + } + + return x509.ParseCertificate(bs) +} + +func writePEM(name, typ string, bs []byte) error { + f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return err + } + defer f.Close() + return pem.Encode(f, &pem.Block{Type: typ, Bytes: bs}) +} diff --git a/connect/testing.go b/connect/testing.go new file mode 100644 index 000000000..90db332a2 --- /dev/null +++ b/connect/testing.go @@ -0,0 +1,88 @@ +package connect + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "path" + "path/filepath" + "runtime" + + "github.com/mitchellh/go-testing-interface" + 
"github.com/stretchr/testify/require" +) + +// testDataDir is a janky temporary hack to allow use of these methods from +// proxy package. We need to revisit where all this lives since it logically +// overlaps with consul/agent in Mitchell's PR and that one generates certs on +// the fly which will make this unecessary but I want to get things working for +// now with what I've got :). This wonderful heap kinda-sorta gets the path +// relative to _this_ file so it works even if the Test* method is being called +// from a test binary in another package dir. +func testDataDir() string { + _, filename, _, ok := runtime.Caller(0) + if !ok { + panic("no caller information") + } + return path.Dir(filename) + "/testdata" +} + +// TestCAPool returns an *x509.CertPool containing the named CA certs from the +// testdata dir. +func TestCAPool(t testing.T, caNames ...string) *x509.CertPool { + t.Helper() + pool := x509.NewCertPool() + for _, name := range caNames { + certs, err := filepath.Glob(testDataDir() + "/" + name + "-ca-*.cert.pem") + require.Nil(t, err) + for _, cert := range certs { + caPem, err := ioutil.ReadFile(cert) + require.Nil(t, err) + pool.AppendCertsFromPEM(caPem) + } + } + return pool +} + +// TestSvcKeyPair returns an tls.Certificate containing both cert and private +// key for a given service under a given CA from the testdata dir. +func TestSvcKeyPair(t testing.T, ca, name string) tls.Certificate { + t.Helper() + prefix := fmt.Sprintf(testDataDir()+"/%s-svc-%s", ca, name) + cert, err := tls.LoadX509KeyPair(prefix+".cert.pem", prefix+".key.pem") + require.Nil(t, err) + return cert +} + +// TestTLSConfig returns a *tls.Config suitable for use during tests. 
+func TestTLSConfig(t testing.T, ca, svc string) *tls.Config { + t.Helper() + return &tls.Config{ + Certificates: []tls.Certificate{TestSvcKeyPair(t, ca, svc)}, + MinVersion: tls.VersionTLS12, + RootCAs: TestCAPool(t, ca), + ClientCAs: TestCAPool(t, ca), + ClientAuth: tls.RequireAndVerifyClientCert, + // In real life we'll need to do this too since otherwise Go will attempt to + // verify DNS names match DNS SAN/CN which we don't want, but we'll hook + // VerifyPeerCertificates and do our own x509 path validation as well as + // AuthZ upcall. For now we are just testing the basic proxy mechanism so + // this is fine. + InsecureSkipVerify: true, + } +} + +// TestAuther is a simple Auther implementation that does nothing but what you +// tell it to! +type TestAuther struct { + // Return is the value returned from an Auth() call. Set it to nil to have all + // certificates unconditionally accepted or a value to have them fail. + Return error +} + +// Auth implements Auther +func (a *TestAuther) Auth(rawCerts [][]byte, + verifiedChains [][]*x509.Certificate) error { + return a.Return +} diff --git a/connect/tls.go b/connect/tls.go new file mode 100644 index 000000000..af66d9c0c --- /dev/null +++ b/connect/tls.go @@ -0,0 +1,124 @@ +package connect + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "sync" +) + +// defaultTLSConfig returns the standard config for connect clients and servers. +func defaultTLSConfig() *tls.Config { + serverAuther := &ServerAuther{} + return &tls.Config{ + MinVersion: tls.VersionTLS12, + ClientAuth: tls.RequireAndVerifyClientCert, + // We don't have access to go internals that decide if AES hardware + // acceleration is available in order to prefer CHA CHA if not. So let's + // just always prefer AES for now. We can look into doing something uglier + // later like using an external lib for AES checking if it seems important. 
+ // https://github.com/golang/go/blob/df91b8044dbe790c69c16058330f545be069cc1f/src/crypto/tls/common.go#L919:14 + CipherSuites: []uint16{ + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + }, + // We have to set this since otherwise Go will attempt to verify DNS names + // match DNS SAN/CN which we don't want. We hook up VerifyPeerCertificate to + // do our own path validation as well as Connect AuthZ. + InsecureSkipVerify: true, + // By default auth as if we are a server. Clients need to override this with + // an Auther that is performs correct validation of the server identity they + // intended to connect to. + VerifyPeerCertificate: serverAuther.Auth, + } +} + +// ReloadableTLSConfig exposes a tls.Config that can have it's certificates +// reloaded. This works by +type ReloadableTLSConfig struct { + mu sync.Mutex + + // cfg is the current config to use for new connections + cfg *tls.Config +} + +// NewReloadableTLSConfig returns a reloadable config currently set to base. The +// Auther used to verify certificates for incoming connections on a Server will +// just be copied from the VerifyPeerCertificate passed. Clients will need to +// pass a specific Auther instance when they call TLSConfig that is configured +// to perform the necessary validation of the server's identity. +func NewReloadableTLSConfig(base *tls.Config) *ReloadableTLSConfig { + return &ReloadableTLSConfig{cfg: base} +} + +// ServerTLSConfig returns a *tls.Config that will dynamically load certs for +// each inbound connection via the GetConfigForClient callback. +func (c *ReloadableTLSConfig) ServerTLSConfig() *tls.Config { + // Setup the basic one with current params even though we will be using + // different config for each new conn. 
+ c.mu.Lock() + base := c.cfg + c.mu.Unlock() + + // Dynamically fetch the current config for each new inbound connection + base.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) { + return c.TLSConfig(nil), nil + } + + return base +} + +// TLSConfig returns the current value for the config. It is safe to call from +// any goroutine. The passed Auther is inserted into the config's +// VerifyPeerCertificate. Passing a nil Auther will leave the default one in the +// base config +func (c *ReloadableTLSConfig) TLSConfig(auther Auther) *tls.Config { + c.mu.Lock() + cfgCopy := c.cfg + c.mu.Unlock() + if auther != nil { + cfgCopy.VerifyPeerCertificate = auther.Auth + } + return cfgCopy +} + +// SetTLSConfig sets the config used for future connections. It is safe to call +// from any goroutine. +func (c *ReloadableTLSConfig) SetTLSConfig(cfg *tls.Config) error { + c.mu.Lock() + defer c.mu.Unlock() + c.cfg = cfg + return nil +} + +// devTLSConfigFromFiles returns a default TLS Config but with certs and CAs +// based on local files for dev. 
+func devTLSConfigFromFiles(caFile, certFile, + keyFile string) (*tls.Config, error) { + + roots := x509.NewCertPool() + + bs, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, err + } + + roots.AppendCertsFromPEM(bs) + + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + + cfg := defaultTLSConfig() + + cfg.Certificates = []tls.Certificate{cert} + cfg.RootCAs = roots + cfg.ClientCAs = roots + + return cfg, nil +} diff --git a/connect/tls_test.go b/connect/tls_test.go new file mode 100644 index 000000000..0c99df3ad --- /dev/null +++ b/connect/tls_test.go @@ -0,0 +1,45 @@ +package connect + +import ( + "crypto/tls" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestReloadableTLSConfig(t *testing.T) { + base := TestTLSConfig(t, "ca1", "web") + + c := NewReloadableTLSConfig(base) + + a := &TestAuther{ + Return: nil, + } + + // The dynamic config should be the one we loaded, but with the passed auther + expect := base + expect.VerifyPeerCertificate = a.Auth + require.Equal(t, base, c.TLSConfig(a)) + + // The server config should return same too for new connections + serverCfg := c.ServerTLSConfig() + require.NotNil(t, serverCfg.GetConfigForClient) + got, err := serverCfg.GetConfigForClient(&tls.ClientHelloInfo{}) + require.Nil(t, err) + require.Equal(t, base, got) + + // Now change the config as if we just rotated to a new CA + new := TestTLSConfig(t, "ca2", "web") + err = c.SetTLSConfig(new) + require.Nil(t, err) + + // The dynamic config should be the one we loaded (with same auther due to nil) + require.Equal(t, new, c.TLSConfig(nil)) + + // The server config should return same too for new connections + serverCfg = c.ServerTLSConfig() + require.NotNil(t, serverCfg.GetConfigForClient) + got, err = serverCfg.GetConfigForClient(&tls.ClientHelloInfo{}) + require.Nil(t, err) + require.Equal(t, new, got) +} diff --git a/proxy/config.go b/proxy/config.go new file mode 100644 index 000000000..a5958135a 
--- /dev/null +++ b/proxy/config.go @@ -0,0 +1,111 @@ +package proxy + +import ( + "io/ioutil" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/hcl" +) + +// Config is the publicly configurable state for an entire proxy instance. It's +// mostly used as the format for the local-file config mode which is mostly for +// dev/testing. In normal use, different parts of this config are pulled from +// different locations (e.g. command line, agent config endpoint, agent +// certificate endpoints). +type Config struct { + // ProxyID is the identifier for this proxy as registered in Consul. It's only + // guaranteed to be unique per agent. + ProxyID string `json:"proxy_id" hcl:"proxy_id"` + + // Token is the authentication token provided for queries to the local agent. + Token string `json:"token" hcl:"token"` + + // ProxiedServiceName is the name of the service this proxy is representing. + ProxiedServiceName string `json:"proxied_service_name" hcl:"proxied_service_name"` + + // ProxiedServiceNamespace is the namespace of the service this proxy is + // representing. + ProxiedServiceNamespace string `json:"proxied_service_namespace" hcl:"proxied_service_namespace"` + + // PublicListener configures the mTLS listener. + PublicListener PublicListenerConfig `json:"public_listener" hcl:"public_listener"` + + // Upstreams configures outgoing proxies for remote connect services. + Upstreams []UpstreamConfig `json:"upstreams" hcl:"upstreams"` + + // DevCAFile allows passing the file path to PEM encoded root certificate + // bundle to be used in development instead of the ones supplied by Connect. + DevCAFile string `json:"dev_ca_file" hcl:"dev_ca_file"` + + // DevServiceCertFile allows passing the file path to PEM encoded service + // certificate (client and server) to be used in development instead of the + // ones supplied by Connect. 
+ DevServiceCertFile string `json:"dev_service_cert_file" hcl:"dev_service_cert_file"` + + // DevServiceKeyFile allows passing the file path to PEM encoded service + // private key to be used in development instead of the ones supplied by + // Connect. + DevServiceKeyFile string `json:"dev_service_key_file" hcl:"dev_service_key_file"` +} + +// ConfigWatcher is a simple interface to allow dynamic configurations from +// plugggable sources. +type ConfigWatcher interface { + // Watch returns a channel that will deliver new Configs if something external + // provokes it. + Watch() <-chan *Config +} + +// StaticConfigWatcher is a simple ConfigWatcher that delivers a static Config +// once and then never changes it. +type StaticConfigWatcher struct { + ch chan *Config +} + +// NewStaticConfigWatcher returns a ConfigWatcher for a config that never +// changes. It assumes only one "watcher" will ever call Watch. The config is +// delivered on the first call but will never be delivered again to allow +// callers to call repeatedly (e.g. select in a loop). +func NewStaticConfigWatcher(cfg *Config) *StaticConfigWatcher { + sc := &StaticConfigWatcher{ + // Buffer it so we can queue up the config for first delivery. + ch: make(chan *Config, 1), + } + sc.ch <- cfg + return sc +} + +// Watch implements ConfigWatcher on a static configuration for compatibility. +// It returns itself on the channel once and then leaves it open. +func (sc *StaticConfigWatcher) Watch() <-chan *Config { + return sc.ch +} + +// ParseConfigFile parses proxy configuration form a file for local dev. +func ParseConfigFile(filename string) (*Config, error) { + bs, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + var cfg Config + + err = hcl.Unmarshal(bs, &cfg) + if err != nil { + return nil, err + } + + return &cfg, nil +} + +// AgentConfigWatcher watches the local Consul agent for proxy config changes. 
+type AgentConfigWatcher struct { + client *api.Client +} + +// Watch implements ConfigWatcher. +func (w *AgentConfigWatcher) Watch() <-chan *Config { + watch := make(chan *Config) + // TODO implement me + return watch +} diff --git a/proxy/config_test.go b/proxy/config_test.go new file mode 100644 index 000000000..89287d573 --- /dev/null +++ b/proxy/config_test.go @@ -0,0 +1,46 @@ +package proxy + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseConfigFile(t *testing.T) { + cfg, err := ParseConfigFile("testdata/config-kitchensink.hcl") + require.Nil(t, err) + + expect := &Config{ + ProxyID: "foo", + Token: "11111111-2222-3333-4444-555555555555", + ProxiedServiceName: "web", + ProxiedServiceNamespace: "default", + PublicListener: PublicListenerConfig{ + BindAddress: ":9999", + LocalServiceAddress: "127.0.0.1:5000", + LocalConnectTimeoutMs: 1000, + HandshakeTimeoutMs: 5000, + }, + Upstreams: []UpstreamConfig{ + { + LocalBindAddress: "127.0.0.1:6000", + DestinationName: "db", + DestinationNamespace: "default", + DestinationType: "service", + ConnectTimeoutMs: 10000, + }, + { + LocalBindAddress: "127.0.0.1:6001", + DestinationName: "geo-cache", + DestinationNamespace: "default", + DestinationType: "prepared_query", + ConnectTimeoutMs: 10000, + }, + }, + DevCAFile: "connect/testdata/ca1-ca-consul-internal.cert.pem", + DevServiceCertFile: "connect/testdata/ca1-svc-web.cert.pem", + DevServiceKeyFile: "connect/testdata/ca1-svc-web.key.pem", + } + + require.Equal(t, expect, cfg) +} diff --git a/proxy/conn.go b/proxy/conn.go new file mode 100644 index 000000000..dfad81db7 --- /dev/null +++ b/proxy/conn.go @@ -0,0 +1,48 @@ +package proxy + +import ( + "io" + "net" + "sync/atomic" +) + +// Conn represents a single proxied TCP connection. 
+type Conn struct { + src, dst net.Conn + stopping int32 +} + +// NewConn returns a conn joining the two given net.Conn +func NewConn(src, dst net.Conn) *Conn { + return &Conn{ + src: src, + dst: dst, + stopping: 0, + } +} + +// Close closes both connections. +func (c *Conn) Close() { + atomic.StoreInt32(&c.stopping, 1) + c.src.Close() + c.dst.Close() +} + +// CopyBytes will continuously copy bytes in both directions between src and dst +// until either connection is closed. +func (c *Conn) CopyBytes() error { + defer c.Close() + + go func() { + // Need this since Copy is only guaranteed to stop when it's source reader + // (second arg) hits EOF or error but either conn might close first possibly + // causing this goroutine to exit but not the outer one. See TestSc + //defer c.Close() + io.Copy(c.dst, c.src) + }() + _, err := io.Copy(c.src, c.dst) + if atomic.LoadInt32(&c.stopping) == 1 { + return nil + } + return err +} diff --git a/proxy/conn_test.go b/proxy/conn_test.go new file mode 100644 index 000000000..ac907238d --- /dev/null +++ b/proxy/conn_test.go @@ -0,0 +1,119 @@ +package proxy + +import ( + "bufio" + "net" + "testing" + + "github.com/stretchr/testify/require" +) + +// testConnSetup listens on a random TCP port and passes the accepted net.Conn +// back to test code on returned channel. It then creates a source and +// destination Conn. 
And a cleanup func +func testConnSetup(t *testing.T) (net.Conn, net.Conn, func()) { + t.Helper() + + l, err := net.Listen("tcp", "localhost:0") + require.Nil(t, err) + + ch := make(chan net.Conn, 1) + go func(ch chan net.Conn) { + src, err := l.Accept() + require.Nil(t, err) + ch <- src + }(ch) + + dst, err := net.Dial("tcp", l.Addr().String()) + require.Nil(t, err) + + src := <-ch + + stopper := func() { + l.Close() + src.Close() + dst.Close() + } + + return src, dst, stopper +} + +func TestConn(t *testing.T) { + src, dst, stop := testConnSetup(t) + defer stop() + + c := NewConn(src, dst) + + retCh := make(chan error, 1) + go func() { + retCh <- c.CopyBytes() + }() + + srcR := bufio.NewReader(src) + dstR := bufio.NewReader(dst) + + _, err := src.Write([]byte("ping 1\n")) + require.Nil(t, err) + _, err = dst.Write([]byte("ping 2\n")) + require.Nil(t, err) + + got, err := dstR.ReadString('\n') + require.Equal(t, "ping 1\n", got) + + got, err = srcR.ReadString('\n') + require.Equal(t, "ping 2\n", got) + + _, err = src.Write([]byte("pong 1\n")) + require.Nil(t, err) + _, err = dst.Write([]byte("pong 2\n")) + require.Nil(t, err) + + got, err = dstR.ReadString('\n') + require.Equal(t, "pong 1\n", got) + + got, err = srcR.ReadString('\n') + require.Equal(t, "pong 2\n", got) + + c.Close() + + ret := <-retCh + require.Nil(t, ret, "Close() should not cause error return") +} + +func TestConnSrcClosing(t *testing.T) { + src, dst, stop := testConnSetup(t) + defer stop() + + c := NewConn(src, dst) + retCh := make(chan error, 1) + go func() { + retCh <- c.CopyBytes() + }() + + // If we close the src conn, we expect CopyBytes to return and src to be + // closed too. No good way to assert that the conn is closed really other than + // assume the retCh receive will hand unless CopyBytes exits and that + // CopyBytes defers Closing both. i.e. if this test doesn't time out it's + // good! 
+ src.Close() + <-retCh +} + +func TestConnDstClosing(t *testing.T) { + src, dst, stop := testConnSetup(t) + defer stop() + + c := NewConn(src, dst) + retCh := make(chan error, 1) + go func() { + retCh <- c.CopyBytes() + }() + + // If we close the dst conn, we expect CopyBytes to return and src to be + // closed too. No good way to assert that the conn is closed really other than + // assume the retCh receive will hand unless CopyBytes exits and that + // CopyBytes defers Closing both. i.e. if this test doesn't time out it's + // good! + dst.Close() + <-retCh +} diff --git a/proxy/manager.go b/proxy/manager.go new file mode 100644 index 000000000..c22a1b7ff --- /dev/null +++ b/proxy/manager.go @@ -0,0 +1,140 @@ +package proxy + +import ( + "errors" + "log" + "os" +) + +var ( + // ErrExists is the error returned when adding a proxy that exists already. + ErrExists = errors.New("proxy with that name already exists") + // ErrNotExist is the error returned when removing a proxy that doesn't exist. + ErrNotExist = errors.New("proxy with that name doesn't exist") +) + +// Manager implements the logic for configuring and running a set of proxiers. +// Typically it's used to run one PublicListener and zero or more Upstreams. +type Manager struct { + ch chan managerCmd + + // stopped is used to signal the caller of StopAll when the run loop exits + // after stopping all runners. It's only closed. + stopped chan struct{} + + // runners holds the currently running instances. It should only by accessed + // from within the `run` goroutine. + runners map[string]*Runner + + logger *log.Logger +} + +type managerCmd struct { + name string + p Proxier + errCh chan error +} + +// NewManager creates a manager of proxier instances. +func NewManager() *Manager { + return NewManagerWithLogger(log.New(os.Stdout, "", log.LstdFlags)) +} + +// NewManagerWithLogger creates a manager of proxier instances with the +// specified logger. 
+func NewManagerWithLogger(logger *log.Logger) *Manager { + m := &Manager{ + ch: make(chan managerCmd), + stopped: make(chan struct{}), + runners: make(map[string]*Runner), + logger: logger, + } + go m.run() + return m +} + +// RunProxier starts a new Proxier instance in the manager. It is safe to call +// from separate goroutines. If there is already a running proxy with the same +// name it returns ErrExists. +func (m *Manager) RunProxier(name string, p Proxier) error { + cmd := managerCmd{ + name: name, + p: p, + errCh: make(chan error), + } + m.ch <- cmd + return <-cmd.errCh +} + +// StopProxier stops a Proxier instance by name. It is safe to call from +// separate goroutines. If the instance with that name doesn't exist it returns +// ErrNotExist. +func (m *Manager) StopProxier(name string) error { + cmd := managerCmd{ + name: name, + p: nil, + errCh: make(chan error), + } + m.ch <- cmd + return <-cmd.errCh +} + +// StopAll shuts down the manager instance and stops all running proxies. It is +// safe to call from any goroutine but must only be called once. +func (m *Manager) StopAll() error { + close(m.ch) + <-m.stopped + return nil +} + +// run is the main manager processing loop. It keeps all actions in a single +// goroutine triggered by channel commands to keep it simple to reason about +// lifecycle events for each proxy. +func (m *Manager) run() { + defer close(m.stopped) + + // range over channel blocks and loops on each message received until channel + // is closed. + for cmd := range m.ch { + if cmd.p == nil { + m.remove(&cmd) + } else { + m.add(&cmd) + } + } + + // Shutting down, Stop all the runners + for _, r := range m.runners { + r.Stop() + } +} + +// add the named proxier instance and stop it. Should only be called from the +// run loop. 
+func (m *Manager) add(cmd *managerCmd) { + // Check existing + if _, ok := m.runners[cmd.name]; ok { + cmd.errCh <- ErrExists + return + } + + // Start new runner + r := NewRunnerWithLogger(cmd.name, cmd.p, m.logger) + m.runners[cmd.name] = r + go r.Listen() + cmd.errCh <- nil +} + +// remove the named proxier instance and stop it. Should only be called from the +// run loop. +func (m *Manager) remove(cmd *managerCmd) { + // Fetch proxier by name + r, ok := m.runners[cmd.name] + if !ok { + cmd.errCh <- ErrNotExist + return + } + err := r.Stop() + delete(m.runners, cmd.name) + cmd.errCh <- err +} diff --git a/proxy/manager_test.go b/proxy/manager_test.go new file mode 100644 index 000000000..d4fa8c5b4 --- /dev/null +++ b/proxy/manager_test.go @@ -0,0 +1,76 @@ +package proxy + +import ( + "fmt" + "net" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestManager(t *testing.T) { + m := NewManager() + + addrs := TestLocalBindAddrs(t, 3) + + for i := 0; i < len(addrs); i++ { + name := fmt.Sprintf("proxier-%d", i) + // Run proxy + err := m.RunProxier(name, &TestProxier{ + Addr: addrs[i], + Prefix: name + ": ", + }) + require.Nil(t, err) + } + + // Make sure each one is echoing correctly now all are running + for i := 0; i < len(addrs); i++ { + conn, err := net.Dial("tcp", addrs[i]) + require.Nil(t, err) + TestEchoConn(t, conn, fmt.Sprintf("proxier-%d: ", i)) + conn.Close() + } + + // Stop first proxier + err := m.StopProxier("proxier-0") + require.Nil(t, err) + + // We should fail to dial it now. Note that Runner.Stop is synchronous so + // there should be a strong guarantee that it's stopped listening by now. 
+ _, err = net.Dial("tcp", addrs[0]) + require.NotNil(t, err) + + // Rest of proxiers should still be running + for i := 1; i < len(addrs); i++ { + conn, err := net.Dial("tcp", addrs[i]) + require.Nil(t, err) + TestEchoConn(t, conn, fmt.Sprintf("proxier-%d: ", i)) + conn.Close() + } + + // Stop non-existent proxier should fail + err = m.StopProxier("foo") + require.Equal(t, ErrNotExist, err) + + // Add already-running proxier should fail + err = m.RunProxier("proxier-1", &TestProxier{}) + require.Equal(t, ErrExists, err) + + // But rest should stay running + for i := 1; i < len(addrs); i++ { + conn, err := net.Dial("tcp", addrs[i]) + require.Nil(t, err) + TestEchoConn(t, conn, fmt.Sprintf("proxier-%d: ", i)) + conn.Close() + } + + // StopAll should stop everything + err = m.StopAll() + require.Nil(t, err) + + // Verify failures + for i := 0; i < len(addrs); i++ { + _, err = net.Dial("tcp", addrs[i]) + require.NotNilf(t, err, "proxier-%d should not be running", i) + } +} diff --git a/proxy/proxier.go b/proxy/proxier.go new file mode 100644 index 000000000..23940c6ad --- /dev/null +++ b/proxy/proxier.go @@ -0,0 +1,32 @@ +package proxy + +import ( + "errors" + "net" +) + +// ErrStopped is returned for operations on a proxy that is stopped +var ErrStopped = errors.New("stopped") + +// ErrStopping is returned for operations on a proxy that is stopping +var ErrStopping = errors.New("stopping") + +// Proxier is an interface for managing different proxy implementations in a +// standard way. We have at least two different types of Proxier implementations +// needed: one for the incoming mTLS -> local proxy and another for each +// "upstream" service the app needs to talk out to (which listens locally and +// performs service discovery to find a suitable remote service). 
+type Proxier interface { + // Listener returns a net.Listener that is open and ready for use, the Proxy + // manager will arrange accepting new connections from it and passing them to + // the handler method. + Listener() (net.Listener, error) + + // HandleConn is called for each incoming connection accepted by the listener. + // It is called in it's own goroutine and should run until it hits an error. + // When stopping the Proxier, the manager will simply close the conn provided + // and expects an error to be eventually returned. Any time spent not blocked + // on the passed conn (for example doing service discovery) should therefore + // be time-bound so that shutdown can't stall forever. + HandleConn(conn net.Conn) error +} diff --git a/proxy/proxy.go b/proxy/proxy.go new file mode 100644 index 000000000..a293466b8 --- /dev/null +++ b/proxy/proxy.go @@ -0,0 +1,112 @@ +package proxy + +import ( + "context" + "log" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/connect" +) + +// Proxy implements the built-in connect proxy. +type Proxy struct { + proxyID, token string + + connect connect.Client + manager *Manager + cfgWatch ConfigWatcher + cfg *Config + + logger *log.Logger +} + +// NewFromConfigFile returns a Proxy instance configured just from a local file. +// This is intended mostly for development and bypasses the normal mechanisms +// for fetching config and certificates from the local agent. 
+func NewFromConfigFile(client *api.Client, filename string, + logger *log.Logger) (*Proxy, error) { + cfg, err := ParseConfigFile(filename) + if err != nil { + return nil, err + } + + connect, err := connect.NewInsecureDevClientWithLocalCerts(client, + cfg.DevCAFile, cfg.DevServiceCertFile, cfg.DevServiceKeyFile) + if err != nil { + return nil, err + } + + p := &Proxy{ + proxyID: cfg.ProxyID, + connect: connect, + manager: NewManagerWithLogger(logger), + cfgWatch: NewStaticConfigWatcher(cfg), + logger: logger, + } + return p, nil +} + +// New returns a Proxy with the given id, consuming the provided (configured) +// agent. It is ready to Run(). +func New(client *api.Client, proxyID string, logger *log.Logger) (*Proxy, error) { + p := &Proxy{ + proxyID: proxyID, + connect: connect.NewClient(client), + manager: NewManagerWithLogger(logger), + cfgWatch: &AgentConfigWatcher{client: client}, + logger: logger, + } + return p, nil +} + +// Run the proxy instance until a fatal error occurs or ctx is cancelled. 
+func (p *Proxy) Run(ctx context.Context) error { + defer p.manager.StopAll() + + // Watch for config changes (initial setup happens on first "change") + for { + select { + case newCfg := <-p.cfgWatch.Watch(): + p.logger.Printf("[DEBUG] got new config") + if p.cfg == nil { + // Initial setup + err := p.startPublicListener(ctx, newCfg.PublicListener) + if err != nil { + return err + } + } + + // TODO add/remove upstreams properly based on a diff with current + for _, uc := range newCfg.Upstreams { + uc.Client = p.connect + uc.logger = p.logger + err := p.manager.RunProxier(uc.String(), NewUpstream(uc)) + if err == ErrExists { + continue + } + if err != nil { + p.logger.Printf("[ERR] failed to start upstream %s: %s", uc.String(), + err) + } + } + p.cfg = newCfg + + case <-ctx.Done(): + return nil + } + } +} + +func (p *Proxy) startPublicListener(ctx context.Context, + cfg PublicListenerConfig) error { + + // Get TLS creds + tlsCfg, err := p.connect.ServerTLSConfig() + if err != nil { + return err + } + cfg.TLSConfig = tlsCfg + + cfg.logger = p.logger + return p.manager.RunProxier("public_listener", NewPublicListener(cfg)) +} diff --git a/proxy/public_listener.go b/proxy/public_listener.go new file mode 100644 index 000000000..1942992cf --- /dev/null +++ b/proxy/public_listener.go @@ -0,0 +1,119 @@ +package proxy + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "os" + "time" +) + +// PublicListener provides an implementation of Proxier that listens for inbound +// mTLS connections, authenticates them with the local agent, and if successful +// forwards them to the locally configured app. +type PublicListener struct { + cfg *PublicListenerConfig +} + +// PublicListenerConfig contains the most basic parameters needed to start the +// proxy. +// +// Note that the tls.Configs here are expected to be "dynamic" in the sense that +// they are expected to use `GetConfigForClient` (added in go 1.8) to return +// dynamic config per connection if required. 
+type PublicListenerConfig struct { + // BindAddress is the host:port the public mTLS listener will bind to. + BindAddress string `json:"bind_address" hcl:"bind_address"` + + // LocalServiceAddress is the host:port for the proxied application. This + // should be on loopback or otherwise protected as it's plain TCP. + LocalServiceAddress string `json:"local_service_address" hcl:"local_service_address"` + + // TLSConfig config is used for the mTLS listener. + TLSConfig *tls.Config + + // LocalConnectTimeout is the timeout for establishing connections with the + // local backend. Defaults to 1000 (1s). + LocalConnectTimeoutMs int `json:"local_connect_timeout_ms" hcl:"local_connect_timeout_ms"` + + // HandshakeTimeout is the timeout for incoming mTLS clients to complete a + // handshake. Setting this low avoids DOS by malicious clients holding + // resources open. Defaults to 10000 (10s). + HandshakeTimeoutMs int `json:"handshake_timeout_ms" hcl:"handshake_timeout_ms"` + + logger *log.Logger +} + +func (plc *PublicListenerConfig) applyDefaults() { + if plc.LocalConnectTimeoutMs == 0 { + plc.LocalConnectTimeoutMs = 1000 + } + if plc.HandshakeTimeoutMs == 0 { + plc.HandshakeTimeoutMs = 10000 + } + if plc.logger == nil { + plc.logger = log.New(os.Stdout, "", log.LstdFlags) + } +} + +// NewPublicListener returns a proxy instance with the given config. 
+func NewPublicListener(cfg PublicListenerConfig) *PublicListener { + p := &PublicListener{ + cfg: &cfg, + } + p.cfg.applyDefaults() + return p +} + +// Listener implements Proxier +func (p *PublicListener) Listener() (net.Listener, error) { + l, err := net.Listen("tcp", p.cfg.BindAddress) + if err != nil { + return nil, err + } + + return tls.NewListener(l, p.cfg.TLSConfig), nil +} + +// HandleConn implements Proxier +func (p *PublicListener) HandleConn(conn net.Conn) error { + defer conn.Close() + tlsConn, ok := conn.(*tls.Conn) + if !ok { + return fmt.Errorf("non-TLS conn") + } + + // Setup Handshake timer + to := time.Duration(p.cfg.HandshakeTimeoutMs) * time.Millisecond + err := tlsConn.SetDeadline(time.Now().Add(to)) + if err != nil { + return err + } + + // Force TLS handshake so that abusive clients can't hold resources open + err = tlsConn.Handshake() + if err != nil { + return err + } + + // Handshake OK, clear the deadline + err = tlsConn.SetDeadline(time.Time{}) + if err != nil { + return err + } + + // Huzzah, open a connection to the backend and let them talk + // TODO maybe add a connection pool here? + to = time.Duration(p.cfg.LocalConnectTimeoutMs) * time.Millisecond + dst, err := net.DialTimeout("tcp", p.cfg.LocalServiceAddress, to) + if err != nil { + return fmt.Errorf("failed dialling local app: %s", err) + } + + p.cfg.logger.Printf("[DEBUG] accepted connection from %s", conn.RemoteAddr()) + + // Hand conn and dst over to Conn to manage the byte copying. 
+ c := NewConn(conn, dst) + return c.CopyBytes() +} diff --git a/proxy/public_listener_test.go b/proxy/public_listener_test.go new file mode 100644 index 000000000..83e84d658 --- /dev/null +++ b/proxy/public_listener_test.go @@ -0,0 +1,38 @@ +package proxy + +import ( + "crypto/tls" + "testing" + + "github.com/hashicorp/consul/connect" + "github.com/stretchr/testify/require" +) + +func TestPublicListener(t *testing.T) { + addrs := TestLocalBindAddrs(t, 2) + + cfg := PublicListenerConfig{ + BindAddress: addrs[0], + LocalServiceAddress: addrs[1], + HandshakeTimeoutMs: 100, + LocalConnectTimeoutMs: 100, + TLSConfig: connect.TestTLSConfig(t, "ca1", "web"), + } + + testApp, err := NewTestTCPServer(t, cfg.LocalServiceAddress) + require.Nil(t, err) + defer testApp.Close() + + p := NewPublicListener(cfg) + + // Run proxy + r := NewRunner("test", p) + go r.Listen() + defer r.Stop() + + // Proxy and backend are running, play the part of a TLS client using same + // cert for now. + conn, err := tls.Dial("tcp", cfg.BindAddress, connect.TestTLSConfig(t, "ca1", "web")) + require.Nil(t, err) + TestEchoConn(t, conn, "") +} diff --git a/proxy/runner.go b/proxy/runner.go new file mode 100644 index 000000000..b559b22b7 --- /dev/null +++ b/proxy/runner.go @@ -0,0 +1,118 @@ +package proxy + +import ( + "log" + "net" + "os" + "sync" + "sync/atomic" +) + +// Runner manages the lifecycle of one Proxier. +type Runner struct { + name string + p Proxier + + // Stopping is if a flag that is updated and read atomically + stopping int32 + stopCh chan struct{} + // wg is used to signal back to Stop when all goroutines have stopped + wg sync.WaitGroup + + logger *log.Logger +} + +// NewRunner returns a Runner ready to Listen. +func NewRunner(name string, p Proxier) *Runner { + return NewRunnerWithLogger(name, p, log.New(os.Stdout, "", log.LstdFlags)) +} + +// NewRunnerWithLogger returns a Runner ready to Listen using the specified +// log.Logger. 
+func NewRunnerWithLogger(name string, p Proxier, logger *log.Logger) *Runner { + return &Runner{ + name: name, + p: p, + stopCh: make(chan struct{}), + logger: logger, + } +} + +// Listen starts the proxier instance. It blocks until a fatal error occurs or +// Stop() is called. +func (r *Runner) Listen() error { + if atomic.LoadInt32(&r.stopping) == 1 { + return ErrStopped + } + + l, err := r.p.Listener() + if err != nil { + return err + } + r.logger.Printf("[INFO] proxy: %s listening on %s", r.name, l.Addr().String()) + + // Run goroutine that will close listener on stop + go func() { + <-r.stopCh + l.Close() + r.logger.Printf("[INFO] proxy: %s shutdown", r.name) + }() + + // Add one for the accept loop + r.wg.Add(1) + defer r.wg.Done() + + for { + conn, err := l.Accept() + if err != nil { + if atomic.LoadInt32(&r.stopping) == 1 { + return nil + } + return err + } + + go r.handle(conn) + } + + return nil +} + +func (r *Runner) handle(conn net.Conn) { + r.wg.Add(1) + defer r.wg.Done() + + // Start a goroutine that will watch for the Runner stopping and close the + // conn, or watch for the Proxier closing (e.g. because other end hung up) and + // stop the goroutine to avoid leaks + doneCh := make(chan struct{}) + defer close(doneCh) + + go func() { + select { + case <-r.stopCh: + r.logger.Printf("[DEBUG] proxy: %s: terminating conn", r.name) + conn.Close() + return + case <-doneCh: + // Connection is already closed, this goroutine not needed any more + return + } + }() + + err := r.p.HandleConn(conn) + if err != nil { + r.logger.Printf("[DEBUG] proxy: %s: connection terminated: %s", r.name, err) + } else { + r.logger.Printf("[DEBUG] proxy: %s: connection terminated", r.name) + } +} + +// Stop stops the Listener and closes any active connections immediately. 
+func (r *Runner) Stop() error { + old := atomic.SwapInt32(&r.stopping, 1) + if old == 0 { + close(r.stopCh) + } + r.wg.Wait() + return nil +} diff --git a/proxy/testdata/config-kitchensink.hcl b/proxy/testdata/config-kitchensink.hcl new file mode 100644 index 000000000..766928353 --- /dev/null +++ b/proxy/testdata/config-kitchensink.hcl @@ -0,0 +1,36 @@ +# Example proxy config with everything specified + +proxy_id = "foo" +token = "11111111-2222-3333-4444-555555555555" + +proxied_service_name = "web" +proxied_service_namespace = "default" + +# Assumes running consul in dev mode from the repo root... +dev_ca_file = "connect/testdata/ca1-ca-consul-internal.cert.pem" +dev_service_cert_file = "connect/testdata/ca1-svc-web.cert.pem" +dev_service_key_file = "connect/testdata/ca1-svc-web.key.pem" + +public_listener { + bind_address = ":9999" + local_service_address = "127.0.0.1:5000" + local_connect_timeout_ms = 1000 + handshake_timeout_ms = 5000 +} + +upstreams = [ + { + local_bind_address = "127.0.0.1:6000" + destination_name = "db" + destination_namespace = "default" + destination_type = "service" + connect_timeout_ms = 10000 + }, + { + local_bind_address = "127.0.0.1:6001" + destination_name = "geo-cache" + destination_namespace = "default" + destination_type = "prepared_query" + connect_timeout_ms = 10000 + } +] diff --git a/proxy/testing.go b/proxy/testing.go new file mode 100644 index 000000000..bd132b77f --- /dev/null +++ b/proxy/testing.go @@ -0,0 +1,170 @@ +package proxy + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "log" + "net" + "sync/atomic" + + "github.com/hashicorp/consul/lib/freeport" + "github.com/mitchellh/go-testing-interface" + "github.com/stretchr/testify/require" +) + +// TestLocalBindAddrs returns n localhost address:port strings with free ports +// for binding test listeners to. 
+func TestLocalBindAddrs(t testing.T, n int) []string { + ports := freeport.GetT(t, n) + addrs := make([]string, n) + for i, p := range ports { + addrs[i] = fmt.Sprintf("localhost:%d", p) + } + return addrs +} + +// TestTCPServer is a simple TCP echo server for use during tests. +type TestTCPServer struct { + l net.Listener + stopped int32 + accepted, closed, active int32 +} + +// NewTestTCPServer opens as a listening socket on the given address and returns +// a TestTCPServer serving requests to it. The server is already started and can +// be stopped by calling Close(). +func NewTestTCPServer(t testing.T, addr string) (*TestTCPServer, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + log.Printf("test tcp server listening on %s", addr) + s := &TestTCPServer{ + l: l, + } + go s.accept() + return s, nil +} + +// Close stops the server +func (s *TestTCPServer) Close() { + atomic.StoreInt32(&s.stopped, 1) + if s.l != nil { + s.l.Close() + } +} + +func (s *TestTCPServer) accept() error { + for { + conn, err := s.l.Accept() + if err != nil { + if atomic.LoadInt32(&s.stopped) == 1 { + log.Printf("test tcp echo server %s stopped", s.l.Addr()) + return nil + } + log.Printf("test tcp echo server %s failed: %s", s.l.Addr(), err) + return err + } + + atomic.AddInt32(&s.accepted, 1) + atomic.AddInt32(&s.active, 1) + + go func(c net.Conn) { + io.Copy(c, c) + atomic.AddInt32(&s.closed, 1) + atomic.AddInt32(&s.active, -1) + }(conn) + } +} + +// TestEchoConn attempts to write some bytes to conn and expects to read them +// back within a short timeout (10ms). If prefix is not empty we expect it to be +// poresent at the start of all echoed responses (for example to distinguish +// between multiple echo server instances). 
+func TestEchoConn(t testing.T, conn net.Conn, prefix string) { + t.Helper() + + // Write some bytes and read them back + n, err := conn.Write([]byte("Hello World")) + require.Equal(t, 11, n) + require.Nil(t, err) + + expectLen := 11 + len(prefix) + + buf := make([]byte, expectLen) + // read until our buffer is full - it might be separate packets if prefix is + // in use. + got := 0 + for got < expectLen { + n, err = conn.Read(buf[got:]) + require.Nil(t, err) + got += n + } + require.Equal(t, expectLen, got) + require.Equal(t, prefix+"Hello World", string(buf[:])) +} + +// TestConnectClient is a testing mock that implements connect.Client but +// stubs the methods to make testing simpler. +type TestConnectClient struct { + Server *TestTCPServer + TLSConfig *tls.Config + Calls []callTuple +} +type callTuple struct { + typ, ns, name string +} + +// ServerTLSConfig implements connect.Client +func (c *TestConnectClient) ServerTLSConfig() (*tls.Config, error) { + return c.TLSConfig, nil +} + +// DialService implements connect.Client +func (c *TestConnectClient) DialService(ctx context.Context, namespace, + name string) (net.Conn, error) { + + c.Calls = append(c.Calls, callTuple{"service", namespace, name}) + + // Actually returning a vanilla TCP conn not a TLS one but the caller + // shouldn't care for tests since this interface should hide all the TLS + // config and verification. + return net.Dial("tcp", c.Server.l.Addr().String()) +} + +// DialPreparedQuery implements connect.Client +func (c *TestConnectClient) DialPreparedQuery(ctx context.Context, namespace, + name string) (net.Conn, error) { + + c.Calls = append(c.Calls, callTuple{"prepared_query", namespace, name}) + + // Actually returning a vanilla TCP conn not a TLS one but the caller + // shouldn't care for tests since this interface should hide all the TLS + // config and verification. 
+ return net.Dial("tcp", c.Server.l.Addr().String()) +} + +// TestProxier is a simple Proxier instance that can be used in tests. +type TestProxier struct { + // Addr to listen on + Addr string + // Prefix to write first before echoing on new connections + Prefix string +} + +// Listener implements Proxier +func (p *TestProxier) Listener() (net.Listener, error) { + return net.Listen("tcp", p.Addr) +} + +// HandleConn implements Proxier +func (p *TestProxier) HandleConn(conn net.Conn) error { + _, err := conn.Write([]byte(p.Prefix)) + if err != nil { + return err + } + _, err = io.Copy(conn, conn) + return err +} diff --git a/proxy/upstream.go b/proxy/upstream.go new file mode 100644 index 000000000..1101624be --- /dev/null +++ b/proxy/upstream.go @@ -0,0 +1,261 @@ +package proxy + +import ( + "context" + "fmt" + "log" + "net" + "os" + "time" + + "github.com/hashicorp/consul/connect" +) + +// Upstream provides an implementation of Proxier that listens for inbound TCP +// connections on the private network shared with the proxied application +// (typically localhost). For each accepted connection from the app, it uses the +// connect.Client to discover an instance and connect over mTLS. +type Upstream struct { + cfg *UpstreamConfig +} + +// UpstreamConfig configures the upstream +type UpstreamConfig struct { + // Client is the connect client to perform discovery with + Client connect.Client + + // LocalAddress is the host:port to listen on for local app connections. + LocalBindAddress string `json:"local_bind_address" hcl:"local_bind_address,attr"` + + // DestinationName is the service name of the destination. + DestinationName string `json:"destination_name" hcl:"destination_name,attr"` + + // DestinationNamespace is the namespace of the destination. + DestinationNamespace string `json:"destination_namespace" hcl:"destination_namespace,attr"` + + // DestinationType determines which service discovery method is used to find a + // candidate instance to connect to. 
+ DestinationType string `json:"destination_type" hcl:"destination_type,attr"` + + // ConnectTimeout is the timeout for establishing connections with the remote + // service instance. Defaults to 10,000 (10s). + ConnectTimeoutMs int `json:"connect_timeout_ms" hcl:"connect_timeout_ms,attr"` + + logger *log.Logger +} + +func (uc *UpstreamConfig) applyDefaults() { + if uc.ConnectTimeoutMs == 0 { + uc.ConnectTimeoutMs = 10000 + } + if uc.logger == nil { + uc.logger = log.New(os.Stdout, "", log.LstdFlags) + } +} + +// String returns a string that uniquely identifies the Upstream. Used for +// identifying the upstream in log output and map keys. +func (uc *UpstreamConfig) String() string { + return fmt.Sprintf("%s->%s:%s/%s", uc.LocalBindAddress, uc.DestinationType, + uc.DestinationNamespace, uc.DestinationName) +} + +// NewUpstream returns an outgoing proxy instance with the given config. +func NewUpstream(cfg UpstreamConfig) *Upstream { + u := &Upstream{ + cfg: &cfg, + } + u.cfg.applyDefaults() + return u +} + +// String returns a string that uniquely identifies the Upstream. Used for +// identifying the upstream in log output and map keys. +func (u *Upstream) String() string { + return u.cfg.String() +} + +// Listener implements Proxier +func (u *Upstream) Listener() (net.Listener, error) { + return net.Listen("tcp", u.cfg.LocalBindAddress) +} + +// HandleConn implements Proxier +func (u *Upstream) HandleConn(conn net.Conn) error { + defer conn.Close() + + // Discover destination instance + dst, err := u.discoverAndDial() + if err != nil { + return err + } + + // Hand conn and dst over to Conn to manage the byte copying. 
+ c := NewConn(conn, dst) + return c.CopyBytes() +} + +func (u *Upstream) discoverAndDial() (net.Conn, error) { + to := time.Duration(u.cfg.ConnectTimeoutMs) * time.Millisecond + ctx, cancel := context.WithTimeout(context.Background(), to) + defer cancel() + + switch u.cfg.DestinationType { + case "service": + return u.cfg.Client.DialService(ctx, u.cfg.DestinationNamespace, + u.cfg.DestinationName) + + case "prepared_query": + return u.cfg.Client.DialPreparedQuery(ctx, u.cfg.DestinationNamespace, + u.cfg.DestinationName) + + default: + return nil, fmt.Errorf("invalid destination type %s", u.cfg.DestinationType) + } +} + +/* +// Upstream represents a service that the proxied application needs to connect +// out to. It provides a dedication local TCP listener (usually listening only +// on loopback) and forwards incoming connections to the proxy to handle. +type Upstream struct { + cfg *UpstreamConfig + wg sync.WaitGroup + + proxy *Proxy + fatalErr error +} + +// NewUpstream creates an upstream ready to attach to a proxy instance with +// Proxy.AddUpstream. An Upstream can only be attached to single Proxy instance +// at once. +func NewUpstream(p *Proxy, cfg *UpstreamConfig) *Upstream { + return &Upstream{ + cfg: cfg, + proxy: p, + shutdown: make(chan struct{}), + } +} + +// UpstreamConfig configures the upstream +type UpstreamConfig struct { + // LocalAddress is the host:port to listen on for local app connections. + LocalAddress string + + // DestinationName is the service name of the destination. + DestinationName string + + // DestinationNamespace is the namespace of the destination. + DestinationNamespace string + + // DestinationType determines which service discovery method is used to find a + // candidate instance to connect to. + DestinationType string +} + +// String returns a string representation for the upstream for debugging or +// use as a unique key. 
+func (uc *UpstreamConfig) String() string { + return fmt.Sprintf("%s->%s:%s/%s", uc.LocalAddress, uc.DestinationType, + uc.DestinationNamespace, uc.DestinationName) +} + +func (u *Upstream) listen() error { + l, err := net.Listen("tcp", u.cfg.LocalAddress) + if err != nil { + u.fatal(err) + return + } + + for { + conn, err := l.Accept() + if err != nil { + return err + } + + go u.discoverAndConnect(conn) + } +} + +func (u *Upstream) discoverAndConnect(src net.Conn) { + // First, we need an upstream instance from Consul to connect to + dstAddrs, err := u.discoverInstances() + if err != nil { + u.fatal(fmt.Errorf("failed to discover upstream instances: %s", err)) + return + } + + if len(dstAddrs) < 1 { + log.Printf("[INFO] no instances found for %s", len(dstAddrs), u) + } + + // Attempt connection to first one that works + // TODO: configurable number/deadline? + for idx, addr := range dstAddrs { + err := u.proxy.startProxyingConn(src, addr, false) + if err != nil { + log.Printf("[INFO] failed to connect to %s: %s (%d of %d)", addr, err, + idx+1, len(dstAddrs)) + continue + } + return + } + + log.Printf("[INFO] failed to connect to all %d instances for %s", + len(dstAddrs), u) +} + +func (u *Upstream) discoverInstances() ([]string, error) { + switch u.cfg.DestinationType { + case "service": + svcs, _, err := u.cfg.Consul.Health().Service(u.cfg.DestinationName, "", + true, nil) + if err != nil { + return nil, err + } + + addrs := make([]string, len(svcs)) + + // Shuffle order as we go since health endpoint doesn't + perm := rand.Perm(len(addrs)) + for i, se := range svcs { + // Pick location in output array based on next permutation position + j := perm[i] + addrs[j] = fmt.Sprintf("%s:%d", se.Service.Address, se.Service.Port) + } + + return addrs, nil + + case "prepared_query": + pqr, _, err := u.cfg.Consul.PreparedQuery().Execute(u.cfg.DestinationName, + nil) + if err != nil { + return nil, err + } + + addrs := make([]string, 0, len(svcs)) + for _, se := range 
pqr.Nodes { + addrs = append(addrs, fmt.Sprintf("%s:%d", se.Service.Address, + se.Service.Port)) + } + + // PreparedQuery execution already shuffles the result + return addrs, nil + + default: + u.fatal(fmt.Errorf("invalid destination type %s", u.cfg.DestinationType)) + } +} + +func (u *Upstream) fatal(err Error) { + log.Printf("[ERROR] upstream %s stopping on error: %s", u.cfg.LocalAddress, + err) + u.fatalErr = err +} + +// String returns a string representation for the upstream for debugging or +// use as a unique key. +func (u *Upstream) String() string { + return u.cfg.String() +} +*/ diff --git a/proxy/upstream_test.go b/proxy/upstream_test.go new file mode 100644 index 000000000..79bca0136 --- /dev/null +++ b/proxy/upstream_test.go @@ -0,0 +1,75 @@ +package proxy + +import ( + "net" + "testing" + + "github.com/hashicorp/consul/connect" + "github.com/stretchr/testify/require" +) + +func TestUpstream(t *testing.T) { + tests := []struct { + name string + cfg UpstreamConfig + }{ + { + name: "service", + cfg: UpstreamConfig{ + DestinationType: "service", + DestinationNamespace: "default", + DestinationName: "db", + ConnectTimeoutMs: 100, + }, + }, + { + name: "prepared_query", + cfg: UpstreamConfig{ + DestinationType: "prepared_query", + DestinationNamespace: "default", + DestinationName: "geo-db", + ConnectTimeoutMs: 100, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + addrs := TestLocalBindAddrs(t, 2) + + testApp, err := NewTestTCPServer(t, addrs[0]) + require.Nil(t, err) + defer testApp.Close() + + // Create mock client that will "discover" our test tcp server as a target and + // skip TLS altogether. 
+ client := &TestConnectClient{ + Server: testApp, + TLSConfig: connect.TestTLSConfig(t, "ca1", "web"), + } + + // Override cfg params + tt.cfg.LocalBindAddress = addrs[1] + tt.cfg.Client = client + + u := NewUpstream(tt.cfg) + + // Run proxy + r := NewRunner("test", u) + go r.Listen() + defer r.Stop() + + // Proxy and fake remote service are running, play the part of the app + // connecting to a remote connect service over TCP. + conn, err := net.Dial("tcp", tt.cfg.LocalBindAddress) + require.Nil(t, err) + TestEchoConn(t, conn, "") + + // Validate that discovery actually was called as we expected + require.Len(t, client.Calls, 1) + require.Equal(t, tt.cfg.DestinationType, client.Calls[0].typ) + require.Equal(t, tt.cfg.DestinationNamespace, client.Calls[0].ns) + require.Equal(t, tt.cfg.DestinationName, client.Calls[0].name) + }) + } +} From 2d6a2ce1e3c5c62c13cfeac96aeabd03446ad301 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 29 Mar 2018 16:25:11 +0100 Subject: [PATCH 125/627] connect.Service based implementation after review feedback. 
--- agent/connect/testing_ca.go | 108 ++--- agent/connect/testing_ca_test.go | 15 +- connect/auth.go | 43 -- connect/certgen/certgen.go | 86 ++++ connect/client.go | 436 +++++++++--------- connect/client_test.go | 238 +++++----- connect/example_test.go | 53 +++ connect/resolver.go | 131 ++++++ connect/resolver_test.go | 164 +++++++ connect/service.go | 185 ++++++++ connect/service_test.go | 105 +++++ .../testdata/ca1-ca-consul-internal.cert.pem | 14 - .../testdata/ca1-ca-consul-internal.key.pem | 5 - connect/testdata/ca1-svc-cache.cert.pem | 14 - connect/testdata/ca1-svc-cache.key.pem | 5 - connect/testdata/ca1-svc-db.cert.pem | 13 - connect/testdata/ca1-svc-db.key.pem | 5 - connect/testdata/ca1-svc-web.cert.pem | 13 - connect/testdata/ca1-svc-web.key.pem | 5 - connect/testdata/ca2-ca-vault.cert.pem | 14 - connect/testdata/ca2-ca-vault.key.pem | 5 - connect/testdata/ca2-svc-cache.cert.pem | 13 - connect/testdata/ca2-svc-cache.key.pem | 5 - connect/testdata/ca2-svc-db.cert.pem | 13 - connect/testdata/ca2-svc-db.key.pem | 5 - connect/testdata/ca2-svc-web.cert.pem | 13 - connect/testdata/ca2-svc-web.key.pem | 5 - connect/testdata/ca2-xc-by-ca1.cert.pem | 14 - connect/testdata/mkcerts.go | 243 ---------- connect/testing.go | 162 ++++++- connect/tls.go | 120 +++-- connect/tls_test.go | 114 +++-- 32 files changed, 1417 insertions(+), 947 deletions(-) delete mode 100644 connect/auth.go create mode 100644 connect/certgen/certgen.go create mode 100644 connect/example_test.go create mode 100644 connect/resolver.go create mode 100644 connect/resolver_test.go create mode 100644 connect/service.go create mode 100644 connect/service_test.go delete mode 100644 connect/testdata/ca1-ca-consul-internal.cert.pem delete mode 100644 connect/testdata/ca1-ca-consul-internal.key.pem delete mode 100644 connect/testdata/ca1-svc-cache.cert.pem delete mode 100644 connect/testdata/ca1-svc-cache.key.pem delete mode 100644 connect/testdata/ca1-svc-db.cert.pem delete mode 100644 
connect/testdata/ca1-svc-db.key.pem delete mode 100644 connect/testdata/ca1-svc-web.cert.pem delete mode 100644 connect/testdata/ca1-svc-web.key.pem delete mode 100644 connect/testdata/ca2-ca-vault.cert.pem delete mode 100644 connect/testdata/ca2-ca-vault.key.pem delete mode 100644 connect/testdata/ca2-svc-cache.cert.pem delete mode 100644 connect/testdata/ca2-svc-cache.key.pem delete mode 100644 connect/testdata/ca2-svc-db.cert.pem delete mode 100644 connect/testdata/ca2-svc-db.key.pem delete mode 100644 connect/testdata/ca2-svc-web.cert.pem delete mode 100644 connect/testdata/ca2-svc-web.key.pem delete mode 100644 connect/testdata/ca2-xc-by-ca1.cert.pem delete mode 100644 connect/testdata/mkcerts.go diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index a2f711763..3fbcf2e02 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -29,7 +29,7 @@ const testClusterID = "11111111-2222-3333-4444-555555555555" // testCACounter is just an atomically incremented counter for creating // unique names for the CA certs. -var testCACounter uint64 = 0 +var testCACounter uint64 // TestCA creates a test CA certificate and signing key and returns it // in the CARoot structure format. The returned CA will be set as Active = true. @@ -44,7 +44,8 @@ func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { result.Name = fmt.Sprintf("Test CA %d", atomic.AddUint64(&testCACounter, 1)) // Create the private key we'll use for this CA cert. - signer := testPrivateKey(t, &result) + signer, keyPEM := testPrivateKey(t) + result.SigningKey = keyPEM // The serial number for the cert sn, err := testSerialNumber() @@ -125,9 +126,9 @@ func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { return &result } -// TestLeaf returns a valid leaf certificate for the named service with -// the given CA Root. 
-func TestLeaf(t testing.T, service string, root *structs.CARoot) string { +// TestLeaf returns a valid leaf certificate and it's private key for the named +// service with the given CA Root. +func TestLeaf(t testing.T, service string, root *structs.CARoot) (string, string) { // Parse the CA cert and signing key from the root cert := root.SigningCert if cert == "" { @@ -137,7 +138,7 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { if err != nil { t.Fatalf("error parsing CA cert: %s", err) } - signer, err := ParseSigner(root.SigningKey) + caSigner, err := ParseSigner(root.SigningKey) if err != nil { t.Fatalf("error parsing signing key: %s", err) } @@ -156,6 +157,9 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { t.Fatalf("error generating serial number: %s", err) } + // Genereate fresh private key + pkSigner, pkPEM := testPrivateKey(t) + // Cert template for generation template := x509.Certificate{ SerialNumber: sn, @@ -173,14 +177,14 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { }, NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), NotBefore: time.Now(), - AuthorityKeyId: testKeyID(t, signer.Public()), - SubjectKeyId: testKeyID(t, signer.Public()), + AuthorityKeyId: testKeyID(t, caSigner.Public()), + SubjectKeyId: testKeyID(t, pkSigner.Public()), } // Create the certificate, PEM encode it and return that value. 
var buf bytes.Buffer bs, err := x509.CreateCertificate( - rand.Reader, &template, caCert, signer.Public(), signer) + rand.Reader, &template, caCert, pkSigner.Public(), caSigner) if err != nil { t.Fatalf("error generating certificate: %s", err) } @@ -189,7 +193,7 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) string { t.Fatalf("error encoding private key: %s", err) } - return buf.String() + return buf.String(), pkPEM } // TestCSR returns a CSR to sign the given service along with the PEM-encoded @@ -200,39 +204,22 @@ func TestCSR(t testing.T, uri CertURI) (string, string) { SignatureAlgorithm: x509.ECDSAWithSHA256, } - // Result buffers - var csrBuf, pkBuf bytes.Buffer - // Create the private key we'll use - signer := testPrivateKey(t, nil) + signer, pkPEM := testPrivateKey(t) - { - // Create the private key PEM - bs, err := x509.MarshalECPrivateKey(signer.(*ecdsa.PrivateKey)) - if err != nil { - t.Fatalf("error marshalling PK: %s", err) - } - - err = pem.Encode(&pkBuf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) - if err != nil { - t.Fatalf("error encoding PK: %s", err) - } + // Create the CSR itself + var csrBuf bytes.Buffer + bs, err := x509.CreateCertificateRequest(rand.Reader, template, signer) + if err != nil { + t.Fatalf("error creating CSR: %s", err) } - { - // Create the CSR itself - bs, err := x509.CreateCertificateRequest(rand.Reader, template, signer) - if err != nil { - t.Fatalf("error creating CSR: %s", err) - } - - err = pem.Encode(&csrBuf, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: bs}) - if err != nil { - t.Fatalf("error encoding CSR: %s", err) - } + err = pem.Encode(&csrBuf, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: bs}) + if err != nil { + t.Fatalf("error encoding CSR: %s", err) } - return csrBuf.String(), pkBuf.String() + return csrBuf.String(), pkPEM } // testKeyID returns a KeyID from the given public key. 
This just calls @@ -246,25 +233,26 @@ func testKeyID(t testing.T, raw interface{}) []byte { return result } -// testMemoizePK is the private key that we memoize once we generate it -// once so that our tests don't rely on too much system entropy. -var testMemoizePK atomic.Value - -// testPrivateKey creates an ECDSA based private key. -func testPrivateKey(t testing.T, ca *structs.CARoot) crypto.Signer { - // If we already generated a private key, use that - var pk *ecdsa.PrivateKey - if v := testMemoizePK.Load(); v != nil { - pk = v.(*ecdsa.PrivateKey) - } - - // If we have no key, then create a new one. - if pk == nil { - var err error - pk, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - t.Fatalf("error generating private key: %s", err) - } +// testPrivateKey creates an ECDSA based private key. Both a crypto.Signer and +// the key in PEM form are returned. +// +// NOTE(banks): this was memoized to save entropy during tests but it turns out +// crypto/rand will never block and always reads from /dev/urandom on unix OSes +// which does not consume entropy. +// +// If we find by profiling it's taking a lot of cycles we could optimise/cache +// again but we at least need to use different keys for each distinct CA (when +// multiple CAs are generated at once e.g. to test cross-signing) and a +// different one again for the leafs otherwise we risk tests that have false +// positives since signatures from different logical cert's keys are +// indistinguishable, but worse we build validation chains using AuthorityKeyID +// which will be the same for multiple CAs/Leafs. Also note that our UUID +// generator also reads from crypto rand and is called far more often during +// tests than this will be. 
+func testPrivateKey(t testing.T) (crypto.Signer, string) { + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("error generating private key: %s", err) } bs, err := x509.MarshalECPrivateKey(pk) @@ -277,14 +265,8 @@ func testPrivateKey(t testing.T, ca *structs.CARoot) crypto.Signer { if err != nil { t.Fatalf("error encoding private key: %s", err) } - if ca != nil { - ca.SigningKey = buf.String() - } - // Memoize the key - testMemoizePK.Store(pk) - - return pk + return pk, buf.String() } // testSerialNumber generates a serial number suitable for a certificate. diff --git a/agent/connect/testing_ca_test.go b/agent/connect/testing_ca_test.go index d07aac201..193e532c3 100644 --- a/agent/connect/testing_ca_test.go +++ b/agent/connect/testing_ca_test.go @@ -29,7 +29,7 @@ func TestTestCAAndLeaf(t *testing.T) { // Create the certs ca := TestCA(t, nil) - leaf := TestLeaf(t, "web", ca) + leaf, _ := TestLeaf(t, "web", ca) // Create a temporary directory for storing the certs td, err := ioutil.TempDir("", "consul") @@ -62,8 +62,8 @@ func TestTestCAAndLeaf_xc(t *testing.T) { // Create the certs ca1 := TestCA(t, nil) ca2 := TestCA(t, ca1) - leaf1 := TestLeaf(t, "web", ca1) - leaf2 := TestLeaf(t, "web", ca2) + leaf1, _ := TestLeaf(t, "web", ca1) + leaf2, _ := TestLeaf(t, "web", ca2) // Create a temporary directory for storing the certs td, err := ioutil.TempDir("", "consul") @@ -98,12 +98,3 @@ func TestTestCAAndLeaf_xc(t *testing.T) { assert.Nil(err) } } - -// Test that the private key is memoized to preseve system entropy. 
-func TestTestPrivateKey_memoize(t *testing.T) { - ca1 := TestCA(t, nil) - ca2 := TestCA(t, nil) - if ca1.SigningKey != ca2.SigningKey { - t.Fatal("should have the same signing keys for tests") - } -} diff --git a/connect/auth.go b/connect/auth.go deleted file mode 100644 index 73c16f0bf..000000000 --- a/connect/auth.go +++ /dev/null @@ -1,43 +0,0 @@ -package connect - -import "crypto/x509" - -// Auther is the interface that provides both Authentication and Authorization -// for an mTLS connection. It's only method is compatible with -// tls.Config.VerifyPeerCertificate. -type Auther interface { - // Auth is called during tls Connection establishment to Authenticate and - // Authorize the presented peer. Note that verifiedChains must not be relied - // upon as we typically have to skip Go's internal verification so the - // implementation takes full responsibility to validating the certificate - // against known roots. It is also up to the user of the interface to ensure - // appropriate validation is performed for client or server end by arranging - // for an appropriate implementation to be hooked into the tls.Config used. - Auth(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error -} - -// ClientAuther is used to auth Clients connecting to a Server. -type ClientAuther struct{} - -// Auth implements Auther -func (a *ClientAuther) Auth(rawCerts [][]byte, - verifiedChains [][]*x509.Certificate) error { - - // TODO(banks): implement path validation and AuthZ - return nil -} - -// ServerAuther is used to auth the Server identify from a connecting Client. -type ServerAuther struct { - // TODO(banks): We'll need a way to pass the expected service identity (name, - // namespace, dc, cluster) here based on discovery result. 
-} - -// Auth implements Auther -func (a *ServerAuther) Auth(rawCerts [][]byte, - verifiedChains [][]*x509.Certificate) error { - - // TODO(banks): implement path validation and verify URI matches the target - // service we intended to connect to. - return nil -} diff --git a/connect/certgen/certgen.go b/connect/certgen/certgen.go new file mode 100644 index 000000000..6fecf6ae1 --- /dev/null +++ b/connect/certgen/certgen.go @@ -0,0 +1,86 @@ +// certgen: a tool for generating test certificates on disk for use as +// test-fixtures and for end-to-end testing and local development. +// +// Example usage: +// +// $ go run connect/certgen/certgen.go -out-dir /tmp/connect-certs +// +// You can verify a given leaf with a given root using: +// +// $ openssl verify -verbose -CAfile ca2-ca.cert.pem ca1-svc-db.cert.pem +// +// Note that to verify via the cross-signed intermediate, openssl requires it to +// be bundled with the _root_ CA bundle and will ignore the cert if it's passed +// with the subject. You can do that with: +// +// $ openssl verify -verbose -CAfile \ +// <(cat ca1-ca.cert.pem ca2-xc-by-ca1.cert.pem) \ +// ca2-svc-db.cert.pem +// ca2-svc-db.cert.pem: OK +// +// Note that the same leaf and root without the intermediate should fail: +// +// $ openssl verify -verbose -CAfile ca1-ca.cert.pem ca2-svc-db.cert.pem +// ca2-svc-db.cert.pem: CN = db +// error 20 at 0 depth lookup:unable to get local issuer certificate +// +// NOTE: THIS IS A QUIRK OF OPENSSL; in Connect we distribute the roots alone +// and stable intermediates like the XC cert to the _leaf_. 
+package main // import "github.com/hashicorp/consul/connect/certgen" +import ( + "flag" + "fmt" + "io/ioutil" + "log" + "os" + + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" + "github.com/mitchellh/go-testing-interface" +) + +func main() { + var numCAs = 2 + var services = []string{"web", "db", "cache"} + //var slugRe = regexp.MustCompile("[^a-zA-Z0-9]+") + var outDir string + + flag.StringVar(&outDir, "out-dir", "", + "REQUIRED: the dir to write certificates to") + flag.Parse() + + if outDir == "" { + flag.PrintDefaults() + os.Exit(1) + } + + // Create CA certs + var prevCA *structs.CARoot + for i := 1; i <= numCAs; i++ { + ca := connect.TestCA(&testing.RuntimeT{}, prevCA) + prefix := fmt.Sprintf("%s/ca%d-ca", outDir, i) + writeFile(prefix+".cert.pem", ca.RootCert) + writeFile(prefix+".key.pem", ca.SigningKey) + if prevCA != nil { + fname := fmt.Sprintf("%s/ca%d-xc-by-ca%d.cert.pem", outDir, i, i-1) + writeFile(fname, ca.SigningCert) + } + prevCA = ca + + // Create service certs for each CA + for _, svc := range services { + certPEM, keyPEM := connect.TestLeaf(&testing.RuntimeT{}, svc, ca) + prefix := fmt.Sprintf("%s/ca%d-svc-%s", outDir, i, svc) + writeFile(prefix+".cert.pem", certPEM) + writeFile(prefix+".key.pem", keyPEM) + } + } +} + +func writeFile(name, content string) { + fmt.Println("Writing ", name) + err := ioutil.WriteFile(name, []byte(content), 0600) + if err != nil { + log.Fatalf("failed writing file: %s", err) + } +} diff --git a/connect/client.go b/connect/client.go index 867bf0db5..18e43f4cb 100644 --- a/connect/client.go +++ b/connect/client.go @@ -1,256 +1,256 @@ package connect -import ( - "context" - "crypto/tls" - "fmt" - "math/rand" - "net" +// import ( +// "context" +// "crypto/tls" +// "fmt" +// "math/rand" +// "net" - "github.com/hashicorp/consul/api" -) +// "github.com/hashicorp/consul/api" +// ) -// CertStatus indicates whether the Client currently has valid certificates for -// incoming and 
outgoing connections. -type CertStatus int +// // CertStatus indicates whether the Client currently has valid certificates for +// // incoming and outgoing connections. +// type CertStatus int -const ( - // CertStatusUnknown is the zero value for CertStatus which may be returned - // when a watch channel is closed on shutdown. It has no other meaning. - CertStatusUnknown CertStatus = iota +// const ( +// // CertStatusUnknown is the zero value for CertStatus which may be returned +// // when a watch channel is closed on shutdown. It has no other meaning. +// CertStatusUnknown CertStatus = iota - // CertStatusOK indicates the client has valid certificates and trust roots to - // Authenticate incoming and outgoing connections. - CertStatusOK +// // CertStatusOK indicates the client has valid certificates and trust roots to +// // Authenticate incoming and outgoing connections. +// CertStatusOK - // CertStatusPending indicates the client is waiting to be issued initial - // certificates, or that it's certificates have expired and it's waiting to be - // issued new ones. In this state all incoming and outgoing connections will - // fail. - CertStatusPending -) +// // CertStatusPending indicates the client is waiting to be issued initial +// // certificates, or that it's certificates have expired and it's waiting to be +// // issued new ones. In this state all incoming and outgoing connections will +// // fail. +// CertStatusPending +// ) -func (s CertStatus) String() string { - switch s { - case CertStatusOK: - return "OK" - case CertStatusPending: - return "pending" - case CertStatusUnknown: - fallthrough - default: - return "unknown" - } -} +// func (s CertStatus) String() string { +// switch s { +// case CertStatusOK: +// return "OK" +// case CertStatusPending: +// return "pending" +// case CertStatusUnknown: +// fallthrough +// default: +// return "unknown" +// } +// } -// Client is the interface a basic client implementation must support. 
-type Client interface { - // TODO(banks): build this and test it - // CertStatus returns the current status of the client's certificates. It can - // be used to determine if the Client is able to service requests at the - // current time. - //CertStatus() CertStatus +// // Client is the interface a basic client implementation must support. +// type Client interface { +// // TODO(banks): build this and test it +// // CertStatus returns the current status of the client's certificates. It can +// // be used to determine if the Client is able to service requests at the +// // current time. +// //CertStatus() CertStatus - // TODO(banks): build this and test it - // WatchCertStatus returns a channel that is notified on all status changes. - // Note that a message on the channel isn't guaranteed to be different so it's - // value should be inspected. During Client shutdown the channel will be - // closed returning a zero type which is equivalent to CertStatusUnknown. - //WatchCertStatus() <-chan CertStatus +// // TODO(banks): build this and test it +// // WatchCertStatus returns a channel that is notified on all status changes. +// // Note that a message on the channel isn't guaranteed to be different so it's +// // value should be inspected. During Client shutdown the channel will be +// // closed returning a zero type which is equivalent to CertStatusUnknown. +// //WatchCertStatus() <-chan CertStatus - // ServerTLSConfig returns the *tls.Config to be used when creating a TCP - // listener that should accept Connect connections. It is likely that at - // startup the tlsCfg returned will not be immediately usable since - // certificates are typically fetched from the agent asynchronously. In this - // case it's still safe to listen with the provided config, but auth failures - // will occur until initial certificate discovery is complete. 
In general at - // any time it is possible for certificates to expire before new replacements - // have been issued due to local network errors so the server may not actually - // have a working certificate configuration at any time, however as soon as - // valid certs can be issued it will automatically start working again so - // should take no action. - ServerTLSConfig() (*tls.Config, error) +// // ServerTLSConfig returns the *tls.Config to be used when creating a TCP +// // listener that should accept Connect connections. It is likely that at +// // startup the tlsCfg returned will not be immediately usable since +// // certificates are typically fetched from the agent asynchronously. In this +// // case it's still safe to listen with the provided config, but auth failures +// // will occur until initial certificate discovery is complete. In general at +// // any time it is possible for certificates to expire before new replacements +// // have been issued due to local network errors so the server may not actually +// // have a working certificate configuration at any time, however as soon as +// // valid certs can be issued it will automatically start working again so +// // should take no action. +// ServerTLSConfig() (*tls.Config, error) - // DialService opens a new connection to the named service registered in - // Consul. It will perform service discovery to find healthy instances. If - // there is an error during connection it is returned and the caller may call - // again. The client implementation makes a best effort to make consecutive - // Dials against different instances either by randomising the list and/or - // maintaining a local memory of which instances recently failed. If the - // context passed times out before connection is established and verified an - // error is returned. - DialService(ctx context.Context, namespace, name string) (net.Conn, error) +// // DialService opens a new connection to the named service registered in +// // Consul. 
It will perform service discovery to find healthy instances. If +// // there is an error during connection it is returned and the caller may call +// // again. The client implementation makes a best effort to make consecutive +// // Dials against different instances either by randomising the list and/or +// // maintaining a local memory of which instances recently failed. If the +// // context passed times out before connection is established and verified an +// // error is returned. +// DialService(ctx context.Context, namespace, name string) (net.Conn, error) - // DialPreparedQuery opens a new connection by executing the named Prepared - // Query against the local Consul agent, and picking one of the returned - // instances to connect to. It will perform service discovery with the same - // semantics as DialService. - DialPreparedQuery(ctx context.Context, namespace, name string) (net.Conn, error) -} +// // DialPreparedQuery opens a new connection by executing the named Prepared +// // Query against the local Consul agent, and picking one of the returned +// // instances to connect to. It will perform service discovery with the same +// // semantics as DialService. +// DialPreparedQuery(ctx context.Context, namespace, name string) (net.Conn, error) +// } -/* +// /* -Maybe also convenience wrappers for: - - listening TLS conn with right config - - http.ListenAndServeTLS equivalent +// Maybe also convenience wrappers for: +// - listening TLS conn with right config +// - http.ListenAndServeTLS equivalent -*/ +// */ -// AgentClient is the primary implementation of a connect.Client which -// communicates with the local Consul agent. -type AgentClient struct { - agent *api.Client - tlsCfg *ReloadableTLSConfig -} +// // AgentClient is the primary implementation of a connect.Client which +// // communicates with the local Consul agent. 
+// type AgentClient struct { +// agent *api.Client +// tlsCfg *ReloadableTLSConfig +// } -// NewClient returns an AgentClient to allow consuming and providing -// Connect-enabled network services. -func NewClient(agent *api.Client) Client { - // TODO(banks): hook up fetching certs from Agent and updating tlsCfg on cert - // delivery/change. Perhaps need to make - return &AgentClient{ - agent: agent, - tlsCfg: NewReloadableTLSConfig(defaultTLSConfig()), - } -} +// // NewClient returns an AgentClient to allow consuming and providing +// // Connect-enabled network services. +// func NewClient(agent *api.Client) Client { +// // TODO(banks): hook up fetching certs from Agent and updating tlsCfg on cert +// // delivery/change. Perhaps need to make +// return &AgentClient{ +// agent: agent, +// tlsCfg: NewReloadableTLSConfig(defaultTLSConfig()), +// } +// } -// NewInsecureDevClientWithLocalCerts returns an AgentClient that will still do -// service discovery via the local agent but will use externally provided -// certificates and skip authorization. This is intended just for development -// and must not be used ever in production. -func NewInsecureDevClientWithLocalCerts(agent *api.Client, caFile, certFile, - keyFile string) (Client, error) { +// // NewInsecureDevClientWithLocalCerts returns an AgentClient that will still do +// // service discovery via the local agent but will use externally provided +// // certificates and skip authorization. This is intended just for development +// // and must not be used ever in production. 
+// func NewInsecureDevClientWithLocalCerts(agent *api.Client, caFile, certFile, +// keyFile string) (Client, error) { - cfg, err := devTLSConfigFromFiles(caFile, certFile, keyFile) - if err != nil { - return nil, err - } +// cfg, err := devTLSConfigFromFiles(caFile, certFile, keyFile) +// if err != nil { +// return nil, err +// } - return &AgentClient{ - agent: agent, - tlsCfg: NewReloadableTLSConfig(cfg), - }, nil -} +// return &AgentClient{ +// agent: agent, +// tlsCfg: NewReloadableTLSConfig(cfg), +// }, nil +// } -// ServerTLSConfig implements Client -func (c *AgentClient) ServerTLSConfig() (*tls.Config, error) { - return c.tlsCfg.ServerTLSConfig(), nil -} +// // ServerTLSConfig implements Client +// func (c *AgentClient) ServerTLSConfig() (*tls.Config, error) { +// return c.tlsCfg.ServerTLSConfig(), nil +// } -// DialService implements Client -func (c *AgentClient) DialService(ctx context.Context, namespace, - name string) (net.Conn, error) { - return c.dial(ctx, "service", namespace, name) -} +// // DialService implements Client +// func (c *AgentClient) DialService(ctx context.Context, namespace, +// name string) (net.Conn, error) { +// return c.dial(ctx, "service", namespace, name) +// } -// DialPreparedQuery implements Client -func (c *AgentClient) DialPreparedQuery(ctx context.Context, namespace, - name string) (net.Conn, error) { - return c.dial(ctx, "prepared_query", namespace, name) -} +// // DialPreparedQuery implements Client +// func (c *AgentClient) DialPreparedQuery(ctx context.Context, namespace, +// name string) (net.Conn, error) { +// return c.dial(ctx, "prepared_query", namespace, name) +// } -func (c *AgentClient) dial(ctx context.Context, discoveryType, namespace, - name string) (net.Conn, error) { +// func (c *AgentClient) dial(ctx context.Context, discoveryType, namespace, +// name string) (net.Conn, error) { - svcs, err := c.discoverInstances(ctx, discoveryType, namespace, name) - if err != nil { - return nil, err - } +// svcs, err := 
c.discoverInstances(ctx, discoveryType, namespace, name) +// if err != nil { +// return nil, err +// } - svc, err := c.pickInstance(svcs) - if err != nil { - return nil, err - } - if svc == nil { - return nil, fmt.Errorf("no healthy services discovered") - } +// svc, err := c.pickInstance(svcs) +// if err != nil { +// return nil, err +// } +// if svc == nil { +// return nil, fmt.Errorf("no healthy services discovered") +// } - // OK we have a service we can dial! We need a ClientAuther that will validate - // the connection is legit. +// // OK we have a service we can dial! We need a ClientAuther that will validate +// // the connection is legit. - // TODO(banks): implement ClientAuther properly to actually verify connected - // cert matches the expected service/cluster etc. based on svc. - auther := &ClientAuther{} - tlsConfig := c.tlsCfg.TLSConfig(auther) +// // TODO(banks): implement ClientAuther properly to actually verify connected +// // cert matches the expected service/cluster etc. based on svc. +// auther := &ClientAuther{} +// tlsConfig := c.tlsCfg.TLSConfig(auther) - // Resolve address TODO(banks): I expected this to happen magically in the - // agent at registration time if I register with no explicit address but - // apparently doesn't. This is a quick hack to make it work for now, need to - // see if there is a better shared code path for doing this. - addr := svc.Service.Address - if addr == "" { - addr = svc.Node.Address - } - var dialer net.Dialer - tcpConn, err := dialer.DialContext(ctx, "tcp", - fmt.Sprintf("%s:%d", addr, svc.Service.Port)) - if err != nil { - return nil, err - } +// // Resolve address TODO(banks): I expected this to happen magically in the +// // agent at registration time if I register with no explicit address but +// // apparently doesn't. This is a quick hack to make it work for now, need to +// // see if there is a better shared code path for doing this. 
+// addr := svc.Service.Address +// if addr == "" { +// addr = svc.Node.Address +// } +// var dialer net.Dialer +// tcpConn, err := dialer.DialContext(ctx, "tcp", +// fmt.Sprintf("%s:%d", addr, svc.Service.Port)) +// if err != nil { +// return nil, err +// } - tlsConn := tls.Client(tcpConn, tlsConfig) - err = tlsConn.Handshake() - if err != nil { - tlsConn.Close() - return nil, err - } +// tlsConn := tls.Client(tcpConn, tlsConfig) +// err = tlsConn.Handshake() +// if err != nil { +// tlsConn.Close() +// return nil, err +// } - return tlsConn, nil -} +// return tlsConn, nil +// } -// pickInstance returns an instance from the given list to try to connect to. It -// may be made pluggable later, for now it just picks a random one regardless of -// whether the list is already shuffled. -func (c *AgentClient) pickInstance(svcs []*api.ServiceEntry) (*api.ServiceEntry, error) { - if len(svcs) < 1 { - return nil, nil - } - idx := rand.Intn(len(svcs)) - return svcs[idx], nil -} +// // pickInstance returns an instance from the given list to try to connect to. It +// // may be made pluggable later, for now it just picks a random one regardless of +// // whether the list is already shuffled. +// func (c *AgentClient) pickInstance(svcs []*api.ServiceEntry) (*api.ServiceEntry, error) { +// if len(svcs) < 1 { +// return nil, nil +// } +// idx := rand.Intn(len(svcs)) +// return svcs[idx], nil +// } -// discoverInstances returns all instances for the given discoveryType, -// namespace and name. The returned service entries may or may not be shuffled -func (c *AgentClient) discoverInstances(ctx context.Context, discoverType, - namespace, name string) ([]*api.ServiceEntry, error) { +// // discoverInstances returns all instances for the given discoveryType, +// // namespace and name. 
The returned service entries may or may not be shuffled +// func (c *AgentClient) discoverInstances(ctx context.Context, discoverType, +// namespace, name string) ([]*api.ServiceEntry, error) { - q := &api.QueryOptions{ - // TODO(banks): make this configurable? - AllowStale: true, - } - q = q.WithContext(ctx) +// q := &api.QueryOptions{ +// // TODO(banks): make this configurable? +// AllowStale: true, +// } +// q = q.WithContext(ctx) - switch discoverType { - case "service": - svcs, _, err := c.agent.Health().Connect(name, "", true, q) - if err != nil { - return nil, err - } - return svcs, err +// switch discoverType { +// case "service": +// svcs, _, err := c.agent.Health().Connect(name, "", true, q) +// if err != nil { +// return nil, err +// } +// return svcs, err - case "prepared_query": - // TODO(banks): it's not super clear to me how this should work eventually. - // How do we distinguise between a PreparedQuery for the actual services and - // one that should return the connect proxies where that differs? If we - // can't then we end up with a janky UX where user specifies a reasonable - // prepared query but we try to connect to non-connect services and fail - // with a confusing TLS error. Maybe just a way to filter PreparedQuery - // results by connect-enabled would be sufficient (or even metadata to do - // that ourselves in the response although less efficient). - resp, _, err := c.agent.PreparedQuery().Execute(name, q) - if err != nil { - return nil, err - } +// case "prepared_query": +// // TODO(banks): it's not super clear to me how this should work eventually. +// // How do we distinguise between a PreparedQuery for the actual services and +// // one that should return the connect proxies where that differs? If we +// // can't then we end up with a janky UX where user specifies a reasonable +// // prepared query but we try to connect to non-connect services and fail +// // with a confusing TLS error. 
Maybe just a way to filter PreparedQuery +// // results by connect-enabled would be sufficient (or even metadata to do +// // that ourselves in the response although less efficient). +// resp, _, err := c.agent.PreparedQuery().Execute(name, q) +// if err != nil { +// return nil, err +// } - // Awkward, we have a slice of api.ServiceEntry here but want a slice of - // *api.ServiceEntry for compat with Connect/Service APIs. Have to convert - // them to keep things type-happy. - svcs := make([]*api.ServiceEntry, len(resp.Nodes)) - for idx, se := range resp.Nodes { - svcs[idx] = &se - } - return svcs, err - default: - return nil, fmt.Errorf("unsupported discovery type: %s", discoverType) - } -} +// // Awkward, we have a slice of api.ServiceEntry here but want a slice of +// // *api.ServiceEntry for compat with Connect/Service APIs. Have to convert +// // them to keep things type-happy. +// svcs := make([]*api.ServiceEntry, len(resp.Nodes)) +// for idx, se := range resp.Nodes { +// svcs[idx] = &se +// } +// return svcs, err +// default: +// return nil, fmt.Errorf("unsupported discovery type: %s", discoverType) +// } +// } diff --git a/connect/client_test.go b/connect/client_test.go index fcb18e600..045bc8fd6 100644 --- a/connect/client_test.go +++ b/connect/client_test.go @@ -1,148 +1,148 @@ package connect -import ( - "context" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/url" - "strconv" - "testing" +// import ( +// "context" +// "crypto/x509" +// "crypto/x509/pkix" +// "encoding/asn1" +// "io/ioutil" +// "net" +// "net/http" +// "net/http/httptest" +// "net/url" +// "strconv" +// "testing" - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/testutil" - "github.com/stretchr/testify/require" -) +// "github.com/hashicorp/consul/api" +// "github.com/hashicorp/consul/testutil" +// "github.com/stretchr/testify/require" +// ) -func TestNewInsecureDevClientWithLocalCerts(t *testing.T) { 
+// func TestNewInsecureDevClientWithLocalCerts(t *testing.T) { - agent, err := api.NewClient(api.DefaultConfig()) - require.Nil(t, err) +// agent, err := api.NewClient(api.DefaultConfig()) +// require.Nil(t, err) - got, err := NewInsecureDevClientWithLocalCerts(agent, - "testdata/ca1-ca-consul-internal.cert.pem", - "testdata/ca1-svc-web.cert.pem", - "testdata/ca1-svc-web.key.pem", - ) - require.Nil(t, err) +// got, err := NewInsecureDevClientWithLocalCerts(agent, +// "testdata/ca1-ca-consul-internal.cert.pem", +// "testdata/ca1-svc-web.cert.pem", +// "testdata/ca1-svc-web.key.pem", +// ) +// require.Nil(t, err) - // Sanity check correct certs were loaded - serverCfg, err := got.ServerTLSConfig() - require.Nil(t, err) - caSubjects := serverCfg.RootCAs.Subjects() - require.Len(t, caSubjects, 1) - caSubject, err := testNameFromRawDN(caSubjects[0]) - require.Nil(t, err) - require.Equal(t, "Consul Internal", caSubject.CommonName) +// // Sanity check correct certs were loaded +// serverCfg, err := got.ServerTLSConfig() +// require.Nil(t, err) +// caSubjects := serverCfg.RootCAs.Subjects() +// require.Len(t, caSubjects, 1) +// caSubject, err := testNameFromRawDN(caSubjects[0]) +// require.Nil(t, err) +// require.Equal(t, "Consul Internal", caSubject.CommonName) - require.Len(t, serverCfg.Certificates, 1) - cert, err := x509.ParseCertificate(serverCfg.Certificates[0].Certificate[0]) - require.Nil(t, err) - require.Equal(t, "web", cert.Subject.CommonName) -} +// require.Len(t, serverCfg.Certificates, 1) +// cert, err := x509.ParseCertificate(serverCfg.Certificates[0].Certificate[0]) +// require.Nil(t, err) +// require.Equal(t, "web", cert.Subject.CommonName) +// } -func testNameFromRawDN(raw []byte) (*pkix.Name, error) { - var seq pkix.RDNSequence - if _, err := asn1.Unmarshal(raw, &seq); err != nil { - return nil, err - } +// func testNameFromRawDN(raw []byte) (*pkix.Name, error) { +// var seq pkix.RDNSequence +// if _, err := asn1.Unmarshal(raw, &seq); err != nil { +// 
return nil, err +// } - var name pkix.Name - name.FillFromRDNSequence(&seq) - return &name, nil -} +// var name pkix.Name +// name.FillFromRDNSequence(&seq) +// return &name, nil +// } -func testAgent(t *testing.T) (*testutil.TestServer, *api.Client) { - t.Helper() +// func testAgent(t *testing.T) (*testutil.TestServer, *api.Client) { +// t.Helper() - // Make client config - conf := api.DefaultConfig() +// // Make client config +// conf := api.DefaultConfig() - // Create server - server, err := testutil.NewTestServerConfigT(t, nil) - require.Nil(t, err) +// // Create server +// server, err := testutil.NewTestServerConfigT(t, nil) +// require.Nil(t, err) - conf.Address = server.HTTPAddr +// conf.Address = server.HTTPAddr - // Create client - agent, err := api.NewClient(conf) - require.Nil(t, err) +// // Create client +// agent, err := api.NewClient(conf) +// require.Nil(t, err) - return server, agent -} +// return server, agent +// } -func testService(t *testing.T, ca, name string, client *api.Client) *httptest.Server { - t.Helper() +// func testService(t *testing.T, ca, name string, client *api.Client) *httptest.Server { +// t.Helper() - // Run a test service to discover - server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("svc: " + name)) - })) - server.TLS = TestTLSConfig(t, ca, name) - server.StartTLS() +// // Run a test service to discover +// server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("svc: " + name)) +// })) +// server.TLS = TestTLSConfig(t, ca, name) +// server.StartTLS() - u, err := url.Parse(server.URL) - require.Nil(t, err) +// u, err := url.Parse(server.URL) +// require.Nil(t, err) - port, err := strconv.Atoi(u.Port()) - require.Nil(t, err) +// port, err := strconv.Atoi(u.Port()) +// require.Nil(t, err) - // If client is passed, register the test service instance - if client != nil { - svc := 
&api.AgentServiceRegistration{ - // TODO(banks): we don't really have a good way to represent - // connect-native apps yet so we have to pretend out little server is a - // proxy for now. - Kind: api.ServiceKindConnectProxy, - ProxyDestination: name, - Name: name + "-proxy", - Address: u.Hostname(), - Port: port, - } - err := client.Agent().ServiceRegister(svc) - require.Nil(t, err) - } +// // If client is passed, register the test service instance +// if client != nil { +// svc := &api.AgentServiceRegistration{ +// // TODO(banks): we don't really have a good way to represent +// // connect-native apps yet so we have to pretend out little server is a +// // proxy for now. +// Kind: api.ServiceKindConnectProxy, +// ProxyDestination: name, +// Name: name + "-proxy", +// Address: u.Hostname(), +// Port: port, +// } +// err := client.Agent().ServiceRegister(svc) +// require.Nil(t, err) +// } - return server -} +// return server +// } -func TestDialService(t *testing.T) { - consulServer, agent := testAgent(t) - defer consulServer.Stop() +// func TestDialService(t *testing.T) { +// consulServer, agent := testAgent(t) +// defer consulServer.Stop() - svc := testService(t, "ca1", "web", agent) - defer svc.Close() +// svc := testService(t, "ca1", "web", agent) +// defer svc.Close() - c, err := NewInsecureDevClientWithLocalCerts(agent, - "testdata/ca1-ca-consul-internal.cert.pem", - "testdata/ca1-svc-web.cert.pem", - "testdata/ca1-svc-web.key.pem", - ) - require.Nil(t, err) +// c, err := NewInsecureDevClientWithLocalCerts(agent, +// "testdata/ca1-ca-consul-internal.cert.pem", +// "testdata/ca1-svc-web.cert.pem", +// "testdata/ca1-svc-web.key.pem", +// ) +// require.Nil(t, err) - conn, err := c.DialService(context.Background(), "default", "web") - require.Nilf(t, err, "err: %s", err) +// conn, err := c.DialService(context.Background(), "default", "web") +// require.Nilf(t, err, "err: %s", err) - // Inject our conn into http.Transport - httpClient := &http.Client{ - Transport: 
&http.Transport{ - DialTLS: func(network, addr string) (net.Conn, error) { - return conn, nil - }, - }, - } +// // Inject our conn into http.Transport +// httpClient := &http.Client{ +// Transport: &http.Transport{ +// DialTLS: func(network, addr string) (net.Conn, error) { +// return conn, nil +// }, +// }, +// } - // Don't be fooled the hostname here is ignored since we did the dialling - // ourselves - resp, err := httpClient.Get("https://web.connect.consul/") - require.Nil(t, err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - require.Nil(t, err) +// // Don't be fooled the hostname here is ignored since we did the dialling +// // ourselves +// resp, err := httpClient.Get("https://web.connect.consul/") +// require.Nil(t, err) +// defer resp.Body.Close() +// body, err := ioutil.ReadAll(resp.Body) +// require.Nil(t, err) - require.Equal(t, "svc: web", string(body)) -} +// require.Equal(t, "svc: web", string(body)) +// } diff --git a/connect/example_test.go b/connect/example_test.go new file mode 100644 index 000000000..eb66bdbc0 --- /dev/null +++ b/connect/example_test.go @@ -0,0 +1,53 @@ +package connect + +import ( + "crypto/tls" + "log" + "net" + "net/http" + + "github.com/hashicorp/consul/api" +) + +type apiHandler struct{} + +func (apiHandler) ServeHTTP(http.ResponseWriter, *http.Request) {} + +// Note: this assumes a suitable Consul ACL token with 'service:write' for +// service 'web' is set in CONSUL_HTTP_TOKEN ENV var. +func ExampleService_ServerTLSConfig_hTTP() { + client, _ := api.NewClient(api.DefaultConfig()) + svc, _ := NewService("web", client) + server := &http.Server{ + Addr: ":8080", + Handler: apiHandler{}, + TLSConfig: svc.ServerTLSConfig(), + } + // Cert and key files are blank since the tls.Config will handle providing + // those dynamically. 
+ log.Fatal(server.ListenAndServeTLS("", "")) +} + +func acceptLoop(l net.Listener) {} + +// Note: this assumes a suitable Consul ACL token with 'service:write' for +// service 'web' is set in CONSUL_HTTP_TOKEN ENV var. +func ExampleService_ServerTLSConfig_tLS() { + client, _ := api.NewClient(api.DefaultConfig()) + svc, _ := NewService("web", client) + l, _ := tls.Listen("tcp", ":8080", svc.ServerTLSConfig()) + acceptLoop(l) +} + +func handleResponse(r *http.Response) {} + +// Note: this assumes a suitable Consul ACL token with 'service:write' for +// service 'web' is set in CONSUL_HTTP_TOKEN ENV var. +func ExampleService_HTTPClient() { + client, _ := api.NewClient(api.DefaultConfig()) + svc, _ := NewService("web", client) + + httpClient := svc.HTTPClient() + resp, _ := httpClient.Get("https://web.service.consul/foo/bar") + handleResponse(resp) +} diff --git a/connect/resolver.go b/connect/resolver.go new file mode 100644 index 000000000..41dc70e82 --- /dev/null +++ b/connect/resolver.go @@ -0,0 +1,131 @@ +package connect + +import ( + "context" + "fmt" + "math/rand" + + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/api" + testing "github.com/mitchellh/go-testing-interface" +) + +// Resolver is the interface implemented by a service discovery mechanism. +type Resolver interface { + // Resolve returns a single service instance to connect to. Implementations + // may attempt to ensure the instance returned is currently available. It is + // expected that a client will re-dial on a connection failure so making an + // effort to return a different service instance each time where available + // increases reliability. The context passed can be used to impose timeouts + // which may or may not be respected by implementations that make network + // calls to resolve the service. The addr returned is a string in any valid + // form for passing directly to `net.Dial("tcp", addr)`. 
+ Resolve(ctx context.Context) (addr string, certURI connect.CertURI, err error) +} + +// StaticResolver is a statically defined resolver. This can be used to connect +// to an known-Connect endpoint without performing service discovery. +type StaticResolver struct { + // Addr is the network address (including port) of the instance. It must be + // the connect-enabled mTLS server and may be a proxy in front of the actual + // target service process. It is a string in any valid form for passing + // directly to `net.Dial("tcp", addr)`. + Addr string + + // CertURL is the _identity_ we expect the server to present in it's TLS + // certificate. It must be an exact match or the connection will be rejected. + CertURI connect.CertURI +} + +// Resolve implements Resolver by returning the static values. +func (sr *StaticResolver) Resolve(ctx context.Context) (string, connect.CertURI, error) { + return sr.Addr, sr.CertURI, nil +} + +const ( + // ConsulResolverTypeService indicates resolving healthy service nodes. + ConsulResolverTypeService int = iota + + // ConsulResolverTypePreparedQuery indicates resolving via prepared query. + ConsulResolverTypePreparedQuery +) + +// ConsulResolver queries Consul for a service instance. +type ConsulResolver struct { + // Client is the Consul API client to use. Must be non-nil or Resolve will + // panic. + Client *api.Client + + // Namespace of the query target + Namespace string + + // Name of the query target + Name string + + // Type of the query target, + Type int + + // Datacenter to resolve in, empty indicates agent's local DC. + Datacenter string +} + +// Resolve performs service discovery against the local Consul agent and returns +// the address and expected identity of a suitable service instance. 
+func (cr *ConsulResolver) Resolve(ctx context.Context) (string, connect.CertURI, error) { + switch cr.Type { + case ConsulResolverTypeService: + return cr.resolveService(ctx) + case ConsulResolverTypePreparedQuery: + // TODO(banks): we need to figure out what API changes are needed for + // prepared queries to become connect-aware. How do we signal that we want + // connect-enabled endpoints vs the direct ones for the responses? + return "", nil, fmt.Errorf("not implemented") + default: + return "", nil, fmt.Errorf("unknown resolver type") + } +} + +func (cr *ConsulResolver) resolveService(ctx context.Context) (string, connect.CertURI, error) { + health := cr.Client.Health() + + svcs, _, err := health.Connect(cr.Name, "", true, cr.queryOptions(ctx)) + if err != nil { + return "", nil, err + } + + if len(svcs) < 1 { + return "", nil, fmt.Errorf("no healthy instances found") + } + + // Services are not shuffled by HTTP API, pick one at (pseudo) random. + idx := 0 + if len(svcs) > 1 { + idx = rand.Intn(len(svcs)) + } + + addr := svcs[idx].Service.Address + if addr == "" { + addr = svcs[idx].Node.Address + } + port := svcs[idx].Service.Port + + // Generate the expected CertURI + + // TODO(banks): when we've figured out the CA story around generating and + // propagating these trust domains we need to actually fetch the trust domain + // somehow. We also need to implement namespaces. Use of test function here is + // temporary pending the work on trust domains. + certURI := connect.TestSpiffeIDService(&testing.RuntimeT{}, cr.Name) + + return fmt.Sprintf("%s:%d", addr, port), certURI, nil +} + +func (cr *ConsulResolver) queryOptions(ctx context.Context) *api.QueryOptions { + q := &api.QueryOptions{ + // We may make this configurable one day but we may also implement our own + // caching which is even more stale so... 
+ AllowStale: true, + Datacenter: cr.Datacenter, + } + return q.WithContext(ctx) +} diff --git a/connect/resolver_test.go b/connect/resolver_test.go new file mode 100644 index 000000000..29a40e3d3 --- /dev/null +++ b/connect/resolver_test.go @@ -0,0 +1,164 @@ +package connect + +import ( + "context" + "testing" + "time" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/require" +) + +func TestStaticResolver_Resolve(t *testing.T) { + type fields struct { + Addr string + CertURI connect.CertURI + } + tests := []struct { + name string + fields fields + }{ + { + name: "simples", + fields: fields{"1.2.3.4:80", connect.TestSpiffeIDService(t, "foo")}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sr := StaticResolver{ + Addr: tt.fields.Addr, + CertURI: tt.fields.CertURI, + } + addr, certURI, err := sr.Resolve(context.Background()) + require := require.New(t) + require.Nil(err) + require.Equal(sr.Addr, addr) + require.Equal(sr.CertURI, certURI) + }) + } +} + +func TestConsulResolver_Resolve(t *testing.T) { + + // Setup a local test agent to query + agent := agent.NewTestAgent("test-consul", "") + defer agent.Shutdown() + + cfg := api.DefaultConfig() + cfg.Address = agent.HTTPAddr() + client, err := api.NewClient(cfg) + require.Nil(t, err) + + // Setup a service with a connect proxy instance + regSrv := &api.AgentServiceRegistration{ + Name: "web", + Port: 8080, + } + err = client.Agent().ServiceRegister(regSrv) + require.Nil(t, err) + + regProxy := &api.AgentServiceRegistration{ + Kind: "connect-proxy", + Name: "web-proxy", + Port: 9090, + ProxyDestination: "web", + } + err = client.Agent().ServiceRegister(regProxy) + require.Nil(t, err) + + // And another proxy so we can test handling with multiple endpoints returned + regProxy.Port = 9091 + regProxy.ID = "web-proxy-2" + err = client.Agent().ServiceRegister(regProxy) + require.Nil(t, err) 
+ + proxyAddrs := []string{ + agent.Config.AdvertiseAddrLAN.String() + ":9090", + agent.Config.AdvertiseAddrLAN.String() + ":9091", + } + + type fields struct { + Namespace string + Name string + Type int + Datacenter string + } + tests := []struct { + name string + fields fields + timeout time.Duration + wantAddr string + wantCertURI connect.CertURI + wantErr bool + }{ + { + name: "basic service discovery", + fields: fields{ + Namespace: "default", + Name: "web", + Type: ConsulResolverTypeService, + }, + wantCertURI: connect.TestSpiffeIDService(t, "web"), + wantErr: false, + }, + { + name: "Bad Type errors", + fields: fields{ + Namespace: "default", + Name: "web", + Type: 123, + }, + wantErr: true, + }, + { + name: "Non-existent service errors", + fields: fields{ + Namespace: "default", + Name: "foo", + Type: ConsulResolverTypeService, + }, + wantErr: true, + }, + { + name: "timeout errors", + fields: fields{ + Namespace: "default", + Name: "web", + Type: ConsulResolverTypeService, + }, + timeout: 1 * time.Nanosecond, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + cr := &ConsulResolver{ + Client: client, + Namespace: tt.fields.Namespace, + Name: tt.fields.Name, + Type: tt.fields.Type, + Datacenter: tt.fields.Datacenter, + } + // WithCancel just to have a cancel func in scope to assign in the if + // clause. 
+ ctx, cancel := context.WithCancel(context.Background()) + if tt.timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, tt.timeout) + } + defer cancel() + gotAddr, gotCertURI, err := cr.Resolve(ctx) + if tt.wantErr { + require.NotNil(err) + return + } + + require.Nil(err) + // Address should be either of the registered proxy ports so check both + require.Contains(proxyAddrs, gotAddr) + require.Equal(tt.wantCertURI, gotCertURI) + }) + } +} diff --git a/connect/service.go b/connect/service.go new file mode 100644 index 000000000..db83ce5aa --- /dev/null +++ b/connect/service.go @@ -0,0 +1,185 @@ +package connect + +import ( + "context" + "crypto/tls" + "log" + "net" + "net/http" + "os" + "time" + + "github.com/hashicorp/consul/api" +) + +// Service represents a Consul service that accepts and/or connects via Connect. +// This can represent a service that only is a server, only is a client, or +// both. +// +// TODO(banks): API for monitoring status of certs from app +// +// TODO(banks): Agent implicit health checks based on knowing which certs are +// available should prevent clients being routed until the agent knows the +// service has been delivered valid certificates. Once built, document that here +// too. +type Service struct { + // serviceID is the unique ID for this service in the agent-local catalog. + // This is often but not always the service name. This is used to request + // Connect metadata. If the service with this ID doesn't exist on the local + // agent no error will be returned and the Service will retry periodically. + // This allows service startup and registration to happen in either order + // without coordination since they might be performed by separate processes. + serviceID string + + // client is the Consul API client. It must be configured with an appropriate + // Token that has `service:write` policy on the provided ServiceID. 
If an + // insufficient token is provided, the Service will abort further attempts to + // fetch certificates and print a loud error message. It will not Close() or + // kill the process since that could lead to a crash loop in every service if + // ACL token was revoked. All attempts to dial will error and any incoming + // connections will fail to verify. + client *api.Client + + // serverTLSCfg is the (reloadable) TLS config we use for serving. + serverTLSCfg *ReloadableTLSConfig + + // clientTLSCfg is the (reloadable) TLS config we use for dialling. + clientTLSCfg *ReloadableTLSConfig + + logger *log.Logger +} + +// NewService creates and starts a Service. The caller must close the returned +// service to free resources and allow the program to exit normally. This is +// typically called in a signal handler. +func NewService(serviceID string, client *api.Client) (*Service, error) { + return NewServiceWithLogger(serviceID, client, + log.New(os.Stderr, "", log.LstdFlags)) +} + +// NewServiceWithLogger starts the service with a specified log.Logger. +func NewServiceWithLogger(serviceID string, client *api.Client, + logger *log.Logger) (*Service, error) { + s := &Service{ + serviceID: serviceID, + client: client, + logger: logger, + } + s.serverTLSCfg = NewReloadableTLSConfig(defaultTLSConfig(serverVerifyCerts)) + s.clientTLSCfg = NewReloadableTLSConfig(defaultTLSConfig(clientVerifyCerts)) + + // TODO(banks) run the background certificate sync + return s, nil +} + +// NewDevServiceFromCertFiles creates a Service using certificate and key files +// passed instead of fetching them from the client. 
+func NewDevServiceFromCertFiles(serviceID string, client *api.Client, + logger *log.Logger, caFile, certFile, keyFile string) (*Service, error) { + s := &Service{ + serviceID: serviceID, + client: client, + logger: logger, + } + tlsCfg, err := devTLSConfigFromFiles(caFile, certFile, keyFile) + if err != nil { + return nil, err + } + + // Note that NewReloadableTLSConfig makes a copy so we can re-use the same + // base for both client and server with swapped verifiers. + tlsCfg.VerifyPeerCertificate = serverVerifyCerts + s.serverTLSCfg = NewReloadableTLSConfig(tlsCfg) + tlsCfg.VerifyPeerCertificate = clientVerifyCerts + s.clientTLSCfg = NewReloadableTLSConfig(tlsCfg) + return s, nil +} + +// ServerTLSConfig returns a *tls.Config that allows any TCP listener to accept +// and authorize incoming Connect clients. It will return a single static config +// with hooks to dynamically load certificates, and perform Connect +// authorization during verification. Service implementations do not need to +// reload this to get new certificates. +// +// At any time it may be possible that the Service instance does not have access +// to usable certificates due to not being initially setup yet or a prolonged +// error during renewal. The listener will be able to accept connections again +// once connectivity is restored provided the client's Token is valid. +func (s *Service) ServerTLSConfig() *tls.Config { + return s.serverTLSCfg.TLSConfig() +} + +// Dial connects to a remote Connect-enabled server. The passed Resolver is used +// to discover a single candidate instance which will be dialled and have it's +// TLS certificate verified against the expected identity. Failures are returned +// directly with no retries. Repeated dials may use different instances +// depending on the Resolver implementation. +// +// Timeout can be managed via the Context. 
+func (s *Service) Dial(ctx context.Context, resolver Resolver) (net.Conn, error) { + addr, certURI, err := resolver.Resolve(ctx) + if err != nil { + return nil, err + } + var dialer net.Dialer + tcpConn, err := dialer.DialContext(ctx, "tcp", addr) + if err != nil { + return nil, err + } + + tlsConn := tls.Client(tcpConn, s.clientTLSCfg.TLSConfig()) + // Set deadline for Handshake to complete. + deadline, ok := ctx.Deadline() + if ok { + tlsConn.SetDeadline(deadline) + } + err = tlsConn.Handshake() + if err != nil { + tlsConn.Close() + return nil, err + } + // Clear deadline since that was only for connection. Caller can set their own + // deadline later as necessary. + tlsConn.SetDeadline(time.Time{}) + + // Verify that the connect server's URI matches certURI + err = verifyServerCertMatchesURI(tlsConn.ConnectionState().PeerCertificates, + certURI) + if err != nil { + tlsConn.Close() + return nil, err + } + + return tlsConn, nil +} + +// HTTPDialContext is compatible with http.Transport.DialContext. It expects the +// addr hostname to be specified using Consul DNS query syntax, e.g. +// "web.service.consul". It converts that into the equivalent ConsulResolver and +// then call s.Dial with the resolver. This is low level, clients should +// typically use HTTPClient directly. +func (s *Service) HTTPDialContext(ctx context.Context, network, + addr string) (net.Conn, error) { + var r ConsulResolver + // TODO(banks): parse addr into ConsulResolver + return s.Dial(ctx, &r) +} + +// HTTPClient returns an *http.Client configured to dial remote Consul Connect +// HTTP services. The client will return an error if attempting to make requests +// to a non HTTPS hostname. It resolves the domain of the request with the same +// syntax as Consul DNS queries although it performs discovery directly via the +// API rather than just relying on Consul DNS. Hostnames that are not valid +// Consul DNS queries will fail. 
+func (s *Service) HTTPClient() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + DialContext: s.HTTPDialContext, + }, + } +} + +// Close stops the service and frees resources. +func (s *Service) Close() { + // TODO(banks): stop background activity if started +} diff --git a/connect/service_test.go b/connect/service_test.go new file mode 100644 index 000000000..a2adfe7f1 --- /dev/null +++ b/connect/service_test.go @@ -0,0 +1,105 @@ +package connect + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/hashicorp/consul/agent/connect" + "github.com/stretchr/testify/require" +) + +func TestService_Dial(t *testing.T) { + ca := connect.TestCA(t, nil) + + tests := []struct { + name string + accept bool + handshake bool + presentService string + wantErr string + }{ + { + name: "working", + accept: true, + handshake: true, + presentService: "db", + wantErr: "", + }, + { + name: "tcp connect fail", + accept: false, + handshake: false, + presentService: "db", + wantErr: "connection refused", + }, + { + name: "handshake timeout", + accept: true, + handshake: false, + presentService: "db", + wantErr: "i/o timeout", + }, + { + name: "bad cert", + accept: true, + handshake: true, + presentService: "web", + wantErr: "peer certificate mismatch", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + s, err := NewService("web", nil) + require.Nil(err) + + // Force TLSConfig + s.clientTLSCfg = NewReloadableTLSConfig(TestTLSConfig(t, "web", ca)) + + ctx, cancel := context.WithTimeout(context.Background(), + 100*time.Millisecond) + defer cancel() + + testSvc := NewTestService(t, tt.presentService, ca) + testSvc.TimeoutHandshake = !tt.handshake + + if tt.accept { + go func() { + err := testSvc.Serve() + require.Nil(err) + }() + defer testSvc.Close() + } + + // Always expect to be connecting to a "DB" + resolver := &StaticResolver{ + Addr: testSvc.Addr, + CertURI: connect.TestSpiffeIDService(t, 
"db"), + } + + // All test runs should complete in under 500ms due to the timeout about. + // Don't wait for whole test run to get stuck. + testTimeout := 500 * time.Millisecond + testTimer := time.AfterFunc(testTimeout, func() { + panic(fmt.Sprintf("test timed out after %s", testTimeout)) + }) + + conn, err := s.Dial(ctx, resolver) + testTimer.Stop() + + if tt.wantErr == "" { + require.Nil(err) + } else { + require.NotNil(err) + require.Contains(err.Error(), tt.wantErr) + } + + if err == nil { + conn.Close() + } + }) + } +} diff --git a/connect/testdata/ca1-ca-consul-internal.cert.pem b/connect/testdata/ca1-ca-consul-internal.cert.pem deleted file mode 100644 index 6a557775f..000000000 --- a/connect/testdata/ca1-ca-consul-internal.cert.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICIDCCAcagAwIBAgIBATAKBggqhkjOPQQDAjAaMRgwFgYDVQQDEw9Db25zdWwg -SW50ZXJuYWwwHhcNMTgwMzIzMjIwNDI1WhcNMjgwMzIwMjIwNDI1WjAaMRgwFgYD -VQQDEw9Db25zdWwgSW50ZXJuYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAT3 -IPiDHugKYEVaSpIzBjqU5lQrmirC6N1XHyOAhF2psGGxcxezpf8Vgy5Iv6XbmeHr -cttyzUYtUKhrFBhxkPYRo4H8MIH5MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E -BTADAQH/MCkGA1UdDgQiBCCrnNQy2IQS73Co9WbrPXtq/YP9SvIBOJ8iYRWTOxjC -qTArBgNVHSMEJDAigCCrnNQy2IQS73Co9WbrPXtq/YP9SvIBOJ8iYRWTOxjCqTA/ -BgNVHREEODA2hjRzcGlmZmU6Ly8xMTExMTExMS0yMjIyLTMzMzMtNDQ0NC01NTU1 -NTU1NTU1NTUuY29uc3VsMD0GA1UdHgEB/wQzMDGgLzAtgisxMTExMTExMS0yMjIy -LTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsMAoGCCqGSM49BAMCA0gAMEUC -IQDwWL6ZuszKrZjSJwDzdhRQtj1ppezJrKaDTJx+4F/tyQIgEaQCR935ztIqZzgO -Ka6ozcH2Ubd4j4cDC1XswVMW6zs= ------END CERTIFICATE----- diff --git a/connect/testdata/ca1-ca-consul-internal.key.pem b/connect/testdata/ca1-ca-consul-internal.key.pem deleted file mode 100644 index 8c40fd26b..000000000 --- a/connect/testdata/ca1-ca-consul-internal.key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIDUDO3I7WKbLTTWkNKA4unB2RLq/RX+L+XIFssDE/AD7oAoGCCqGSM49 
-AwEHoUQDQgAE9yD4gx7oCmBFWkqSMwY6lOZUK5oqwujdVx8jgIRdqbBhsXMXs6X/ -FYMuSL+l25nh63Lbcs1GLVCoaxQYcZD2EQ== ------END EC PRIVATE KEY----- diff --git a/connect/testdata/ca1-svc-cache.cert.pem b/connect/testdata/ca1-svc-cache.cert.pem deleted file mode 100644 index 097a2b6a6..000000000 --- a/connect/testdata/ca1-svc-cache.cert.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICEDCCAbagAwIBAgIBBTAKBggqhkjOPQQDAjAaMRgwFgYDVQQDEw9Db25zdWwg -SW50ZXJuYWwwHhcNMTgwMzIzMjIwNDI1WhcNMjgwMzIwMjIwNDI1WjAQMQ4wDAYD -VQQDEwVjYWNoZTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABOWw8369v4DHJAI6 -k061hU8rxaQs87mZFQ52JfleJjRoDUuZIPLhZHMFbvbI8pDWi7YdjluNbzNNh6nu -fAivylujgfYwgfMwDgYDVR0PAQH/BAQDAgO4MB0GA1UdJQQWMBQGCCsGAQUFBwMC -BggrBgEFBQcDATAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCCHhMqV2/R8meSsXtwh -OLC9hP7WQfuvwJ6V6uwKZdEofTArBgNVHSMEJDAigCCrnNQy2IQS73Co9WbrPXtq -/YP9SvIBOJ8iYRWTOxjCqTBcBgNVHREEVTBThlFzcGlmZmU6Ly8xMTExMTExMS0y -MjIyLTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsL25zL2RlZmF1bHQvZGMv -ZGMwMS9zdmMvY2FjaGUwCgYIKoZIzj0EAwIDSAAwRQIgPfekKBd/ltpVkdjnB0Hp -cV9HPwy12tXp4suR2nspSNkCIQD1Th/hvxuBKkRYy9Bl+jgTbrFdd4fLCWPeFbaM -sgLK7g== ------END CERTIFICATE----- diff --git a/connect/testdata/ca1-svc-cache.key.pem b/connect/testdata/ca1-svc-cache.key.pem deleted file mode 100644 index f780f63db..000000000 --- a/connect/testdata/ca1-svc-cache.key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIPTSPV2cWNnO69f+vYyCg5frpoBtK6L+kZVLrGCv3TdnoAoGCCqGSM49 -AwEHoUQDQgAE5bDzfr2/gMckAjqTTrWFTyvFpCzzuZkVDnYl+V4mNGgNS5kg8uFk -cwVu9sjykNaLth2OW41vM02Hqe58CK/KWw== ------END EC PRIVATE KEY----- diff --git a/connect/testdata/ca1-svc-db.cert.pem b/connect/testdata/ca1-svc-db.cert.pem deleted file mode 100644 index d00a38ea0..000000000 --- a/connect/testdata/ca1-svc-db.cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICCjCCAbCgAwIBAgIBBDAKBggqhkjOPQQDAjAaMRgwFgYDVQQDEw9Db25zdWwg -SW50ZXJuYWwwHhcNMTgwMzIzMjIwNDI1WhcNMjgwMzIwMjIwNDI1WjANMQswCQYD 
-VQQDEwJkYjBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABEcTyr2l7yYWZuh++02M -usR20QrZtHdd7goKmYrIpQ3ekmHuLLgJWgTTaIhCj8fzbryep+s8oM7EiPhRQ14l -uSujgfMwgfAwDgYDVR0PAQH/BAQDAgO4MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggr -BgEFBQcDATAMBgNVHRMBAf8EAjAAMCkGA1UdDgQiBCAy6jHCBBT2bii+aMJCDJ33 -bFJtR72bxDBUi5b+YWyWwDArBgNVHSMEJDAigCCrnNQy2IQS73Co9WbrPXtq/YP9 -SvIBOJ8iYRWTOxjCqTBZBgNVHREEUjBQhk5zcGlmZmU6Ly8xMTExMTExMS0yMjIy -LTMzMzMtNDQ0NC01NTU1NTU1NTU1NTUuY29uc3VsL25zL2RlZmF1bHQvZGMvZGMw -MS9zdmMvZGIwCgYIKoZIzj0EAwIDSAAwRQIhALCW4cOEpuYfLJ0NGwEmYG5Fko0N -WMccL0gEQzKUbIWrAiAIw8wkTSf1O8vTHeKdR1fCmdVoDRFRKB643PaofUzFxA== ------END CERTIFICATE----- diff --git a/connect/testdata/ca1-svc-db.key.pem b/connect/testdata/ca1-svc-db.key.pem deleted file mode 100644 index 3ec23a33b..000000000 --- a/connect/testdata/ca1-svc-db.key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIMHv1pjt75IjKXzl8l4rBtEFS1pEuOM4WNgeHg5Qn1RroAoGCCqGSM49 -AwEHoUQDQgAERxPKvaXvJhZm6H77TYy6xHbRCtm0d13uCgqZisilDd6SYe4suAla -BNNoiEKPx/NuvJ6n6zygzsSI+FFDXiW5Kw== ------END EC PRIVATE KEY----- diff --git a/connect/testdata/ca1-svc-web.cert.pem b/connect/testdata/ca1-svc-web.cert.pem deleted file mode 100644 index a786a2c06..000000000 --- a/connect/testdata/ca1-svc-web.cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICDDCCAbKgAwIBAgIBAzAKBggqhkjOPQQDAjAaMRgwFgYDVQQDEw9Db25zdWwg -SW50ZXJuYWwwHhcNMTgwMzIzMjIwNDI1WhcNMjgwMzIwMjIwNDI1WjAOMQwwCgYD -VQQDEwN3ZWIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARF47lERGXziNBC74Kh -U3W29/M7JO9LIUaJgK0LJbhgf0MuPxf7gX+PnxH5ImI5yfXRv0SSxeCq7377IkXP -XS6Fo4H0MIHxMA4GA1UdDwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcDAgYI -KwYBBQUHAwEwDAYDVR0TAQH/BAIwADApBgNVHQ4EIgQg26hfNYiVwYRm7CQJvdOd -NIOmG3t8vNwXCtktC782cf8wKwYDVR0jBCQwIoAgq5zUMtiEEu9wqPVm6z17av2D -/UryATifImEVkzsYwqkwWgYDVR0RBFMwUYZPc3BpZmZlOi8vMTExMTExMTEtMjIy -Mi0zMzMzLTQ0NDQtNTU1NTU1NTU1NTU1LmNvbnN1bC9ucy9kZWZhdWx0L2RjL2Rj -MDEvc3ZjL3dlYjAKBggqhkjOPQQDAgNIADBFAiAzi8uBs+ApPfAZZm5eO/hhVZiv 
-E8p84VKCqPeF3tFfoAIhANVkdSnp2AKU5T7SlJHmieu3DFNWCVpajlHJvf286J94 ------END CERTIFICATE----- diff --git a/connect/testdata/ca1-svc-web.key.pem b/connect/testdata/ca1-svc-web.key.pem deleted file mode 100644 index 8ed82c13c..000000000 --- a/connect/testdata/ca1-svc-web.key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIPOIj4BFS0fknG+uAVKZIWRpnzp7O3OKpBDgEmuml7lcoAoGCCqGSM49 -AwEHoUQDQgAEReO5RERl84jQQu+CoVN1tvfzOyTvSyFGiYCtCyW4YH9DLj8X+4F/ -j58R+SJiOcn10b9EksXgqu9++yJFz10uhQ== ------END EC PRIVATE KEY----- diff --git a/connect/testdata/ca2-ca-vault.cert.pem b/connect/testdata/ca2-ca-vault.cert.pem deleted file mode 100644 index a7f617468..000000000 --- a/connect/testdata/ca2-ca-vault.cert.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICDDCCAbKgAwIBAgIBAjAKBggqhkjOPQQDAjAQMQ4wDAYDVQQDEwVWYXVsdDAe -Fw0xODAzMjMyMjA0MjVaFw0yODAzMjAyMjA0MjVaMBAxDjAMBgNVBAMTBVZhdWx0 -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEAjGVnRy/7Q2SU4ePbKbsurRAHKYA -CuA3r9QrowgZOr7yptF54shiobMIORpfKYkoYkhzL1lhWKI06BUJ4xuPd6OB/DCB -+TAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zApBgNVHQ4EIgQgqEc5 -ZrELD5ySxapbU+eRb+aEv1MEoCvjC0mCA1uJecMwKwYDVR0jBCQwIoAgqEc5ZrEL -D5ySxapbU+eRb+aEv1MEoCvjC0mCA1uJecMwPwYDVR0RBDgwNoY0c3BpZmZlOi8v -MTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1NTU1NTU1NTU1LmNvbnN1bDA9BgNV -HR4BAf8EMzAxoC8wLYIrMTExMTExMTEtMjIyMi0zMzMzLTQ0NDQtNTU1NTU1NTU1 -NTU1LmNvbnN1bDAKBggqhkjOPQQDAgNIADBFAiEA6pBdeglhq//A7sYnYk85XL+3 -4IDrXrGN3KjC9qo3J9ICIDS9pEoTPWAWDfn1ccPafKVBrJm6KrmljcvymQ2QUDIZ ------END CERTIFICATE----- ----- diff --git a/connect/testdata/ca2-ca-vault.key.pem b/connect/testdata/ca2-ca-vault.key.pem deleted file mode 100644 index 43534b961..000000000 --- a/connect/testdata/ca2-ca-vault.key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIKnuCctuvtyzf+M6B8jGqejG4T5o7NMRYjO2M3dZITCboAoGCCqGSM49 -AwEHoUQDQgAEAjGVnRy/7Q2SU4ePbKbsurRAHKYACuA3r9QrowgZOr7yptF54shi -obMIORpfKYkoYkhzL1lhWKI06BUJ4xuPdw== ------END EC PRIVATE KEY----- 
diff --git a/connect/testdata/ca2-svc-cache.cert.pem b/connect/testdata/ca2-svc-cache.cert.pem deleted file mode 100644 index 32110e232..000000000 --- a/connect/testdata/ca2-svc-cache.cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICBzCCAaygAwIBAgIBCDAKBggqhkjOPQQDAjAQMQ4wDAYDVQQDEwVWYXVsdDAe -Fw0xODAzMjMyMjA0MjVaFw0yODAzMjAyMjA0MjVaMBAxDjAMBgNVBAMTBWNhY2hl -MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEyB6D+Eqi/71EhUrBWlcZOV2vjS9Y -xnUQ3jfH+QUZur7WOuGLnO7eArbAHcDbqKGyDWxlkZH04sGYOXaEW7UUd6OB9jCB -8zAOBgNVHQ8BAf8EBAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMB -MAwGA1UdEwEB/wQCMAAwKQYDVR0OBCIEIGapiHFxlbYbNKFlwdPMpKhIypvNZXo8 -k/OZLki/vurQMCsGA1UdIwQkMCKAIKhHOWaxCw+cksWqW1PnkW/mhL9TBKAr4wtJ -ggNbiXnDMFwGA1UdEQRVMFOGUXNwaWZmZTovLzExMTExMTExLTIyMjItMzMzMy00 -NDQ0LTU1NTU1NTU1NTU1NS5jb25zdWwvbnMvZGVmYXVsdC9kYy9kYzAxL3N2Yy9j -YWNoZTAKBggqhkjOPQQDAgNJADBGAiEA/vRLXbkigS6l89MxFk0RFE7Zo4vorv7s -E1juCOsVJBICIQDXlpmYH9fPon6DYMyOxQttNjkuWbJgnPv7rPg+CllRyA== ------END CERTIFICATE----- diff --git a/connect/testdata/ca2-svc-cache.key.pem b/connect/testdata/ca2-svc-cache.key.pem deleted file mode 100644 index cabad8179..000000000 --- a/connect/testdata/ca2-svc-cache.key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIEbQOv4odF2Tu8ZnJTJuytvOd2HOF9HxgGw5ei1pkP4moAoGCCqGSM49 -AwEHoUQDQgAEyB6D+Eqi/71EhUrBWlcZOV2vjS9YxnUQ3jfH+QUZur7WOuGLnO7e -ArbAHcDbqKGyDWxlkZH04sGYOXaEW7UUdw== ------END EC PRIVATE KEY----- diff --git a/connect/testdata/ca2-svc-db.cert.pem b/connect/testdata/ca2-svc-db.cert.pem deleted file mode 100644 index 33273058a..000000000 --- a/connect/testdata/ca2-svc-db.cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICADCCAaagAwIBAgIBBzAKBggqhkjOPQQDAjAQMQ4wDAYDVQQDEwVWYXVsdDAe -Fw0xODAzMjMyMjA0MjVaFw0yODAzMjAyMjA0MjVaMA0xCzAJBgNVBAMTAmRiMFkw -EwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFeB4DynO6IeKOE4zFLlBVFv+4HeWRvK3 -6cQ9L6v5uhLfdcYyqhT/QLbQ4R8ks1vUTTiq0XJsAGdkvkt71fiEl6OB8zCB8DAO 
-BgNVHQ8BAf8EBAMCA7gwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMAwG -A1UdEwEB/wQCMAAwKQYDVR0OBCIEIKjVz8n91cej8q6WpDNd0hwSMAE2ddY056PH -hMfaBM6GMCsGA1UdIwQkMCKAIKhHOWaxCw+cksWqW1PnkW/mhL9TBKAr4wtJggNb -iXnDMFkGA1UdEQRSMFCGTnNwaWZmZTovLzExMTExMTExLTIyMjItMzMzMy00NDQ0 -LTU1NTU1NTU1NTU1NS5jb25zdWwvbnMvZGVmYXVsdC9kYy9kYzAxL3N2Yy9kYjAK -BggqhkjOPQQDAgNIADBFAiAdYkokbeZr7W32NhjcNoTMNwpz9CqJpK6Yzu4N6EJc -pAIhALHpRM57zdiMouDOlhGPX5XKzbSl2AnBjFvbPqgFV09Z ------END CERTIFICATE----- diff --git a/connect/testdata/ca2-svc-db.key.pem b/connect/testdata/ca2-svc-db.key.pem deleted file mode 100644 index 7f7ab9ff8..000000000 --- a/connect/testdata/ca2-svc-db.key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIHnzia+DNTFB7uYQEuWvLR2czGCuDfOTt1FfcBo1uBJioAoGCCqGSM49 -AwEHoUQDQgAEFeB4DynO6IeKOE4zFLlBVFv+4HeWRvK36cQ9L6v5uhLfdcYyqhT/ -QLbQ4R8ks1vUTTiq0XJsAGdkvkt71fiElw== ------END EC PRIVATE KEY----- diff --git a/connect/testdata/ca2-svc-web.cert.pem b/connect/testdata/ca2-svc-web.cert.pem deleted file mode 100644 index ae1e338f6..000000000 --- a/connect/testdata/ca2-svc-web.cert.pem +++ /dev/null @@ -1,13 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICAjCCAaigAwIBAgIBBjAKBggqhkjOPQQDAjAQMQ4wDAYDVQQDEwVWYXVsdDAe -Fw0xODAzMjMyMjA0MjVaFw0yODAzMjAyMjA0MjVaMA4xDDAKBgNVBAMTA3dlYjBZ -MBMGByqGSM49AgEGCCqGSM49AwEHA0IABM9XzxWFCa80uQDfJEGboUC15Yr+FwDp -OemThalQxFpkL7gQSIgpzgGULIx+jCiu+clJ0QhbWT2dnS8vFUKq35qjgfQwgfEw -DgYDVR0PAQH/BAQDAgO4MB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAM -BgNVHRMBAf8EAjAAMCkGA1UdDgQiBCCN+TKHPCOr48hxRCx4rqbWQg5QHkCSNzjZ -qi1JGs13njArBgNVHSMEJDAigCCoRzlmsQsPnJLFqltT55Fv5oS/UwSgK+MLSYID -W4l5wzBaBgNVHREEUzBRhk9zcGlmZmU6Ly8xMTExMTExMS0yMjIyLTMzMzMtNDQ0 -NC01NTU1NTU1NTU1NTUuY29uc3VsL25zL2RlZmF1bHQvZGMvZGMwMS9zdmMvd2Vi -MAoGCCqGSM49BAMCA0gAMEUCIBd6gpL6E8rms5BU+cJeeyv0Rjc18edn2g3q2wLN -r1zAAiEAv16whKwR0DeKkldGLDQIu9nCNvfDZrEWgywIBYbzLxY= ------END CERTIFICATE----- diff --git a/connect/testdata/ca2-svc-web.key.pem b/connect/testdata/ca2-svc-web.key.pem 
deleted file mode 100644 index 65f0bc48e..000000000 --- a/connect/testdata/ca2-svc-web.key.pem +++ /dev/null @@ -1,5 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIOCMjjRexX3qHjixpRwLxggJd9yuskqUoPy8/MepafP+oAoGCCqGSM49 -AwEHoUQDQgAEz1fPFYUJrzS5AN8kQZuhQLXliv4XAOk56ZOFqVDEWmQvuBBIiCnO -AZQsjH6MKK75yUnRCFtZPZ2dLy8VQqrfmg== ------END EC PRIVATE KEY----- diff --git a/connect/testdata/ca2-xc-by-ca1.cert.pem b/connect/testdata/ca2-xc-by-ca1.cert.pem deleted file mode 100644 index e864f6c00..000000000 --- a/connect/testdata/ca2-xc-by-ca1.cert.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICFjCCAbygAwIBAgIBAjAKBggqhkjOPQQDAjAaMRgwFgYDVQQDEw9Db25zdWwg -SW50ZXJuYWwwHhcNMTgwMzIzMjIwNDI1WhcNMjgwMzIwMjIwNDI1WjAQMQ4wDAYD -VQQDEwVWYXVsdDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABAIxlZ0cv+0NklOH -j2ym7Lq0QBymAArgN6/UK6MIGTq+8qbReeLIYqGzCDkaXymJKGJIcy9ZYViiNOgV -CeMbj3ejgfwwgfkwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wKQYD -VR0OBCIEIKhHOWaxCw+cksWqW1PnkW/mhL9TBKAr4wtJggNbiXnDMCsGA1UdIwQk -MCKAIKuc1DLYhBLvcKj1Zus9e2r9g/1K8gE4nyJhFZM7GMKpMD8GA1UdEQQ4MDaG -NHNwaWZmZTovLzExMTExMTExLTIyMjItMzMzMy00NDQ0LTU1NTU1NTU1NTU1NS5j -b25zdWwwPQYDVR0eAQH/BDMwMaAvMC2CKzExMTExMTExLTIyMjItMzMzMy00NDQ0 -LTU1NTU1NTU1NTU1NS5jb25zdWwwCgYIKoZIzj0EAwIDSAAwRQIgWWWj8/6SaY2y -wzOtIphwZLewCuLMG6KG8uY4S7UsosgCIQDhCbT/LUKq/A21khQncBmM79ng9Gbx -/4Zw8zbVmnZJKg== ------END CERTIFICATE----- diff --git a/connect/testdata/mkcerts.go b/connect/testdata/mkcerts.go deleted file mode 100644 index 7fe82f53a..000000000 --- a/connect/testdata/mkcerts.go +++ /dev/null @@ -1,243 +0,0 @@ -package main - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/sha256" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "log" - "math/big" - "net/url" - "os" - "regexp" - "strings" - "time" -) - -// You can verify a given leaf with a given root using: -// -// $ openssl verify -verbose -CAfile ca2-ca-vault.cert.pem ca1-svc-db.cert.pem -// -// Note that to verify via the cross-signed 
intermediate, openssl requires it to -// be bundled with the _root_ CA bundle and will ignore the cert if it's passed -// with the subject. You can do that with: -// -// $ openssl verify -verbose -CAfile \ -// <(cat ca1-ca-consul-internal.cert.pem ca2-xc-by-ca1.cert.pem) \ -// ca2-svc-db.cert.pem -// ca2-svc-db.cert.pem: OK -// -// Note that the same leaf and root without the intermediate should fail: -// -// $ openssl verify -verbose -CAfile ca1-ca-consul-internal.cert.pem ca2-svc-db.cert.pem -// ca2-svc-db.cert.pem: CN = db -// error 20 at 0 depth lookup:unable to get local issuer certificate -// -// NOTE: THIS IS A QUIRK OF OPENSSL; in Connect we will distribute the roots -// alone and stable intermediates like the XC cert to the _leaf_. - -var clusterID = "11111111-2222-3333-4444-555555555555" -var cAs = []string{"Consul Internal", "Vault"} -var services = []string{"web", "db", "cache"} -var slugRe = regexp.MustCompile("[^a-zA-Z0-9]+") -var serial int64 - -type caInfo struct { - id int - name string - slug string - uri *url.URL - pk *ecdsa.PrivateKey - cert *x509.Certificate -} - -func main() { - // Make CA certs - caInfos := make(map[string]caInfo) - var previousCA *caInfo - for idx, name := range cAs { - ca := caInfo{ - id: idx + 1, - name: name, - slug: strings.ToLower(slugRe.ReplaceAllString(name, "-")), - } - pk, err := makePK(fmt.Sprintf("ca%d-ca-%s.key.pem", ca.id, ca.slug)) - if err != nil { - log.Fatal(err) - } - ca.pk = pk - caURI, err := url.Parse(fmt.Sprintf("spiffe://%s.consul", clusterID)) - if err != nil { - log.Fatal(err) - } - ca.uri = caURI - cert, err := makeCACert(ca, previousCA) - if err != nil { - log.Fatal(err) - } - ca.cert = cert - caInfos[name] = ca - previousCA = &ca - } - - // For each CA, make a leaf cert for each service - for _, ca := range caInfos { - for _, svc := range services { - _, err := makeLeafCert(ca, svc) - if err != nil { - log.Fatal(err) - } - } - } -} - -func makePK(path string) (*ecdsa.PrivateKey, error) { - 
log.Printf("Writing PK file: %s", path) - priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return nil, err - } - - bs, err := x509.MarshalECPrivateKey(priv) - if err != nil { - return nil, err - } - - err = writePEM(path, "EC PRIVATE KEY", bs) - return priv, nil -} - -func makeCACert(ca caInfo, previousCA *caInfo) (*x509.Certificate, error) { - path := fmt.Sprintf("ca%d-ca-%s.cert.pem", ca.id, ca.slug) - log.Printf("Writing CA cert file: %s", path) - serial++ - subj := pkix.Name{ - CommonName: ca.name, - } - template := x509.Certificate{ - SerialNumber: big.NewInt(serial), - Subject: subj, - // New in go 1.10 - URIs: []*url.URL{ca.uri}, - // Add DNS name constraint - PermittedDNSDomainsCritical: true, - PermittedDNSDomains: []string{ca.uri.Hostname()}, - SignatureAlgorithm: x509.ECDSAWithSHA256, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign | x509.KeyUsageDigitalSignature, - IsCA: true, - NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), - NotBefore: time.Now(), - AuthorityKeyId: keyID(&ca.pk.PublicKey), - SubjectKeyId: keyID(&ca.pk.PublicKey), - } - bs, err := x509.CreateCertificate(rand.Reader, &template, &template, - &ca.pk.PublicKey, ca.pk) - if err != nil { - return nil, err - } - - err = writePEM(path, "CERTIFICATE", bs) - if err != nil { - return nil, err - } - - cert, err := x509.ParseCertificate(bs) - if err != nil { - return nil, err - } - - if previousCA != nil { - // Also create cross-signed cert as we would use during rotation between - // previous CA and this one. 
- template.AuthorityKeyId = keyID(&previousCA.pk.PublicKey) - bs, err := x509.CreateCertificate(rand.Reader, &template, - previousCA.cert, &ca.pk.PublicKey, previousCA.pk) - if err != nil { - return nil, err - } - - path := fmt.Sprintf("ca%d-xc-by-ca%d.cert.pem", ca.id, previousCA.id) - err = writePEM(path, "CERTIFICATE", bs) - if err != nil { - return nil, err - } - } - - return cert, err -} - -func keyID(pub *ecdsa.PublicKey) []byte { - // This is not standard; RFC allows any unique identifier as long as they - // match in subject/authority chains but suggests specific hashing of DER - // bytes of public key including DER tags. I can't be bothered to do esp. - // since ECDSA keys don't have a handy way to marshal the publick key alone. - h := sha256.New() - h.Write(pub.X.Bytes()) - h.Write(pub.Y.Bytes()) - return h.Sum([]byte{}) -} - -func makeLeafCert(ca caInfo, svc string) (*x509.Certificate, error) { - svcURI := ca.uri - svcURI.Path = "/ns/default/dc/dc01/svc/" + svc - - keyPath := fmt.Sprintf("ca%d-svc-%s.key.pem", ca.id, svc) - cPath := fmt.Sprintf("ca%d-svc-%s.cert.pem", ca.id, svc) - - pk, err := makePK(keyPath) - if err != nil { - return nil, err - } - - log.Printf("Writing Service Cert: %s", cPath) - - serial++ - subj := pkix.Name{ - CommonName: svc, - } - template := x509.Certificate{ - SerialNumber: big.NewInt(serial), - Subject: subj, - // New in go 1.10 - URIs: []*url.URL{svcURI}, - SignatureAlgorithm: x509.ECDSAWithSHA256, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageDataEncipherment | - x509.KeyUsageKeyAgreement | x509.KeyUsageDigitalSignature | - x509.KeyUsageKeyEncipherment, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageClientAuth, - x509.ExtKeyUsageServerAuth, - }, - NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), - NotBefore: time.Now(), - AuthorityKeyId: keyID(&ca.pk.PublicKey), - SubjectKeyId: keyID(&pk.PublicKey), - } - bs, err := x509.CreateCertificate(rand.Reader, &template, ca.cert, - &pk.PublicKey, ca.pk) - if err 
!= nil { - return nil, err - } - - err = writePEM(cPath, "CERTIFICATE", bs) - if err != nil { - return nil, err - } - - return x509.ParseCertificate(bs) -} - -func writePEM(name, typ string, bs []byte) error { - f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE, 0600) - if err != nil { - return err - } - defer f.Close() - return pem.Encode(f, &pem.Block{Type: typ, Bytes: bs}) -} diff --git a/connect/testing.go b/connect/testing.go index 90db332a2..7e1b9cdac 100644 --- a/connect/testing.go +++ b/connect/testing.go @@ -4,30 +4,155 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" - "path" - "path/filepath" - "runtime" + "io" + "net" + "sync/atomic" - "github.com/mitchellh/go-testing-interface" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/lib/freeport" + testing "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" ) -// testDataDir is a janky temporary hack to allow use of these methods from -// proxy package. We need to revisit where all this lives since it logically -// overlaps with consul/agent in Mitchell's PR and that one generates certs on -// the fly which will make this unecessary but I want to get things working for -// now with what I've got :). This wonderful heap kinda-sorta gets the path -// relative to _this_ file so it works even if the Test* method is being called -// from a test binary in another package dir. -func testDataDir() string { - _, filename, _, ok := runtime.Caller(0) - if !ok { - panic("no caller information") - } - return path.Dir(filename) + "/testdata" +// testVerifier creates a helper verifyFunc that can be set in a tls.Config and +// records calls made, passing back the certificates presented via the returned +// channel. The channel is buffered so up to 128 verification calls can be made +// without reading the chan before verification blocks. 
+func testVerifier(t testing.T, returnErr error) (verifyFunc, chan [][]byte) { + ch := make(chan [][]byte, 128) + return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + ch <- rawCerts + return returnErr + }, ch } +// TestTLSConfig returns a *tls.Config suitable for use during tests. +func TestTLSConfig(t testing.T, service string, ca *structs.CARoot) *tls.Config { + t.Helper() + + // Insecure default (nil verifier) + cfg := defaultTLSConfig(nil) + cfg.Certificates = []tls.Certificate{TestSvcKeyPair(t, service, ca)} + cfg.RootCAs = TestCAPool(t, ca) + cfg.ClientCAs = TestCAPool(t, ca) + return cfg +} + +// TestCAPool returns an *x509.CertPool containing the passed CA's root(s) +func TestCAPool(t testing.T, cas ...*structs.CARoot) *x509.CertPool { + t.Helper() + pool := x509.NewCertPool() + for _, ca := range cas { + pool.AppendCertsFromPEM([]byte(ca.RootCert)) + } + return pool +} + +// TestSvcKeyPair returns an tls.Certificate containing both cert and private +// key for a given service under a given CA from the testdata dir. +func TestSvcKeyPair(t testing.T, service string, ca *structs.CARoot) tls.Certificate { + t.Helper() + certPEM, keyPEM := connect.TestLeaf(t, service, ca) + cert, err := tls.X509KeyPair([]byte(certPEM), []byte(keyPEM)) + require.Nil(t, err) + return cert +} + +// TestPeerCertificates returns a []*x509.Certificate as you'd get from +// tls.Conn.ConnectionState().PeerCertificates including the named certificate. +func TestPeerCertificates(t testing.T, service string, ca *structs.CARoot) []*x509.Certificate { + t.Helper() + certPEM, _ := connect.TestLeaf(t, service, ca) + cert, err := connect.ParseCert(certPEM) + require.Nil(t, err) + return []*x509.Certificate{cert} +} + +// TestService runs a service listener that can be used to test clients. It's +// behaviour can be controlled by the struct members. +type TestService struct { + // The service name to serve. 
+ Service string + // The (test) CA to use for generating certs. + CA *structs.CARoot + // TimeoutHandshake controls whether the listening server will complete a TLS + // handshake quickly enough. + TimeoutHandshake bool + // TLSCfg is the tls.Config that will be used. By default it's set up from the + // service and ca set. + TLSCfg *tls.Config + // Addr is the listen address. It is set to a random free port on `localhost` + // by default. + Addr string + + l net.Listener + stopFlag int32 + stopChan chan struct{} +} + +// NewTestService returns a TestService. It should be closed when test is +// complete. +func NewTestService(t testing.T, service string, ca *structs.CARoot) *TestService { + ports := freeport.GetT(t, 1) + return &TestService{ + Service: service, + CA: ca, + stopChan: make(chan struct{}), + TLSCfg: TestTLSConfig(t, service, ca), + Addr: fmt.Sprintf("localhost:%d", ports[0]), + } +} + +// Serve runs a TestService and blocks until it is closed or errors. +func (s *TestService) Serve() error { + // Just accept TCP conn but so we can control timing of accept/handshake + l, err := net.Listen("tcp", s.Addr) + if err != nil { + return err + } + s.l = l + + for { + conn, err := s.l.Accept() + if err != nil { + if atomic.LoadInt32(&s.stopFlag) == 1 { + return nil + } + return err + } + + // Ignore the conn if we are not actively ha + if !s.TimeoutHandshake { + // Upgrade conn to TLS + conn = tls.Server(conn, s.TLSCfg) + + // Run an echo service + go io.Copy(conn, conn) + } + + // Close this conn when we stop + go func(c net.Conn) { + <-s.stopChan + c.Close() + }(conn) + } + + return nil +} + +// Close stops a TestService +func (s *TestService) Close() { + old := atomic.SwapInt32(&s.stopFlag, 1) + if old == 0 { + if s.l != nil { + s.l.Close() + } + close(s.stopChan) + } +} + +/* // TestCAPool returns an *x509.CertPool containing the named CA certs from the // testdata dir. 
func TestCAPool(t testing.T, caNames ...string) *x509.CertPool { @@ -86,3 +211,4 @@ func (a *TestAuther) Auth(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { return a.Return } +*/ diff --git a/connect/tls.go b/connect/tls.go index af66d9c0c..8d3bc3a94 100644 --- a/connect/tls.go +++ b/connect/tls.go @@ -3,13 +3,18 @@ package connect import ( "crypto/tls" "crypto/x509" + "errors" "io/ioutil" "sync" + + "github.com/hashicorp/consul/agent/connect" ) -// defaultTLSConfig returns the standard config for connect clients and servers. -func defaultTLSConfig() *tls.Config { - serverAuther := &ServerAuther{} +// verifyFunc is the type of tls.Config.VerifyPeerCertificate for convenience. +type verifyFunc func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error + +// defaultTLSConfig returns the standard config. +func defaultTLSConfig(verify verifyFunc) *tls.Config { return &tls.Config{ MinVersion: tls.VersionTLS12, ClientAuth: tls.RequireAndVerifyClientCert, @@ -29,16 +34,18 @@ func defaultTLSConfig() *tls.Config { // We have to set this since otherwise Go will attempt to verify DNS names // match DNS SAN/CN which we don't want. We hook up VerifyPeerCertificate to // do our own path validation as well as Connect AuthZ. - InsecureSkipVerify: true, - // By default auth as if we are a server. Clients need to override this with - // an Auther that is performs correct validation of the server identity they - // intended to connect to. - VerifyPeerCertificate: serverAuther.Auth, + InsecureSkipVerify: true, + VerifyPeerCertificate: verify, + // Include h2 to allow connect http servers to automatically support http2. + // See: https://github.com/golang/go/blob/917c33fe8672116b04848cf11545296789cafd3b/src/net/http/server.go#L2724-L2731 + NextProtos: []string{"h2"}, } } // ReloadableTLSConfig exposes a tls.Config that can have it's certificates -// reloaded. This works by +// reloaded. 
On a server, this uses GetConfigForClient to pass the current +// tls.Config or client certificate for each acceptted connection. On a client, +// this uses GetClientCertificate to provide the current client certificate. type ReloadableTLSConfig struct { mu sync.Mutex @@ -46,52 +53,40 @@ type ReloadableTLSConfig struct { cfg *tls.Config } -// NewReloadableTLSConfig returns a reloadable config currently set to base. The -// Auther used to verify certificates for incoming connections on a Server will -// just be copied from the VerifyPeerCertificate passed. Clients will need to -// pass a specific Auther instance when they call TLSConfig that is configured -// to perform the necessary validation of the server's identity. +// NewReloadableTLSConfig returns a reloadable config currently set to base. func NewReloadableTLSConfig(base *tls.Config) *ReloadableTLSConfig { - return &ReloadableTLSConfig{cfg: base} + c := &ReloadableTLSConfig{} + c.SetTLSConfig(base) + return c } -// ServerTLSConfig returns a *tls.Config that will dynamically load certs for -// each inbound connection via the GetConfigForClient callback. -func (c *ReloadableTLSConfig) ServerTLSConfig() *tls.Config { - // Setup the basic one with current params even though we will be using - // different config for each new conn. - c.mu.Lock() - base := c.cfg - c.mu.Unlock() - - // Dynamically fetch the current config for each new inbound connection - base.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) { - return c.TLSConfig(nil), nil - } - - return base -} - -// TLSConfig returns the current value for the config. It is safe to call from -// any goroutine. The passed Auther is inserted into the config's -// VerifyPeerCertificate. Passing a nil Auther will leave the default one in the -// base config -func (c *ReloadableTLSConfig) TLSConfig(auther Auther) *tls.Config { +// TLSConfig returns a *tls.Config that will dynamically load certs. 
It's +// suitable for use in either a client or server. +func (c *ReloadableTLSConfig) TLSConfig() *tls.Config { c.mu.Lock() cfgCopy := c.cfg c.mu.Unlock() - if auther != nil { - cfgCopy.VerifyPeerCertificate = auther.Auth - } return cfgCopy } // SetTLSConfig sets the config used for future connections. It is safe to call // from any goroutine. func (c *ReloadableTLSConfig) SetTLSConfig(cfg *tls.Config) error { + copy := cfg.Clone() + copy.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + current := c.TLSConfig() + if len(current.Certificates) < 1 { + return nil, errors.New("tls: no certificates configured") + } + return ¤t.Certificates[0], nil + } + copy.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { + return c.TLSConfig(), nil + } + c.mu.Lock() defer c.mu.Unlock() - c.cfg = cfg + c.cfg = copy return nil } @@ -114,7 +109,8 @@ func devTLSConfigFromFiles(caFile, certFile, return nil, err } - cfg := defaultTLSConfig() + // Insecure no verification + cfg := defaultTLSConfig(nil) cfg.Certificates = []tls.Certificate{cert} cfg.RootCAs = roots @@ -122,3 +118,43 @@ func devTLSConfigFromFiles(caFile, certFile, return cfg, nil } + +// verifyServerCertMatchesURI is used on tls connections dialled to a connect +// server to ensure that the certificate it presented has the correct identity. +func verifyServerCertMatchesURI(certs []*x509.Certificate, + expected connect.CertURI) error { + expectedStr := expected.URI().String() + + if len(certs) < 1 { + return errors.New("peer certificate mismatch") + } + + // Only check the first cert assuming this is the only leaf. It's not clear if + // services might ever legitimately present multiple leaf certificates or if + // the slice is just to allow presenting the whole chain of intermediates. 
+ cert := certs[0] + + // Our certs will only ever have a single URI for now so only check that + if len(cert.URIs) < 1 { + return errors.New("peer certificate mismatch") + } + // We may want to do better than string matching later in some special + // cases and/or encapsulate the "match" logic inside the CertURI + // implementation but for now this is all we need. + if cert.URIs[0].String() == expectedStr { + return nil + } + return errors.New("peer certificate mismatch") +} + +// serverVerifyCerts is the verifyFunc for use on Connect servers. +func serverVerifyCerts(rawCerts [][]byte, chains [][]*x509.Certificate) error { + // TODO(banks): implement me + return nil +} + +// clientVerifyCerts is the verifyFunc for use on Connect clients. +func clientVerifyCerts(rawCerts [][]byte, chains [][]*x509.Certificate) error { + // TODO(banks): implement me + return nil +} diff --git a/connect/tls_test.go b/connect/tls_test.go index 0c99df3ad..3605f22db 100644 --- a/connect/tls_test.go +++ b/connect/tls_test.go @@ -1,45 +1,103 @@ package connect import ( - "crypto/tls" + "crypto/x509" "testing" + "github.com/hashicorp/consul/agent/connect" "github.com/stretchr/testify/require" ) func TestReloadableTLSConfig(t *testing.T) { - base := TestTLSConfig(t, "ca1", "web") + require := require.New(t) + verify, _ := testVerifier(t, nil) + base := defaultTLSConfig(verify) c := NewReloadableTLSConfig(base) - a := &TestAuther{ - Return: nil, - } + // The dynamic config should be the one we loaded (with some different hooks) + got := c.TLSConfig() + expect := *base + // Equal and even cmp.Diff fail on tls.Config due to unexported fields in + // each. Compare a few things to prove it's returning the bits we + // specifically set. 
+ require.Equal(expect.Certificates, got.Certificates) + require.Equal(expect.RootCAs, got.RootCAs) + require.Equal(expect.ClientCAs, got.ClientCAs) + require.Equal(expect.InsecureSkipVerify, got.InsecureSkipVerify) + require.Equal(expect.MinVersion, got.MinVersion) + require.Equal(expect.CipherSuites, got.CipherSuites) + require.NotNil(got.GetClientCertificate) + require.NotNil(got.GetConfigForClient) + require.Contains(got.NextProtos, "h2") - // The dynamic config should be the one we loaded, but with the passed auther - expect := base - expect.VerifyPeerCertificate = a.Auth - require.Equal(t, base, c.TLSConfig(a)) + ca := connect.TestCA(t, nil) - // The server config should return same too for new connections - serverCfg := c.ServerTLSConfig() - require.NotNil(t, serverCfg.GetConfigForClient) - got, err := serverCfg.GetConfigForClient(&tls.ClientHelloInfo{}) - require.Nil(t, err) - require.Equal(t, base, got) + // Now change the config as if we just loaded certs from Consul + new := TestTLSConfig(t, "web", ca) + err := c.SetTLSConfig(new) + require.Nil(err) - // Now change the config as if we just rotated to a new CA - new := TestTLSConfig(t, "ca2", "web") - err = c.SetTLSConfig(new) - require.Nil(t, err) + // Change the passed config to ensure SetTLSConfig made a copy otherwise this + // is racey. 
+ expect = *new + new.Certificates = nil - // The dynamic config should be the one we loaded (with same auther due to nil) - require.Equal(t, new, c.TLSConfig(nil)) - - // The server config should return same too for new connections - serverCfg = c.ServerTLSConfig() - require.NotNil(t, serverCfg.GetConfigForClient) - got, err = serverCfg.GetConfigForClient(&tls.ClientHelloInfo{}) - require.Nil(t, err) - require.Equal(t, new, got) + // The dynamic config should be the one we loaded (with some different hooks) + got = c.TLSConfig() + require.Equal(expect.Certificates, got.Certificates) + require.Equal(expect.RootCAs, got.RootCAs) + require.Equal(expect.ClientCAs, got.ClientCAs) + require.Equal(expect.InsecureSkipVerify, got.InsecureSkipVerify) + require.Equal(expect.MinVersion, got.MinVersion) + require.Equal(expect.CipherSuites, got.CipherSuites) + require.NotNil(got.GetClientCertificate) + require.NotNil(got.GetConfigForClient) + require.Contains(got.NextProtos, "h2") +} + +func Test_verifyServerCertMatchesURI(t *testing.T) { + ca1 := connect.TestCA(t, nil) + + tests := []struct { + name string + certs []*x509.Certificate + expected connect.CertURI + wantErr bool + }{ + { + name: "simple match", + certs: TestPeerCertificates(t, "web", ca1), + expected: connect.TestSpiffeIDService(t, "web"), + wantErr: false, + }, + { + name: "mismatch", + certs: TestPeerCertificates(t, "web", ca1), + expected: connect.TestSpiffeIDService(t, "db"), + wantErr: true, + }, + { + name: "no certs", + certs: []*x509.Certificate{}, + expected: connect.TestSpiffeIDService(t, "db"), + wantErr: true, + }, + { + name: "nil certs", + certs: nil, + expected: connect.TestSpiffeIDService(t, "db"), + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := verifyServerCertMatchesURI(tt.certs, tt.expected) + if tt.wantErr { + require.NotNil(t, err) + } else { + require.Nil(t, err) + } + }) + } } From 67669abf82ef8e21dfa3dc9db0bf0d405b4a61a6 Mon Sep 17 
00:00:00 2001 From: Paul Banks Date: Tue, 3 Apr 2018 12:55:50 +0100 Subject: [PATCH 126/627] Remove old connect client and proxy implementation --- connect/client.go | 256 ------------------------- connect/client_test.go | 148 --------------- connect/testing.go | 61 ------ proxy/config.go | 111 ----------- proxy/config_test.go | 46 ----- proxy/conn.go | 48 ----- proxy/conn_test.go | 119 ------------ proxy/manager.go | 140 -------------- proxy/manager_test.go | 76 -------- proxy/proxier.go | 32 ---- proxy/proxy.go | 112 ----------- proxy/public_listener.go | 119 ------------ proxy/public_listener_test.go | 38 ---- proxy/runner.go | 118 ------------ proxy/testdata/config-kitchensink.hcl | 36 ---- proxy/testing.go | 170 ----------------- proxy/upstream.go | 261 -------------------------- proxy/upstream_test.go | 75 -------- 18 files changed, 1966 deletions(-) delete mode 100644 connect/client.go delete mode 100644 connect/client_test.go delete mode 100644 proxy/config.go delete mode 100644 proxy/config_test.go delete mode 100644 proxy/conn.go delete mode 100644 proxy/conn_test.go delete mode 100644 proxy/manager.go delete mode 100644 proxy/manager_test.go delete mode 100644 proxy/proxier.go delete mode 100644 proxy/proxy.go delete mode 100644 proxy/public_listener.go delete mode 100644 proxy/public_listener_test.go delete mode 100644 proxy/runner.go delete mode 100644 proxy/testdata/config-kitchensink.hcl delete mode 100644 proxy/testing.go delete mode 100644 proxy/upstream.go delete mode 100644 proxy/upstream_test.go diff --git a/connect/client.go b/connect/client.go deleted file mode 100644 index 18e43f4cb..000000000 --- a/connect/client.go +++ /dev/null @@ -1,256 +0,0 @@ -package connect - -// import ( -// "context" -// "crypto/tls" -// "fmt" -// "math/rand" -// "net" - -// "github.com/hashicorp/consul/api" -// ) - -// // CertStatus indicates whether the Client currently has valid certificates for -// // incoming and outgoing connections. 
-// type CertStatus int - -// const ( -// // CertStatusUnknown is the zero value for CertStatus which may be returned -// // when a watch channel is closed on shutdown. It has no other meaning. -// CertStatusUnknown CertStatus = iota - -// // CertStatusOK indicates the client has valid certificates and trust roots to -// // Authenticate incoming and outgoing connections. -// CertStatusOK - -// // CertStatusPending indicates the client is waiting to be issued initial -// // certificates, or that it's certificates have expired and it's waiting to be -// // issued new ones. In this state all incoming and outgoing connections will -// // fail. -// CertStatusPending -// ) - -// func (s CertStatus) String() string { -// switch s { -// case CertStatusOK: -// return "OK" -// case CertStatusPending: -// return "pending" -// case CertStatusUnknown: -// fallthrough -// default: -// return "unknown" -// } -// } - -// // Client is the interface a basic client implementation must support. -// type Client interface { -// // TODO(banks): build this and test it -// // CertStatus returns the current status of the client's certificates. It can -// // be used to determine if the Client is able to service requests at the -// // current time. -// //CertStatus() CertStatus - -// // TODO(banks): build this and test it -// // WatchCertStatus returns a channel that is notified on all status changes. -// // Note that a message on the channel isn't guaranteed to be different so it's -// // value should be inspected. During Client shutdown the channel will be -// // closed returning a zero type which is equivalent to CertStatusUnknown. -// //WatchCertStatus() <-chan CertStatus - -// // ServerTLSConfig returns the *tls.Config to be used when creating a TCP -// // listener that should accept Connect connections. It is likely that at -// // startup the tlsCfg returned will not be immediately usable since -// // certificates are typically fetched from the agent asynchronously. 
In this -// // case it's still safe to listen with the provided config, but auth failures -// // will occur until initial certificate discovery is complete. In general at -// // any time it is possible for certificates to expire before new replacements -// // have been issued due to local network errors so the server may not actually -// // have a working certificate configuration at any time, however as soon as -// // valid certs can be issued it will automatically start working again so -// // should take no action. -// ServerTLSConfig() (*tls.Config, error) - -// // DialService opens a new connection to the named service registered in -// // Consul. It will perform service discovery to find healthy instances. If -// // there is an error during connection it is returned and the caller may call -// // again. The client implementation makes a best effort to make consecutive -// // Dials against different instances either by randomising the list and/or -// // maintaining a local memory of which instances recently failed. If the -// // context passed times out before connection is established and verified an -// // error is returned. -// DialService(ctx context.Context, namespace, name string) (net.Conn, error) - -// // DialPreparedQuery opens a new connection by executing the named Prepared -// // Query against the local Consul agent, and picking one of the returned -// // instances to connect to. It will perform service discovery with the same -// // semantics as DialService. -// DialPreparedQuery(ctx context.Context, namespace, name string) (net.Conn, error) -// } - -// /* - -// Maybe also convenience wrappers for: -// - listening TLS conn with right config -// - http.ListenAndServeTLS equivalent - -// */ - -// // AgentClient is the primary implementation of a connect.Client which -// // communicates with the local Consul agent. 
-// type AgentClient struct { -// agent *api.Client -// tlsCfg *ReloadableTLSConfig -// } - -// // NewClient returns an AgentClient to allow consuming and providing -// // Connect-enabled network services. -// func NewClient(agent *api.Client) Client { -// // TODO(banks): hook up fetching certs from Agent and updating tlsCfg on cert -// // delivery/change. Perhaps need to make -// return &AgentClient{ -// agent: agent, -// tlsCfg: NewReloadableTLSConfig(defaultTLSConfig()), -// } -// } - -// // NewInsecureDevClientWithLocalCerts returns an AgentClient that will still do -// // service discovery via the local agent but will use externally provided -// // certificates and skip authorization. This is intended just for development -// // and must not be used ever in production. -// func NewInsecureDevClientWithLocalCerts(agent *api.Client, caFile, certFile, -// keyFile string) (Client, error) { - -// cfg, err := devTLSConfigFromFiles(caFile, certFile, keyFile) -// if err != nil { -// return nil, err -// } - -// return &AgentClient{ -// agent: agent, -// tlsCfg: NewReloadableTLSConfig(cfg), -// }, nil -// } - -// // ServerTLSConfig implements Client -// func (c *AgentClient) ServerTLSConfig() (*tls.Config, error) { -// return c.tlsCfg.ServerTLSConfig(), nil -// } - -// // DialService implements Client -// func (c *AgentClient) DialService(ctx context.Context, namespace, -// name string) (net.Conn, error) { -// return c.dial(ctx, "service", namespace, name) -// } - -// // DialPreparedQuery implements Client -// func (c *AgentClient) DialPreparedQuery(ctx context.Context, namespace, -// name string) (net.Conn, error) { -// return c.dial(ctx, "prepared_query", namespace, name) -// } - -// func (c *AgentClient) dial(ctx context.Context, discoveryType, namespace, -// name string) (net.Conn, error) { - -// svcs, err := c.discoverInstances(ctx, discoveryType, namespace, name) -// if err != nil { -// return nil, err -// } - -// svc, err := c.pickInstance(svcs) -// if err != nil 
{ -// return nil, err -// } -// if svc == nil { -// return nil, fmt.Errorf("no healthy services discovered") -// } - -// // OK we have a service we can dial! We need a ClientAuther that will validate -// // the connection is legit. - -// // TODO(banks): implement ClientAuther properly to actually verify connected -// // cert matches the expected service/cluster etc. based on svc. -// auther := &ClientAuther{} -// tlsConfig := c.tlsCfg.TLSConfig(auther) - -// // Resolve address TODO(banks): I expected this to happen magically in the -// // agent at registration time if I register with no explicit address but -// // apparently doesn't. This is a quick hack to make it work for now, need to -// // see if there is a better shared code path for doing this. -// addr := svc.Service.Address -// if addr == "" { -// addr = svc.Node.Address -// } -// var dialer net.Dialer -// tcpConn, err := dialer.DialContext(ctx, "tcp", -// fmt.Sprintf("%s:%d", addr, svc.Service.Port)) -// if err != nil { -// return nil, err -// } - -// tlsConn := tls.Client(tcpConn, tlsConfig) -// err = tlsConn.Handshake() -// if err != nil { -// tlsConn.Close() -// return nil, err -// } - -// return tlsConn, nil -// } - -// // pickInstance returns an instance from the given list to try to connect to. It -// // may be made pluggable later, for now it just picks a random one regardless of -// // whether the list is already shuffled. -// func (c *AgentClient) pickInstance(svcs []*api.ServiceEntry) (*api.ServiceEntry, error) { -// if len(svcs) < 1 { -// return nil, nil -// } -// idx := rand.Intn(len(svcs)) -// return svcs[idx], nil -// } - -// // discoverInstances returns all instances for the given discoveryType, -// // namespace and name. The returned service entries may or may not be shuffled -// func (c *AgentClient) discoverInstances(ctx context.Context, discoverType, -// namespace, name string) ([]*api.ServiceEntry, error) { - -// q := &api.QueryOptions{ -// // TODO(banks): make this configurable? 
-// AllowStale: true, -// } -// q = q.WithContext(ctx) - -// switch discoverType { -// case "service": -// svcs, _, err := c.agent.Health().Connect(name, "", true, q) -// if err != nil { -// return nil, err -// } -// return svcs, err - -// case "prepared_query": -// // TODO(banks): it's not super clear to me how this should work eventually. -// // How do we distinguise between a PreparedQuery for the actual services and -// // one that should return the connect proxies where that differs? If we -// // can't then we end up with a janky UX where user specifies a reasonable -// // prepared query but we try to connect to non-connect services and fail -// // with a confusing TLS error. Maybe just a way to filter PreparedQuery -// // results by connect-enabled would be sufficient (or even metadata to do -// // that ourselves in the response although less efficient). -// resp, _, err := c.agent.PreparedQuery().Execute(name, q) -// if err != nil { -// return nil, err -// } - -// // Awkward, we have a slice of api.ServiceEntry here but want a slice of -// // *api.ServiceEntry for compat with Connect/Service APIs. Have to convert -// // them to keep things type-happy. 
-// svcs := make([]*api.ServiceEntry, len(resp.Nodes)) -// for idx, se := range resp.Nodes { -// svcs[idx] = &se -// } -// return svcs, err -// default: -// return nil, fmt.Errorf("unsupported discovery type: %s", discoverType) -// } -// } diff --git a/connect/client_test.go b/connect/client_test.go deleted file mode 100644 index 045bc8fd6..000000000 --- a/connect/client_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package connect - -// import ( -// "context" -// "crypto/x509" -// "crypto/x509/pkix" -// "encoding/asn1" -// "io/ioutil" -// "net" -// "net/http" -// "net/http/httptest" -// "net/url" -// "strconv" -// "testing" - -// "github.com/hashicorp/consul/api" -// "github.com/hashicorp/consul/testutil" -// "github.com/stretchr/testify/require" -// ) - -// func TestNewInsecureDevClientWithLocalCerts(t *testing.T) { - -// agent, err := api.NewClient(api.DefaultConfig()) -// require.Nil(t, err) - -// got, err := NewInsecureDevClientWithLocalCerts(agent, -// "testdata/ca1-ca-consul-internal.cert.pem", -// "testdata/ca1-svc-web.cert.pem", -// "testdata/ca1-svc-web.key.pem", -// ) -// require.Nil(t, err) - -// // Sanity check correct certs were loaded -// serverCfg, err := got.ServerTLSConfig() -// require.Nil(t, err) -// caSubjects := serverCfg.RootCAs.Subjects() -// require.Len(t, caSubjects, 1) -// caSubject, err := testNameFromRawDN(caSubjects[0]) -// require.Nil(t, err) -// require.Equal(t, "Consul Internal", caSubject.CommonName) - -// require.Len(t, serverCfg.Certificates, 1) -// cert, err := x509.ParseCertificate(serverCfg.Certificates[0].Certificate[0]) -// require.Nil(t, err) -// require.Equal(t, "web", cert.Subject.CommonName) -// } - -// func testNameFromRawDN(raw []byte) (*pkix.Name, error) { -// var seq pkix.RDNSequence -// if _, err := asn1.Unmarshal(raw, &seq); err != nil { -// return nil, err -// } - -// var name pkix.Name -// name.FillFromRDNSequence(&seq) -// return &name, nil -// } - -// func testAgent(t *testing.T) (*testutil.TestServer, *api.Client) 
{ -// t.Helper() - -// // Make client config -// conf := api.DefaultConfig() - -// // Create server -// server, err := testutil.NewTestServerConfigT(t, nil) -// require.Nil(t, err) - -// conf.Address = server.HTTPAddr - -// // Create client -// agent, err := api.NewClient(conf) -// require.Nil(t, err) - -// return server, agent -// } - -// func testService(t *testing.T, ca, name string, client *api.Client) *httptest.Server { -// t.Helper() - -// // Run a test service to discover -// server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("svc: " + name)) -// })) -// server.TLS = TestTLSConfig(t, ca, name) -// server.StartTLS() - -// u, err := url.Parse(server.URL) -// require.Nil(t, err) - -// port, err := strconv.Atoi(u.Port()) -// require.Nil(t, err) - -// // If client is passed, register the test service instance -// if client != nil { -// svc := &api.AgentServiceRegistration{ -// // TODO(banks): we don't really have a good way to represent -// // connect-native apps yet so we have to pretend out little server is a -// // proxy for now. 
-// Kind: api.ServiceKindConnectProxy, -// ProxyDestination: name, -// Name: name + "-proxy", -// Address: u.Hostname(), -// Port: port, -// } -// err := client.Agent().ServiceRegister(svc) -// require.Nil(t, err) -// } - -// return server -// } - -// func TestDialService(t *testing.T) { -// consulServer, agent := testAgent(t) -// defer consulServer.Stop() - -// svc := testService(t, "ca1", "web", agent) -// defer svc.Close() - -// c, err := NewInsecureDevClientWithLocalCerts(agent, -// "testdata/ca1-ca-consul-internal.cert.pem", -// "testdata/ca1-svc-web.cert.pem", -// "testdata/ca1-svc-web.key.pem", -// ) -// require.Nil(t, err) - -// conn, err := c.DialService(context.Background(), "default", "web") -// require.Nilf(t, err, "err: %s", err) - -// // Inject our conn into http.Transport -// httpClient := &http.Client{ -// Transport: &http.Transport{ -// DialTLS: func(network, addr string) (net.Conn, error) { -// return conn, nil -// }, -// }, -// } - -// // Don't be fooled the hostname here is ignored since we did the dialling -// // ourselves -// resp, err := httpClient.Get("https://web.connect.consul/") -// require.Nil(t, err) -// defer resp.Body.Close() -// body, err := ioutil.ReadAll(resp.Body) -// require.Nil(t, err) - -// require.Equal(t, "svc: web", string(body)) -// } diff --git a/connect/testing.go b/connect/testing.go index 7e1b9cdac..f6fa438cf 100644 --- a/connect/testing.go +++ b/connect/testing.go @@ -151,64 +151,3 @@ func (s *TestService) Close() { close(s.stopChan) } } - -/* -// TestCAPool returns an *x509.CertPool containing the named CA certs from the -// testdata dir. 
-func TestCAPool(t testing.T, caNames ...string) *x509.CertPool { - t.Helper() - pool := x509.NewCertPool() - for _, name := range caNames { - certs, err := filepath.Glob(testDataDir() + "/" + name + "-ca-*.cert.pem") - require.Nil(t, err) - for _, cert := range certs { - caPem, err := ioutil.ReadFile(cert) - require.Nil(t, err) - pool.AppendCertsFromPEM(caPem) - } - } - return pool -} - -// TestSvcKeyPair returns an tls.Certificate containing both cert and private -// key for a given service under a given CA from the testdata dir. -func TestSvcKeyPair(t testing.T, ca, name string) tls.Certificate { - t.Helper() - prefix := fmt.Sprintf(testDataDir()+"/%s-svc-%s", ca, name) - cert, err := tls.LoadX509KeyPair(prefix+".cert.pem", prefix+".key.pem") - require.Nil(t, err) - return cert -} - -// TestTLSConfig returns a *tls.Config suitable for use during tests. -func TestTLSConfig(t testing.T, ca, svc string) *tls.Config { - t.Helper() - return &tls.Config{ - Certificates: []tls.Certificate{TestSvcKeyPair(t, ca, svc)}, - MinVersion: tls.VersionTLS12, - RootCAs: TestCAPool(t, ca), - ClientCAs: TestCAPool(t, ca), - ClientAuth: tls.RequireAndVerifyClientCert, - // In real life we'll need to do this too since otherwise Go will attempt to - // verify DNS names match DNS SAN/CN which we don't want, but we'll hook - // VerifyPeerCertificates and do our own x509 path validation as well as - // AuthZ upcall. For now we are just testing the basic proxy mechanism so - // this is fine. - InsecureSkipVerify: true, - } -} - -// TestAuther is a simple Auther implementation that does nothing but what you -// tell it to! -type TestAuther struct { - // Return is the value returned from an Auth() call. Set it to nil to have all - // certificates unconditionally accepted or a value to have them fail. 
- Return error -} - -// Auth implements Auther -func (a *TestAuther) Auth(rawCerts [][]byte, - verifiedChains [][]*x509.Certificate) error { - return a.Return -} -*/ diff --git a/proxy/config.go b/proxy/config.go deleted file mode 100644 index a5958135a..000000000 --- a/proxy/config.go +++ /dev/null @@ -1,111 +0,0 @@ -package proxy - -import ( - "io/ioutil" - - "github.com/hashicorp/consul/api" - "github.com/hashicorp/hcl" -) - -// Config is the publicly configurable state for an entire proxy instance. It's -// mostly used as the format for the local-file config mode which is mostly for -// dev/testing. In normal use, different parts of this config are pulled from -// different locations (e.g. command line, agent config endpoint, agent -// certificate endpoints). -type Config struct { - // ProxyID is the identifier for this proxy as registered in Consul. It's only - // guaranteed to be unique per agent. - ProxyID string `json:"proxy_id" hcl:"proxy_id"` - - // Token is the authentication token provided for queries to the local agent. - Token string `json:"token" hcl:"token"` - - // ProxiedServiceName is the name of the service this proxy is representing. - ProxiedServiceName string `json:"proxied_service_name" hcl:"proxied_service_name"` - - // ProxiedServiceNamespace is the namespace of the service this proxy is - // representing. - ProxiedServiceNamespace string `json:"proxied_service_namespace" hcl:"proxied_service_namespace"` - - // PublicListener configures the mTLS listener. - PublicListener PublicListenerConfig `json:"public_listener" hcl:"public_listener"` - - // Upstreams configures outgoing proxies for remote connect services. - Upstreams []UpstreamConfig `json:"upstreams" hcl:"upstreams"` - - // DevCAFile allows passing the file path to PEM encoded root certificate - // bundle to be used in development instead of the ones supplied by Connect. 
- DevCAFile string `json:"dev_ca_file" hcl:"dev_ca_file"` - - // DevServiceCertFile allows passing the file path to PEM encoded service - // certificate (client and server) to be used in development instead of the - // ones supplied by Connect. - DevServiceCertFile string `json:"dev_service_cert_file" hcl:"dev_service_cert_file"` - - // DevServiceKeyFile allows passing the file path to PEM encoded service - // private key to be used in development instead of the ones supplied by - // Connect. - DevServiceKeyFile string `json:"dev_service_key_file" hcl:"dev_service_key_file"` -} - -// ConfigWatcher is a simple interface to allow dynamic configurations from -// plugggable sources. -type ConfigWatcher interface { - // Watch returns a channel that will deliver new Configs if something external - // provokes it. - Watch() <-chan *Config -} - -// StaticConfigWatcher is a simple ConfigWatcher that delivers a static Config -// once and then never changes it. -type StaticConfigWatcher struct { - ch chan *Config -} - -// NewStaticConfigWatcher returns a ConfigWatcher for a config that never -// changes. It assumes only one "watcher" will ever call Watch. The config is -// delivered on the first call but will never be delivered again to allow -// callers to call repeatedly (e.g. select in a loop). -func NewStaticConfigWatcher(cfg *Config) *StaticConfigWatcher { - sc := &StaticConfigWatcher{ - // Buffer it so we can queue up the config for first delivery. - ch: make(chan *Config, 1), - } - sc.ch <- cfg - return sc -} - -// Watch implements ConfigWatcher on a static configuration for compatibility. -// It returns itself on the channel once and then leaves it open. -func (sc *StaticConfigWatcher) Watch() <-chan *Config { - return sc.ch -} - -// ParseConfigFile parses proxy configuration form a file for local dev. 
-func ParseConfigFile(filename string) (*Config, error) { - bs, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - - var cfg Config - - err = hcl.Unmarshal(bs, &cfg) - if err != nil { - return nil, err - } - - return &cfg, nil -} - -// AgentConfigWatcher watches the local Consul agent for proxy config changes. -type AgentConfigWatcher struct { - client *api.Client -} - -// Watch implements ConfigWatcher. -func (w *AgentConfigWatcher) Watch() <-chan *Config { - watch := make(chan *Config) - // TODO implement me - return watch -} diff --git a/proxy/config_test.go b/proxy/config_test.go deleted file mode 100644 index 89287d573..000000000 --- a/proxy/config_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package proxy - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestParseConfigFile(t *testing.T) { - cfg, err := ParseConfigFile("testdata/config-kitchensink.hcl") - require.Nil(t, err) - - expect := &Config{ - ProxyID: "foo", - Token: "11111111-2222-3333-4444-555555555555", - ProxiedServiceName: "web", - ProxiedServiceNamespace: "default", - PublicListener: PublicListenerConfig{ - BindAddress: ":9999", - LocalServiceAddress: "127.0.0.1:5000", - LocalConnectTimeoutMs: 1000, - HandshakeTimeoutMs: 5000, - }, - Upstreams: []UpstreamConfig{ - { - LocalBindAddress: "127.0.0.1:6000", - DestinationName: "db", - DestinationNamespace: "default", - DestinationType: "service", - ConnectTimeoutMs: 10000, - }, - { - LocalBindAddress: "127.0.0.1:6001", - DestinationName: "geo-cache", - DestinationNamespace: "default", - DestinationType: "prepared_query", - ConnectTimeoutMs: 10000, - }, - }, - DevCAFile: "connect/testdata/ca1-ca-consul-internal.cert.pem", - DevServiceCertFile: "connect/testdata/ca1-svc-web.cert.pem", - DevServiceKeyFile: "connect/testdata/ca1-svc-web.key.pem", - } - - require.Equal(t, expect, cfg) -} diff --git a/proxy/conn.go b/proxy/conn.go deleted file mode 100644 index dfad81db7..000000000 --- a/proxy/conn.go +++ 
/dev/null @@ -1,48 +0,0 @@ -package proxy - -import ( - "io" - "net" - "sync/atomic" -) - -// Conn represents a single proxied TCP connection. -type Conn struct { - src, dst net.Conn - stopping int32 -} - -// NewConn returns a conn joining the two given net.Conn -func NewConn(src, dst net.Conn) *Conn { - return &Conn{ - src: src, - dst: dst, - stopping: 0, - } -} - -// Close closes both connections. -func (c *Conn) Close() { - atomic.StoreInt32(&c.stopping, 1) - c.src.Close() - c.dst.Close() -} - -// CopyBytes will continuously copy bytes in both directions between src and dst -// until either connection is closed. -func (c *Conn) CopyBytes() error { - defer c.Close() - - go func() { - // Need this since Copy is only guaranteed to stop when it's source reader - // (second arg) hits EOF or error but either conn might close first possibly - // causing this goroutine to exit but not the outer one. See TestSc - //defer c.Close() - io.Copy(c.dst, c.src) - }() - _, err := io.Copy(c.src, c.dst) - if atomic.LoadInt32(&c.stopping) == 1 { - return nil - } - return err -} diff --git a/proxy/conn_test.go b/proxy/conn_test.go deleted file mode 100644 index ac907238d..000000000 --- a/proxy/conn_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package proxy - -import ( - "bufio" - "net" - "testing" - - "github.com/stretchr/testify/require" -) - -// testConnSetup listens on a random TCP port and passes the accepted net.Conn -// back to test code on returned channel. It then creates a source and -// destination Conn. 
And a cleanup func -func testConnSetup(t *testing.T) (net.Conn, net.Conn, func()) { - t.Helper() - - l, err := net.Listen("tcp", "localhost:0") - require.Nil(t, err) - - ch := make(chan net.Conn, 1) - go func(ch chan net.Conn) { - src, err := l.Accept() - require.Nil(t, err) - ch <- src - }(ch) - - dst, err := net.Dial("tcp", l.Addr().String()) - require.Nil(t, err) - - src := <-ch - - stopper := func() { - l.Close() - src.Close() - dst.Close() - } - - return src, dst, stopper -} - -func TestConn(t *testing.T) { - src, dst, stop := testConnSetup(t) - defer stop() - - c := NewConn(src, dst) - - retCh := make(chan error, 1) - go func() { - retCh <- c.CopyBytes() - }() - - srcR := bufio.NewReader(src) - dstR := bufio.NewReader(dst) - - _, err := src.Write([]byte("ping 1\n")) - require.Nil(t, err) - _, err = dst.Write([]byte("ping 2\n")) - require.Nil(t, err) - - got, err := dstR.ReadString('\n') - require.Equal(t, "ping 1\n", got) - - got, err = srcR.ReadString('\n') - require.Equal(t, "ping 2\n", got) - - _, err = src.Write([]byte("pong 1\n")) - require.Nil(t, err) - _, err = dst.Write([]byte("pong 2\n")) - require.Nil(t, err) - - got, err = dstR.ReadString('\n') - require.Equal(t, "pong 1\n", got) - - got, err = srcR.ReadString('\n') - require.Equal(t, "pong 2\n", got) - - c.Close() - - ret := <-retCh - require.Nil(t, ret, "Close() should not cause error return") -} - -func TestConnSrcClosing(t *testing.T) { - src, dst, stop := testConnSetup(t) - defer stop() - - c := NewConn(src, dst) - retCh := make(chan error, 1) - go func() { - retCh <- c.CopyBytes() - }() - - // If we close the src conn, we expect CopyBytes to return and src to be - // closed too. No good way to assert that the conn is closed really other than - // assume the retCh receive will hand unless CopyBytes exits and that - // CopyBytes defers Closing both. i.e. if this test doesn't time out it's - // good! 
- src.Close() - <-retCh -} - -func TestConnDstClosing(t *testing.T) { - src, dst, stop := testConnSetup(t) - defer stop() - - c := NewConn(src, dst) - retCh := make(chan error, 1) - go func() { - retCh <- c.CopyBytes() - }() - - // If we close the dst conn, we expect CopyBytes to return and src to be - // closed too. No good way to assert that the conn is closed really other than - // assume the retCh receive will hand unless CopyBytes exits and that - // CopyBytes defers Closing both. i.e. if this test doesn't time out it's - // good! - dst.Close() - <-retCh -} diff --git a/proxy/manager.go b/proxy/manager.go deleted file mode 100644 index c22a1b7ff..000000000 --- a/proxy/manager.go +++ /dev/null @@ -1,140 +0,0 @@ -package proxy - -import ( - "errors" - "log" - "os" -) - -var ( - // ErrExists is the error returned when adding a proxy that exists already. - ErrExists = errors.New("proxy with that name already exists") - // ErrNotExist is the error returned when removing a proxy that doesn't exist. - ErrNotExist = errors.New("proxy with that name doesn't exist") -) - -// Manager implements the logic for configuring and running a set of proxiers. -// Typically it's used to run one PublicListener and zero or more Upstreams. -type Manager struct { - ch chan managerCmd - - // stopped is used to signal the caller of StopAll when the run loop exits - // after stopping all runners. It's only closed. - stopped chan struct{} - - // runners holds the currently running instances. It should only by accessed - // from within the `run` goroutine. - runners map[string]*Runner - - logger *log.Logger -} - -type managerCmd struct { - name string - p Proxier - errCh chan error -} - -// NewManager creates a manager of proxier instances. -func NewManager() *Manager { - return NewManagerWithLogger(log.New(os.Stdout, "", log.LstdFlags)) -} - -// NewManagerWithLogger creates a manager of proxier instances with the -// specified logger. 
-func NewManagerWithLogger(logger *log.Logger) *Manager { - m := &Manager{ - ch: make(chan managerCmd), - stopped: make(chan struct{}), - runners: make(map[string]*Runner), - logger: logger, - } - go m.run() - return m -} - -// RunProxier starts a new Proxier instance in the manager. It is safe to call -// from separate goroutines. If there is already a running proxy with the same -// name it returns ErrExists. -func (m *Manager) RunProxier(name string, p Proxier) error { - cmd := managerCmd{ - name: name, - p: p, - errCh: make(chan error), - } - m.ch <- cmd - return <-cmd.errCh -} - -// StopProxier stops a Proxier instance by name. It is safe to call from -// separate goroutines. If the instance with that name doesn't exist it returns -// ErrNotExist. -func (m *Manager) StopProxier(name string) error { - cmd := managerCmd{ - name: name, - p: nil, - errCh: make(chan error), - } - m.ch <- cmd - return <-cmd.errCh -} - -// StopAll shuts down the manager instance and stops all running proxies. It is -// safe to call from any goroutine but must only be called once. -func (m *Manager) StopAll() error { - close(m.ch) - <-m.stopped - return nil -} - -// run is the main manager processing loop. It keeps all actions in a single -// goroutine triggered by channel commands to keep it simple to reason about -// lifecycle events for each proxy. -func (m *Manager) run() { - defer close(m.stopped) - - // range over channel blocks and loops on each message received until channel - // is closed. - for cmd := range m.ch { - if cmd.p == nil { - m.remove(&cmd) - } else { - m.add(&cmd) - } - } - - // Shutting down, Stop all the runners - for _, r := range m.runners { - r.Stop() - } -} - -// add the named proxier instance and stop it. Should only be called from the -// run loop. 
-func (m *Manager) add(cmd *managerCmd) { - // Check existing - if _, ok := m.runners[cmd.name]; ok { - cmd.errCh <- ErrExists - return - } - - // Start new runner - r := NewRunnerWithLogger(cmd.name, cmd.p, m.logger) - m.runners[cmd.name] = r - go r.Listen() - cmd.errCh <- nil -} - -// remove the named proxier instance and stop it. Should only be called from the -// run loop. -func (m *Manager) remove(cmd *managerCmd) { - // Fetch proxier by name - r, ok := m.runners[cmd.name] - if !ok { - cmd.errCh <- ErrNotExist - return - } - err := r.Stop() - delete(m.runners, cmd.name) - cmd.errCh <- err -} diff --git a/proxy/manager_test.go b/proxy/manager_test.go deleted file mode 100644 index d4fa8c5b4..000000000 --- a/proxy/manager_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package proxy - -import ( - "fmt" - "net" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestManager(t *testing.T) { - m := NewManager() - - addrs := TestLocalBindAddrs(t, 3) - - for i := 0; i < len(addrs); i++ { - name := fmt.Sprintf("proxier-%d", i) - // Run proxy - err := m.RunProxier(name, &TestProxier{ - Addr: addrs[i], - Prefix: name + ": ", - }) - require.Nil(t, err) - } - - // Make sure each one is echoing correctly now all are running - for i := 0; i < len(addrs); i++ { - conn, err := net.Dial("tcp", addrs[i]) - require.Nil(t, err) - TestEchoConn(t, conn, fmt.Sprintf("proxier-%d: ", i)) - conn.Close() - } - - // Stop first proxier - err := m.StopProxier("proxier-0") - require.Nil(t, err) - - // We should fail to dial it now. Note that Runner.Stop is synchronous so - // there should be a strong guarantee that it's stopped listening by now. 
- _, err = net.Dial("tcp", addrs[0]) - require.NotNil(t, err) - - // Rest of proxiers should still be running - for i := 1; i < len(addrs); i++ { - conn, err := net.Dial("tcp", addrs[i]) - require.Nil(t, err) - TestEchoConn(t, conn, fmt.Sprintf("proxier-%d: ", i)) - conn.Close() - } - - // Stop non-existent proxier should fail - err = m.StopProxier("foo") - require.Equal(t, ErrNotExist, err) - - // Add already-running proxier should fail - err = m.RunProxier("proxier-1", &TestProxier{}) - require.Equal(t, ErrExists, err) - - // But rest should stay running - for i := 1; i < len(addrs); i++ { - conn, err := net.Dial("tcp", addrs[i]) - require.Nil(t, err) - TestEchoConn(t, conn, fmt.Sprintf("proxier-%d: ", i)) - conn.Close() - } - - // StopAll should stop everything - err = m.StopAll() - require.Nil(t, err) - - // Verify failures - for i := 0; i < len(addrs); i++ { - _, err = net.Dial("tcp", addrs[i]) - require.NotNilf(t, err, "proxier-%d should not be running", i) - } -} diff --git a/proxy/proxier.go b/proxy/proxier.go deleted file mode 100644 index 23940c6ad..000000000 --- a/proxy/proxier.go +++ /dev/null @@ -1,32 +0,0 @@ -package proxy - -import ( - "errors" - "net" -) - -// ErrStopped is returned for operations on a proxy that is stopped -var ErrStopped = errors.New("stopped") - -// ErrStopping is returned for operations on a proxy that is stopping -var ErrStopping = errors.New("stopping") - -// Proxier is an interface for managing different proxy implementations in a -// standard way. We have at least two different types of Proxier implementations -// needed: one for the incoming mTLS -> local proxy and another for each -// "upstream" service the app needs to talk out to (which listens locally and -// performs service discovery to find a suitable remote service). 
-type Proxier interface { - // Listener returns a net.Listener that is open and ready for use, the Proxy - // manager will arrange accepting new connections from it and passing them to - // the handler method. - Listener() (net.Listener, error) - - // HandleConn is called for each incoming connection accepted by the listener. - // It is called in it's own goroutine and should run until it hits an error. - // When stopping the Proxier, the manager will simply close the conn provided - // and expects an error to be eventually returned. Any time spent not blocked - // on the passed conn (for example doing service discovery) should therefore - // be time-bound so that shutdown can't stall forever. - HandleConn(conn net.Conn) error -} diff --git a/proxy/proxy.go b/proxy/proxy.go deleted file mode 100644 index a293466b8..000000000 --- a/proxy/proxy.go +++ /dev/null @@ -1,112 +0,0 @@ -package proxy - -import ( - "context" - "log" - - "github.com/hashicorp/consul/api" - "github.com/hashicorp/consul/connect" -) - -// Proxy implements the built-in connect proxy. -type Proxy struct { - proxyID, token string - - connect connect.Client - manager *Manager - cfgWatch ConfigWatcher - cfg *Config - - logger *log.Logger -} - -// NewFromConfigFile returns a Proxy instance configured just from a local file. -// This is intended mostly for development and bypasses the normal mechanisms -// for fetching config and certificates from the local agent. 
-func NewFromConfigFile(client *api.Client, filename string, - logger *log.Logger) (*Proxy, error) { - cfg, err := ParseConfigFile(filename) - if err != nil { - return nil, err - } - - connect, err := connect.NewInsecureDevClientWithLocalCerts(client, - cfg.DevCAFile, cfg.DevServiceCertFile, cfg.DevServiceKeyFile) - if err != nil { - return nil, err - } - - p := &Proxy{ - proxyID: cfg.ProxyID, - connect: connect, - manager: NewManagerWithLogger(logger), - cfgWatch: NewStaticConfigWatcher(cfg), - logger: logger, - } - return p, nil -} - -// New returns a Proxy with the given id, consuming the provided (configured) -// agent. It is ready to Run(). -func New(client *api.Client, proxyID string, logger *log.Logger) (*Proxy, error) { - p := &Proxy{ - proxyID: proxyID, - connect: connect.NewClient(client), - manager: NewManagerWithLogger(logger), - cfgWatch: &AgentConfigWatcher{client: client}, - logger: logger, - } - return p, nil -} - -// Run the proxy instance until a fatal error occurs or ctx is cancelled. 
-func (p *Proxy) Run(ctx context.Context) error { - defer p.manager.StopAll() - - // Watch for config changes (initial setup happens on first "change") - for { - select { - case newCfg := <-p.cfgWatch.Watch(): - p.logger.Printf("[DEBUG] got new config") - if p.cfg == nil { - // Initial setup - err := p.startPublicListener(ctx, newCfg.PublicListener) - if err != nil { - return err - } - } - - // TODO add/remove upstreams properly based on a diff with current - for _, uc := range newCfg.Upstreams { - uc.Client = p.connect - uc.logger = p.logger - err := p.manager.RunProxier(uc.String(), NewUpstream(uc)) - if err == ErrExists { - continue - } - if err != nil { - p.logger.Printf("[ERR] failed to start upstream %s: %s", uc.String(), - err) - } - } - p.cfg = newCfg - - case <-ctx.Done(): - return nil - } - } -} - -func (p *Proxy) startPublicListener(ctx context.Context, - cfg PublicListenerConfig) error { - - // Get TLS creds - tlsCfg, err := p.connect.ServerTLSConfig() - if err != nil { - return err - } - cfg.TLSConfig = tlsCfg - - cfg.logger = p.logger - return p.manager.RunProxier("public_listener", NewPublicListener(cfg)) -} diff --git a/proxy/public_listener.go b/proxy/public_listener.go deleted file mode 100644 index 1942992cf..000000000 --- a/proxy/public_listener.go +++ /dev/null @@ -1,119 +0,0 @@ -package proxy - -import ( - "crypto/tls" - "fmt" - "log" - "net" - "os" - "time" -) - -// PublicListener provides an implementation of Proxier that listens for inbound -// mTLS connections, authenticates them with the local agent, and if successful -// forwards them to the locally configured app. -type PublicListener struct { - cfg *PublicListenerConfig -} - -// PublicListenerConfig contains the most basic parameters needed to start the -// proxy. -// -// Note that the tls.Configs here are expected to be "dynamic" in the sense that -// they are expected to use `GetConfigForClient` (added in go 1.8) to return -// dynamic config per connection if required. 
-type PublicListenerConfig struct { - // BindAddress is the host:port the public mTLS listener will bind to. - BindAddress string `json:"bind_address" hcl:"bind_address"` - - // LocalServiceAddress is the host:port for the proxied application. This - // should be on loopback or otherwise protected as it's plain TCP. - LocalServiceAddress string `json:"local_service_address" hcl:"local_service_address"` - - // TLSConfig config is used for the mTLS listener. - TLSConfig *tls.Config - - // LocalConnectTimeout is the timeout for establishing connections with the - // local backend. Defaults to 1000 (1s). - LocalConnectTimeoutMs int `json:"local_connect_timeout_ms" hcl:"local_connect_timeout_ms"` - - // HandshakeTimeout is the timeout for incoming mTLS clients to complete a - // handshake. Setting this low avoids DOS by malicious clients holding - // resources open. Defaults to 10000 (10s). - HandshakeTimeoutMs int `json:"handshake_timeout_ms" hcl:"handshake_timeout_ms"` - - logger *log.Logger -} - -func (plc *PublicListenerConfig) applyDefaults() { - if plc.LocalConnectTimeoutMs == 0 { - plc.LocalConnectTimeoutMs = 1000 - } - if plc.HandshakeTimeoutMs == 0 { - plc.HandshakeTimeoutMs = 10000 - } - if plc.logger == nil { - plc.logger = log.New(os.Stdout, "", log.LstdFlags) - } -} - -// NewPublicListener returns a proxy instance with the given config. 
-func NewPublicListener(cfg PublicListenerConfig) *PublicListener { - p := &PublicListener{ - cfg: &cfg, - } - p.cfg.applyDefaults() - return p -} - -// Listener implements Proxier -func (p *PublicListener) Listener() (net.Listener, error) { - l, err := net.Listen("tcp", p.cfg.BindAddress) - if err != nil { - return nil, err - } - - return tls.NewListener(l, p.cfg.TLSConfig), nil -} - -// HandleConn implements Proxier -func (p *PublicListener) HandleConn(conn net.Conn) error { - defer conn.Close() - tlsConn, ok := conn.(*tls.Conn) - if !ok { - return fmt.Errorf("non-TLS conn") - } - - // Setup Handshake timer - to := time.Duration(p.cfg.HandshakeTimeoutMs) * time.Millisecond - err := tlsConn.SetDeadline(time.Now().Add(to)) - if err != nil { - return err - } - - // Force TLS handshake so that abusive clients can't hold resources open - err = tlsConn.Handshake() - if err != nil { - return err - } - - // Handshake OK, clear the deadline - err = tlsConn.SetDeadline(time.Time{}) - if err != nil { - return err - } - - // Huzzah, open a connection to the backend and let them talk - // TODO maybe add a connection pool here? - to = time.Duration(p.cfg.LocalConnectTimeoutMs) * time.Millisecond - dst, err := net.DialTimeout("tcp", p.cfg.LocalServiceAddress, to) - if err != nil { - return fmt.Errorf("failed dialling local app: %s", err) - } - - p.cfg.logger.Printf("[DEBUG] accepted connection from %s", conn.RemoteAddr()) - - // Hand conn and dst over to Conn to manage the byte copying. 
- c := NewConn(conn, dst) - return c.CopyBytes() -} diff --git a/proxy/public_listener_test.go b/proxy/public_listener_test.go deleted file mode 100644 index 83e84d658..000000000 --- a/proxy/public_listener_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package proxy - -import ( - "crypto/tls" - "testing" - - "github.com/hashicorp/consul/connect" - "github.com/stretchr/testify/require" -) - -func TestPublicListener(t *testing.T) { - addrs := TestLocalBindAddrs(t, 2) - - cfg := PublicListenerConfig{ - BindAddress: addrs[0], - LocalServiceAddress: addrs[1], - HandshakeTimeoutMs: 100, - LocalConnectTimeoutMs: 100, - TLSConfig: connect.TestTLSConfig(t, "ca1", "web"), - } - - testApp, err := NewTestTCPServer(t, cfg.LocalServiceAddress) - require.Nil(t, err) - defer testApp.Close() - - p := NewPublicListener(cfg) - - // Run proxy - r := NewRunner("test", p) - go r.Listen() - defer r.Stop() - - // Proxy and backend are running, play the part of a TLS client using same - // cert for now. - conn, err := tls.Dial("tcp", cfg.BindAddress, connect.TestTLSConfig(t, "ca1", "web")) - require.Nil(t, err) - TestEchoConn(t, conn, "") -} diff --git a/proxy/runner.go b/proxy/runner.go deleted file mode 100644 index b559b22b7..000000000 --- a/proxy/runner.go +++ /dev/null @@ -1,118 +0,0 @@ -package proxy - -import ( - "log" - "net" - "os" - "sync" - "sync/atomic" -) - -// Runner manages the lifecycle of one Proxier. -type Runner struct { - name string - p Proxier - - // Stopping is if a flag that is updated and read atomically - stopping int32 - stopCh chan struct{} - // wg is used to signal back to Stop when all goroutines have stopped - wg sync.WaitGroup - - logger *log.Logger -} - -// NewRunner returns a Runner ready to Listen. -func NewRunner(name string, p Proxier) *Runner { - return NewRunnerWithLogger(name, p, log.New(os.Stdout, "", log.LstdFlags)) -} - -// NewRunnerWithLogger returns a Runner ready to Listen using the specified -// log.Logger. 
-func NewRunnerWithLogger(name string, p Proxier, logger *log.Logger) *Runner { - return &Runner{ - name: name, - p: p, - stopCh: make(chan struct{}), - logger: logger, - } -} - -// Listen starts the proxier instance. It blocks until a fatal error occurs or -// Stop() is called. -func (r *Runner) Listen() error { - if atomic.LoadInt32(&r.stopping) == 1 { - return ErrStopped - } - - l, err := r.p.Listener() - if err != nil { - return err - } - r.logger.Printf("[INFO] proxy: %s listening on %s", r.name, l.Addr().String()) - - // Run goroutine that will close listener on stop - go func() { - <-r.stopCh - l.Close() - r.logger.Printf("[INFO] proxy: %s shutdown", r.name) - }() - - // Add one for the accept loop - r.wg.Add(1) - defer r.wg.Done() - - for { - conn, err := l.Accept() - if err != nil { - if atomic.LoadInt32(&r.stopping) == 1 { - return nil - } - return err - } - - go r.handle(conn) - } - - return nil -} - -func (r *Runner) handle(conn net.Conn) { - r.wg.Add(1) - defer r.wg.Done() - - // Start a goroutine that will watch for the Runner stopping and close the - // conn, or watch for the Proxier closing (e.g. because other end hung up) and - // stop the goroutine to avoid leaks - doneCh := make(chan struct{}) - defer close(doneCh) - - go func() { - select { - case <-r.stopCh: - r.logger.Printf("[DEBUG] proxy: %s: terminating conn", r.name) - conn.Close() - return - case <-doneCh: - // Connection is already closed, this goroutine not needed any more - return - } - }() - - err := r.p.HandleConn(conn) - if err != nil { - r.logger.Printf("[DEBUG] proxy: %s: connection terminated: %s", r.name, err) - } else { - r.logger.Printf("[DEBUG] proxy: %s: connection terminated", r.name) - } -} - -// Stop stops the Listener and closes any active connections immediately. 
-func (r *Runner) Stop() error { - old := atomic.SwapInt32(&r.stopping, 1) - if old == 0 { - close(r.stopCh) - } - r.wg.Wait() - return nil -} diff --git a/proxy/testdata/config-kitchensink.hcl b/proxy/testdata/config-kitchensink.hcl deleted file mode 100644 index 766928353..000000000 --- a/proxy/testdata/config-kitchensink.hcl +++ /dev/null @@ -1,36 +0,0 @@ -# Example proxy config with everything specified - -proxy_id = "foo" -token = "11111111-2222-3333-4444-555555555555" - -proxied_service_name = "web" -proxied_service_namespace = "default" - -# Assumes running consul in dev mode from the repo root... -dev_ca_file = "connect/testdata/ca1-ca-consul-internal.cert.pem" -dev_service_cert_file = "connect/testdata/ca1-svc-web.cert.pem" -dev_service_key_file = "connect/testdata/ca1-svc-web.key.pem" - -public_listener { - bind_address = ":9999" - local_service_address = "127.0.0.1:5000" - local_connect_timeout_ms = 1000 - handshake_timeout_ms = 5000 -} - -upstreams = [ - { - local_bind_address = "127.0.0.1:6000" - destination_name = "db" - destination_namespace = "default" - destination_type = "service" - connect_timeout_ms = 10000 - }, - { - local_bind_address = "127.0.0.1:6001" - destination_name = "geo-cache" - destination_namespace = "default" - destination_type = "prepared_query" - connect_timeout_ms = 10000 - } -] diff --git a/proxy/testing.go b/proxy/testing.go deleted file mode 100644 index bd132b77f..000000000 --- a/proxy/testing.go +++ /dev/null @@ -1,170 +0,0 @@ -package proxy - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "log" - "net" - "sync/atomic" - - "github.com/hashicorp/consul/lib/freeport" - "github.com/mitchellh/go-testing-interface" - "github.com/stretchr/testify/require" -) - -// TestLocalBindAddrs returns n localhost address:port strings with free ports -// for binding test listeners to. 
-func TestLocalBindAddrs(t testing.T, n int) []string { - ports := freeport.GetT(t, n) - addrs := make([]string, n) - for i, p := range ports { - addrs[i] = fmt.Sprintf("localhost:%d", p) - } - return addrs -} - -// TestTCPServer is a simple TCP echo server for use during tests. -type TestTCPServer struct { - l net.Listener - stopped int32 - accepted, closed, active int32 -} - -// NewTestTCPServer opens as a listening socket on the given address and returns -// a TestTCPServer serving requests to it. The server is already started and can -// be stopped by calling Close(). -func NewTestTCPServer(t testing.T, addr string) (*TestTCPServer, error) { - l, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - log.Printf("test tcp server listening on %s", addr) - s := &TestTCPServer{ - l: l, - } - go s.accept() - return s, nil -} - -// Close stops the server -func (s *TestTCPServer) Close() { - atomic.StoreInt32(&s.stopped, 1) - if s.l != nil { - s.l.Close() - } -} - -func (s *TestTCPServer) accept() error { - for { - conn, err := s.l.Accept() - if err != nil { - if atomic.LoadInt32(&s.stopped) == 1 { - log.Printf("test tcp echo server %s stopped", s.l.Addr()) - return nil - } - log.Printf("test tcp echo server %s failed: %s", s.l.Addr(), err) - return err - } - - atomic.AddInt32(&s.accepted, 1) - atomic.AddInt32(&s.active, 1) - - go func(c net.Conn) { - io.Copy(c, c) - atomic.AddInt32(&s.closed, 1) - atomic.AddInt32(&s.active, -1) - }(conn) - } -} - -// TestEchoConn attempts to write some bytes to conn and expects to read them -// back within a short timeout (10ms). If prefix is not empty we expect it to be -// poresent at the start of all echoed responses (for example to distinguish -// between multiple echo server instances). 
-func TestEchoConn(t testing.T, conn net.Conn, prefix string) { - t.Helper() - - // Write some bytes and read them back - n, err := conn.Write([]byte("Hello World")) - require.Equal(t, 11, n) - require.Nil(t, err) - - expectLen := 11 + len(prefix) - - buf := make([]byte, expectLen) - // read until our buffer is full - it might be separate packets if prefix is - // in use. - got := 0 - for got < expectLen { - n, err = conn.Read(buf[got:]) - require.Nil(t, err) - got += n - } - require.Equal(t, expectLen, got) - require.Equal(t, prefix+"Hello World", string(buf[:])) -} - -// TestConnectClient is a testing mock that implements connect.Client but -// stubs the methods to make testing simpler. -type TestConnectClient struct { - Server *TestTCPServer - TLSConfig *tls.Config - Calls []callTuple -} -type callTuple struct { - typ, ns, name string -} - -// ServerTLSConfig implements connect.Client -func (c *TestConnectClient) ServerTLSConfig() (*tls.Config, error) { - return c.TLSConfig, nil -} - -// DialService implements connect.Client -func (c *TestConnectClient) DialService(ctx context.Context, namespace, - name string) (net.Conn, error) { - - c.Calls = append(c.Calls, callTuple{"service", namespace, name}) - - // Actually returning a vanilla TCP conn not a TLS one but the caller - // shouldn't care for tests since this interface should hide all the TLS - // config and verification. - return net.Dial("tcp", c.Server.l.Addr().String()) -} - -// DialPreparedQuery implements connect.Client -func (c *TestConnectClient) DialPreparedQuery(ctx context.Context, namespace, - name string) (net.Conn, error) { - - c.Calls = append(c.Calls, callTuple{"prepared_query", namespace, name}) - - // Actually returning a vanilla TCP conn not a TLS one but the caller - // shouldn't care for tests since this interface should hide all the TLS - // config and verification. 
- return net.Dial("tcp", c.Server.l.Addr().String()) -} - -// TestProxier is a simple Proxier instance that can be used in tests. -type TestProxier struct { - // Addr to listen on - Addr string - // Prefix to write first before echoing on new connections - Prefix string -} - -// Listener implements Proxier -func (p *TestProxier) Listener() (net.Listener, error) { - return net.Listen("tcp", p.Addr) -} - -// HandleConn implements Proxier -func (p *TestProxier) HandleConn(conn net.Conn) error { - _, err := conn.Write([]byte(p.Prefix)) - if err != nil { - return err - } - _, err = io.Copy(conn, conn) - return err -} diff --git a/proxy/upstream.go b/proxy/upstream.go deleted file mode 100644 index 1101624be..000000000 --- a/proxy/upstream.go +++ /dev/null @@ -1,261 +0,0 @@ -package proxy - -import ( - "context" - "fmt" - "log" - "net" - "os" - "time" - - "github.com/hashicorp/consul/connect" -) - -// Upstream provides an implementation of Proxier that listens for inbound TCP -// connections on the private network shared with the proxied application -// (typically localhost). For each accepted connection from the app, it uses the -// connect.Client to discover an instance and connect over mTLS. -type Upstream struct { - cfg *UpstreamConfig -} - -// UpstreamConfig configures the upstream -type UpstreamConfig struct { - // Client is the connect client to perform discovery with - Client connect.Client - - // LocalAddress is the host:port to listen on for local app connections. - LocalBindAddress string `json:"local_bind_address" hcl:"local_bind_address,attr"` - - // DestinationName is the service name of the destination. - DestinationName string `json:"destination_name" hcl:"destination_name,attr"` - - // DestinationNamespace is the namespace of the destination. - DestinationNamespace string `json:"destination_namespace" hcl:"destination_namespace,attr"` - - // DestinationType determines which service discovery method is used to find a - // candidate instance to connect to. 
- DestinationType string `json:"destination_type" hcl:"destination_type,attr"` - - // ConnectTimeout is the timeout for establishing connections with the remote - // service instance. Defaults to 10,000 (10s). - ConnectTimeoutMs int `json:"connect_timeout_ms" hcl:"connect_timeout_ms,attr"` - - logger *log.Logger -} - -func (uc *UpstreamConfig) applyDefaults() { - if uc.ConnectTimeoutMs == 0 { - uc.ConnectTimeoutMs = 10000 - } - if uc.logger == nil { - uc.logger = log.New(os.Stdout, "", log.LstdFlags) - } -} - -// String returns a string that uniquely identifies the Upstream. Used for -// identifying the upstream in log output and map keys. -func (uc *UpstreamConfig) String() string { - return fmt.Sprintf("%s->%s:%s/%s", uc.LocalBindAddress, uc.DestinationType, - uc.DestinationNamespace, uc.DestinationName) -} - -// NewUpstream returns an outgoing proxy instance with the given config. -func NewUpstream(cfg UpstreamConfig) *Upstream { - u := &Upstream{ - cfg: &cfg, - } - u.cfg.applyDefaults() - return u -} - -// String returns a string that uniquely identifies the Upstream. Used for -// identifying the upstream in log output and map keys. -func (u *Upstream) String() string { - return u.cfg.String() -} - -// Listener implements Proxier -func (u *Upstream) Listener() (net.Listener, error) { - return net.Listen("tcp", u.cfg.LocalBindAddress) -} - -// HandleConn implements Proxier -func (u *Upstream) HandleConn(conn net.Conn) error { - defer conn.Close() - - // Discover destination instance - dst, err := u.discoverAndDial() - if err != nil { - return err - } - - // Hand conn and dst over to Conn to manage the byte copying. 
- c := NewConn(conn, dst) - return c.CopyBytes() -} - -func (u *Upstream) discoverAndDial() (net.Conn, error) { - to := time.Duration(u.cfg.ConnectTimeoutMs) * time.Millisecond - ctx, cancel := context.WithTimeout(context.Background(), to) - defer cancel() - - switch u.cfg.DestinationType { - case "service": - return u.cfg.Client.DialService(ctx, u.cfg.DestinationNamespace, - u.cfg.DestinationName) - - case "prepared_query": - return u.cfg.Client.DialPreparedQuery(ctx, u.cfg.DestinationNamespace, - u.cfg.DestinationName) - - default: - return nil, fmt.Errorf("invalid destination type %s", u.cfg.DestinationType) - } -} - -/* -// Upstream represents a service that the proxied application needs to connect -// out to. It provides a dedication local TCP listener (usually listening only -// on loopback) and forwards incoming connections to the proxy to handle. -type Upstream struct { - cfg *UpstreamConfig - wg sync.WaitGroup - - proxy *Proxy - fatalErr error -} - -// NewUpstream creates an upstream ready to attach to a proxy instance with -// Proxy.AddUpstream. An Upstream can only be attached to single Proxy instance -// at once. -func NewUpstream(p *Proxy, cfg *UpstreamConfig) *Upstream { - return &Upstream{ - cfg: cfg, - proxy: p, - shutdown: make(chan struct{}), - } -} - -// UpstreamConfig configures the upstream -type UpstreamConfig struct { - // LocalAddress is the host:port to listen on for local app connections. - LocalAddress string - - // DestinationName is the service name of the destination. - DestinationName string - - // DestinationNamespace is the namespace of the destination. - DestinationNamespace string - - // DestinationType determines which service discovery method is used to find a - // candidate instance to connect to. - DestinationType string -} - -// String returns a string representation for the upstream for debugging or -// use as a unique key. 
-func (uc *UpstreamConfig) String() string { - return fmt.Sprintf("%s->%s:%s/%s", uc.LocalAddress, uc.DestinationType, - uc.DestinationNamespace, uc.DestinationName) -} - -func (u *Upstream) listen() error { - l, err := net.Listen("tcp", u.cfg.LocalAddress) - if err != nil { - u.fatal(err) - return - } - - for { - conn, err := l.Accept() - if err != nil { - return err - } - - go u.discoverAndConnect(conn) - } -} - -func (u *Upstream) discoverAndConnect(src net.Conn) { - // First, we need an upstream instance from Consul to connect to - dstAddrs, err := u.discoverInstances() - if err != nil { - u.fatal(fmt.Errorf("failed to discover upstream instances: %s", err)) - return - } - - if len(dstAddrs) < 1 { - log.Printf("[INFO] no instances found for %s", len(dstAddrs), u) - } - - // Attempt connection to first one that works - // TODO: configurable number/deadline? - for idx, addr := range dstAddrs { - err := u.proxy.startProxyingConn(src, addr, false) - if err != nil { - log.Printf("[INFO] failed to connect to %s: %s (%d of %d)", addr, err, - idx+1, len(dstAddrs)) - continue - } - return - } - - log.Printf("[INFO] failed to connect to all %d instances for %s", - len(dstAddrs), u) -} - -func (u *Upstream) discoverInstances() ([]string, error) { - switch u.cfg.DestinationType { - case "service": - svcs, _, err := u.cfg.Consul.Health().Service(u.cfg.DestinationName, "", - true, nil) - if err != nil { - return nil, err - } - - addrs := make([]string, len(svcs)) - - // Shuffle order as we go since health endpoint doesn't - perm := rand.Perm(len(addrs)) - for i, se := range svcs { - // Pick location in output array based on next permutation position - j := perm[i] - addrs[j] = fmt.Sprintf("%s:%d", se.Service.Address, se.Service.Port) - } - - return addrs, nil - - case "prepared_query": - pqr, _, err := u.cfg.Consul.PreparedQuery().Execute(u.cfg.DestinationName, - nil) - if err != nil { - return nil, err - } - - addrs := make([]string, 0, len(svcs)) - for _, se := range 
pqr.Nodes { - addrs = append(addrs, fmt.Sprintf("%s:%d", se.Service.Address, - se.Service.Port)) - } - - // PreparedQuery execution already shuffles the result - return addrs, nil - - default: - u.fatal(fmt.Errorf("invalid destination type %s", u.cfg.DestinationType)) - } -} - -func (u *Upstream) fatal(err Error) { - log.Printf("[ERROR] upstream %s stopping on error: %s", u.cfg.LocalAddress, - err) - u.fatalErr = err -} - -// String returns a string representation for the upstream for debugging or -// use as a unique key. -func (u *Upstream) String() string { - return u.cfg.String() -} -*/ diff --git a/proxy/upstream_test.go b/proxy/upstream_test.go deleted file mode 100644 index 79bca0136..000000000 --- a/proxy/upstream_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package proxy - -import ( - "net" - "testing" - - "github.com/hashicorp/consul/connect" - "github.com/stretchr/testify/require" -) - -func TestUpstream(t *testing.T) { - tests := []struct { - name string - cfg UpstreamConfig - }{ - { - name: "service", - cfg: UpstreamConfig{ - DestinationType: "service", - DestinationNamespace: "default", - DestinationName: "db", - ConnectTimeoutMs: 100, - }, - }, - { - name: "prepared_query", - cfg: UpstreamConfig{ - DestinationType: "prepared_query", - DestinationNamespace: "default", - DestinationName: "geo-db", - ConnectTimeoutMs: 100, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - addrs := TestLocalBindAddrs(t, 2) - - testApp, err := NewTestTCPServer(t, addrs[0]) - require.Nil(t, err) - defer testApp.Close() - - // Create mock client that will "discover" our test tcp server as a target and - // skip TLS altogether. 
- client := &TestConnectClient{ - Server: testApp, - TLSConfig: connect.TestTLSConfig(t, "ca1", "web"), - } - - // Override cfg params - tt.cfg.LocalBindAddress = addrs[1] - tt.cfg.Client = client - - u := NewUpstream(tt.cfg) - - // Run proxy - r := NewRunner("test", u) - go r.Listen() - defer r.Stop() - - // Proxy and fake remote service are running, play the part of the app - // connecting to a remote connect service over TCP. - conn, err := net.Dial("tcp", tt.cfg.LocalBindAddress) - require.Nil(t, err) - TestEchoConn(t, conn, "") - - // Validate that discovery actually was called as we expected - require.Len(t, client.Calls, 1) - require.Equal(t, tt.cfg.DestinationType, client.Calls[0].typ) - require.Equal(t, tt.cfg.DestinationNamespace, client.Calls[0].ns) - require.Equal(t, tt.cfg.DestinationName, client.Calls[0].name) - }) - } -} From 51b1bc028d2055389c9ea483873185ab151787dc Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Tue, 3 Apr 2018 19:10:59 +0100 Subject: [PATCH 127/627] Rework connect/proxy and command/connect/proxy. 
End to end demo working again --- agent/connect/testing_ca.go | 2 +- command/connect/proxy/proxy.go | 37 ++- command/connect/proxy/proxy_test.go | 1 - connect/certgen/certgen.go | 2 +- connect/proxy/config.go | 223 ++++++++++++++++++ connect/proxy/config_test.go | 108 +++++++++ connect/proxy/conn.go | 61 +++++ connect/proxy/conn_test.go | 185 +++++++++++++++ connect/proxy/listener.go | 116 +++++++++ connect/proxy/listener_test.go | 91 +++++++ connect/proxy/proxy.go | 134 +++++++++++ connect/proxy/testdata/config-kitchensink.hcl | 32 +++ connect/proxy/testing.go | 105 +++++++++ connect/resolver.go | 19 +- connect/resolver_test.go | 1 - connect/service.go | 71 ++++-- connect/service_test.go | 84 ++++++- connect/testing.go | 85 +++++-- connect/tls.go | 14 +- connect/tls_test.go | 5 +- 20 files changed, 1279 insertions(+), 97 deletions(-) delete mode 100644 command/connect/proxy/proxy_test.go create mode 100644 connect/proxy/config.go create mode 100644 connect/proxy/config_test.go create mode 100644 connect/proxy/conn.go create mode 100644 connect/proxy/conn_test.go create mode 100644 connect/proxy/listener.go create mode 100644 connect/proxy/listener_test.go create mode 100644 connect/proxy/proxy.go create mode 100644 connect/proxy/testdata/config-kitchensink.hcl create mode 100644 connect/proxy/testing.go diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index 3fbcf2e02..e12372589 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -157,7 +157,7 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) (string, string t.Fatalf("error generating serial number: %s", err) } - // Genereate fresh private key + // Generate fresh private key pkSigner, pkPEM := testPrivateKey(t) // Cert template for generation diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index 237f4b7e2..362e70459 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -1,17 +1,15 @@ package 
proxy import ( - "context" "flag" "fmt" "io" "log" "net/http" - // Expose pprof if configured - _ "net/http/pprof" + _ "net/http/pprof" // Expose pprof if configured "github.com/hashicorp/consul/command/flags" - proxyImpl "github.com/hashicorp/consul/proxy" + proxyImpl "github.com/hashicorp/consul/connect/proxy" "github.com/hashicorp/consul/logger" "github.com/hashicorp/logutils" @@ -46,13 +44,14 @@ type cmd struct { func (c *cmd) init() { c.flags = flag.NewFlagSet("", flag.ContinueOnError) - c.flags.StringVar(&c.cfgFile, "insecure-dev-config", "", + c.flags.StringVar(&c.cfgFile, "dev-config", "", "If set, proxy config is read on startup from this file (in HCL or JSON"+ "format). If a config file is given, the proxy will use that instead of "+ "querying the local agent for it's configuration. It will not reload it "+ "except on startup. In this mode the proxy WILL NOT authorize incoming "+ "connections with the local agent which is totally insecure. This is "+ - "ONLY for development and testing.") + "ONLY for internal development and testing and will probably be removed "+ + "once proxy implementation is more complete..") c.flags.StringVar(&c.proxyID, "proxy-id", "", "The proxy's ID on the local agent.") @@ -121,31 +120,23 @@ func (c *cmd) Run(args []string) int { } } - ctx, cancel := context.WithCancel(context.Background()) + // Hook the shutdownCh up to close the proxy go func() { - err := p.Run(ctx) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed running proxy: %s", err)) - } - // If we exited early due to a fatal error, need to unblock the main - // routine. But we can't close shutdownCh since it might already be closed - // by a signal and there is no way to tell. We also can't send on it to - // unblock main routine since it's typed as receive only. So the best thing - // we can do is cancel the context and have the main routine select on both. 
- cancel() + <-c.shutdownCh + p.Close() }() - c.UI.Output("Consul Connect proxy running!") + c.UI.Output("Consul Connect proxy starting") c.UI.Output("Log data will now stream in as it occurs:\n") logGate.Flush() - // Wait for shutdown or context cancel (see Run() goroutine above) - select { - case <-c.shutdownCh: - cancel() - case <-ctx.Done(): + // Run the proxy + err = p.Serve() + if err != nil { + c.UI.Error(fmt.Sprintf("Failed running proxy: %s", err)) } + c.UI.Output("Consul Connect proxy shutdown") return 0 } diff --git a/command/connect/proxy/proxy_test.go b/command/connect/proxy/proxy_test.go deleted file mode 100644 index 943b369ff..000000000 --- a/command/connect/proxy/proxy_test.go +++ /dev/null @@ -1 +0,0 @@ -package proxy diff --git a/connect/certgen/certgen.go b/connect/certgen/certgen.go index 6fecf6ae1..89c424576 100644 --- a/connect/certgen/certgen.go +++ b/connect/certgen/certgen.go @@ -27,6 +27,7 @@ // NOTE: THIS IS A QUIRK OF OPENSSL; in Connect we distribute the roots alone // and stable intermediates like the XC cert to the _leaf_. package main // import "github.com/hashicorp/consul/connect/certgen" + import ( "flag" "fmt" @@ -42,7 +43,6 @@ import ( func main() { var numCAs = 2 var services = []string{"web", "db", "cache"} - //var slugRe = regexp.MustCompile("[^a-zA-Z0-9]+") var outDir string flag.StringVar(&outDir, "out-dir", "", diff --git a/connect/proxy/config.go b/connect/proxy/config.go new file mode 100644 index 000000000..a8f83d22c --- /dev/null +++ b/connect/proxy/config.go @@ -0,0 +1,223 @@ +package proxy + +import ( + "fmt" + "io/ioutil" + "log" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/connect" + "github.com/hashicorp/hcl" +) + +// Config is the publicly configurable state for an entire proxy instance. It's +// mostly used as the format for the local-file config mode which is mostly for +// dev/testing. In normal use, different parts of this config are pulled from +// different locations (e.g. 
command line, agent config endpoint, agent +// certificate endpoints). +type Config struct { + // ProxyID is the identifier for this proxy as registered in Consul. It's only + // guaranteed to be unique per agent. + ProxyID string `json:"proxy_id" hcl:"proxy_id"` + + // Token is the authentication token provided for queries to the local agent. + Token string `json:"token" hcl:"token"` + + // ProxiedServiceID is the identifier of the service this proxy is representing. + ProxiedServiceID string `json:"proxied_service_id" hcl:"proxied_service_id"` + + // ProxiedServiceNamespace is the namespace of the service this proxy is + // representing. + ProxiedServiceNamespace string `json:"proxied_service_namespace" hcl:"proxied_service_namespace"` + + // PublicListener configures the mTLS listener. + PublicListener PublicListenerConfig `json:"public_listener" hcl:"public_listener"` + + // Upstreams configures outgoing proxies for remote connect services. + Upstreams []UpstreamConfig `json:"upstreams" hcl:"upstreams"` + + // DevCAFile allows passing the file path to PEM encoded root certificate + // bundle to be used in development instead of the ones supplied by Connect. + DevCAFile string `json:"dev_ca_file" hcl:"dev_ca_file"` + + // DevServiceCertFile allows passing the file path to PEM encoded service + // certificate (client and server) to be used in development instead of the + // ones supplied by Connect. + DevServiceCertFile string `json:"dev_service_cert_file" hcl:"dev_service_cert_file"` + + // DevServiceKeyFile allows passing the file path to PEM encoded service + // private key to be used in development instead of the ones supplied by + // Connect. + DevServiceKeyFile string `json:"dev_service_key_file" hcl:"dev_service_key_file"` + + // service is a connect.Service instance representing the proxied service. 
It + // is created internally by the code responsible for setting up config as it + // may depend on other external dependencies + service *connect.Service +} + +// PublicListenerConfig contains the parameters needed for the incoming mTLS +// listener. +type PublicListenerConfig struct { + // BindAddress is the host:port the public mTLS listener will bind to. + BindAddress string `json:"bind_address" hcl:"bind_address"` + + // LocalServiceAddress is the host:port for the proxied application. This + // should be on loopback or otherwise protected as it's plain TCP. + LocalServiceAddress string `json:"local_service_address" hcl:"local_service_address"` + + // LocalConnectTimeout is the timeout for establishing connections with the + // local backend. Defaults to 1000 (1s). + LocalConnectTimeoutMs int `json:"local_connect_timeout_ms" hcl:"local_connect_timeout_ms"` + + // HandshakeTimeout is the timeout for incoming mTLS clients to complete a + // handshake. Setting this low avoids DOS by malicious clients holding + // resources open. Defaults to 10000 (10s). + HandshakeTimeoutMs int `json:"handshake_timeout_ms" hcl:"handshake_timeout_ms"` +} + +// applyDefaults sets zero-valued params to a sane default. +func (plc *PublicListenerConfig) applyDefaults() { + if plc.LocalConnectTimeoutMs == 0 { + plc.LocalConnectTimeoutMs = 1000 + } + if plc.HandshakeTimeoutMs == 0 { + plc.HandshakeTimeoutMs = 10000 + } +} + +// UpstreamConfig configures an upstream (outgoing) listener. +type UpstreamConfig struct { + // LocalAddress is the host:port to listen on for local app connections. + LocalBindAddress string `json:"local_bind_address" hcl:"local_bind_address,attr"` + + // DestinationName is the service name of the destination. + DestinationName string `json:"destination_name" hcl:"destination_name,attr"` + + // DestinationNamespace is the namespace of the destination. 
+ DestinationNamespace string `json:"destination_namespace" hcl:"destination_namespace,attr"` + + // DestinationType determines which service discovery method is used to find a + // candidate instance to connect to. + DestinationType string `json:"destination_type" hcl:"destination_type,attr"` + + // DestinationDatacenter is the datacenter the destination is in. If empty, + // defaults to discovery within the same datacenter. + DestinationDatacenter string `json:"destination_datacenter" hcl:"destination_datacenter,attr"` + + // ConnectTimeout is the timeout for establishing connections with the remote + // service instance. Defaults to 10,000 (10s). + ConnectTimeoutMs int `json:"connect_timeout_ms" hcl:"connect_timeout_ms,attr"` + + // resolver is used to plug in the service discover mechanism. It can be used + // in tests to bypass discovery. In real usage it is used to inject the + // api.Client dependency from the remainder of the config struct parsed from + // the user JSON using the UpstreamResolverFromClient helper. + resolver connect.Resolver +} + +// applyDefaults sets zero-valued params to a sane default. +func (uc *UpstreamConfig) applyDefaults() { + if uc.ConnectTimeoutMs == 0 { + uc.ConnectTimeoutMs = 10000 + } +} + +// String returns a string that uniquely identifies the Upstream. Used for +// identifying the upstream in log output and map keys. +func (uc *UpstreamConfig) String() string { + return fmt.Sprintf("%s->%s:%s/%s", uc.LocalBindAddress, uc.DestinationType, + uc.DestinationNamespace, uc.DestinationName) +} + +// UpstreamResolverFromClient returns a ConsulResolver that can resolve the +// given UpstreamConfig using the provided api.Client dependency. +func UpstreamResolverFromClient(client *api.Client, + cfg UpstreamConfig) connect.Resolver { + + // For now default to service as it has the most natural meaning and the error + // that the service doesn't exist is probably reasonable if misconfigured. 
We + // should probably handle actual configs that have invalid types at a higher + // level anyway (like when parsing). + typ := connect.ConsulResolverTypeService + if cfg.DestinationType == "prepared_query" { + typ = connect.ConsulResolverTypePreparedQuery + } + return &connect.ConsulResolver{ + Client: client, + Namespace: cfg.DestinationNamespace, + Name: cfg.DestinationName, + Type: typ, + Datacenter: cfg.DestinationDatacenter, + } +} + +// ConfigWatcher is a simple interface to allow dynamic configurations from +// plugggable sources. +type ConfigWatcher interface { + // Watch returns a channel that will deliver new Configs if something external + // provokes it. + Watch() <-chan *Config +} + +// StaticConfigWatcher is a simple ConfigWatcher that delivers a static Config +// once and then never changes it. +type StaticConfigWatcher struct { + ch chan *Config +} + +// NewStaticConfigWatcher returns a ConfigWatcher for a config that never +// changes. It assumes only one "watcher" will ever call Watch. The config is +// delivered on the first call but will never be delivered again to allow +// callers to call repeatedly (e.g. select in a loop). +func NewStaticConfigWatcher(cfg *Config) *StaticConfigWatcher { + sc := &StaticConfigWatcher{ + // Buffer it so we can queue up the config for first delivery. + ch: make(chan *Config, 1), + } + sc.ch <- cfg + return sc +} + +// Watch implements ConfigWatcher on a static configuration for compatibility. +// It returns itself on the channel once and then leaves it open. +func (sc *StaticConfigWatcher) Watch() <-chan *Config { + return sc.ch +} + +// ParseConfigFile parses proxy configuration from a file for local dev. 
+func ParseConfigFile(filename string) (*Config, error) { + bs, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + var cfg Config + + err = hcl.Unmarshal(bs, &cfg) + if err != nil { + return nil, err + } + + cfg.PublicListener.applyDefaults() + for idx := range cfg.Upstreams { + cfg.Upstreams[idx].applyDefaults() + } + + return &cfg, nil +} + +// AgentConfigWatcher watches the local Consul agent for proxy config changes. +type AgentConfigWatcher struct { + client *api.Client + proxyID string + logger *log.Logger +} + +// Watch implements ConfigWatcher. +func (w *AgentConfigWatcher) Watch() <-chan *Config { + watch := make(chan *Config) + // TODO implement me, note we need to discover the Service instance to use and + // set it on the Config we return. + return watch +} diff --git a/connect/proxy/config_test.go b/connect/proxy/config_test.go new file mode 100644 index 000000000..96782b12e --- /dev/null +++ b/connect/proxy/config_test.go @@ -0,0 +1,108 @@ +package proxy + +import ( + "testing" + + "github.com/hashicorp/consul/connect" + "github.com/stretchr/testify/require" +) + +func TestParseConfigFile(t *testing.T) { + cfg, err := ParseConfigFile("testdata/config-kitchensink.hcl") + require.Nil(t, err) + + expect := &Config{ + ProxyID: "foo", + Token: "11111111-2222-3333-4444-555555555555", + ProxiedServiceID: "web", + ProxiedServiceNamespace: "default", + PublicListener: PublicListenerConfig{ + BindAddress: ":9999", + LocalServiceAddress: "127.0.0.1:5000", + LocalConnectTimeoutMs: 1000, + HandshakeTimeoutMs: 10000, // From defaults + }, + Upstreams: []UpstreamConfig{ + { + LocalBindAddress: "127.0.0.1:6000", + DestinationName: "db", + DestinationNamespace: "default", + DestinationType: "service", + ConnectTimeoutMs: 10000, + }, + { + LocalBindAddress: "127.0.0.1:6001", + DestinationName: "geo-cache", + DestinationNamespace: "default", + DestinationType: "prepared_query", + ConnectTimeoutMs: 10000, + }, + }, + DevCAFile: 
"connect/testdata/ca1-ca-consul-internal.cert.pem", + DevServiceCertFile: "connect/testdata/ca1-svc-web.cert.pem", + DevServiceKeyFile: "connect/testdata/ca1-svc-web.key.pem", + } + + require.Equal(t, expect, cfg) +} + +func TestUpstreamResolverFromClient(t *testing.T) { + tests := []struct { + name string + cfg UpstreamConfig + want *connect.ConsulResolver + }{ + { + name: "service", + cfg: UpstreamConfig{ + DestinationNamespace: "foo", + DestinationName: "web", + DestinationDatacenter: "ny1", + DestinationType: "service", + }, + want: &connect.ConsulResolver{ + Namespace: "foo", + Name: "web", + Datacenter: "ny1", + Type: connect.ConsulResolverTypeService, + }, + }, + { + name: "prepared_query", + cfg: UpstreamConfig{ + DestinationNamespace: "foo", + DestinationName: "web", + DestinationDatacenter: "ny1", + DestinationType: "prepared_query", + }, + want: &connect.ConsulResolver{ + Namespace: "foo", + Name: "web", + Datacenter: "ny1", + Type: connect.ConsulResolverTypePreparedQuery, + }, + }, + { + name: "unknown behaves like service", + cfg: UpstreamConfig{ + DestinationNamespace: "foo", + DestinationName: "web", + DestinationDatacenter: "ny1", + DestinationType: "junk", + }, + want: &connect.ConsulResolver{ + Namespace: "foo", + Name: "web", + Datacenter: "ny1", + Type: connect.ConsulResolverTypeService, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Client doesn't really matter as long as it's passed through. + got := UpstreamResolverFromClient(nil, tt.cfg) + require.Equal(t, tt.want, got) + }) + } +} diff --git a/connect/proxy/conn.go b/connect/proxy/conn.go new file mode 100644 index 000000000..70019e55c --- /dev/null +++ b/connect/proxy/conn.go @@ -0,0 +1,61 @@ +package proxy + +import ( + "io" + "net" + "sync/atomic" +) + +// Conn represents a single proxied TCP connection. 
+type Conn struct { + src, dst net.Conn + stopping int32 +} + +// NewConn returns a conn joining the two given net.Conn +func NewConn(src, dst net.Conn) *Conn { + return &Conn{ + src: src, + dst: dst, + stopping: 0, + } +} + +// Close closes both connections. +func (c *Conn) Close() error { + // Note that net.Conn.Close can be called multiple times and atomic store is + // idempotent so no need to ensure we only do this once. + // + // Also note that we don't wait for CopyBytes to return here since we are + // closing the conns which is the only externally visible sideeffect of that + // goroutine running and there should be no way for it to hang or leak once + // the conns are closed so we can save the extra coordination. + atomic.StoreInt32(&c.stopping, 1) + c.src.Close() + c.dst.Close() + return nil +} + +// CopyBytes will continuously copy bytes in both directions between src and dst +// until either connection is closed. +func (c *Conn) CopyBytes() error { + defer c.Close() + + go func() { + // Need this since Copy is only guaranteed to stop when it's source reader + // (second arg) hits EOF or error but either conn might close first possibly + // causing this goroutine to exit but not the outer one. See + // TestConnSrcClosing which will fail if you comment the defer below. + defer c.Close() + io.Copy(c.dst, c.src) + }() + + _, err := io.Copy(c.src, c.dst) + // Note that we don't wait for the other goroutine to finish because it either + // already has due to it's src conn closing, or it will once our defer fires + // and closes the source conn. No need for the extra coordination. 
+ if atomic.LoadInt32(&c.stopping) == 1 { + return nil + } + return err +} diff --git a/connect/proxy/conn_test.go b/connect/proxy/conn_test.go new file mode 100644 index 000000000..a37720ea0 --- /dev/null +++ b/connect/proxy/conn_test.go @@ -0,0 +1,185 @@ +package proxy + +import ( + "bufio" + "io" + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// Assert io.Closer implementation +var _ io.Closer = new(Conn) + +// testConnPairSetup creates a TCP connection by listening on a random port, and +// returns both ends. Ready to have data sent down them. It also returns a +// closer function that will close both conns and the listener. +func testConnPairSetup(t *testing.T) (net.Conn, net.Conn, func()) { + t.Helper() + + l, err := net.Listen("tcp", "localhost:0") + require.Nil(t, err) + + ch := make(chan net.Conn, 1) + go func() { + src, err := l.Accept() + require.Nil(t, err) + ch <- src + }() + + dst, err := net.Dial("tcp", l.Addr().String()) + require.Nil(t, err) + + src := <-ch + + stopper := func() { + l.Close() + src.Close() + dst.Close() + } + + return src, dst, stopper +} + +// testConnPipelineSetup creates a pipeline consiting of two TCP connection +// pairs and a Conn that copies bytes between them. Data flow looks like this: +// +// src1 <---> dst1 <== Conn.CopyBytes ==> src2 <---> dst2 +// +// The returned values are the src1 and dst2 which should be able to send and +// receive to each other via the Conn, the Conn itself (not running), and a +// stopper func to close everything. 
+func testConnPipelineSetup(t *testing.T) (net.Conn, net.Conn, *Conn, func()) { + src1, dst1, stop1 := testConnPairSetup(t) + src2, dst2, stop2 := testConnPairSetup(t) + c := NewConn(dst1, src2) + return src1, dst2, c, func() { + c.Close() + stop1() + stop2() + } +} + +func TestConn(t *testing.T) { + src, dst, c, stop := testConnPipelineSetup(t) + defer stop() + + retCh := make(chan error, 1) + go func() { + retCh <- c.CopyBytes() + }() + + // Now write/read into the other ends of the pipes (src1, dst2) + srcR := bufio.NewReader(src) + dstR := bufio.NewReader(dst) + + _, err := src.Write([]byte("ping 1\n")) + require.Nil(t, err) + _, err = dst.Write([]byte("ping 2\n")) + require.Nil(t, err) + + got, err := dstR.ReadString('\n') + require.Nil(t, err) + require.Equal(t, "ping 1\n", got) + + got, err = srcR.ReadString('\n') + require.Nil(t, err) + require.Equal(t, "ping 2\n", got) + + _, err = src.Write([]byte("pong 1\n")) + require.Nil(t, err) + _, err = dst.Write([]byte("pong 2\n")) + require.Nil(t, err) + + got, err = dstR.ReadString('\n') + require.Nil(t, err) + require.Equal(t, "pong 1\n", got) + + got, err = srcR.ReadString('\n') + require.Nil(t, err) + require.Equal(t, "pong 2\n", got) + + c.Close() + + ret := <-retCh + require.Nil(t, ret, "Close() should not cause error return") +} + +func TestConnSrcClosing(t *testing.T) { + src, dst, c, stop := testConnPipelineSetup(t) + defer stop() + + retCh := make(chan error, 1) + go func() { + retCh <- c.CopyBytes() + }() + + // Wait until we can actually get some bytes through both ways so we know that + // the copy goroutines are running. 
+ srcR := bufio.NewReader(src) + dstR := bufio.NewReader(dst) + + _, err := src.Write([]byte("ping 1\n")) + require.Nil(t, err) + _, err = dst.Write([]byte("ping 2\n")) + require.Nil(t, err) + + got, err := dstR.ReadString('\n') + require.Nil(t, err) + require.Equal(t, "ping 1\n", got) + got, err = srcR.ReadString('\n') + require.Nil(t, err) + require.Equal(t, "ping 2\n", got) + + // If we close the src conn, we expect CopyBytes to return and dst to be + // closed too. No good way to assert that the conn is closed really other than + // assume the retCh receive will hang unless CopyBytes exits and that + // CopyBytes defers Closing both. + testTimer := time.AfterFunc(3*time.Second, func() { + panic("test timeout") + }) + src.Close() + <-retCh + testTimer.Stop() +} + +func TestConnDstClosing(t *testing.T) { + src, dst, c, stop := testConnPipelineSetup(t) + defer stop() + + retCh := make(chan error, 1) + go func() { + retCh <- c.CopyBytes() + }() + + // Wait until we can actually get some bytes through both ways so we know that + // the copy goroutines are running. + srcR := bufio.NewReader(src) + dstR := bufio.NewReader(dst) + + _, err := src.Write([]byte("ping 1\n")) + require.Nil(t, err) + _, err = dst.Write([]byte("ping 2\n")) + require.Nil(t, err) + + got, err := dstR.ReadString('\n') + require.Nil(t, err) + require.Equal(t, "ping 1\n", got) + got, err = srcR.ReadString('\n') + require.Nil(t, err) + require.Equal(t, "ping 2\n", got) + + // If we close the dst conn, we expect CopyBytes to return and src to be + // closed too. No good way to assert that the conn is closed really other than + // assume the retCh receive will hang unless CopyBytes exits and that + // CopyBytes defers Closing both. i.e. if this test doesn't time out it's + // good! 
+ testTimer := time.AfterFunc(3*time.Second, func() { + panic("test timeout") + }) + src.Close() + <-retCh + testTimer.Stop() +} diff --git a/connect/proxy/listener.go b/connect/proxy/listener.go new file mode 100644 index 000000000..c003cb19c --- /dev/null +++ b/connect/proxy/listener.go @@ -0,0 +1,116 @@ +package proxy + +import ( + "context" + "crypto/tls" + "errors" + "log" + "net" + "sync/atomic" + "time" + + "github.com/hashicorp/consul/connect" +) + +// Listener is the implementation of a specific proxy listener. It has pluggable +// Listen and Dial methods to suit public mTLS vs upstream semantics. It handles +// the lifecycle of the listener and all connections opened through it +type Listener struct { + // Service is the connect service instance to use. + Service *connect.Service + + listenFunc func() (net.Listener, error) + dialFunc func() (net.Conn, error) + + stopFlag int32 + stopChan chan struct{} + + logger *log.Logger +} + +// NewPublicListener returns a Listener setup to listen for public mTLS +// connections and proxy them to the configured local application over TCP. +func NewPublicListener(svc *connect.Service, cfg PublicListenerConfig, + logger *log.Logger) *Listener { + return &Listener{ + Service: svc, + listenFunc: func() (net.Listener, error) { + return tls.Listen("tcp", cfg.BindAddress, svc.ServerTLSConfig()) + }, + dialFunc: func() (net.Conn, error) { + return net.DialTimeout("tcp", cfg.LocalServiceAddress, + time.Duration(cfg.LocalConnectTimeoutMs)*time.Millisecond) + }, + stopChan: make(chan struct{}), + logger: logger, + } +} + +// NewUpstreamListener returns a Listener setup to listen locally for TCP +// connections that are proxied to a discovered Connect service instance. 
+func NewUpstreamListener(svc *connect.Service, cfg UpstreamConfig, + logger *log.Logger) *Listener { + return &Listener{ + Service: svc, + listenFunc: func() (net.Listener, error) { + return net.Listen("tcp", cfg.LocalBindAddress) + }, + dialFunc: func() (net.Conn, error) { + if cfg.resolver == nil { + return nil, errors.New("no resolver provided") + } + ctx, cancel := context.WithTimeout(context.Background(), + time.Duration(cfg.ConnectTimeoutMs)*time.Millisecond) + defer cancel() + return svc.Dial(ctx, cfg.resolver) + }, + stopChan: make(chan struct{}), + logger: logger, + } +} + +// Serve runs the listener until it is stopped. +func (l *Listener) Serve() error { + listen, err := l.listenFunc() + if err != nil { + return err + } + + for { + conn, err := listen.Accept() + if err != nil { + if atomic.LoadInt32(&l.stopFlag) == 1 { + return nil + } + return err + } + + go l.handleConn(conn) + } + return nil +} + +// handleConn is the internal connection handler goroutine. +func (l *Listener) handleConn(src net.Conn) { + defer src.Close() + + dst, err := l.dialFunc() + if err != nil { + l.logger.Printf("[ERR] failed to dial: %s", err) + return + } + // Note no need to defer dst.Close() since conn handles that for us. + conn := NewConn(src, dst) + defer conn.Close() + + err = conn.CopyBytes() + if err != nil { + l.logger.Printf("[ERR] connection failed: %s", err) + return + } +} + +// Close terminates the listener and all active connections. 
+func (l *Listener) Close() error { + return nil +} diff --git a/connect/proxy/listener_test.go b/connect/proxy/listener_test.go new file mode 100644 index 000000000..ce41c81e5 --- /dev/null +++ b/connect/proxy/listener_test.go @@ -0,0 +1,91 @@ +package proxy + +import ( + "context" + "log" + "net" + "os" + "testing" + + agConnect "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/connect" + "github.com/stretchr/testify/require" +) + +func TestPublicListener(t *testing.T) { + ca := agConnect.TestCA(t, nil) + addrs := TestLocalBindAddrs(t, 2) + + cfg := PublicListenerConfig{ + BindAddress: addrs[0], + LocalServiceAddress: addrs[1], + HandshakeTimeoutMs: 100, + LocalConnectTimeoutMs: 100, + } + + testApp, err := NewTestTCPServer(t, cfg.LocalServiceAddress) + require.Nil(t, err) + defer testApp.Close() + + svc := connect.TestService(t, "db", ca) + + l := NewPublicListener(svc, cfg, log.New(os.Stderr, "", log.LstdFlags)) + + // Run proxy + go func() { + err := l.Serve() + require.Nil(t, err) + }() + defer l.Close() + + // Proxy and backend are running, play the part of a TLS client using same + // cert for now. + conn, err := svc.Dial(context.Background(), &connect.StaticResolver{ + Addr: addrs[0], + CertURI: agConnect.TestSpiffeIDService(t, "db"), + }) + require.Nilf(t, err, "unexpected err: %s", err) + TestEchoConn(t, conn, "") +} + +func TestUpstreamListener(t *testing.T) { + ca := agConnect.TestCA(t, nil) + addrs := TestLocalBindAddrs(t, 1) + + // Run a test server that we can dial. 
+ testSvr := connect.NewTestServer(t, "db", ca) + go func() { + err := testSvr.Serve() + require.Nil(t, err) + }() + defer testSvr.Close() + + cfg := UpstreamConfig{ + DestinationType: "service", + DestinationNamespace: "default", + DestinationName: "db", + ConnectTimeoutMs: 100, + LocalBindAddress: addrs[0], + resolver: &connect.StaticResolver{ + Addr: testSvr.Addr, + CertURI: agConnect.TestSpiffeIDService(t, "db"), + }, + } + + svc := connect.TestService(t, "web", ca) + + l := NewUpstreamListener(svc, cfg, log.New(os.Stderr, "", log.LstdFlags)) + + // Run proxy + go func() { + err := l.Serve() + require.Nil(t, err) + }() + defer l.Close() + + // Proxy and fake remote service are running, play the part of the app + // connecting to a remote connect service over TCP. + conn, err := net.Dial("tcp", cfg.LocalBindAddress) + require.Nilf(t, err, "unexpected err: %s", err) + TestEchoConn(t, conn, "") +} diff --git a/connect/proxy/proxy.go b/connect/proxy/proxy.go new file mode 100644 index 000000000..bda6f3afb --- /dev/null +++ b/connect/proxy/proxy.go @@ -0,0 +1,134 @@ +package proxy + +import ( + "log" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/connect" +) + +// Proxy implements the built-in connect proxy. +type Proxy struct { + proxyID string + client *api.Client + cfgWatcher ConfigWatcher + stopChan chan struct{} + logger *log.Logger +} + +// NewFromConfigFile returns a Proxy instance configured just from a local file. +// This is intended mostly for development and bypasses the normal mechanisms +// for fetching config and certificates from the local agent. 
+func NewFromConfigFile(client *api.Client, filename string, + logger *log.Logger) (*Proxy, error) { + cfg, err := ParseConfigFile(filename) + if err != nil { + return nil, err + } + + service, err := connect.NewDevServiceFromCertFiles(cfg.ProxiedServiceID, + client, logger, cfg.DevCAFile, cfg.DevServiceCertFile, + cfg.DevServiceKeyFile) + if err != nil { + return nil, err + } + cfg.service = service + + p := &Proxy{ + proxyID: cfg.ProxyID, + client: client, + cfgWatcher: NewStaticConfigWatcher(cfg), + stopChan: make(chan struct{}), + logger: logger, + } + return p, nil +} + +// New returns a Proxy with the given id, consuming the provided (configured) +// agent. It is ready to Run(). +func New(client *api.Client, proxyID string, logger *log.Logger) (*Proxy, error) { + p := &Proxy{ + proxyID: proxyID, + client: client, + cfgWatcher: &AgentConfigWatcher{ + client: client, + proxyID: proxyID, + logger: logger, + }, + stopChan: make(chan struct{}), + logger: logger, + } + return p, nil +} + +// Serve the proxy instance until a fatal error occurs or proxy is closed. +func (p *Proxy) Serve() error { + + var cfg *Config + + // Watch for config changes (initial setup happens on first "change") + for { + select { + case newCfg := <-p.cfgWatcher.Watch(): + p.logger.Printf("[DEBUG] got new config") + if newCfg.service == nil { + p.logger.Printf("[ERR] new config has nil service") + continue + } + if cfg == nil { + // Initial setup + + newCfg.PublicListener.applyDefaults() + l := NewPublicListener(newCfg.service, newCfg.PublicListener, p.logger) + err := p.startListener("public listener", l) + if err != nil { + return err + } + } + + // TODO(banks) update/remove upstreams properly based on a diff with current. Can + // store a map of uc.String() to Listener here and then use it to only + // start one of each and stop/modify if changes occur. 
+ for _, uc := range newCfg.Upstreams { + uc.applyDefaults() + uc.resolver = UpstreamResolverFromClient(p.client, uc) + + l := NewUpstreamListener(newCfg.service, uc, p.logger) + err := p.startListener(uc.String(), l) + if err != nil { + p.logger.Printf("[ERR] failed to start upstream %s: %s", uc.String(), + err) + } + } + cfg = newCfg + + case <-p.stopChan: + return nil + } + } +} + +// startPublicListener is run from the internal state machine loop +func (p *Proxy) startListener(name string, l *Listener) error { + go func() { + err := l.Serve() + if err != nil { + p.logger.Printf("[ERR] %s stopped with error: %s", name, err) + return + } + p.logger.Printf("[INFO] %s stopped", name) + }() + + go func() { + <-p.stopChan + l.Close() + }() + + return nil +} + +// Close stops the proxy and terminates all active connections. It must be +// called only once. +func (p *Proxy) Close() { + close(p.stopChan) +} diff --git a/connect/proxy/testdata/config-kitchensink.hcl b/connect/proxy/testdata/config-kitchensink.hcl new file mode 100644 index 000000000..2bda99791 --- /dev/null +++ b/connect/proxy/testdata/config-kitchensink.hcl @@ -0,0 +1,32 @@ +# Example proxy config with everything specified + +proxy_id = "foo" +token = "11111111-2222-3333-4444-555555555555" + +proxied_service_id = "web" +proxied_service_namespace = "default" + +# Assumes running consul in dev mode from the repo root... 
+dev_ca_file = "connect/testdata/ca1-ca-consul-internal.cert.pem" +dev_service_cert_file = "connect/testdata/ca1-svc-web.cert.pem" +dev_service_key_file = "connect/testdata/ca1-svc-web.key.pem" + +public_listener { + bind_address = ":9999" + local_service_address = "127.0.0.1:5000" +} + +upstreams = [ + { + local_bind_address = "127.0.0.1:6000" + destination_name = "db" + destination_namespace = "default" + destination_type = "service" + }, + { + local_bind_address = "127.0.0.1:6001" + destination_name = "geo-cache" + destination_namespace = "default" + destination_type = "prepared_query" + } +] diff --git a/connect/proxy/testing.go b/connect/proxy/testing.go new file mode 100644 index 000000000..9ed8c41c4 --- /dev/null +++ b/connect/proxy/testing.go @@ -0,0 +1,105 @@ +package proxy + +import ( + "fmt" + "io" + "log" + "net" + "sync/atomic" + + "github.com/hashicorp/consul/lib/freeport" + "github.com/mitchellh/go-testing-interface" + "github.com/stretchr/testify/require" +) + +// TestLocalBindAddrs returns n localhost address:port strings with free ports +// for binding test listeners to. +func TestLocalBindAddrs(t testing.T, n int) []string { + ports := freeport.GetT(t, n) + addrs := make([]string, n) + for i, p := range ports { + addrs[i] = fmt.Sprintf("localhost:%d", p) + } + return addrs +} + +// TestTCPServer is a simple TCP echo server for use during tests. +type TestTCPServer struct { + l net.Listener + stopped int32 + accepted, closed, active int32 +} + +// NewTestTCPServer opens as a listening socket on the given address and returns +// a TestTCPServer serving requests to it. The server is already started and can +// be stopped by calling Close(). 
+func NewTestTCPServer(t testing.T, addr string) (*TestTCPServer, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + log.Printf("test tcp server listening on %s", addr) + s := &TestTCPServer{ + l: l, + } + go s.accept() + return s, nil +} + +// Close stops the server +func (s *TestTCPServer) Close() { + atomic.StoreInt32(&s.stopped, 1) + if s.l != nil { + s.l.Close() + } +} + +func (s *TestTCPServer) accept() error { + for { + conn, err := s.l.Accept() + if err != nil { + if atomic.LoadInt32(&s.stopped) == 1 { + log.Printf("test tcp echo server %s stopped", s.l.Addr()) + return nil + } + log.Printf("test tcp echo server %s failed: %s", s.l.Addr(), err) + return err + } + + atomic.AddInt32(&s.accepted, 1) + atomic.AddInt32(&s.active, 1) + + go func(c net.Conn) { + io.Copy(c, c) + atomic.AddInt32(&s.closed, 1) + atomic.AddInt32(&s.active, -1) + }(conn) + } +} + +// TestEchoConn attempts to write some bytes to conn and expects to read them +// back within a short timeout (10ms). If prefix is not empty we expect it to be +// poresent at the start of all echoed responses (for example to distinguish +// between multiple echo server instances). +func TestEchoConn(t testing.T, conn net.Conn, prefix string) { + t.Helper() + + // Write some bytes and read them back + n, err := conn.Write([]byte("Hello World")) + require.Equal(t, 11, n) + require.Nil(t, err) + + expectLen := 11 + len(prefix) + + buf := make([]byte, expectLen) + // read until our buffer is full - it might be separate packets if prefix is + // in use. 
+ got := 0 + for got < expectLen { + n, err = conn.Read(buf[got:]) + require.Nilf(t, err, "err: %s", err) + got += n + } + require.Equal(t, expectLen, got) + require.Equal(t, prefix+"Hello World", string(buf[:])) +} diff --git a/connect/resolver.go b/connect/resolver.go index 41dc70e82..9873fcdf1 100644 --- a/connect/resolver.go +++ b/connect/resolver.go @@ -10,7 +10,9 @@ import ( testing "github.com/mitchellh/go-testing-interface" ) -// Resolver is the interface implemented by a service discovery mechanism. +// Resolver is the interface implemented by a service discovery mechanism to get +// the address and identity of an instance to connect to via Connect as a +// client. type Resolver interface { // Resolve returns a single service instance to connect to. Implementations // may attempt to ensure the instance returned is currently available. It is @@ -19,7 +21,10 @@ type Resolver interface { // increases reliability. The context passed can be used to impose timeouts // which may or may not be respected by implementations that make network // calls to resolve the service. The addr returned is a string in any valid - // form for passing directly to `net.Dial("tcp", addr)`. + // form for passing directly to `net.Dial("tcp", addr)`. The certURI + // represents the identity of the service instance. It will be matched against + // the TLS certificate URI SAN presented by the server and the connection + // rejected if they don't match. Resolve(ctx context.Context) (addr string, certURI connect.CertURI, err error) } @@ -33,7 +38,8 @@ type StaticResolver struct { Addr string // CertURL is the _identity_ we expect the server to present in it's TLS - // certificate. It must be an exact match or the connection will be rejected. + // certificate. It must be an exact URI string match or the connection will be + // rejected. CertURI connect.CertURI } @@ -56,13 +62,14 @@ type ConsulResolver struct { // panic. 
Client *api.Client - // Namespace of the query target + // Namespace of the query target. Namespace string - // Name of the query target + // Name of the query target. Name string - // Type of the query target, + // Type of the query target. Should be one of the defined ConsulResolverType* + // constants. Currently defaults to ConsulResolverTypeService. Type int // Datacenter to resolve in, empty indicates agent's local DC. diff --git a/connect/resolver_test.go b/connect/resolver_test.go index 29a40e3d3..3ab439add 100644 --- a/connect/resolver_test.go +++ b/connect/resolver_test.go @@ -41,7 +41,6 @@ func TestStaticResolver_Resolve(t *testing.T) { } func TestConsulResolver_Resolve(t *testing.T) { - // Setup a local test agent to query agent := agent.NewTestAgent("test-consul", "") defer agent.Shutdown() diff --git a/connect/service.go b/connect/service.go index db83ce5aa..6bbda0807 100644 --- a/connect/service.go +++ b/connect/service.go @@ -3,6 +3,7 @@ package connect import ( "context" "crypto/tls" + "errors" "log" "net" "net/http" @@ -10,6 +11,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "golang.org/x/net/http2" ) // Service represents a Consul service that accepts and/or connects via Connect. @@ -41,10 +43,17 @@ type Service struct { client *api.Client // serverTLSCfg is the (reloadable) TLS config we use for serving. - serverTLSCfg *ReloadableTLSConfig + serverTLSCfg *reloadableTLSConfig // clientTLSCfg is the (reloadable) TLS config we use for dialling. - clientTLSCfg *ReloadableTLSConfig + clientTLSCfg *reloadableTLSConfig + + // httpResolverFromAddr is a function that returns a Resolver from a string + // address for HTTP clients. It's privately pluggable to make testing easier + // but will default to a simple method to parse the host as a Consul DNS host. 
+ // + // TODO(banks): write the proper implementation + httpResolverFromAddr func(addr string) (Resolver, error) logger *log.Logger } @@ -65,8 +74,8 @@ func NewServiceWithLogger(serviceID string, client *api.Client, client: client, logger: logger, } - s.serverTLSCfg = NewReloadableTLSConfig(defaultTLSConfig(serverVerifyCerts)) - s.clientTLSCfg = NewReloadableTLSConfig(defaultTLSConfig(clientVerifyCerts)) + s.serverTLSCfg = newReloadableTLSConfig(defaultTLSConfig(serverVerifyCerts)) + s.clientTLSCfg = newReloadableTLSConfig(defaultTLSConfig(clientVerifyCerts)) // TODO(banks) run the background certificate sync return s, nil @@ -86,12 +95,12 @@ func NewDevServiceFromCertFiles(serviceID string, client *api.Client, return nil, err } - // Note that NewReloadableTLSConfig makes a copy so we can re-use the same + // Note that newReloadableTLSConfig makes a copy so we can re-use the same // base for both client and server with swapped verifiers. tlsCfg.VerifyPeerCertificate = serverVerifyCerts - s.serverTLSCfg = NewReloadableTLSConfig(tlsCfg) + s.serverTLSCfg = newReloadableTLSConfig(tlsCfg) tlsCfg.VerifyPeerCertificate = clientVerifyCerts - s.clientTLSCfg = NewReloadableTLSConfig(tlsCfg) + s.clientTLSCfg = newReloadableTLSConfig(tlsCfg) return s, nil } @@ -121,6 +130,8 @@ func (s *Service) Dial(ctx context.Context, resolver Resolver) (net.Conn, error) if err != nil { return nil, err } + s.logger.Printf("[DEBUG] resolved service instance: %s (%s)", addr, + certURI.URI()) var dialer net.Dialer tcpConn, err := dialer.DialContext(ctx, "tcp", addr) if err != nil { @@ -133,8 +144,8 @@ func (s *Service) Dial(ctx context.Context, resolver Resolver) (net.Conn, error) if ok { tlsConn.SetDeadline(deadline) } - err = tlsConn.Handshake() - if err != nil { + // Perform handshake + if err = tlsConn.Handshake(); err != nil { tlsConn.Close() return nil, err } @@ -149,20 +160,27 @@ func (s *Service) Dial(ctx context.Context, resolver Resolver) (net.Conn, error) tlsConn.Close() return nil, 
err } - + s.logger.Printf("[DEBUG] successfully connected to %s (%s)", addr, + certURI.URI()) return tlsConn, nil } -// HTTPDialContext is compatible with http.Transport.DialContext. It expects the -// addr hostname to be specified using Consul DNS query syntax, e.g. +// HTTPDialTLS is compatible with http.Transport.DialTLS. It expects the addr +// hostname to be specified using Consul DNS query syntax, e.g. // "web.service.consul". It converts that into the equivalent ConsulResolver and // then call s.Dial with the resolver. This is low level, clients should // typically use HTTPClient directly. -func (s *Service) HTTPDialContext(ctx context.Context, network, +func (s *Service) HTTPDialTLS(network, addr string) (net.Conn, error) { - var r ConsulResolver - // TODO(banks): parse addr into ConsulResolver - return s.Dial(ctx, &r) + if s.httpResolverFromAddr == nil { + return nil, errors.New("no http resolver configured") + } + r, err := s.httpResolverFromAddr(addr) + if err != nil { + return nil, err + } + // TODO(banks): figure out how to do timeouts better. + return s.Dial(context.Background(), r) } // HTTPClient returns an *http.Client configured to dial remote Consul Connect @@ -172,14 +190,27 @@ func (s *Service) HTTPDialContext(ctx context.Context, network, // API rather than just relying on Consul DNS. Hostnames that are not valid // Consul DNS queries will fail. func (s *Service) HTTPClient() *http.Client { + t := &http.Transport{ + // Sadly we can't use DialContext hook since that is expected to return a + // plain TCP connection an http.Client tries to start a TLS handshake over + // it. We need to control the handshake to be able to do our validation. + // So we have to use the older DialTLS which means no context/timeout + // support. + // + // TODO(banks): figure out how users can configure a timeout when using + // this and/or compatibility with http.Request.WithContext. 
+ DialTLS: s.HTTPDialTLS, + } + // Need to manually re-enable http2 support since we set custom DialTLS. + // See https://golang.org/src/net/http/transport.go?s=8692:9036#L228 + http2.ConfigureTransport(t) return &http.Client{ - Transport: &http.Transport{ - DialContext: s.HTTPDialContext, - }, + Transport: t, } } // Close stops the service and frees resources. -func (s *Service) Close() { +func (s *Service) Close() error { // TODO(banks): stop background activity if started + return nil } diff --git a/connect/service_test.go b/connect/service_test.go index a2adfe7f1..7bc4c97f2 100644 --- a/connect/service_test.go +++ b/connect/service_test.go @@ -2,14 +2,22 @@ package connect import ( "context" + "crypto/tls" "fmt" + "io" + "io/ioutil" + "net/http" "testing" "time" "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/testutil/retry" "github.com/stretchr/testify/require" ) +// Assert io.Closer implementation +var _ io.Closer = new(Service) + func TestService_Dial(t *testing.T) { ca := connect.TestCA(t, nil) @@ -53,30 +61,26 @@ func TestService_Dial(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - s, err := NewService("web", nil) - require.Nil(err) - - // Force TLSConfig - s.clientTLSCfg = NewReloadableTLSConfig(TestTLSConfig(t, "web", ca)) + s := TestService(t, "web", ca) ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() - testSvc := NewTestService(t, tt.presentService, ca) - testSvc.TimeoutHandshake = !tt.handshake + testSvr := NewTestServer(t, tt.presentService, ca) + testSvr.TimeoutHandshake = !tt.handshake if tt.accept { go func() { - err := testSvc.Serve() + err := testSvr.Serve() require.Nil(err) }() - defer testSvc.Close() + defer testSvr.Close() } // Always expect to be connecting to a "DB" resolver := &StaticResolver{ - Addr: testSvc.Addr, + Addr: testSvr.Addr, CertURI: connect.TestSpiffeIDService(t, "db"), } @@ -92,6 +96,7 @@ func TestService_Dial(t 
*testing.T) { if tt.wantErr == "" { require.Nil(err) + require.IsType(&tls.Conn{}, conn) } else { require.NotNil(err) require.Contains(err.Error(), tt.wantErr) @@ -103,3 +108,62 @@ func TestService_Dial(t *testing.T) { }) } } + +func TestService_ServerTLSConfig(t *testing.T) { + // TODO(banks): it's mostly meaningless to test this now since we directly set + // the tlsCfg in our TestService helper which is all we'd be asserting on here + // not the actual implementation. Once agent tls fetching is built, it becomes + // more meaningful to actually verify it's returning the correct config. +} + +func TestService_HTTPClient(t *testing.T) { + require := require.New(t) + ca := connect.TestCA(t, nil) + + s := TestService(t, "web", ca) + + // Run a test HTTP server + testSvr := NewTestServer(t, "backend", ca) + defer testSvr.Close() + go func() { + err := testSvr.ServeHTTPS(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Hello, I am Backend")) + })) + require.Nil(t, err) + }() + + // TODO(banks): this will talk http2 on both client and server. I hit some + // compatibility issues when testing though need to make sure that the http + // server with our TLSConfig can actually support HTTP/1.1 as well. Could make + // this a table test with all 4 permutations of client/server http version + // support. + + // Still get connection refused some times so retry on those + retry.Run(t, func(r *retry.R) { + // Hook the service resolver to avoid needing full agent setup. + s.httpResolverFromAddr = func(addr string) (Resolver, error) { + // Require in this goroutine seems to block causing a timeout on the Get. 
+ //require.Equal("https://backend.service.consul:443", addr) + return &StaticResolver{ + Addr: testSvr.Addr, + CertURI: connect.TestSpiffeIDService(t, "backend"), + }, nil + } + + client := s.HTTPClient() + client.Timeout = 1 * time.Second + + resp, err := client.Get("https://backend.service.consul/foo") + r.Check(err) + defer resp.Body.Close() + + bodyBytes, err := ioutil.ReadAll(resp.Body) + r.Check(err) + + got := string(bodyBytes) + want := "Hello, I am Backend" + if got != want { + r.Fatalf("got %s, want %s", got, want) + } + }) +} diff --git a/connect/testing.go b/connect/testing.go index f6fa438cf..235ff6001 100644 --- a/connect/testing.go +++ b/connect/testing.go @@ -5,26 +5,33 @@ import ( "crypto/x509" "fmt" "io" + "log" "net" + "net/http" "sync/atomic" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib/freeport" testing "github.com/mitchellh/go-testing-interface" - "github.com/stretchr/testify/require" ) -// testVerifier creates a helper verifyFunc that can be set in a tls.Config and -// records calls made, passing back the certificates presented via the returned -// channel. The channel is buffered so up to 128 verification calls can be made -// without reading the chan before verification blocks. -func testVerifier(t testing.T, returnErr error) (verifyFunc, chan [][]byte) { - ch := make(chan [][]byte, 128) - return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { - ch <- rawCerts - return returnErr - }, ch +// TestService returns a Service instance based on a static TLS Config. 
+func TestService(t testing.T, service string, ca *structs.CARoot) *Service { + t.Helper() + + // Don't need to talk to client since we are setting TLSConfig locally + svc, err := NewService(service, nil) + if err != nil { + t.Fatal(err) + } + + svc.serverTLSCfg = newReloadableTLSConfig( + TestTLSConfigWithVerifier(t, service, ca, serverVerifyCerts)) + svc.clientTLSCfg = newReloadableTLSConfig( + TestTLSConfigWithVerifier(t, service, ca, clientVerifyCerts)) + + return svc } // TestTLSConfig returns a *tls.Config suitable for use during tests. @@ -32,7 +39,16 @@ func TestTLSConfig(t testing.T, service string, ca *structs.CARoot) *tls.Config t.Helper() // Insecure default (nil verifier) - cfg := defaultTLSConfig(nil) + return TestTLSConfigWithVerifier(t, service, ca, nil) +} + +// TestTLSConfigWithVerifier returns a *tls.Config suitable for use during +// tests, it will use the given verifyFunc to verify tls certificates. +func TestTLSConfigWithVerifier(t testing.T, service string, ca *structs.CARoot, + verifier verifyFunc) *tls.Config { + t.Helper() + + cfg := defaultTLSConfig(verifier) cfg.Certificates = []tls.Certificate{TestSvcKeyPair(t, service, ca)} cfg.RootCAs = TestCAPool(t, ca) cfg.ClientCAs = TestCAPool(t, ca) @@ -55,7 +71,9 @@ func TestSvcKeyPair(t testing.T, service string, ca *structs.CARoot) tls.Certifi t.Helper() certPEM, keyPEM := connect.TestLeaf(t, service, ca) cert, err := tls.X509KeyPair([]byte(certPEM), []byte(keyPEM)) - require.Nil(t, err) + if err != nil { + t.Fatal(err) + } return cert } @@ -65,13 +83,15 @@ func TestPeerCertificates(t testing.T, service string, ca *structs.CARoot) []*x5 t.Helper() certPEM, _ := connect.TestLeaf(t, service, ca) cert, err := connect.ParseCert(certPEM) - require.Nil(t, err) + if err != nil { + t.Fatal(err) + } return []*x509.Certificate{cert} } -// TestService runs a service listener that can be used to test clients. It's +// TestServer runs a service listener that can be used to test clients. 
It's // behaviour can be controlled by the struct members. -type TestService struct { +type TestServer struct { // The service name to serve. Service string // The (test) CA to use for generating certs. @@ -91,11 +111,11 @@ type TestService struct { stopChan chan struct{} } -// NewTestService returns a TestService. It should be closed when test is +// NewTestServer returns a TestServer. It should be closed when test is // complete. -func NewTestService(t testing.T, service string, ca *structs.CARoot) *TestService { +func NewTestServer(t testing.T, service string, ca *structs.CARoot) *TestServer { ports := freeport.GetT(t, 1) - return &TestService{ + return &TestServer{ Service: service, CA: ca, stopChan: make(chan struct{}), @@ -104,14 +124,16 @@ func NewTestService(t testing.T, service string, ca *structs.CARoot) *TestServic } } -// Serve runs a TestService and blocks until it is closed or errors. -func (s *TestService) Serve() error { +// Serve runs a tcp echo server and blocks until it is closed or errors. If +// TimeoutHandshake is set it won't start TLS handshake on new connections. +func (s *TestServer) Serve() error { // Just accept TCP conn but so we can control timing of accept/handshake l, err := net.Listen("tcp", s.Addr) if err != nil { return err } s.l = l + log.Printf("test connect service listening on %s", s.Addr) for { conn, err := s.l.Accept() @@ -122,12 +144,14 @@ func (s *TestService) Serve() error { return err } - // Ignore the conn if we are not actively ha + // Ignore the conn if we are not actively handshaking if !s.TimeoutHandshake { // Upgrade conn to TLS conn = tls.Server(conn, s.TLSCfg) // Run an echo service + log.Printf("test connect service accepted conn from %s, "+ + " running echo service", conn.RemoteAddr()) go io.Copy(conn, conn) } @@ -141,8 +165,20 @@ func (s *TestService) Serve() error { return nil } -// Close stops a TestService -func (s *TestService) Close() { +// ServeHTTPS runs an HTTPS server with the given config. 
It invokes the passed +// Handler for all requests. +func (s *TestServer) ServeHTTPS(h http.Handler) error { + srv := http.Server{ + Addr: s.Addr, + TLSConfig: s.TLSCfg, + Handler: h, + } + log.Printf("starting test connect HTTPS server on %s", s.Addr) + return srv.ListenAndServeTLS("", "") +} + +// Close stops a TestServer +func (s *TestServer) Close() error { old := atomic.SwapInt32(&s.stopFlag, 1) if old == 0 { if s.l != nil { @@ -150,4 +186,5 @@ func (s *TestService) Close() { } close(s.stopChan) } + return nil } diff --git a/connect/tls.go b/connect/tls.go index 8d3bc3a94..89d5ccb54 100644 --- a/connect/tls.go +++ b/connect/tls.go @@ -42,27 +42,27 @@ func defaultTLSConfig(verify verifyFunc) *tls.Config { } } -// ReloadableTLSConfig exposes a tls.Config that can have it's certificates +// reloadableTLSConfig exposes a tls.Config that can have it's certificates // reloaded. On a server, this uses GetConfigForClient to pass the current // tls.Config or client certificate for each acceptted connection. On a client, // this uses GetClientCertificate to provide the current client certificate. -type ReloadableTLSConfig struct { +type reloadableTLSConfig struct { mu sync.Mutex // cfg is the current config to use for new connections cfg *tls.Config } -// NewReloadableTLSConfig returns a reloadable config currently set to base. -func NewReloadableTLSConfig(base *tls.Config) *ReloadableTLSConfig { - c := &ReloadableTLSConfig{} +// newReloadableTLSConfig returns a reloadable config currently set to base. +func newReloadableTLSConfig(base *tls.Config) *reloadableTLSConfig { + c := &reloadableTLSConfig{} c.SetTLSConfig(base) return c } // TLSConfig returns a *tls.Config that will dynamically load certs. It's // suitable for use in either a client or server. 
-func (c *ReloadableTLSConfig) TLSConfig() *tls.Config { +func (c *reloadableTLSConfig) TLSConfig() *tls.Config { c.mu.Lock() cfgCopy := c.cfg c.mu.Unlock() @@ -71,7 +71,7 @@ func (c *ReloadableTLSConfig) TLSConfig() *tls.Config { // SetTLSConfig sets the config used for future connections. It is safe to call // from any goroutine. -func (c *ReloadableTLSConfig) SetTLSConfig(cfg *tls.Config) error { +func (c *reloadableTLSConfig) SetTLSConfig(cfg *tls.Config) error { copy := cfg.Clone() copy.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { current := c.TLSConfig() diff --git a/connect/tls_test.go b/connect/tls_test.go index 3605f22db..64c473c1e 100644 --- a/connect/tls_test.go +++ b/connect/tls_test.go @@ -10,10 +10,9 @@ import ( func TestReloadableTLSConfig(t *testing.T) { require := require.New(t) - verify, _ := testVerifier(t, nil) - base := defaultTLSConfig(verify) + base := defaultTLSConfig(nil) - c := NewReloadableTLSConfig(base) + c := newReloadableTLSConfig(base) // The dynamic config should be the one we loaded (with some different hooks) got := c.TLSConfig() From adc5589329a8b3b1809208d18ce6e0b8c002641c Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 5 Apr 2018 12:41:49 +0100 Subject: [PATCH 128/627] Allow duplicate source or destination, but enforce uniqueness across all four. 
--- agent/consul/state/intention.go | 44 ++++++++++++- agent/consul/state/intention_test.go | 96 +++++++++++++++++++++++++--- agent/structs/intention.go | 23 ++++++- agent/structs/intention_test.go | 24 +++++++ 4 files changed, 174 insertions(+), 13 deletions(-) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index bc8bb0213..907bdf1ab 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -29,7 +29,9 @@ func intentionsTableSchema() *memdb.TableSchema { "destination": &memdb.IndexSchema{ Name: "destination", AllowMissing: true, - Unique: true, + // This index is not unique since we need uniqueness across the whole + // 4-tuple. + Unique: false, Indexer: &memdb.CompoundIndex{ Indexes: []memdb.Indexer{ &memdb.StringFieldIndex{ @@ -46,6 +48,25 @@ func intentionsTableSchema() *memdb.TableSchema { "source": &memdb.IndexSchema{ Name: "source", AllowMissing: true, + // This index is not unique since we need uniqueness across the whole + // 4-tuple. 
+ Unique: false, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "SourceNS", + Lowercase: true, + }, + &memdb.StringFieldIndex{ + Field: "SourceName", + Lowercase: true, + }, + }, + }, + }, + "source_destination": &memdb.IndexSchema{ + Name: "source_destination", + AllowMissing: true, Unique: true, Indexer: &memdb.CompoundIndex{ Indexes: []memdb.Indexer{ @@ -57,6 +78,14 @@ func intentionsTableSchema() *memdb.TableSchema { Field: "SourceName", Lowercase: true, }, + &memdb.StringFieldIndex{ + Field: "DestinationNS", + Lowercase: true, + }, + &memdb.StringFieldIndex{ + Field: "DestinationName", + Lowercase: true, + }, }, }, }, @@ -142,7 +171,7 @@ func (s *Store) intentionSetTxn(tx *memdb.Txn, idx uint64, ixn *structs.Intentio // Check for an existing intention existing, err := tx.First(intentionsTableName, "id", ixn.ID) if err != nil { - return fmt.Errorf("failed intention looup: %s", err) + return fmt.Errorf("failed intention lookup: %s", err) } if existing != nil { oldIxn := existing.(*structs.Intention) @@ -153,6 +182,17 @@ func (s *Store) intentionSetTxn(tx *memdb.Txn, idx uint64, ixn *structs.Intentio } ixn.ModifyIndex = idx + // Check for duplicates on the 4-tuple. + duplicate, err := tx.First(intentionsTableName, "source_destination", + ixn.SourceNS, ixn.SourceName, ixn.DestinationNS, ixn.DestinationName) + if err != nil { + return fmt.Errorf("failed intention lookup: %s", err) + } + if duplicate != nil { + dupIxn := duplicate.(*structs.Intention) + return fmt.Errorf("duplicate intention found: %s", dupIxn.String()) + } + // We always force meta to be non-nil so that we its an empty map. // This makes it easy for API responses to not nil-check this everywhere. 
if ixn.Meta == nil { diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index d4c63647a..743f698af 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestStore_IntentionGet_none(t *testing.T) { @@ -32,21 +33,29 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { // Build a valid intention ixn := &structs.Intention{ - ID: testUUID(), - Meta: map[string]string{}, + ID: testUUID(), + SourceNS: "default", + SourceName: "*", + DestinationNS: "default", + DestinationName: "web", + Meta: map[string]string{}, } // Inserting a with empty ID is disallowed. assert.Nil(s.IntentionSet(1, ixn)) // Make sure the index got updated. - assert.Equal(s.maxIndex(intentionsTableName), uint64(1)) + assert.Equal(uint64(1), s.maxIndex(intentionsTableName)) assert.True(watchFired(ws), "watch fired") // Read it back out and verify it. expected := &structs.Intention{ - ID: ixn.ID, - Meta: map[string]string{}, + ID: ixn.ID, + SourceNS: "default", + SourceName: "*", + DestinationNS: "default", + DestinationName: "web", + Meta: map[string]string{}, RaftIndex: structs.RaftIndex{ CreateIndex: 1, ModifyIndex: 1, @@ -64,7 +73,7 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { assert.Nil(s.IntentionSet(2, ixn)) // Make sure the index got updated. 
- assert.Equal(s.maxIndex(intentionsTableName), uint64(2)) + assert.Equal(uint64(2), s.maxIndex(intentionsTableName)) assert.True(watchFired(ws), "watch fired") // Read it back and verify the data was updated @@ -75,6 +84,24 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { assert.Nil(err) assert.Equal(expected.ModifyIndex, idx) assert.Equal(expected, actual) + + // Attempt to insert another intention with duplicate 4-tuple + ixn = &structs.Intention{ + ID: testUUID(), + SourceNS: "default", + SourceName: "*", + DestinationNS: "default", + DestinationName: "web", + Meta: map[string]string{}, + } + + // Duplicate 4-tuple should cause an error + ws = memdb.NewWatchSet() + assert.NotNil(s.IntentionSet(3, ixn)) + + // Make sure the index did NOT get updated. + assert.Equal(uint64(2), s.maxIndex(intentionsTableName)) + assert.False(watchFired(ws), "watch not fired") } func TestStore_IntentionSet_emptyId(t *testing.T) { @@ -305,6 +332,31 @@ func TestStore_IntentionMatch_table(t *testing.T) { }, }, }, + + { + "single exact namespace/name with duplicate destinations", + [][]string{ + // 4-tuple specifies src and destination to test duplicate destinations + // with different sources. We flip them around to test in both + // directions. The first pair are the ones searched on in both cases so + // the duplicates need to be there. + {"foo", "bar", "foo", "*"}, + {"foo", "bar", "bar", "*"}, + {"*", "*", "*", "*"}, + }, + [][]string{ + {"foo", "bar"}, + }, + [][][]string{ + { + // Note the first two have the same precedence so we rely on arbitrary + // lexicographical tie-break behaviour. 
+ {"foo", "bar", "bar", "*"}, + {"foo", "bar", "foo", "*"}, + {"*", "*", "*", "*"}, + }, + }, + }, } // testRunner implements the test for a single case, but can be @@ -321,9 +373,17 @@ func TestStore_IntentionMatch_table(t *testing.T) { case structs.IntentionMatchDestination: ixn.DestinationNS = v[0] ixn.DestinationName = v[1] + if len(v) == 4 { + ixn.SourceNS = v[2] + ixn.SourceName = v[3] + } case structs.IntentionMatchSource: ixn.SourceNS = v[0] ixn.SourceName = v[1] + if len(v) == 4 { + ixn.DestinationNS = v[2] + ixn.DestinationName = v[3] + } } assert.Nil(s.IntentionSet(idx, ixn)) @@ -345,7 +405,7 @@ func TestStore_IntentionMatch_table(t *testing.T) { assert.Nil(err) // Should have equal lengths - assert.Len(matches, len(tc.Expected)) + require.Len(t, matches, len(tc.Expected)) // Verify matches for i, expected := range tc.Expected { @@ -353,9 +413,27 @@ func TestStore_IntentionMatch_table(t *testing.T) { for _, ixn := range matches[i] { switch typ { case structs.IntentionMatchDestination: - actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) + if len(expected) > 1 && len(expected[0]) == 4 { + actual = append(actual, []string{ + ixn.DestinationNS, + ixn.DestinationName, + ixn.SourceNS, + ixn.SourceName, + }) + } else { + actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) + } case structs.IntentionMatchSource: - actual = append(actual, []string{ixn.SourceNS, ixn.SourceName}) + if len(expected) > 1 && len(expected[0]) == 4 { + actual = append(actual, []string{ + ixn.SourceNS, + ixn.SourceName, + ixn.DestinationNS, + ixn.DestinationName, + }) + } else { + actual = append(actual, []string{ixn.SourceNS, ixn.SourceName}) + } } } diff --git a/agent/structs/intention.go b/agent/structs/intention.go index d801635c9..316c9632b 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -166,7 +166,7 @@ func (x *Intention) GetACLPrefix() (string, bool) { // String returns a human-friendly string for this 
intention. func (x *Intention) String() string { - return fmt.Sprintf("%s %s/%s => %s/%s (ID: %s", + return fmt.Sprintf("%s %s/%s => %s/%s (ID: %s)", strings.ToUpper(string(x.Action)), x.SourceNS, x.SourceName, x.DestinationNS, x.DestinationName, @@ -305,7 +305,26 @@ func (s IntentionPrecedenceSorter) Less(i, j int) bool { // Next test the # of exact values in source aExact = s.countExact(a.SourceNS, a.SourceName) bExact = s.countExact(b.SourceNS, b.SourceName) - return aExact > bExact + if aExact != bExact { + return aExact > bExact + } + + // Tie break on lexicographic order of the 4-tuple in canonical form (SrcNS, + // Src, DstNS, Dst). This is arbitrary but it keeps sorting deterministic + // which is a nice property for consistency. It is arguably open to abuse if + // implementations rely on this however by definition the order among + // same-precedence rules is arbitrary and doesn't affect whether an allow or + // deny rule is acted on since all applicable rules are checked. + if a.SourceNS != b.SourceNS { + return a.SourceNS < b.SourceNS + } + if a.SourceName != b.SourceName { + return a.SourceName < b.SourceName + } + if a.DestinationNS != b.DestinationNS { + return a.DestinationNS < b.DestinationNS + } + return a.DestinationName < b.DestinationName } // countExact counts the number of exact values (not wildcards) in diff --git a/agent/structs/intention_test.go b/agent/structs/intention_test.go index 948ae920e..cda88632f 100644 --- a/agent/structs/intention_test.go +++ b/agent/structs/intention_test.go @@ -192,6 +192,30 @@ func TestIntentionPrecedenceSorter(t *testing.T) { {"*", "*", "*", "*"}, }, }, + { + "tiebreak deterministically", + [][]string{ + {"a", "*", "a", "b"}, + {"a", "*", "a", "a"}, + {"b", "a", "a", "a"}, + {"a", "b", "a", "a"}, + {"a", "a", "b", "a"}, + {"a", "a", "a", "b"}, + {"a", "a", "a", "a"}, + }, + [][]string{ + // Exact matches first in lexicographical order (arbitrary but + // deterministic) + {"a", "a", "a", "a"}, + {"a", "a", 
"a", "b"}, + {"a", "a", "b", "a"}, + {"a", "b", "a", "a"}, + {"b", "a", "a", "a"}, + // Wildcards next, lexicographical + {"a", "*", "a", "a"}, + {"a", "*", "a", "b"}, + }, + }, } for _, tc := range cases { From 280382c25fa13632626e6e798f4c9054150cadea Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 5 Apr 2018 12:53:42 +0100 Subject: [PATCH 129/627] Add tests all the way up through the endpoints to ensure duplicate src/destination is supported and so ultimately deny/allow nesting works. Also adds a sanity check test for `api.Agent().ConnectAuthorize()` and a fix for a trivial bug in it. --- agent/agent_endpoint_test.go | 78 +++++++++++++++++++++++++ agent/consul/intention_endpoint_test.go | 35 +++++++---- agent/intentions_endpoint_test.go | 33 +++++++---- api/agent.go | 2 +- api/agent_test.go | 23 ++++++++ 5 files changed, 148 insertions(+), 23 deletions(-) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 1b017fa78..1c6f7d830 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2293,6 +2293,84 @@ func TestAgentConnectAuthorize_deny(t *testing.T) { assert.Contains(obj.Reason, "Matched") } +func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + target := "db" + + // Create some intentions + { + // Deny wildcard to DB + req := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + req.Intention.SourceNS = structs.IntentionDefaultNamespace + req.Intention.SourceName = "*" + req.Intention.DestinationNS = structs.IntentionDefaultNamespace + req.Intention.DestinationName = target + req.Intention.Action = structs.IntentionActionDeny + + var reply string + assert.Nil(a.RPC("Intention.Apply", &req, &reply)) + } + { + // Allow web to DB + req := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: 
structs.TestIntention(t), + } + req.Intention.SourceNS = structs.IntentionDefaultNamespace + req.Intention.SourceName = "web" + req.Intention.DestinationNS = structs.IntentionDefaultNamespace + req.Intention.DestinationName = target + req.Intention.Action = structs.IntentionActionAllow + + var reply string + assert.Nil(a.RPC("Intention.Apply", &req, &reply)) + } + + // Web should be allowed + { + args := &structs.ConnectAuthorizeRequest{ + Target: target, + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), + } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) + assert.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + assert.True(obj.Authorized) + assert.Contains(obj.Reason, "Matched") + } + + // API should be denied + { + args := &structs.ConnectAuthorizeRequest{ + Target: target, + ClientCertURI: connect.TestSpiffeIDService(t, "api").URI().String(), + } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) + assert.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + assert.False(obj.Authorized) + assert.Contains(obj.Reason, "Matched") + } +} + // Test that authorize fails without service:write for the target service. 
func TestAgentConnectAuthorize_serviceWrite(t *testing.T) { t.Parallel() diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index a1e1ae751..dfac4fc45 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -830,12 +830,13 @@ func TestIntentionMatch_good(t *testing.T) { // Create some records { insert := [][]string{ - {"foo", "*"}, - {"foo", "bar"}, - {"foo", "baz"}, // shouldn't match - {"bar", "bar"}, // shouldn't match - {"bar", "*"}, // shouldn't match - {"*", "*"}, + {"foo", "*", "foo", "*"}, + {"foo", "*", "foo", "bar"}, + {"foo", "*", "foo", "baz"}, // shouldn't match + {"foo", "*", "bar", "bar"}, // shouldn't match + {"foo", "*", "bar", "*"}, // shouldn't match + {"foo", "*", "*", "*"}, + {"bar", "*", "foo", "bar"}, // duplicate destination different source } for _, v := range insert { @@ -843,10 +844,10 @@ func TestIntentionMatch_good(t *testing.T) { Datacenter: "dc1", Op: structs.IntentionOpCreate, Intention: &structs.Intention{ - SourceNS: "default", - SourceName: "test", - DestinationNS: v[0], - DestinationName: v[1], + SourceNS: v[0], + SourceName: v[1], + DestinationNS: v[2], + DestinationName: v[3], Action: structs.IntentionActionAllow, }, } @@ -874,10 +875,20 @@ func TestIntentionMatch_good(t *testing.T) { assert.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Match", req, &resp)) assert.Len(resp.Matches, 1) - expected := [][]string{{"foo", "bar"}, {"foo", "*"}, {"*", "*"}} + expected := [][]string{ + {"bar", "*", "foo", "bar"}, + {"foo", "*", "foo", "bar"}, + {"foo", "*", "foo", "*"}, + {"foo", "*", "*", "*"}, + } var actual [][]string for _, ixn := range resp.Matches[0] { - actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) + actual = append(actual, []string{ + ixn.SourceNS, + ixn.SourceName, + ixn.DestinationNS, + ixn.DestinationName, + }) } assert.Equal(expected, actual) } diff --git a/agent/intentions_endpoint_test.go 
b/agent/intentions_endpoint_test.go index 4df0bf312..d4d68f26c 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -74,12 +74,13 @@ func TestIntentionsMatch_basic(t *testing.T) { // Create some intentions { insert := [][]string{ - {"foo", "*"}, - {"foo", "bar"}, - {"foo", "baz"}, // shouldn't match - {"bar", "bar"}, // shouldn't match - {"bar", "*"}, // shouldn't match - {"*", "*"}, + {"foo", "*", "foo", "*"}, + {"foo", "*", "foo", "bar"}, + {"foo", "*", "foo", "baz"}, // shouldn't match + {"foo", "*", "bar", "bar"}, // shouldn't match + {"foo", "*", "bar", "*"}, // shouldn't match + {"foo", "*", "*", "*"}, + {"bar", "*", "foo", "bar"}, // duplicate destination different source } for _, v := range insert { @@ -88,8 +89,10 @@ func TestIntentionsMatch_basic(t *testing.T) { Op: structs.IntentionOpCreate, Intention: structs.TestIntention(t), } - ixn.Intention.DestinationNS = v[0] - ixn.Intention.DestinationName = v[1] + ixn.Intention.SourceNS = v[0] + ixn.Intention.SourceName = v[1] + ixn.Intention.DestinationNS = v[2] + ixn.Intention.DestinationName = v[3] // Create var reply string @@ -108,9 +111,19 @@ func TestIntentionsMatch_basic(t *testing.T) { assert.Len(value, 1) var actual [][]string - expected := [][]string{{"foo", "bar"}, {"foo", "*"}, {"*", "*"}} + expected := [][]string{ + {"bar", "*", "foo", "bar"}, + {"foo", "*", "foo", "bar"}, + {"foo", "*", "foo", "*"}, + {"foo", "*", "*", "*"}, + } for _, ixn := range value["foo/bar"] { - actual = append(actual, []string{ixn.DestinationNS, ixn.DestinationName}) + actual = append(actual, []string{ + ixn.SourceNS, + ixn.SourceName, + ixn.DestinationNS, + ixn.DestinationName, + }) } assert.Equal(expected, actual) diff --git a/api/agent.go b/api/agent.go index 50d334d71..6b662fa2c 100644 --- a/api/agent.go +++ b/api/agent.go @@ -530,7 +530,7 @@ func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, e if err != nil { return nil, err } - resp.Body.Close() + defer 
resp.Body.Close() var out AgentAuthorize if err := decodeBody(resp, &out); err != nil { diff --git a/api/agent_test.go b/api/agent_test.go index 653512be9..6186bffe3 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -996,3 +996,26 @@ func TestAPI_AgentConnectCARoots_empty(t *testing.T) { require.Equal(uint64(0), meta.LastIndex) require.Len(list.Roots, 0) } + +// TODO(banks): once we have CA stuff setup properly we can probably make this +// much more complete. This is just a sanity check that the agent code basically +// works. +func TestAPI_AgentConnectAuthorize(t *testing.T) { + t.Parallel() + + require := require.New(t) + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + params := &AgentAuthorizeParams{ + Target: "foo", + ClientCertSerial: "fake", + // Importing connect.TestSpiffeIDService creates an import cycle + ClientCertURI: "spiffe://123.consul/ns/default/dc/ny1/svc/web", + } + auth, err := agent.ConnectAuthorize(params) + require.Nil(err) + require.True(auth.Authorized) + require.Equal(auth.Reason, "ACLs disabled, access is allowed by default") +} From 78e48fd547a5e4b4abffd6e665de29c4ae492a0c Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Mon, 16 Apr 2018 16:00:20 +0100 Subject: [PATCH 130/627] Added connect proxy config and local agent state setup on boot. 
--- agent/agent.go | 79 +++++++++++++++ agent/agent_test.go | 102 +++++++++++++++++++ agent/config/builder.go | 82 +++++++++++++++ agent/config/config.go | 43 ++++++++ agent/config/runtime.go | 35 +++++++ agent/config/runtime_test.go | 79 ++++++++++++++- agent/local/state.go | 181 ++++++++++++++++++++++++++++++++-- agent/local/state_test.go | 129 ++++++++++++++++++++++++ agent/structs/connect.go | 76 ++++++++++++++ agent/structs/connect_test.go | 115 +++++++++++++++++++++ 10 files changed, 911 insertions(+), 10 deletions(-) create mode 100644 agent/structs/connect_test.go diff --git a/agent/agent.go b/agent/agent.go index 4410ff293..b988029ce 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -246,6 +246,8 @@ func LocalConfig(cfg *config.RuntimeConfig) local.Config { NodeID: cfg.NodeID, NodeName: cfg.NodeName, TaggedAddresses: map[string]string{}, + ProxyBindMinPort: cfg.ConnectProxyBindMinPort, + ProxyBindMaxPort: cfg.ConnectProxyBindMaxPort, } for k, v := range cfg.TaggedAddresses { lc.TaggedAddresses[k] = v @@ -328,6 +330,9 @@ func (a *Agent) Start() error { if err := a.loadServices(c); err != nil { return err } + if err := a.loadProxies(c); err != nil { + return err + } if err := a.loadChecks(c); err != nil { return err } @@ -1973,6 +1978,58 @@ func (a *Agent) RemoveCheck(checkID types.CheckID, persist bool) error { return nil } +// AddProxy adds a new local Connect Proxy instance to be managed by the agent. +// +// It REQUIRES that the service that is being proxied is already present in the +// local state. Note that this is only used for agent-managed proxies so we can +// ensure that we always make this true. 
For externally managed and registered +// proxies we explicitly allow the proxy to be registered first to make +// bootstrap ordering of a new service simpler but the same is not true here +// since this is only ever called when setting up a _managed_ proxy which was +// registered as part of a service registration either from config or HTTP API +// call. +func (a *Agent) AddProxy(proxy *structs.ConnectManagedProxy, persist bool) error { + // Lookup the target service token in state if there is one. + token := a.State.ServiceToken(proxy.TargetServiceID) + + // Add the proxy to local state first since we may need to assign a port which + // needs to be coordinate under state lock. AddProxy will generate the + // NodeService for the proxy populated with the allocated (or configured) port + // and an ID, but it doesn't add it to the agent directly since that could + // deadlock and we may need to coordinate adding it and persisting etc. + proxyService, err := a.State.AddProxy(proxy, token) + if err != nil { + return err + } + + // TODO(banks): register proxy health checks. + err = a.AddService(proxyService, nil, persist, token) + if err != nil { + // Remove the state too + a.State.RemoveProxy(proxyService.ID) + return err + } + + // TODO(banks): persist some of the local proxy state (not the _proxy_ token). + return nil +} + +// RemoveProxy stops and removes a local proxy instance. 
+func (a *Agent) RemoveProxy(proxyID string, persist bool) error { + // Validate proxyID + if proxyID == "" { + return fmt.Errorf("proxyID missing") + } + + if err := a.State.RemoveProxy(proxyID); err != nil { + return err + } + + // TODO(banks): unpersist proxy + + return nil +} + func (a *Agent) cancelCheckMonitors(checkID types.CheckID) { // Stop any monitors delete(a.checkReapAfter, checkID) @@ -2366,6 +2423,25 @@ func (a *Agent) unloadChecks() error { return nil } +// loadProxies will load connect proxy definitions from configuration and +// persisted definitions on disk, and load them into the local agent. +func (a *Agent) loadProxies(conf *config.RuntimeConfig) error { + for _, proxy := range conf.ConnectProxies { + if err := a.AddProxy(proxy, false); err != nil { + return fmt.Errorf("failed adding proxy: %s", err) + } + } + + // TODO(banks): persist proxy state and re-load it here? + return nil +} + +// unloadProxies will deregister all proxies known to the local agent. +func (a *Agent) unloadProxies() error { + // TODO(banks): implement me + return nil +} + // snapshotCheckState is used to snapshot the current state of the health // checks. This is done before we reload our checks, so that we can properly // restore into the same state. 
@@ -2514,6 +2590,9 @@ func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error { if err := a.loadServices(newCfg); err != nil { return fmt.Errorf("Failed reloading services: %s", err) } + if err := a.loadProxies(newCfg); err != nil { + return fmt.Errorf("Failed reloading proxies: %s", err) + } if err := a.loadChecks(newCfg); err != nil { return fmt.Errorf("Failed reloading checks: %s", err) } diff --git a/agent/agent_test.go b/agent/agent_test.go index df1593bd9..2ee42d7db 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -15,6 +15,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" @@ -2235,3 +2237,103 @@ func TestAgent_reloadWatchesHTTPS(t *testing.T) { t.Fatalf("bad: %s", err) } } + +func TestAgent_AddProxy(t *testing.T) { + t.Parallel() + a := NewTestAgent(t.Name(), ` + node_name = "node1" + `) + defer a.Shutdown() + + // Register a target service we can use + reg := &structs.NodeService{ + Service: "web", + Port: 8080, + } + require.NoError(t, a.AddService(reg, nil, false, "")) + + tests := []struct { + desc string + proxy *structs.ConnectManagedProxy + wantErr bool + }{ + { + desc: "basic proxy adding, unregistered service", + proxy: &structs.ConnectManagedProxy{ + ExecMode: structs.ProxyExecModeDaemon, + Command: "consul connect proxy", + Config: map[string]interface{}{ + "foo": "bar", + }, + TargetServiceID: "db", // non-existent service. + }, + // Target service must be registered. 
+ wantErr: true, + }, + { + desc: "basic proxy adding, unregistered service", + proxy: &structs.ConnectManagedProxy{ + ExecMode: structs.ProxyExecModeDaemon, + Command: "consul connect proxy", + Config: map[string]interface{}{ + "foo": "bar", + }, + TargetServiceID: "web", + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.desc, func(t *testing.T) { + require := require.New(t) + + err := a.AddProxy(tt.proxy, false) + if tt.wantErr { + require.Error(err) + return + } + require.NoError(err) + + // Test the ID was created as we expect. + got := a.State.Proxy("web-proxy") + require.Equal(tt.proxy, got) + }) + } +} + +func TestAgent_RemoveProxy(t *testing.T) { + t.Parallel() + a := NewTestAgent(t.Name(), ` + node_name = "node1" + `) + defer a.Shutdown() + require := require.New(t) + + // Register a target service we can use + reg := &structs.NodeService{ + Service: "web", + Port: 8080, + } + require.NoError(a.AddService(reg, nil, false, "")) + + // Add a proxy for web + pReg := &structs.ConnectManagedProxy{ + TargetServiceID: "web", + } + require.NoError(a.AddProxy(pReg, false)) + + // Test the ID was created as we expect. 
+ gotProxy := a.State.Proxy("web-proxy") + require.Equal(pReg, gotProxy) + + err := a.RemoveProxy("web-proxy", false) + require.NoError(err) + + gotProxy = a.State.Proxy("web-proxy") + require.Nil(gotProxy) + + // Removing invalid proxy should be an error + err = a.RemoveProxy("foobar", false) + require.Error(err) +} diff --git a/agent/config/builder.go b/agent/config/builder.go index 6048dab92..a6338ae14 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -322,8 +322,15 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { } var services []*structs.ServiceDefinition + var proxies []*structs.ConnectManagedProxy for _, service := range c.Services { services = append(services, b.serviceVal(&service)) + // Register any connect proxies requested + if proxy := b.connectManagedProxyVal(&service); proxy != nil { + proxies = append(proxies, proxy) + } + // TODO(banks): support connect-native registrations (v.Connect.Enabled == + // true) } if c.Service != nil { services = append(services, b.serviceVal(c.Service)) @@ -520,6 +527,9 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { consulRaftHeartbeatTimeout := b.durationVal("consul.raft.heartbeat_timeout", c.Consul.Raft.HeartbeatTimeout) * time.Duration(performanceRaftMultiplier) consulRaftLeaderLeaseTimeout := b.durationVal("consul.raft.leader_lease_timeout", c.Consul.Raft.LeaderLeaseTimeout) * time.Duration(performanceRaftMultiplier) + // Connect proxy defaults. 
+ proxyBindMinPort, proxyBindMaxPort := b.connectProxyPortRange(c.Connect) + // ---------------------------------------------------------------- // build runtime config // @@ -638,6 +648,9 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { CheckUpdateInterval: b.durationVal("check_update_interval", c.CheckUpdateInterval), Checks: checks, ClientAddrs: clientAddrs, + ConnectProxies: proxies, + ConnectProxyBindMinPort: proxyBindMinPort, + ConnectProxyBindMaxPort: proxyBindMaxPort, DataDir: b.stringVal(c.DataDir), Datacenter: strings.ToLower(b.stringVal(c.Datacenter)), DevMode: b.boolVal(b.Flags.DevMode), @@ -1010,6 +1023,75 @@ func (b *Builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition { } } +func (b *Builder) connectManagedProxyVal(v *ServiceDefinition) *structs.ConnectManagedProxy { + if v.Connect == nil || v.Connect.Proxy == nil { + return nil + } + + p := v.Connect.Proxy + + targetID := b.stringVal(v.ID) + if targetID == "" { + targetID = b.stringVal(v.Name) + } + + execMode := structs.ProxyExecModeDaemon + if p.ExecMode != nil { + switch *p.ExecMode { + case "daemon": + execMode = structs.ProxyExecModeDaemon + case "script": + execMode = structs.ProxyExecModeScript + default: + b.err = multierror.Append(fmt.Errorf( + "service[%s]: invalid connect proxy exec_mode: %s", targetID, + *p.ExecMode)) + return nil + } + } + + return &structs.ConnectManagedProxy{ + ExecMode: execMode, + Command: b.stringVal(p.Command), + Config: p.Config, + // ProxyService will be setup when the agent registers the configured + // proxies and starts them etc. We could do it here but we may need to do + // things like probe the OS for a free port etc. And we have enough info to + // resolve all this later. + ProxyService: nil, + TargetServiceID: targetID, + } +} + +func (b *Builder) connectProxyPortRange(v *Connect) (int, int) { + // Choose this default range just because. 
There are zero "safe" ranges that + // don't have something somewhere that uses them which is why this is + // configurable. We rely on the host not having any of these ports for non + // agent managed proxies. I went with 20k because I know of at least one + // super-common server memcached that defaults to the 10k range. + start := 20000 + end := 20256 // 256 proxies on a host is enough for anyone ;) + + if v == nil || v.ProxyDefaults == nil { + return start, end + } + + min, max := v.ProxyDefaults.BindMinPort, v.ProxyDefaults.BindMaxPort + if min == nil && max == nil { + return start, end + } + + // If either was set show a warning if the overall range was invalid + if min == nil || max == nil || *max < *min { + b.warn("Connect proxy_defaults bind_min_port and bind_max_port must both "+ + "be set with max >= min. To disable automatic port allocation set both "+ + "to 0. Using default range %d..%d.", start, end) + return start, end + } + + return *min, *max +} + func (b *Builder) boolVal(v *bool) bool { if v == nil { return false diff --git a/agent/config/config.go b/agent/config/config.go index 79d274d0d..f652c9076 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -159,6 +159,7 @@ type Config struct { CheckUpdateInterval *string `json:"check_update_interval,omitempty" hcl:"check_update_interval" mapstructure:"check_update_interval"` Checks []CheckDefinition `json:"checks,omitempty" hcl:"checks" mapstructure:"checks"` ClientAddr *string `json:"client_addr,omitempty" hcl:"client_addr" mapstructure:"client_addr"` + Connect *Connect `json:"connect,omitempty" hcl:"connect" mapstructure:"connect"` DNS DNS `json:"dns_config,omitempty" hcl:"dns_config" mapstructure:"dns_config"` DNSDomain *string `json:"domain,omitempty" hcl:"domain" mapstructure:"domain"` DNSRecursors []string `json:"recursors,omitempty" hcl:"recursors" mapstructure:"recursors"` @@ -324,6 +325,7 @@ type ServiceDefinition struct { Checks []CheckDefinition `json:"checks,omitempty" 
hcl:"checks" mapstructure:"checks"` Token *string `json:"token,omitempty" hcl:"token" mapstructure:"token"` EnableTagOverride *bool `json:"enable_tag_override,omitempty" hcl:"enable_tag_override" mapstructure:"enable_tag_override"` + Connect *ServiceConnect `json:"connect,omitempty" hcl:"connect" mapstructure:"connect"` } type CheckDefinition struct { @@ -349,6 +351,47 @@ type CheckDefinition struct { DeregisterCriticalServiceAfter *string `json:"deregister_critical_service_after,omitempty" hcl:"deregister_critical_service_after" mapstructure:"deregister_critical_service_after"` } +// ServiceConnect is the connect block within a service registration +type ServiceConnect struct { + // TODO(banks) add way to specify that the app is connect-native + // Proxy configures a connect proxy instance for the service + Proxy *ServiceConnectProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"` +} + +type ServiceConnectProxy struct { + Command *string `json:"command,omitempty" hcl:"command" mapstructure:"command"` + ExecMode *string `json:"exec_mode,omitempty" hcl:"exec_mode" mapstructure:"exec_mode"` + Config map[string]interface{} `json:"config,omitempty" hcl:"config" mapstructure:"config"` +} + +// Connect is the agent-global connect configuration. +type Connect struct { + // Enabled opts the agent into connect. It should be set on all clients and + // servers in a cluster for correct connect operation. TODO(banks) review that. + Enabled bool `json:"enabled,omitempty" hcl:"enabled" mapstructure:"enabled"` + ProxyDefaults *ConnectProxyDefaults `json:"proxy_defaults,omitempty" hcl:"proxy_defaults" mapstructure:"proxy_defaults"` +} + +// ConnectProxyDefaults is the agent-global connect proxy configuration. +type ConnectProxyDefaults struct { + // BindMinPort, BindMaxPort are the inclusive lower and upper bounds on the + // port range allocated to the agent to assign to connect proxies that have no + // bind_port specified. 
+ BindMinPort *int `json:"bind_min_port,omitempty" hcl:"bind_min_port" mapstructure:"bind_min_port"` + BindMaxPort *int `json:"bind_max_port,omitempty" hcl:"bind_max_port" mapstructure:"bind_max_port"` + // ExecMode is used where a registration doesn't include an exec_mode. + // Defaults to daemon. + ExecMode *string `json:"exec_mode,omitempty" hcl:"exec_mode" mapstructure:"exec_mode"` + // DaemonCommand is used to start proxy in exec_mode = daemon if not specified + // at registration time. + DaemonCommand *string `json:"daemon_command,omitempty" hcl:"daemon_command" mapstructure:"daemon_command"` + // ScriptCommand is used to start proxy in exec_mode = script if not specified + // at registration time. + ScriptCommand *string `json:"script_command,omitempty" hcl:"script_command" mapstructure:"script_command"` + // Config is merged into an Config specified at registration time. + Config map[string]interface{} `json:"config,omitempty" hcl:"config" mapstructure:"config"` +} + type DNS struct { AllowStale *bool `json:"allow_stale,omitempty" hcl:"allow_stale" mapstructure:"allow_stale"` ARecordLimit *int `json:"a_record_limit,omitempty" hcl:"a_record_limit" mapstructure:"a_record_limit"` diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 66e7e79e7..55c15d14e 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -616,6 +616,41 @@ type RuntimeConfig struct { // flag: -client string ClientAddrs []*net.IPAddr + // ConnectEnabled opts the agent into connect. It should be set on all clients + // and servers in a cluster for correct connect operation. TODO(banks) review + // that. + ConnectEnabled bool + + // ConnectProxies is a list of configured proxies taken from the "connect" + // block of service registrations. + ConnectProxies []*structs.ConnectManagedProxy + + // ConnectProxyBindMinPort is the inclusive start of the range of ports + // allocated to the agent for starting proxy listeners on where no explicit + // port is specified. 
+ ConnectProxyBindMinPort int + + // ConnectProxyBindMaxPort is the inclusive end of the range of ports + // allocated to the agent for starting proxy listeners on where no explicit + // port is specified. + ConnectProxyBindMaxPort int + + // ConnectProxyDefaultExecMode is used where a registration doesn't include an + // exec_mode. Defaults to daemon. + ConnectProxyDefaultExecMode *string + + // ConnectProxyDefaultDaemonCommand is used to start proxy in exec_mode = + // daemon if not specified at registration time. + ConnectProxyDefaultDaemonCommand *string + + // ConnectProxyDefaultScriptCommand is used to start proxy in exec_mode = + // script if not specified at registration time. + ConnectProxyDefaultScriptCommand *string + + // ConnectProxyDefaultConfig is merged with any config specified at + // registration time to allow global control of defaults. + ConnectProxyDefaultConfig map[string]interface{} + // DNSAddrs contains the list of TCP and UDP addresses the DNS server will // bind to. If the DNS endpoint is disabled (ports.dns <= 0) the list is // empty. 
diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 060215c35..e990f0689 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -2353,6 +2353,21 @@ func TestFullConfig(t *testing.T) { ], "check_update_interval": "16507s", "client_addr": "93.83.18.19", + "connect": { + "enabled": true, + "proxy_defaults": { + "bind_min_port": 2000, + "bind_max_port": 3000, + "exec_mode": "script", + "daemon_command": "consul connect proxy", + "script_command": "proxyctl.sh", + "config": { + "foo": "bar", + "connect_timeout_ms": 1000, + "pedantic_mode": true + } + } + }, "data_dir": "` + dataDir + `", "datacenter": "rzo029wg", "disable_anonymous_signature": true, @@ -2613,7 +2628,16 @@ func TestFullConfig(t *testing.T) { "ttl": "11222s", "deregister_critical_service_after": "68482s" } - ] + ], + "connect": { + "proxy": { + "exec_mode": "daemon", + "command": "awesome-proxy", + "config": { + "foo": "qux" + } + } + } } ], "session_ttl_min": "26627s", @@ -2786,6 +2810,21 @@ func TestFullConfig(t *testing.T) { ] check_update_interval = "16507s" client_addr = "93.83.18.19" + connect { + enabled = true + proxy_defaults { + bind_min_port = 2000 + bind_max_port = 3000 + exec_mode = "script" + daemon_command = "consul connect proxy" + script_command = "proxyctl.sh" + config = { + foo = "bar" + connect_timeout_ms = 1000 + pedantic_mode = true + } + } + } data_dir = "` + dataDir + `" datacenter = "rzo029wg" disable_anonymous_signature = true @@ -3047,6 +3086,15 @@ func TestFullConfig(t *testing.T) { deregister_critical_service_after = "68482s" } ] + connect { + proxy { + exec_mode = "daemon" + command = "awesome-proxy" + config = { + foo = "qux" + } + } + } } ] session_ttl_min = "26627s" @@ -3355,8 +3403,23 @@ func TestFullConfig(t *testing.T) { DeregisterCriticalServiceAfter: 13209 * time.Second, }, }, - CheckUpdateInterval: 16507 * time.Second, - ClientAddrs: []*net.IPAddr{ipAddr("93.83.18.19")}, + CheckUpdateInterval: 16507 * time.Second, + 
ClientAddrs: []*net.IPAddr{ipAddr("93.83.18.19")}, + ConnectProxies: []*structs.ConnectManagedProxy{ + { + ExecMode: structs.ProxyExecModeDaemon, + Command: "awesome-proxy", + Config: map[string]interface{}{ + "foo": "qux", // Overriden by service + // Note globals are not merged here but on rendering to the proxy + // endpoint. That's because proxies can be added later too so merging + // at config time is redundant if we have to do it later anyway. + }, + TargetServiceID: "MRHVMZuD", + }, + }, + ConnectProxyBindMinPort: 2000, + ConnectProxyBindMaxPort: 3000, DNSAddrs: []net.Addr{tcpAddr("93.95.95.81:7001"), udpAddr("93.95.95.81:7001")}, DNSARecordLimit: 29907, DNSAllowStale: true, @@ -4018,6 +4081,14 @@ func TestSanitize(t *testing.T) { } ], "ClientAddrs": [], + "ConnectEnabled": false, + "ConnectProxies": [], + "ConnectProxyBindMaxPort": 0, + "ConnectProxyBindMinPort": 0, + "ConnectProxyDefaultConfig": {}, + "ConnectProxyDefaultDaemonCommand": null, + "ConnectProxyDefaultExecMode": null, + "ConnectProxyDefaultScriptCommand": null, "ConsulCoordinateUpdateBatchSize": 0, "ConsulCoordinateUpdateMaxBatches": 0, "ConsulCoordinateUpdatePeriod": "15s", @@ -4150,9 +4221,11 @@ func TestSanitize(t *testing.T) { "Checks": [], "EnableTagOverride": false, "ID": "", + "Kind": "", "Meta": {}, "Name": "foo", "Port": 0, + "ProxyDestination": "", "Tags": [], "Token": "hidden" } diff --git a/agent/local/state.go b/agent/local/state.go index f19e88a76..47a006943 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -3,6 +3,7 @@ package local import ( "fmt" "log" + "math/rand" "reflect" "strconv" "strings" @@ -10,6 +11,8 @@ import ( "sync/atomic" "time" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" @@ -27,6 +30,8 @@ type Config struct { NodeID types.NodeID NodeName string TaggedAddresses map[string]string + ProxyBindMinPort int + ProxyBindMaxPort int } // 
ServiceState describes the state of a service record. @@ -107,6 +112,21 @@ type rpc interface { RPC(method string, args interface{}, reply interface{}) error } +// ManagedProxy represents the local state for a registered proxy instance. +type ManagedProxy struct { + Proxy *structs.ConnectManagedProxy + + // ProxyToken is a special local-only security token that grants the bearer + // access to the proxy's config as well as allowing it to request certificates + // on behalf of the TargetService. Certain connect endpoints will validate + // against this token and if it matches will then use the TargetService.Token + // to actually authenticate the upstream RPC on behalf of the service. This + // token is passed securely to the proxy process via ENV vars and should never + // be exposed any other way. Unmanaged proxies will never see this and need to + // use service-scoped ACL tokens distributed externally. + ProxyToken string +} + // State is used to represent the node's services, // and checks. We use it to perform anti-entropy with the // catalog representation @@ -150,17 +170,28 @@ type State struct { // tokens contains the ACL tokens tokens *token.Store + + // managedProxies is a map of all manged connect proxies registered locally on + // this agent. This is NOT kept in sync with servers since it's agent-local + // config only. Proxy instances have separate service registrations in the + // services map above which are kept in sync via anti-entropy. Un-managed + // proxies (that registered themselves separately from the service + // registration) do not appear here as the agent doesn't need to manage their + // process nor config. The _do_ still exist in services above though as + // services with Kind == connect-proxy. + managedProxies map[string]*ManagedProxy } -// NewLocalState creates a new local state for the agent. +// NewState creates a new local state for the agent. 
func NewState(c Config, lg *log.Logger, tokens *token.Store) *State { l := &State{ - config: c, - logger: lg, - services: make(map[string]*ServiceState), - checks: make(map[types.CheckID]*CheckState), - metadata: make(map[string]string), - tokens: tokens, + config: c, + logger: lg, + services: make(map[string]*ServiceState), + checks: make(map[types.CheckID]*CheckState), + metadata: make(map[string]string), + tokens: tokens, + managedProxies: make(map[string]*ManagedProxy), } l.SetDiscardCheckOutput(c.DiscardCheckOutput) return l @@ -529,6 +560,142 @@ func (l *State) CriticalCheckStates() map[types.CheckID]*CheckState { return m } +// AddProxy is used to add a connect proxy entry to the local state. This +// assumes the proxy's NodeService is already registered via Agent.AddService +// (since that has to do other book keeping). The token passed here is the ACL +// token the service used to register itself so must have write on service +// record. +func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*structs.NodeService, error) { + if proxy == nil { + return nil, fmt.Errorf("no proxy") + } + + // Lookup the local service + target := l.Service(proxy.TargetServiceID) + if target == nil { + return nil, fmt.Errorf("target service ID %s not registered", + proxy.TargetServiceID) + } + + // Get bind info from config + cfg, err := proxy.ParseConfig() + if err != nil { + return nil, err + } + + // Construct almost all of the NodeService that needs to be registered by the + // caller outside of the lock. + svc := &structs.NodeService{ + Kind: structs.ServiceKindConnectProxy, + ID: target.ID + "-proxy", + Service: target.ID + "-proxy", + ProxyDestination: target.Service, + Address: cfg.BindAddress, + Port: cfg.BindPort, + } + + pToken, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + // Lock now. We can't lock earlier as l.Service would deadlock and shouldn't + // anyway to minimise the critical section. 
+ l.Lock() + defer l.Unlock() + + // Allocate port if needed (min and max inclusive) + rangeLen := l.config.ProxyBindMaxPort - l.config.ProxyBindMinPort + 1 + if svc.Port < 1 && l.config.ProxyBindMinPort > 0 && rangeLen > 0 { + // This should be a really short list so don't bother optimising lookup yet. + OUTER: + for _, offset := range rand.Perm(rangeLen) { + p := l.config.ProxyBindMinPort + offset + // See if this port was already allocated to another proxy + for _, other := range l.managedProxies { + if other.Proxy.ProxyService.Port == p { + // allready taken, skip to next random pick in the range + continue OUTER + } + } + // We made it through all existing proxies without a match so claim this one + svc.Port = p + break + } + } + // If no ports left (or auto ports disabled) fail + if svc.Port < 1 { + return nil, fmt.Errorf("no port provided for proxy bind_port and none "+ + " left in the allocated range [%d, %d]", l.config.ProxyBindMinPort, + l.config.ProxyBindMaxPort) + } + + proxy.ProxyService = svc + + // All set, add the proxy and return the service + l.managedProxies[svc.ID] = &ManagedProxy{ + Proxy: proxy, + ProxyToken: pToken, + } + + // No need to trigger sync as proxy state is local only. + return svc, nil +} + +// RemoveProxy is used to remove a proxy entry from the local state. +func (l *State) RemoveProxy(id string) error { + l.Lock() + defer l.Unlock() + + p := l.managedProxies[id] + if p == nil { + return fmt.Errorf("Proxy %s does not exist", id) + } + delete(l.managedProxies, id) + + // No need to trigger sync as proxy state is local only. + return nil +} + +// Proxy returns the local proxy state. +func (l *State) Proxy(id string) *structs.ConnectManagedProxy { + l.RLock() + defer l.RUnlock() + + p := l.managedProxies[id] + if p == nil { + return nil + } + return p.Proxy +} + +// Proxies returns the locally registered proxies. 
+func (l *State) Proxies() map[string]*structs.ConnectManagedProxy { + l.RLock() + defer l.RUnlock() + + m := make(map[string]*structs.ConnectManagedProxy) + for id, p := range l.managedProxies { + m[id] = p.Proxy + } + return m +} + +// ProxyToken returns the local proxy token for a given proxy. Note this is not +// an ACL token so it won't fallback to using the agent-configured default ACL +// token. If the proxy doesn't exist an error is returned, otherwise the token +// is guaranteed to exist. +func (l *State) ProxyToken(id string) (string, error) { + l.RLock() + defer l.RUnlock() + + p := l.managedProxies[id] + if p == nil { + return "", fmt.Errorf("proxy %s not registered", id) + } + return p.ProxyToken, nil +} + // Metadata returns the local node metadata fields that the // agent is aware of and are being kept in sync with the server func (l *State) Metadata() map[string]string { diff --git a/agent/local/state_test.go b/agent/local/state_test.go index d0c006a95..6950cd477 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -3,10 +3,14 @@ package local_test import ( "errors" "fmt" + "log" + "os" "reflect" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/local" @@ -1664,3 +1668,128 @@ func checksInSync(state *local.State, wantChecks int) error { } return nil } + +func TestStateProxyManagement(t *testing.T) { + t.Parallel() + + state := local.NewState(local.Config{ + ProxyPortRangeStart: 20000, + ProxyPortRangeEnd: 20002, + }, log.New(os.Stderr, "", log.LstdFlags), &token.Store{}) + + // Stub state syncing + state.TriggerSyncChanges = func() {} + + p1 := structs.ConnectManagedProxy{ + ExecMode: structs.ProxyExecModeDaemon, + Command: "consul connect proxy", + TargetServiceID: "web", + } + + require := require.New(t) + assert := assert.New(t) + + _, err := state.AddProxy(&p1, "fake-token") + require.Error(err, "should 
fail as the target service isn't registered") + + // Sanity check done, lets add a couple of target services to the state + err = state.AddService(&structs.NodeService{ + Service: "web", + }, "fake-token-web") + require.NoError(err) + err = state.AddService(&structs.NodeService{ + Service: "cache", + }, "fake-token-cache") + require.NoError(err) + require.NoError(err) + err = state.AddService(&structs.NodeService{ + Service: "db", + }, "fake-token-db") + require.NoError(err) + + // Should work now + svc, err := state.AddProxy(&p1, "fake-token") + require.NoError(err) + + assert.Equal("web-proxy", svc.ID) + assert.Equal("web-proxy", svc.Service) + assert.Equal(structs.ServiceKindConnectProxy, svc.Kind) + assert.Equal("web", svc.ProxyDestination) + assert.Equal("", svc.Address, "should have empty address by default") + // Port is non-deterministic but could be either of 20000 or 20001 + assert.Contains([]int{20000, 20001}, svc.Port) + + // Second proxy should claim other port + p2 := p1 + p2.TargetServiceID = "cache" + svc2, err := state.AddProxy(&p2, "fake-token") + require.NoError(err) + assert.Contains([]int{20000, 20001}, svc2.Port) + assert.NotEqual(svc.Port, svc2.Port) + + // Just saving this for later... + p2Token, err := state.ProxyToken(svc2.ID) + require.NoError(err) + + // Third proxy should fail as all ports are used + p3 := p1 + p3.TargetServiceID = "db" + _, err = state.AddProxy(&p3, "fake-token") + require.Error(err) + + // But if we set a port explicitly it should be OK + p3.Config = map[string]interface{}{ + "bind_port": 1234, + "bind_address": "0.0.0.0", + } + svc3, err := state.AddProxy(&p3, "fake-token") + require.NoError(err) + require.Equal("0.0.0.0", svc3.Address) + require.Equal(1234, svc3.Port) + + // Remove one of the auto-assigned proxies + err = state.RemoveProxy(svc2.ID) + require.NoError(err) + + // Should be able to create a new proxy for that service with the port (it + // should have been "freed"). 
+ p4 := p2 + svc4, err := state.AddProxy(&p4, "fake-token") + require.NoError(err) + assert.Contains([]int{20000, 20001}, svc2.Port) + assert.Equal(svc4.Port, svc2.Port, "should get the same port back that we freed") + + // Remove a proxy that doesn't exist should error + err = state.RemoveProxy("nope") + require.Error(err) + + assert.Equal(&p4, state.Proxy(p4.ProxyService.ID), + "should fetch the right proxy details") + assert.Nil(state.Proxy("nope")) + + proxies := state.Proxies() + assert.Len(proxies, 3) + assert.Equal(&p1, proxies[svc.ID]) + assert.Equal(&p4, proxies[svc4.ID]) + assert.Equal(&p3, proxies[svc3.ID]) + + tokens := make([]string, 4) + tokens[0], err = state.ProxyToken(svc.ID) + require.NoError(err) + // p2 not registered anymore but lets make sure p4 got a new token when it + // re-registered with same ID. + tokens[1] = p2Token + tokens[2], err = state.ProxyToken(svc3.ID) + require.NoError(err) + tokens[3], err = state.ProxyToken(svc4.ID) + require.NoError(err) + + // Quick check all are distinct + for i := 0; i < len(tokens)-1; i++ { + assert.Len(tokens[i], 36) // Sanity check for UUIDish thing. + for j := i + 1; j < len(tokens); j++ { + assert.NotEqual(tokens[i], tokens[j], "tokens for proxy %d and %d match", + i+1, j+1) + } + } +} diff --git a/agent/structs/connect.go b/agent/structs/connect.go index 7f08615d3..6f11c5fe3 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -1,5 +1,9 @@ package structs +import ( + "github.com/mitchellh/mapstructure" +) + // ConnectAuthorizeRequest is the structure of a request to authorize // a connection. type ConnectAuthorizeRequest struct { @@ -15,3 +19,75 @@ type ConnectAuthorizeRequest struct { ClientCertURI string ClientCertSerial string } + +// ProxyExecMode encodes the mode for running a managed connect proxy. +type ProxyExecMode int + +const ( + // ProxyExecModeDaemon executes a proxy process as a supervised daemon. 
+ ProxyExecModeDaemon ProxyExecMode = iota + + // ProxyExecModeScript executes a proxy config script on each change to it's + // config. + ProxyExecModeScript +) + +// ConnectManagedProxy represents the agent-local state for a configured proxy +// instance. This is never stored or sent to the servers and is only used to +// store the config for the proxy that the agent needs to track. For now it's +// really generic with only the fields the agent needs to act on defined while +// the rest of the proxy config is passed as opaque bag of attributes to support +// arbitrary config params for third-party proxy integrations. "External" +// proxies by definition register themselves and manage their own config +// externally so are never represented in agent state. +type ConnectManagedProxy struct { + // ExecMode is one of daemon or script. + ExecMode ProxyExecMode + + // Command is the command to execute. Empty defaults to self-invoking the same + // consul binary with proxy subcomand for ProxyExecModeDaemon and is an error + // for ProxyExecModeScript. + Command string + + // Config is the arbitrary configuration data provided with the registration. + Config map[string]interface{} + + // ProxyService is a pointer to the local proxy's service record for + // convenience. The proxies ID and name etc. can be read from there. It may be + // nil if the agent is starting up and hasn't registered the service yet. + ProxyService *NodeService + + // TargetServiceID is the ID of the target service on the localhost. It may + // not exist yet since bootstrapping is allowed to happen in either order. + TargetServiceID string +} + +// ConnectManagedProxyConfig represents the parts of the proxy config the agent +// needs to understand. It's bad UX to make the user specify these separately +// just to make parsing simpler for us so this encapsulates the fields in +// ConnectManagedProxy.Config that we care about. 
They are all optoinal anyway +// and this is used to decode them with mapstructure. +type ConnectManagedProxyConfig struct { + BindAddress string `mapstructure:"bind_address"` + BindPort int `mapstructure:"bind_port"` +} + +// ParseConfig attempts to read the fields we care about from the otherwise +// opaque config map. They are all optional but it may fail if one is specified +// but an invalid value. +func (p *ConnectManagedProxy) ParseConfig() (*ConnectManagedProxyConfig, error) { + var cfg ConnectManagedProxyConfig + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + ErrorUnused: false, + WeaklyTypedInput: true, // allow string port etc. + Result: &cfg, + }) + if err != nil { + return nil, err + } + err = d.Decode(p.Config) + if err != nil { + return nil, err + } + return &cfg, nil +} diff --git a/agent/structs/connect_test.go b/agent/structs/connect_test.go new file mode 100644 index 000000000..905ae09ef --- /dev/null +++ b/agent/structs/connect_test.go @@ -0,0 +1,115 @@ +package structs + +import ( + "reflect" + "testing" +) + +func TestConnectManagedProxy_ParseConfig(t *testing.T) { + tests := []struct { + name string + config map[string]interface{} + want *ConnectManagedProxyConfig + wantErr bool + }{ + { + name: "empty", + config: nil, + want: &ConnectManagedProxyConfig{}, + wantErr: false, + }, + { + name: "specified", + config: map[string]interface{}{ + "bind_address": "127.0.0.1", + "bind_port": 1234, + }, + want: &ConnectManagedProxyConfig{ + BindAddress: "127.0.0.1", + BindPort: 1234, + }, + wantErr: false, + }, + { + name: "stringy port", + config: map[string]interface{}{ + "bind_address": "127.0.0.1", + "bind_port": "1234", + }, + want: &ConnectManagedProxyConfig{ + BindAddress: "127.0.0.1", + BindPort: 1234, + }, + wantErr: false, + }, + { + name: "empty addr", + config: map[string]interface{}{ + "bind_address": "", + "bind_port": "1234", + }, + want: &ConnectManagedProxyConfig{ + BindAddress: "", + BindPort: 1234, + }, + wantErr: 
false, + }, + { + name: "empty port", + config: map[string]interface{}{ + "bind_address": "127.0.0.1", + "bind_port": "", + }, + want: nil, + wantErr: true, + }, + { + name: "junk address", + config: map[string]interface{}{ + "bind_address": 42, + "bind_port": "", + }, + want: nil, + wantErr: true, + }, + { + name: "zero port, missing addr", + config: map[string]interface{}{ + "bind_port": 0, + }, + want: &ConnectManagedProxyConfig{ + BindPort: 0, + }, + wantErr: false, + }, + { + name: "extra fields present", + config: map[string]interface{}{ + "bind_port": 1234, + "flamingos": true, + "upstream": []map[string]interface{}{ + {"foo": "bar"}, + }, + }, + want: &ConnectManagedProxyConfig{ + BindPort: 1234, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := &ConnectManagedProxy{ + Config: tt.config, + } + got, err := p.ParseConfig() + if (err != nil) != tt.wantErr { + t.Errorf("ConnectManagedProxy.ParseConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ConnectManagedProxy.ParseConfig() = %v, want %v", got, tt.want) + } + }) + } +} From c2266b134ae03f29ef2ebac156359e58823b2900 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Tue, 17 Apr 2018 13:29:02 +0100 Subject: [PATCH 131/627] HTTP agent registration allows proxy to be defined. 
--- agent/agent.go | 12 +++-- agent/agent_endpoint.go | 14 +++++ agent/agent_endpoint_test.go | 79 +++++++++++++++++++++++++++-- agent/config/builder.go | 50 ++++-------------- agent/config/runtime.go | 4 -- agent/config/runtime_test.go | 28 +++++----- agent/structs/service_definition.go | 62 ++++++++++++++++++++++ 7 files changed, 182 insertions(+), 67 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index b988029ce..03f7677d0 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2426,9 +2426,15 @@ func (a *Agent) unloadChecks() error { // loadProxies will load connect proxy definitions from configuration and // persisted definitions on disk, and load them into the local agent. func (a *Agent) loadProxies(conf *config.RuntimeConfig) error { - for _, proxy := range conf.ConnectProxies { - if err := a.AddProxy(proxy, false); err != nil { - return fmt.Errorf("failed adding proxy: %s", err) + for _, svc := range conf.Services { + if svc.Connect != nil { + proxy, err := svc.ConnectManagedProxy() + if err != nil { + return fmt.Errorf("failed adding proxy: %s", err) + } + if err := a.AddProxy(proxy, false); err != nil { + return fmt.Errorf("failed adding proxy: %s", err) + } } } diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 722909467..43013785f 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -589,10 +589,24 @@ func (s *HTTPServer) AgentRegisterService(resp http.ResponseWriter, req *http.Re return nil, err } + // Get any proxy registrations + proxy, err := args.ConnectManagedProxy() + if err != nil { + resp.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(resp, err.Error()) + return nil, nil + } + // Add the service. 
if err := s.agent.AddService(ns, chkTypes, true, token); err != nil { return nil, err } + // Add proxy (which will add proxy service so do it before we trigger sync) + if proxy != nil { + if err := s.agent.AddProxy(proxy, true); err != nil { + return nil, err + } + } s.syncChanges() return nil, nil } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 1c6f7d830..9d8591126 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -26,6 +26,7 @@ import ( "github.com/hashicorp/serf/serf" "github.com/pascaldekloe/goe/verify" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func makeReadOnlyAgentACL(t *testing.T, srv *HTTPServer) string { @@ -1369,10 +1370,78 @@ func TestAgent_RegisterService_InvalidAddress(t *testing.T) { } } -// This tests local agent service registration of a connect proxy. This -// verifies that it is put in the local state store properly for syncing -// later. -func TestAgent_RegisterService_ConnectProxy(t *testing.T) { +// This tests local agent service registration with a managed proxy. +func TestAgent_RegisterService_ManagedConnectProxy(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + require := require.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Register a proxy. Note that the destination doesn't exist here on + // this agent or in the catalog at all. This is intended and part + // of the design. + args := &structs.ServiceDefinition{ + Name: "web", + Port: 8000, + // This is needed just because empty check struct (not pointer) get json + // encoded as object with zero values and then decoded back to object with + // zero values _except that the header map is an empty map not a nil map_. + // So our check to see if s.Check.Empty() returns false since DeepEqual + // considers empty maps and nil maps to be different types. Then the request + // fails validation because the Check definition isn't valid... 
This is jank + // we should fix but it's another yak I don't want to shave right now. + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{ + ExecMode: "script", + Command: "proxy.sh", + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=abc123", jsonReader(args)) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentRegisterService(resp, req) + assert.NoError(err) + assert.Nil(obj) + require.Equal(200, resp.Code, "request failed with body: %s", + resp.Body.String()) + + // Ensure the target service + _, ok := a.State.Services()["web"] + assert.True(ok, "has service") + + // Ensure the proxy service was registered + proxySvc, ok := a.State.Services()["web-proxy"] + require.True(ok, "has proxy service") + assert.Equal(structs.ServiceKindConnectProxy, proxySvc.Kind) + assert.Equal("web", proxySvc.ProxyDestination) + assert.NotEmpty(proxySvc.Port, "a port should have been assigned") + + // Ensure proxy itself was registered + proxy := a.State.Proxy("web-proxy") + require.NotNil(proxy) + assert.Equal(structs.ProxyExecModeScript, proxy.ExecMode) + assert.Equal("proxy.sh", proxy.Command) + assert.Equal(args.Connect.Proxy.Config, proxy.Config) + + // Ensure the token was configured + assert.Equal("abc123", a.State.ServiceToken("web")) + assert.Equal("abc123", a.State.ServiceToken("web-proxy")) +} + +// This tests local agent service registration of a unmanaged connect proxy. +// This verifies that it is put in the local state store properly for syncing +// later. Note that _managed_ connect proxies are registered as part of the +// target service's registration. 
+func TestAgent_RegisterService_UnmanagedConnectProxy(t *testing.T) { t.Parallel() assert := assert.New(t) @@ -1411,7 +1480,7 @@ func TestAgent_RegisterService_ConnectProxy(t *testing.T) { // This tests that connect proxy validation is done for local agent // registration. This doesn't need to test validation exhaustively since // that is done via a table test in the structs package. -func TestAgent_RegisterService_ConnectProxyInvalid(t *testing.T) { +func TestAgent_RegisterService_UnmanagedConnectProxyInvalid(t *testing.T) { t.Parallel() assert := assert.New(t) diff --git a/agent/config/builder.go b/agent/config/builder.go index a6338ae14..ec36e9ab0 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -322,15 +322,8 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { } var services []*structs.ServiceDefinition - var proxies []*structs.ConnectManagedProxy for _, service := range c.Services { services = append(services, b.serviceVal(&service)) - // Register any connect proxies requested - if proxy := b.connectManagedProxyVal(&service); proxy != nil { - proxies = append(proxies, proxy) - } - // TODO(banks): support connect-native registrations (v.Connect.Enabled == - // true) } if c.Service != nil { services = append(services, b.serviceVal(c.Service)) @@ -648,7 +641,6 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { CheckUpdateInterval: b.durationVal("check_update_interval", c.CheckUpdateInterval), Checks: checks, ClientAddrs: clientAddrs, - ConnectProxies: proxies, ConnectProxyBindMinPort: proxyBindMinPort, ConnectProxyBindMaxPort: proxyBindMaxPort, DataDir: b.stringVal(c.DataDir), @@ -1020,46 +1012,26 @@ func (b *Builder) serviceVal(v *ServiceDefinition) *structs.ServiceDefinition { Token: b.stringVal(v.Token), EnableTagOverride: b.boolVal(v.EnableTagOverride), Checks: checks, + Connect: b.serviceConnectVal(v.Connect), } } -func (b *Builder) connectManagedProxyVal(v *ServiceDefinition) *structs.ConnectManagedProxy { - if 
v.Connect == nil || v.Connect.Proxy == nil { +func (b *Builder) serviceConnectVal(v *ServiceConnect) *structs.ServiceDefinitionConnect { + if v == nil { return nil } - p := v.Connect.Proxy - - targetID := b.stringVal(v.ID) - if targetID == "" { - targetID = b.stringVal(v.Name) - } - - execMode := structs.ProxyExecModeDaemon - if p.ExecMode != nil { - switch *p.ExecMode { - case "daemon": - execMode = structs.ProxyExecModeDaemon - case "script": - execMode = structs.ProxyExecModeScript - default: - b.err = multierror.Append(fmt.Errorf( - "service[%s]: invalid connect proxy exec_mode: %s", targetID, - *p.ExecMode)) - return nil + var proxy *structs.ServiceDefinitionConnectProxy + if v.Proxy != nil { + proxy = &structs.ServiceDefinitionConnectProxy{ + ExecMode: b.stringVal(v.Proxy.ExecMode), + Command: b.stringVal(v.Proxy.Command), + Config: v.Proxy.Config, } } - return &structs.ConnectManagedProxy{ - ExecMode: execMode, - Command: b.stringVal(p.Command), - Config: p.Config, - // ProxyService will be setup when the agent registers the configured - // proxies and starts them etc. We could do it here but we may need to do - // things like probe the OS for a free port etc. And we have enough info to - // resolve all this later. - ProxyService: nil, - TargetServiceID: targetID, + return &structs.ServiceDefinitionConnect{ + Proxy: proxy, } } diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 55c15d14e..b31630d27 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -621,10 +621,6 @@ type RuntimeConfig struct { // that. ConnectEnabled bool - // ConnectProxies is a list of configured proxies taken from the "connect" - // block of service registrations. - ConnectProxies []*structs.ConnectManagedProxy - // ConnectProxyBindMinPort is the inclusive start of the range of ports // allocated to the agent for starting proxy listeners on where no explicit // port is specified. 
diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index e990f0689..773b7a036 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -3403,21 +3403,8 @@ func TestFullConfig(t *testing.T) { DeregisterCriticalServiceAfter: 13209 * time.Second, }, }, - CheckUpdateInterval: 16507 * time.Second, - ClientAddrs: []*net.IPAddr{ipAddr("93.83.18.19")}, - ConnectProxies: []*structs.ConnectManagedProxy{ - { - ExecMode: structs.ProxyExecModeDaemon, - Command: "awesome-proxy", - Config: map[string]interface{}{ - "foo": "qux", // Overriden by service - // Note globals are not merged here but on rendering to the proxy - // endpoint. That's because proxies can be added later too so merging - // at config time is redundant if we have to do it later anyway. - }, - TargetServiceID: "MRHVMZuD", - }, - }, + CheckUpdateInterval: 16507 * time.Second, + ClientAddrs: []*net.IPAddr{ipAddr("93.83.18.19")}, ConnectProxyBindMinPort: 2000, ConnectProxyBindMaxPort: 3000, DNSAddrs: []net.Addr{tcpAddr("93.95.95.81:7001"), udpAddr("93.95.95.81:7001")}, @@ -3592,6 +3579,15 @@ func TestFullConfig(t *testing.T) { DeregisterCriticalServiceAfter: 68482 * time.Second, }, }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{ + ExecMode: "daemon", + Command: "awesome-proxy", + Config: map[string]interface{}{ + "foo": "qux", + }, + }, + }, }, { ID: "dLOXpSCI", @@ -4082,7 +4078,6 @@ func TestSanitize(t *testing.T) { ], "ClientAddrs": [], "ConnectEnabled": false, - "ConnectProxies": [], "ConnectProxyBindMaxPort": 0, "ConnectProxyBindMinPort": 0, "ConnectProxyDefaultConfig": {}, @@ -4219,6 +4214,7 @@ func TestSanitize(t *testing.T) { "Timeout": "0s" }, "Checks": [], + "Connect": null, "EnableTagOverride": false, "ID": "", "Kind": "", diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index a10f1527f..ad77d8e3b 100644 --- a/agent/structs/service_definition.go +++ 
b/agent/structs/service_definition.go @@ -1,5 +1,9 @@ package structs +import ( + "fmt" +) + // ServiceDefinition is used to JSON decode the Service definitions. For // documentation on specific fields see NodeService which is better documented. type ServiceDefinition struct { @@ -15,6 +19,7 @@ type ServiceDefinition struct { Token string EnableTagOverride bool ProxyDestination string + Connect *ServiceDefinitionConnect } func (s *ServiceDefinition) NodeService() *NodeService { @@ -35,6 +40,45 @@ func (s *ServiceDefinition) NodeService() *NodeService { return ns } +// ConnectManagedProxy returns a ConnectManagedProxy from the ServiceDefinition +// if one is configured validly. Note that is may return nil if no proxy is +// configured and will also return nil error in this case too as it's an +// expected case. The error returned indicates that there was an attempt to +// configure a proxy made but that it was invalid input, e.g. invalid +// "exec_mode". +func (s *ServiceDefinition) ConnectManagedProxy() (*ConnectManagedProxy, error) { + if s.Connect == nil || s.Connect.Proxy == nil { + return nil, nil + } + + // NodeService performs some simple normalization like copying ID from Name + // which we shouldn't hard code ourselves here... + ns := s.NodeService() + + execMode := ProxyExecModeDaemon + switch s.Connect.Proxy.ExecMode { + case "": + execMode = ProxyExecModeDaemon + case "daemon": + execMode = ProxyExecModeDaemon + case "script": + execMode = ProxyExecModeScript + default: + return nil, fmt.Errorf("invalid exec mode: %s", s.Connect.Proxy.ExecMode) + } + + p := &ConnectManagedProxy{ + ExecMode: execMode, + Command: s.Connect.Proxy.Command, + Config: s.Connect.Proxy.Config, + // ProxyService will be setup when the agent registers the configured + // proxies and starts them etc. 
+ TargetServiceID: ns.ID, + } + + return p, nil +} + func (s *ServiceDefinition) CheckTypes() (checks CheckTypes, err error) { if !s.Check.Empty() { err := s.Check.Validate() @@ -51,3 +95,21 @@ func (s *ServiceDefinition) CheckTypes() (checks CheckTypes, err error) { } return checks, nil } + +// ServiceDefinitionConnect is the connect block within a service registration. +// Note this is duplicated in config.ServiceConnect and needs to be kept in +// sync. +type ServiceDefinitionConnect struct { + // TODO(banks) add way to specify that the app is connect-native + // Proxy configures a connect proxy instance for the service + Proxy *ServiceDefinitionConnectProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"` +} + +// ServiceDefinitionConnectProxy is the connect proxy config within a service +// registration. Note this is duplicated in config.ServiceConnectProxy and needs +// to be kept in sync. +type ServiceDefinitionConnectProxy struct { + Command string `json:"command,omitempty" hcl:"command" mapstructure:"command"` + ExecMode string `json:"exec_mode,omitempty" hcl:"exec_mode" mapstructure:"exec_mode"` + Config map[string]interface{} `json:"config,omitempty" hcl:"config" mapstructure:"config"` +} From 44afb5c69906856a02b414258e359535876e1a19 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 18 Apr 2018 21:05:30 +0100 Subject: [PATCH 132/627] Agent Connect Proxy config endpoint with hash-based blocking --- agent/agent_endpoint.go | 119 +++++++++++++++++++++++++ agent/agent_endpoint_test.go | 168 ++++++++++++++++++++++++++++++++++- agent/local/state.go | 47 +++++----- agent/local/state_test.go | 72 +++++++++++---- agent/structs/connect.go | 25 ++++++ 5 files changed, 386 insertions(+), 45 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 43013785f..e7cec596a 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -7,6 +7,7 @@ import ( "net/url" "strconv" "strings" + "time" "github.com/hashicorp/consul/acl" 
"github.com/hashicorp/consul/agent/checks" @@ -26,6 +27,7 @@ import ( // NOTE(mitcehllh): This is temporary while certs are stubbed out. "github.com/mitchellh/go-testing-interface" + "github.com/mitchellh/hashstructure" ) type Self struct { @@ -896,6 +898,123 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. return &reply, nil } +// GET /v1/agent/connect/proxy/:proxy_service_id +// +// Returns the local proxy config for the identified proxy. Requires token= +// param with the correct local ProxyToken (not ACL token). +func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Get the proxy ID. Note that this is the ID of a proxy's service instance. + id := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/proxy/") + + // Maybe block + var queryOpts structs.QueryOptions + if parseWait(resp, req, &queryOpts) { + // parseWait returns an error itself + return nil, nil + } + + // Parse hash specially since it's only this endpoint that uses it currently. + // Eventually this should happen in parseWait and end up in QueryOptions but I + // didn't want to make very general changes right away. + hash := req.URL.Query().Get("hash") + + return s.agentLocalBlockingQuery(hash, &queryOpts, + func(updateCh chan struct{}) (string, interface{}, error) { + // Retrieve the proxy specified + proxy := s.agent.State.Proxy(id) + if proxy == nil { + resp.WriteHeader(http.StatusNotFound) + fmt.Fprintf(resp, "unknown proxy service ID: %s", id) + return "", nil, nil + } + + // Lookup the target service as a convenience + target := s.agent.State.Service(proxy.Proxy.TargetServiceID) + if target == nil { + // Not found since this endpoint is only useful for agent-managed proxies so + // service missing means the service was deregistered racily with this call. 
+ resp.WriteHeader(http.StatusNotFound) + fmt.Fprintf(resp, "unknown target service ID: %s", proxy.Proxy.TargetServiceID) + return "", nil, nil + } + + // Setup "watch" on the proxy being modified and respond on chan if it is. + go func() { + select { + case <-updateCh: + // blocking query timedout or was cancelled. Abort + return + case <-proxy.WatchCh: + // Proxy was updated or removed, report it + updateCh <- struct{}{} + } + }() + + hash, err := hashstructure.Hash(proxy.Proxy, nil) + if err != nil { + return "", nil, err + } + contentHash := fmt.Sprintf("%x", hash) + + reply := &structs.ConnectManageProxyResponse{ + ProxyServiceID: proxy.Proxy.ProxyService.ID, + TargetServiceID: target.ID, + TargetServiceName: target.Service, + ContentHash: contentHash, + ExecMode: proxy.Proxy.ExecMode.String(), + Command: proxy.Proxy.Command, + Config: proxy.Proxy.Config, + } + return contentHash, reply, nil + }) + return nil, nil +} + +type agentLocalBlockingFunc func(updateCh chan struct{}) (string, interface{}, error) + +func (s *HTTPServer) agentLocalBlockingQuery(hash string, + queryOpts *structs.QueryOptions, fn agentLocalBlockingFunc) (interface{}, error) { + + var timer *time.Timer + + if hash != "" { + // TODO(banks) at least define these defaults somewhere in a const. Would be + // nice not to duplicate the ones in consul/rpc.go too... + wait := queryOpts.MaxQueryTime + if wait == 0 { + wait = 5 * time.Minute + } + if wait > 10*time.Minute { + wait = 10 * time.Minute + } + // Apply a small amount of jitter to the request. + wait += lib.RandomStagger(wait / 16) + timer = time.NewTimer(wait) + } + + ch := make(chan struct{}) + + for { + curHash, curResp, err := fn(ch) + if err != nil { + return curResp, err + } + // Hash was passed and matches current one, wait for update or timeout. 
+ if timer != nil && hash == curHash { + select { + case <-ch: + // Update happened, loop to fetch a new value + continue + case <-timer.C: + // Timeout, stop the watcher goroutine and return what we have + close(ch) + break + } + } + return curResp, err + } +} + // AgentConnectAuthorize // // POST /v1/agent/connect/authorize diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 9d8591126..4e73556ec 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -24,6 +24,7 @@ import ( "github.com/hashicorp/consul/testutil/retry" "github.com/hashicorp/consul/types" "github.com/hashicorp/serf/serf" + "github.com/mitchellh/copystructure" "github.com/pascaldekloe/goe/verify" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -1428,9 +1429,9 @@ func TestAgent_RegisterService_ManagedConnectProxy(t *testing.T) { // Ensure proxy itself was registered proxy := a.State.Proxy("web-proxy") require.NotNil(proxy) - assert.Equal(structs.ProxyExecModeScript, proxy.ExecMode) - assert.Equal("proxy.sh", proxy.Command) - assert.Equal(args.Connect.Proxy.Config, proxy.Config) + assert.Equal(structs.ProxyExecModeScript, proxy.Proxy.ExecMode) + assert.Equal("proxy.sh", proxy.Proxy.Command) + assert.Equal(args.Connect.Proxy.Config, proxy.Proxy.Config) // Ensure the token was configured assert.Equal("abc123", a.State.ServiceToken("web")) @@ -2200,6 +2201,167 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { // TODO(mitchellh): verify the private key matches the cert } +func TestAgentConnectProxy(t *testing.T) { + t.Parallel() + + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Define a local service with a managed proxy. It's registered in the test + // loop to make sure agent state is predictable whatever order tests execute + // since some alter this service config. 
+ reg := &structs.ServiceDefinition{ + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{ + Config: map[string]interface{}{ + "bind_port": 1234, + "connect_timeout_ms": 500, + "upstreams": []map[string]interface{}{ + { + "destination_name": "db", + "local_port": 3131, + }, + }, + }, + }, + }, + } + + expectedResponse := &structs.ConnectManageProxyResponse{ + ProxyServiceID: "test-proxy", + TargetServiceID: "test", + TargetServiceName: "test", + ContentHash: "a15dccb216d38a6e", + ExecMode: "daemon", + Command: "", + Config: map[string]interface{}{ + "upstreams": []interface{}{ + map[string]interface{}{ + "destination_name": "db", + "local_port": float64(3131), + }, + }, + "bind_port": float64(1234), + "connect_timeout_ms": float64(500), + }, + } + + ur, err := copystructure.Copy(expectedResponse) + require.NoError(t, err) + updatedResponse := ur.(*structs.ConnectManageProxyResponse) + updatedResponse.ContentHash = "22bc9233a52c08fd" + upstreams := updatedResponse.Config["upstreams"].([]interface{}) + upstreams = append(upstreams, + map[string]interface{}{ + "destination_name": "cache", + "local_port": float64(4242), + }) + updatedResponse.Config["upstreams"] = upstreams + + tests := []struct { + name string + url string + updateFunc func() + wantWait time.Duration + wantCode int + wantErr bool + wantResp *structs.ConnectManageProxyResponse + }{ + { + name: "simple fetch", + url: "/v1/agent/connect/proxy/test-proxy", + wantCode: 200, + wantErr: false, + wantResp: expectedResponse, + }, + { + name: "blocking fetch timeout, no change", + url: "/v1/agent/connect/proxy/test-proxy?hash=a15dccb216d38a6e&wait=100ms", + wantWait: 100 * time.Millisecond, + wantCode: 200, + wantErr: false, + wantResp: expectedResponse, + }, + { + name: "blocking fetch old hash should return immediately", + url: 
"/v1/agent/connect/proxy/test-proxy?hash=123456789abcd&wait=10m", + wantCode: 200, + wantErr: false, + wantResp: expectedResponse, + }, + { + name: "blocking fetch returns change", + url: "/v1/agent/connect/proxy/test-proxy?hash=a15dccb216d38a6e", + updateFunc: func() { + time.Sleep(100 * time.Millisecond) + // Re-register with new proxy config + r2, err := copystructure.Copy(reg) + require.NoError(t, err) + reg2 := r2.(*structs.ServiceDefinition) + reg2.Connect.Proxy.Config = updatedResponse.Config + req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(r2)) + resp := httptest.NewRecorder() + _, err = a.srv.AgentRegisterService(resp, req) + require.NoError(t, err) + require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String()) + }, + wantWait: 100 * time.Millisecond, + wantCode: 200, + wantErr: false, + wantResp: updatedResponse, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + // Register the basic service to ensure it's in a known state to start. 
+ { + req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + req, _ := http.NewRequest("GET", tt.url, nil) + resp := httptest.NewRecorder() + if tt.updateFunc != nil { + go tt.updateFunc() + } + start := time.Now() + obj, err := a.srv.AgentConnectProxyConfig(resp, req) + elapsed := time.Now().Sub(start) + + if tt.wantErr { + require.Error(err) + } else { + require.NoError(err) + } + if tt.wantCode != 0 { + require.Equal(tt.wantCode, resp.Code, "body: %s", resp.Body.String()) + } + if tt.wantWait != 0 { + assert.True(elapsed >= tt.wantWait, "should have waited at least %s, "+ + "took %s", tt.wantWait, elapsed) + } else { + assert.True(elapsed < 10*time.Millisecond, "should not have waited, "+ + "took %s", elapsed) + } + + assert.Equal(tt.wantResp, obj) + }) + } +} + func TestAgentConnectAuthorize_badBody(t *testing.T) { t.Parallel() diff --git a/agent/local/state.go b/agent/local/state.go index 47a006943..839b3cdb2 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -125,6 +125,10 @@ type ManagedProxy struct { // be exposed any other way. Unmanaged proxies will never see this and need to // use service-scoped ACL tokens distributed externally. ProxyToken string + + // WatchCh is a close-only chan that is closed when the proxy is removed or + // updated. + WatchCh chan struct{} } // State is used to represent the node's services, @@ -171,7 +175,7 @@ type State struct { // tokens contains the ACL tokens tokens *token.Store - // managedProxies is a map of all manged connect proxies registered locally on + // managedProxies is a map of all managed connect proxies registered locally on // this agent. This is NOT kept in sync with servers since it's agent-local // config only. 
Proxy instances have separate service registrations in the // services map above which are kept in sync via anti-entropy. Un-managed @@ -633,9 +637,17 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*str proxy.ProxyService = svc // All set, add the proxy and return the service + if old, ok := l.managedProxies[svc.ID]; ok { + // Notify watchers of the existing proxy config that it's changing. Note + // this is safe here even before the map is updated since we still hold the + // state lock and the watcher can't re-read the new config until we return + // anyway. + close(old.WatchCh) + } l.managedProxies[svc.ID] = &ManagedProxy{ Proxy: proxy, ProxyToken: pToken, + WatchCh: make(chan struct{}), } // No need to trigger sync as proxy state is local only. @@ -653,49 +665,32 @@ func (l *State) RemoveProxy(id string) error { } delete(l.managedProxies, id) + // Notify watchers of the existing proxy config that it's changed. + close(p.WatchCh) + // No need to trigger sync as proxy state is local only. return nil } // Proxy returns the local proxy state. -func (l *State) Proxy(id string) *structs.ConnectManagedProxy { +func (l *State) Proxy(id string) *ManagedProxy { l.RLock() defer l.RUnlock() - - p := l.managedProxies[id] - if p == nil { - return nil - } - return p.Proxy + return l.managedProxies[id] } // Proxies returns the locally registered proxies. -func (l *State) Proxies() map[string]*structs.ConnectManagedProxy { +func (l *State) Proxies() map[string]*ManagedProxy { l.RLock() defer l.RUnlock() - m := make(map[string]*structs.ConnectManagedProxy) + m := make(map[string]*ManagedProxy) for id, p := range l.managedProxies { - m[id] = p.Proxy + m[id] = p } return m } -// ProxyToken returns the local proxy token for a given proxy. Note this is not -// an ACL token so it won't fallback to using the agent-configured default ACL -// token. If the proxy doesn't exist an error is returned, otherwise the token -// is guaranteed to exist. 
-func (l *State) ProxyToken(id string) (string, error) { - l.RLock() - defer l.RUnlock() - - p := l.managedProxies[id] - if p == nil { - return "", fmt.Errorf("proxy %s not registered", id) - } - return p.ProxyToken, nil -} - // Metadata returns the local node metadata fields that the // agent is aware of and are being kept in sync with the server func (l *State) Metadata() map[string]string { diff --git a/agent/local/state_test.go b/agent/local/state_test.go index 6950cd477..a8890a540 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -6,6 +6,7 @@ import ( "log" "os" "reflect" + "sync" "testing" "time" @@ -1673,8 +1674,8 @@ func TestStateProxyManagement(t *testing.T) { t.Parallel() state := local.NewState(local.Config{ - ProxyPortRangeStart: 20000, - ProxyPortRangeEnd: 20002, + ProxyBindMinPort: 20000, + ProxyBindMaxPort: 20001, }, log.New(os.Stderr, "", log.LstdFlags), &token.Store{}) // Stub state syncing @@ -1707,6 +1708,20 @@ func TestStateProxyManagement(t *testing.T) { }, "fake-token-db") require.NoError(err) + // Record initial local modify index + lastModifyIndex := state.LocalModifyIndex() + assertModIndexUpdate := func(id string) { + t.Helper() + nowIndex := state.LocalModifyIndex() + assert.True(lastModifyIndex < nowIndex) + if id != "" { + p := state.Proxy(id) + require.NotNil(p) + assert.True(lastModifyIndex < p.ModifyIndex) + } + lastModifyIndex = nowIndex + } + // Should work now svc, err := state.AddProxy(&p1, "fake-token") require.NoError(err) @@ -1718,6 +1733,7 @@ func TestStateProxyManagement(t *testing.T) { assert.Equal("", svc.Address, "should have empty address by default") // Port is non-deterministic but could be either of 20000 or 20001 assert.Contains([]int{20000, 20001}, svc.Port) + assertModIndexUpdate(svc.ID) // Second proxy should claim other port p2 := p1 @@ -1726,10 +1742,10 @@ func TestStateProxyManagement(t *testing.T) { require.NoError(err) assert.Contains([]int{20000, 20001}, svc2.Port) 
assert.NotEqual(svc.Port, svc2.Port) + assertModIndexUpdate(svc2.ID) - // Just saving this for later... - p2Token, err := state.ProxyToken(svc2.ID) - require.NoError(err) + // Store this for later + p2token := state.Proxy(svc2.ID).ProxyToken // Third proxy should fail as all ports are used p3 := p1 @@ -1746,6 +1762,32 @@ func TestStateProxyManagement(t *testing.T) { require.NoError(err) require.Equal("0.0.0.0", svc3.Address) require.Equal(1234, svc3.Port) + assertModIndexUpdate(svc3.ID) + + // Update config of an already registered proxy should work + p3updated := p3 + p3updated.Config["foo"] = "bar" + // Setup multiple watchers who should all witness the change + gotP3 := state.Proxy(svc3.ID) + require.NotNil(gotP3) + var watchWg sync.WaitGroup + for i := 0; i < 3; i++ { + watchWg.Add(1) + go func() { + <-gotP3.WatchCh + watchWg.Done() + }() + } + svc3, err = state.AddProxy(&p3updated, "fake-token") + require.NoError(err) + require.Equal("0.0.0.0", svc3.Address) + require.Equal(1234, svc3.Port) + gotProxy3 := state.Proxy(svc3.ID) + require.NotNil(gotProxy3) + require.Equal(p3updated.Config, gotProxy3.Proxy.Config) + assertModIndexUpdate(svc3.ID) // update must change mod index + // All watchers should have fired so this should not hang the test! 
+ watchWg.Wait() // Remove one of the auto-assigned proxies err = state.RemoveProxy(svc2.ID) @@ -1758,31 +1800,29 @@ func TestStateProxyManagement(t *testing.T) { require.NoError(err) assert.Contains([]int{20000, 20001}, svc2.Port) assert.Equal(svc4.Port, svc2.Port, "should get the same port back that we freed") + assertModIndexUpdate(svc4.ID) // Remove a proxy that doesn't exist should error err = state.RemoveProxy("nope") require.Error(err) - assert.Equal(&p4, state.Proxy(p4.ProxyService.ID), + assert.Equal(&p4, state.Proxy(p4.ProxyService.ID).Proxy, "should fetch the right proxy details") assert.Nil(state.Proxy("nope")) proxies := state.Proxies() assert.Len(proxies, 3) - assert.Equal(&p1, proxies[svc.ID]) - assert.Equal(&p4, proxies[svc4.ID]) - assert.Equal(&p3, proxies[svc3.ID]) + assert.Equal(&p1, proxies[svc.ID].Proxy) + assert.Equal(&p4, proxies[svc4.ID].Proxy) + assert.Equal(&p3, proxies[svc3.ID].Proxy) tokens := make([]string, 4) - tokens[0], err = state.ProxyToken(svc.ID) - require.NoError(err) + tokens[0] = state.Proxy(svc.ID).ProxyToken // p2 not registered anymore but lets make sure p4 got a new token when it // re-registered with same ID. 
- tokens[1] = p2Token - tokens[2], err = state.ProxyToken(svc3.ID) - require.NoError(err) - tokens[3], err = state.ProxyToken(svc4.ID) - require.NoError(err) + tokens[1] = p2token + tokens[2] = state.Proxy(svc2.ID).ProxyToken + tokens[3] = state.Proxy(svc3.ID).ProxyToken // Quick check all are distinct for i := 0; i < len(tokens)-1; i++ { diff --git a/agent/structs/connect.go b/agent/structs/connect.go index 6f11c5fe3..d879718b2 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -32,6 +32,18 @@ const ( ProxyExecModeScript ) +// String implements Stringer +func (m ProxyExecMode) String() string { + switch m { + case ProxyExecModeDaemon: + return "daemon" + case ProxyExecModeScript: + return "script" + default: + return "unknown" + } +} + // ConnectManagedProxy represents the agent-local state for a configured proxy // instance. This is never stored or sent to the servers and is only used to // store the config for the proxy that the agent needs to track. For now it's @@ -91,3 +103,16 @@ func (p *ConnectManagedProxy) ParseConfig() (*ConnectManagedProxyConfig, error) } return &cfg, nil } + +// ConnectManageProxyResponse is the public response object we return for +// queries on local proxy config state. It's similar to ConnectManagedProxy but +// with some fields re-arranged. +type ConnectManageProxyResponse struct { + ProxyServiceID string + TargetServiceID string + TargetServiceName string + ContentHash string + ExecMode string + Command string + Config map[string]interface{} +} From cbd860665120fd9b2faeddbe78aaeba99be33ece Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 18 Apr 2018 21:48:58 +0100 Subject: [PATCH 133/627] Add X-Consul-ContentHash header; implement removing all proxies; add load/unload test. 
--- agent/agent.go | 6 +++++- agent/agent_endpoint.go | 10 ++++++++-- agent/agent_endpoint_test.go | 2 ++ 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 03f7677d0..ce6a26d0c 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2444,7 +2444,11 @@ func (a *Agent) loadProxies(conf *config.RuntimeConfig) error { // unloadProxies will deregister all proxies known to the local agent. func (a *Agent) unloadProxies() error { - // TODO(banks): implement me + for id := range a.State.Proxies() { + if err := a.RemoveProxy(id, false); err != nil { + return fmt.Errorf("Failed deregistering proxy '%s': %s", id, err) + } + } return nil } diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index e7cec596a..b3e3741a8 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -918,7 +918,7 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http // didn't want to make very general changes right away. hash := req.URL.Query().Get("hash") - return s.agentLocalBlockingQuery(hash, &queryOpts, + return s.agentLocalBlockingQuery(resp, hash, &queryOpts, func(updateCh chan struct{}) (string, interface{}, error) { // Retrieve the proxy specified proxy := s.agent.State.Proxy(id) @@ -972,7 +972,11 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http type agentLocalBlockingFunc func(updateCh chan struct{}) (string, interface{}, error) -func (s *HTTPServer) agentLocalBlockingQuery(hash string, +// agentLocalBlockingQuery performs a blocking query in a generic way against +// local agent state that has no RPC or raft to back it. It uses `hash` paramter +// instead of an `index`. The resp is needed to write the `X-Consul-ContentHash` +// header back on return no Status nor body content is ever written to it. 
+func (s *HTTPServer) agentLocalBlockingQuery(resp http.ResponseWriter, hash string, queryOpts *structs.QueryOptions, fn agentLocalBlockingFunc) (interface{}, error) { var timer *time.Timer @@ -1011,6 +1015,8 @@ func (s *HTTPServer) agentLocalBlockingQuery(hash string, break } } + + resp.Header().Set("X-Consul-ContentHash", curHash) return curResp, err } } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 4e73556ec..b34ac508a 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2358,6 +2358,8 @@ func TestAgentConnectProxy(t *testing.T) { } assert.Equal(tt.wantResp, obj) + + assert.Equal(tt.wantResp.ContentHash, resp.Header().Get("X-Consul-ContentHash")) }) } } From aed5e5b03e6041fe27d4762a9b8e99740b179a7d Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 18 Apr 2018 22:03:51 +0100 Subject: [PATCH 134/627] Super ugly hack to get TeamCity build to work for this PR without adding a vendor that is being added elsewhere and will conflict... --- GNUmakefile | 2 ++ agent/agent.go | 3 ++ agent/agent_test.go | 69 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+) diff --git a/GNUmakefile b/GNUmakefile index bebe8bce5..030fa003d 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -40,6 +40,8 @@ bin: tools dev: changelogfmt vendorfmt dev-build dev-build: + @echo "--> TEMPORARY HACK: installing hashstructure to make CI pass until we vendor it upstream" + go get github.com/mitchellh/hashstructure @echo "--> Building consul" mkdir -p pkg/$(GOOS)_$(GOARCH)/ bin/ go install -ldflags '$(GOLDFLAGS)' -tags '$(GOTAGS)' diff --git a/agent/agent.go b/agent/agent.go index ce6a26d0c..277bdd046 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2588,6 +2588,9 @@ func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error { // First unload all checks, services, and metadata. This lets us begin the reload // with a clean slate. 
+ if err := a.unloadProxies(); err != nil { + return fmt.Errorf("Failed unloading proxies: %s", err) + } if err := a.unloadServices(); err != nil { return fmt.Errorf("Failed unloading services: %s", err) } diff --git a/agent/agent_test.go b/agent/agent_test.go index 2ee42d7db..bf24425bc 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -1640,6 +1640,75 @@ func TestAgent_unloadServices(t *testing.T) { } } +func TestAgent_loadProxies(t *testing.T) { + t.Parallel() + a := NewTestAgent(t.Name(), ` + service = { + id = "rabbitmq" + name = "rabbitmq" + port = 5672 + token = "abc123" + connect { + proxy { + config { + bind_port = 1234 + } + } + } + } + `) + defer a.Shutdown() + + services := a.State.Services() + if _, ok := services["rabbitmq"]; !ok { + t.Fatalf("missing service") + } + if token := a.State.ServiceToken("rabbitmq"); token != "abc123" { + t.Fatalf("bad: %s", token) + } + if _, ok := services["rabbitmq-proxy"]; !ok { + t.Fatalf("missing proxy service") + } + if token := a.State.ServiceToken("rabbitmq-proxy"); token != "abc123" { + t.Fatalf("bad: %s", token) + } + proxies := a.State.Proxies() + if _, ok := proxies["rabbitmq-proxy"]; !ok { + t.Fatalf("missing proxy") + } +} + +func TestAgent_unloadProxies(t *testing.T) { + t.Parallel() + a := NewTestAgent(t.Name(), ` + service = { + id = "rabbitmq" + name = "rabbitmq" + port = 5672 + token = "abc123" + connect { + proxy { + config { + bind_port = 1234 + } + } + } + } + `) + defer a.Shutdown() + + // Sanity check it's there + require.NotNil(t, a.State.Proxy("rabbitmq-proxy")) + + // Unload all proxies + if err := a.unloadProxies(); err != nil { + t.Fatalf("err: %s", err) + } + if len(a.State.Proxies()) != 0 { + t.Fatalf("should have unloaded proxies") + } +} + func TestAgent_Service_MaintenanceMode(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") From 8a4410b549c36f1fde582830cf9c48988bead671 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 19 Apr 2018 11:15:32 +0100 Subject: [PATCH 
135/627] Refactor localBlockingQuery to use memdb.WatchSet. Much simpler and correct as a bonus! --- agent/agent_endpoint.go | 57 +++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 33 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index b3e3741a8..24685ee92 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -9,6 +9,9 @@ import ( "strings" "time" + "github.com/hashicorp/go-memdb" + "github.com/mitchellh/hashstructure" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/config" @@ -27,7 +30,6 @@ import ( // NOTE(mitcehllh): This is temporary while certs are stubbed out. "github.com/mitchellh/go-testing-interface" - "github.com/mitchellh/hashstructure" ) type Self struct { @@ -919,7 +921,7 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http hash := req.URL.Query().Get("hash") return s.agentLocalBlockingQuery(resp, hash, &queryOpts, - func(updateCh chan struct{}) (string, interface{}, error) { + func(ws memdb.WatchSet) (string, interface{}, error) { // Retrieve the proxy specified proxy := s.agent.State.Proxy(id) if proxy == nil { @@ -938,17 +940,8 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http return "", nil, nil } - // Setup "watch" on the proxy being modified and respond on chan if it is. - go func() { - select { - case <-updateCh: - // blocking query timedout or was cancelled. 
Abort - return - case <-proxy.WatchCh: - // Proxy was updated or removed, report it - updateCh <- struct{}{} - } - }() + // Watch the proxy for changes + ws.Add(proxy.WatchCh) hash, err := hashstructure.Hash(proxy.Proxy, nil) if err != nil { @@ -970,7 +963,7 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http return nil, nil } -type agentLocalBlockingFunc func(updateCh chan struct{}) (string, interface{}, error) +type agentLocalBlockingFunc func(ws memdb.WatchSet) (string, interface{}, error) // agentLocalBlockingQuery performs a blocking query in a generic way against // local agent state that has no RPC or raft to back it. It uses `hash` paramter @@ -979,7 +972,10 @@ type agentLocalBlockingFunc func(updateCh chan struct{}) (string, interface{}, e func (s *HTTPServer) agentLocalBlockingQuery(resp http.ResponseWriter, hash string, queryOpts *structs.QueryOptions, fn agentLocalBlockingFunc) (interface{}, error) { - var timer *time.Timer + // If we are not blocking we can skip tracking and allocating - nil WatchSet + // is still valid to call Add on and will just be a no op. + var ws memdb.WatchSet + var timeout *time.Timer if hash != "" { // TODO(banks) at least define these defaults somewhere in a const. Would be @@ -993,31 +989,26 @@ func (s *HTTPServer) agentLocalBlockingQuery(resp http.ResponseWriter, hash stri } // Apply a small amount of jitter to the request. wait += lib.RandomStagger(wait / 16) - timer = time.NewTimer(wait) + timeout = time.NewTimer(wait) + ws = memdb.NewWatchSet() } - ch := make(chan struct{}) - for { - curHash, curResp, err := fn(ch) + curHash, curResp, err := fn(ws) if err != nil { return curResp, err } - // Hash was passed and matches current one, wait for update or timeout. 
- if timer != nil && hash == curHash { - select { - case <-ch: - // Update happened, loop to fetch a new value - continue - case <-timer.C: - // Timeout, stop the watcher goroutine and return what we have - close(ch) - break - } + // Return immediately if there is no timeout, the hash is different or the + // Watch returns true (indicating timeout fired). Note that Watch on a nil + // WatchSet immediately returns false which would incorrectly cause this to + // loop and repeat again, however we rely on the invariant that ws == nil + // IFF timeout == nil in which case the Watch call is never invoked. + if timeout == nil || hash != curHash || ws.Watch(timeout.C) { + resp.Header().Set("X-Consul-ContentHash", curHash) + return curResp, err } - - resp.Header().Set("X-Consul-ContentHash", curHash) - return curResp, err + // Watch returned false indicating a change was detected, loop and repeat + // the callback to load the new value. } } From 9d11cd9bf4c226e29edc44c178ac48c8c29c15e7 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 19 Apr 2018 12:06:32 +0100 Subject: [PATCH 136/627] Fix various test failures and vet warnings. Intention de-duplication in previously merged PR actualy failed some tests that were not caught be me or CI. I ran the test files for state changes but they happened not to trigger this case so I made sure they did first and then fixed. That fixed some upstream intention endpoint tests that I'd not run as part of testing the previous fix. 
--- agent/agent_endpoint.go | 1 - agent/agent_test.go | 4 +- agent/consul/state/intention.go | 5 ++- agent/consul/state/intention_test.go | 62 +++++++++++++++------------- agent/local/state_test.go | 36 +++------------- connect/proxy/listener.go | 1 - connect/testing.go | 2 - connect/tls_test.go | 4 +- 8 files changed, 48 insertions(+), 67 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 24685ee92..c19b776ac 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -960,7 +960,6 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http } return contentHash, reply, nil }) - return nil, nil } type agentLocalBlockingFunc func(ws memdb.WatchSet) (string, interface{}, error) diff --git a/agent/agent_test.go b/agent/agent_test.go index bf24425bc..c22ce56ba 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -2366,7 +2366,7 @@ func TestAgent_AddProxy(t *testing.T) { // Test the ID was created as we expect. got := a.State.Proxy("web-proxy") - require.Equal(tt.proxy, got) + require.Equal(tt.proxy, got.Proxy) }) } } @@ -2394,7 +2394,7 @@ func TestAgent_RemoveProxy(t *testing.T) { // Test the ID was created as we expect. 
gotProxy := a.State.Proxy("web-proxy") - require.Equal(pReg, gotProxy) + require.Equal(pReg, gotProxy.Proxy) err := a.RemoveProxy("web-proxy", false) require.NoError(err) diff --git a/agent/consul/state/intention.go b/agent/consul/state/intention.go index 907bdf1ab..91a61ffe1 100644 --- a/agent/consul/state/intention.go +++ b/agent/consul/state/intention.go @@ -190,7 +190,10 @@ func (s *Store) intentionSetTxn(tx *memdb.Txn, idx uint64, ixn *structs.Intentio } if duplicate != nil { dupIxn := duplicate.(*structs.Intention) - return fmt.Errorf("duplicate intention found: %s", dupIxn.String()) + // Same ID is OK - this is an update + if dupIxn.ID != ixn.ID { + return fmt.Errorf("duplicate intention found: %s", dupIxn.String()) + } } // We always force meta to be non-nil so that we its an empty map. diff --git a/agent/consul/state/intention_test.go b/agent/consul/state/intention_test.go index 743f698af..fbf43c19b 100644 --- a/agent/consul/state/intention_test.go +++ b/agent/consul/state/intention_test.go @@ -42,7 +42,7 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { } // Inserting a with empty ID is disallowed. - assert.Nil(s.IntentionSet(1, ixn)) + assert.NoError(s.IntentionSet(1, ixn)) // Make sure the index got updated. assert.Equal(uint64(1), s.maxIndex(intentionsTableName)) @@ -64,13 +64,18 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { ws = memdb.NewWatchSet() idx, actual, err := s.IntentionGet(ws, ixn.ID) - assert.Nil(err) + assert.NoError(err) assert.Equal(expected.CreateIndex, idx) assert.Equal(expected, actual) // Change a value and test updating ixn.SourceNS = "foo" - assert.Nil(s.IntentionSet(2, ixn)) + assert.NoError(s.IntentionSet(2, ixn)) + + // Change a value that isn't in the unique 4 tuple and check we don't + // incorrectly consider this a duplicate when updating. + ixn.Action = structs.IntentionActionDeny + assert.NoError(s.IntentionSet(2, ixn)) // Make sure the index got updated. 
assert.Equal(uint64(2), s.maxIndex(intentionsTableName)) @@ -78,10 +83,11 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { // Read it back and verify the data was updated expected.SourceNS = ixn.SourceNS + expected.Action = structs.IntentionActionDeny expected.ModifyIndex = 2 ws = memdb.NewWatchSet() idx, actual, err = s.IntentionGet(ws, ixn.ID) - assert.Nil(err) + assert.NoError(err) assert.Equal(expected.ModifyIndex, idx) assert.Equal(expected, actual) @@ -97,7 +103,7 @@ func TestStore_IntentionSetGet_basic(t *testing.T) { // Duplicate 4-tuple should cause an error ws = memdb.NewWatchSet() - assert.NotNil(s.IntentionSet(3, ixn)) + assert.Error(s.IntentionSet(3, ixn)) // Make sure the index did NOT get updated. assert.Equal(uint64(2), s.maxIndex(intentionsTableName)) @@ -110,11 +116,11 @@ func TestStore_IntentionSet_emptyId(t *testing.T) { ws := memdb.NewWatchSet() _, _, err := s.IntentionGet(ws, testUUID()) - assert.Nil(err) + assert.NoError(err) // Inserting a with empty ID is disallowed. err = s.IntentionSet(1, &structs.Intention{}) - assert.NotNil(err) + assert.Error(err) assert.Contains(err.Error(), ErrMissingIntentionID.Error()) // Index is not updated if nothing is saved. 
@@ -134,16 +140,16 @@ func TestStore_IntentionSet_updateCreatedAt(t *testing.T) { } // Insert - assert.Nil(s.IntentionSet(1, &ixn)) + assert.NoError(s.IntentionSet(1, &ixn)) // Change a value and test updating ixnUpdate := ixn ixnUpdate.CreatedAt = now.Add(10 * time.Second) - assert.Nil(s.IntentionSet(2, &ixnUpdate)) + assert.NoError(s.IntentionSet(2, &ixnUpdate)) // Read it back and verify _, actual, err := s.IntentionGet(nil, ixn.ID) - assert.Nil(err) + assert.NoError(err) assert.Equal(now, actual.CreatedAt) } @@ -157,11 +163,11 @@ func TestStore_IntentionSet_metaNil(t *testing.T) { } // Insert - assert.Nil(s.IntentionSet(1, &ixn)) + assert.NoError(s.IntentionSet(1, &ixn)) // Read it back and verify _, actual, err := s.IntentionGet(nil, ixn.ID) - assert.Nil(err) + assert.NoError(err) assert.NotNil(actual.Meta) } @@ -176,11 +182,11 @@ func TestStore_IntentionSet_metaSet(t *testing.T) { } // Insert - assert.Nil(s.IntentionSet(1, &ixn)) + assert.NoError(s.IntentionSet(1, &ixn)) // Read it back and verify _, actual, err := s.IntentionGet(nil, ixn.ID) - assert.Nil(err) + assert.NoError(err) assert.Equal(ixn.Meta, actual.Meta) } @@ -191,18 +197,18 @@ func TestStore_IntentionDelete(t *testing.T) { // Call Get to populate the watch set ws := memdb.NewWatchSet() _, _, err := s.IntentionGet(ws, testUUID()) - assert.Nil(err) + assert.NoError(err) // Create ixn := &structs.Intention{ID: testUUID()} - assert.Nil(s.IntentionSet(1, ixn)) + assert.NoError(s.IntentionSet(1, ixn)) // Make sure the index got updated. assert.Equal(s.maxIndex(intentionsTableName), uint64(1)) assert.True(watchFired(ws), "watch fired") // Delete - assert.Nil(s.IntentionDelete(2, ixn.ID)) + assert.NoError(s.IntentionDelete(2, ixn.ID)) // Make sure the index got updated. assert.Equal(s.maxIndex(intentionsTableName), uint64(2)) @@ -210,7 +216,7 @@ func TestStore_IntentionDelete(t *testing.T) { // Sanity check to make sure it's not there. 
idx, actual, err := s.IntentionGet(nil, ixn.ID) - assert.Nil(err) + assert.NoError(err) assert.Equal(idx, uint64(2)) assert.Nil(actual) } @@ -222,7 +228,7 @@ func TestStore_IntentionsList(t *testing.T) { // Querying with no results returns nil. ws := memdb.NewWatchSet() idx, res, err := s.Intentions(ws) - assert.Nil(err) + assert.NoError(err) assert.Nil(res) assert.Equal(idx, uint64(0)) @@ -244,7 +250,7 @@ func TestStore_IntentionsList(t *testing.T) { // Create for i, ixn := range ixns { - assert.Nil(s.IntentionSet(uint64(1+i), ixn)) + assert.NoError(s.IntentionSet(uint64(1+i), ixn)) } assert.True(watchFired(ws), "watch fired") @@ -268,7 +274,7 @@ func TestStore_IntentionsList(t *testing.T) { }, } idx, actual, err := s.Intentions(nil) - assert.Nil(err) + assert.NoError(err) assert.Equal(idx, uint64(2)) assert.Equal(expected, actual) } @@ -386,7 +392,7 @@ func TestStore_IntentionMatch_table(t *testing.T) { } } - assert.Nil(s.IntentionSet(idx, ixn)) + assert.NoError(s.IntentionSet(idx, ixn)) idx++ } @@ -402,7 +408,7 @@ func TestStore_IntentionMatch_table(t *testing.T) { // Match _, matches, err := s.IntentionMatch(nil, args) - assert.Nil(err) + assert.NoError(err) // Should have equal lengths require.Len(t, matches, len(tc.Expected)) @@ -478,7 +484,7 @@ func TestStore_Intention_Snapshot_Restore(t *testing.T) { // Now create for i, ixn := range ixns { - assert.Nil(s.IntentionSet(uint64(4+i), ixn)) + assert.NoError(s.IntentionSet(uint64(4+i), ixn)) } // Snapshot the queries. @@ -486,7 +492,7 @@ func TestStore_Intention_Snapshot_Restore(t *testing.T) { defer snap.Close() // Alter the real state store. - assert.Nil(s.IntentionDelete(7, ixns[0].ID)) + assert.NoError(s.IntentionDelete(7, ixns[0].ID)) // Verify the snapshot. 
assert.Equal(snap.LastIndex(), uint64(6)) @@ -520,7 +526,7 @@ func TestStore_Intention_Snapshot_Restore(t *testing.T) { }, } dump, err := snap.Intentions() - assert.Nil(err) + assert.NoError(err) assert.Equal(expected, dump) // Restore the values into a new state store. @@ -528,13 +534,13 @@ func TestStore_Intention_Snapshot_Restore(t *testing.T) { s := testStateStore(t) restore := s.Restore() for _, ixn := range dump { - assert.Nil(restore.Intention(ixn)) + assert.NoError(restore.Intention(ixn)) } restore.Commit() // Read the restored values back out and verify that they match. idx, actual, err := s.Intentions(nil) - assert.Nil(err) + assert.NoError(err) assert.Equal(idx, uint64(6)) assert.Equal(expected, actual) }() diff --git a/agent/local/state_test.go b/agent/local/state_test.go index a8890a540..16975d963 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -6,10 +6,11 @@ import ( "log" "os" "reflect" - "sync" "testing" "time" + "github.com/hashicorp/go-memdb" + "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent" @@ -1708,20 +1709,6 @@ func TestStateProxyManagement(t *testing.T) { }, "fake-token-db") require.NoError(err) - // Record initial local modify index - lastModifyIndex := state.LocalModifyIndex() - assertModIndexUpdate := func(id string) { - t.Helper() - nowIndex := state.LocalModifyIndex() - assert.True(lastModifyIndex < nowIndex) - if id != "" { - p := state.Proxy(id) - require.NotNil(p) - assert.True(lastModifyIndex < p.ModifyIndex) - } - lastModifyIndex = nowIndex - } - // Should work now svc, err := state.AddProxy(&p1, "fake-token") require.NoError(err) @@ -1733,7 +1720,6 @@ func TestStateProxyManagement(t *testing.T) { assert.Equal("", svc.Address, "should have empty address by default") // Port is non-deterministic but could be either of 20000 or 20001 assert.Contains([]int{20000, 20001}, svc.Port) - assertModIndexUpdate(svc.ID) // Second proxy should claim other port p2 := p1 @@ -1742,7 +1728,6 @@ func 
TestStateProxyManagement(t *testing.T) { require.NoError(err) assert.Contains([]int{20000, 20001}, svc2.Port) assert.NotEqual(svc.Port, svc2.Port) - assertModIndexUpdate(svc2.ID) // Store this for later p2token := state.Proxy(svc2.ID).ProxyToken @@ -1762,7 +1747,6 @@ func TestStateProxyManagement(t *testing.T) { require.NoError(err) require.Equal("0.0.0.0", svc3.Address) require.Equal(1234, svc3.Port) - assertModIndexUpdate(svc3.ID) // Update config of an already registered proxy should work p3updated := p3 @@ -1770,14 +1754,8 @@ func TestStateProxyManagement(t *testing.T) { // Setup multiple watchers who should all witness the change gotP3 := state.Proxy(svc3.ID) require.NotNil(gotP3) - var watchWg sync.WaitGroup - for i := 0; i < 3; i++ { - watchWg.Add(1) - go func() { - <-gotP3.WatchCh - watchWg.Done() - }() - } + var ws memdb.WatchSet + ws.Add(gotP3.WatchCh) svc3, err = state.AddProxy(&p3updated, "fake-token") require.NoError(err) require.Equal("0.0.0.0", svc3.Address) @@ -1785,9 +1763,8 @@ func TestStateProxyManagement(t *testing.T) { gotProxy3 := state.Proxy(svc3.ID) require.NotNil(gotProxy3) require.Equal(p3updated.Config, gotProxy3.Proxy.Config) - assertModIndexUpdate(svc3.ID) // update must change mod index - // All watchers should have fired so this should not hang the test! 
- watchWg.Wait() + assert.False(ws.Watch(time.After(500*time.Millisecond)), + "watch should have fired so ws.Watch should not timeout") // Remove one of the auto-assigned proxies err = state.RemoveProxy(svc2.ID) @@ -1800,7 +1777,6 @@ func TestStateProxyManagement(t *testing.T) { require.NoError(err) assert.Contains([]int{20000, 20001}, svc2.Port) assert.Equal(svc4.Port, svc2.Port, "should get the same port back that we freed") - assertModIndexUpdate(svc4.ID) // Remove a proxy that doesn't exist should error err = state.RemoveProxy("nope") diff --git a/connect/proxy/listener.go b/connect/proxy/listener.go index c003cb19c..51ab761ca 100644 --- a/connect/proxy/listener.go +++ b/connect/proxy/listener.go @@ -87,7 +87,6 @@ func (l *Listener) Serve() error { go l.handleConn(conn) } - return nil } // handleConn is the internal connection handler goroutine. diff --git a/connect/testing.go b/connect/testing.go index 235ff6001..9f6e4f781 100644 --- a/connect/testing.go +++ b/connect/testing.go @@ -161,8 +161,6 @@ func (s *TestServer) Serve() error { c.Close() }(conn) } - - return nil } // ServeHTTPS runs an HTTPS server with the given config. It invokes the passed diff --git a/connect/tls_test.go b/connect/tls_test.go index 64c473c1e..d13b78661 100644 --- a/connect/tls_test.go +++ b/connect/tls_test.go @@ -16,7 +16,7 @@ func TestReloadableTLSConfig(t *testing.T) { // The dynamic config should be the one we loaded (with some different hooks) got := c.TLSConfig() - expect := *base + expect := base.Clone() // Equal and even cmp.Diff fail on tls.Config due to unexported fields in // each. Compare a few things to prove it's returning the bits we // specifically set. @@ -39,7 +39,7 @@ func TestReloadableTLSConfig(t *testing.T) { // Change the passed config to ensure SetTLSConfig made a copy otherwise this // is racey. 
- expect = *new + expect = new.Clone() new.Certificates = nil // The dynamic config should be the one we loaded (with some different hooks) From d8ac823ab16fdc6c06a6a499dc5d6f0d0fc6ad19 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 19 Apr 2018 13:01:20 +0100 Subject: [PATCH 137/627] Make test output more useful now we uses testify with multi-line error messages --- GNUmakefile | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 030fa003d..d77342892 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -80,10 +80,14 @@ test: other-consul dev-build vet @# _something_ to stop them terminating us due to inactivity... { go test $(GOTEST_FLAGS) -tags '$(GOTAGS)' -timeout 5m $(GOTEST_PKGS) 2>&1 ; echo $$? > exit-code ; } | tee test.log | egrep '^(ok|FAIL)\s*github.com/hashicorp/consul' @echo "Exit code: $$(cat exit-code)" >> test.log - @grep -A5 'DATA RACE' test.log || true + @# This prints all the race report between ====== lines + @awk '/^WARNING: DATA RACE/ {do_print=1; print "=================="} do_print==1 {print} /^={10,}/ {do_print=0}' test.log || true @grep -A10 'panic: test timed out' test.log || true - @grep -A1 -- '--- SKIP:' test.log || true - @grep -A1 -- '--- FAIL:' test.log || true + @# Prints all the failure output until the next non-indented line - testify + @# helpers often output multiple lines for readability but useless if we can't + @# see them. 
+ @awk '/--- SKIP/ {do_print=1} /^[^[:space:]]/ {do_print=0} do_print==1 {print}' test.log || true + @awk '/--- FAIL/ {do_print=1} /^[^[:space:]]/ {do_print=0} do_print==1 {print}' test.log || true @grep '^FAIL' test.log || true @if [ "$$(cat exit-code)" == "0" ] ; then echo "PASS" ; exit 0 ; else exit 1 ; fi From a90f69faa4aaae8badf64c87e7a255d6889d04ca Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Fri, 20 Apr 2018 14:24:24 +0100 Subject: [PATCH 138/627] Adds `api` client code and tests for new Proxy Config endpoint, registering with proxy and seeing proxy config in /agent/services list. --- GNUmakefile | 12 ++-- agent/agent_endpoint.go | 57 +++++++++++++------ agent/agent_endpoint_test.go | 64 +++++++++++---------- agent/structs/connect.go | 13 ----- agent/structs/service_definition.go | 8 +-- api/agent.go | 72 +++++++++++++++++++++++- api/agent_test.go | 87 ++++++++++++++++++++++++++++- api/api.go | 9 +++ 8 files changed, 251 insertions(+), 71 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index d77342892..2c412d9e5 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -82,12 +82,14 @@ test: other-consul dev-build vet @echo "Exit code: $$(cat exit-code)" >> test.log @# This prints all the race report between ====== lines @awk '/^WARNING: DATA RACE/ {do_print=1; print "=================="} do_print==1 {print} /^={10,}/ {do_print=0}' test.log || true - @grep -A10 'panic: test timed out' test.log || true + @grep -A10 'panic: ' test.log || true @# Prints all the failure output until the next non-indented line - testify - @# helpers often output multiple lines for readability but useless if we can't - @# see them. - @awk '/--- SKIP/ {do_print=1} /^[^[:space:]]/ {do_print=0} do_print==1 {print}' test.log || true - @awk '/--- FAIL/ {do_print=1} /^[^[:space:]]/ {do_print=0} do_print==1 {print}' test.log || true + @# helpers often output multiple lines for readability but useless if we can't + @# see them. Un-intuitive order of matches is necessary. 
No || true because + @# awk always returns true even if there is no match and it breaks non-bash + @# shells locally. + @awk '/^[^[:space:]]/ {do_print=0} /--- SKIP/ {do_print=1} do_print==1 {print}' test.log + @awk '/^[^[:space:]]/ {do_print=0} /--- FAIL/ {do_print=1} do_print==1 {print}' test.log @grep '^FAIL' test.log || true @if [ "$$(cat exit-code)" == "0" ] ; then echo "PASS" ; exit 0 ; else exit 1 ; fi diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index c19b776ac..c1bf6fbe1 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -162,25 +162,48 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request) return nil, err } + proxies := s.agent.State.Proxies() + + // Convert into api.AgentService since that includes Connect config but so far + // NodeService doesn't need to internally. They are otherwise identical since + // that is the struct used in client for reading the one we output here + // anyway. + agentSvcs := make(map[string]*api.AgentService) + // Use empty list instead of nil for id, s := range services { - if s.Tags == nil || s.Meta == nil { - clone := *s - if s.Tags == nil { - clone.Tags = make([]string, 0) - } else { - clone.Tags = s.Tags - } - if s.Meta == nil { - clone.Meta = make(map[string]string) - } else { - clone.Meta = s.Meta - } - services[id] = &clone + as := &api.AgentService{ + Kind: api.ServiceKind(s.Kind), + ID: s.ID, + Service: s.Service, + Tags: s.Tags, + Port: s.Port, + Address: s.Address, + EnableTagOverride: s.EnableTagOverride, + CreateIndex: s.CreateIndex, + ModifyIndex: s.ModifyIndex, + ProxyDestination: s.ProxyDestination, } + if as.Tags == nil { + as.Tags = []string{} + } + if as.Meta == nil { + as.Meta = map[string]string{} + } + // Attach Connect configs if the exist + if proxy, ok := proxies[id+"-proxy"]; ok { + as.Connect = &api.AgentServiceConnect{ + Proxy: &api.AgentServiceConnectProxy{ + ExecMode: api.ProxyExecMode(proxy.Proxy.ExecMode.String()), + Command: 
proxy.Proxy.Command, + Config: proxy.Proxy.Config, + }, + } + } + agentSvcs[id] = as } - return services, nil + return agentSvcs, nil } func (s *HTTPServer) AgentChecks(resp http.ResponseWriter, req *http.Request) (interface{}, error) { @@ -904,7 +927,7 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. // // Returns the local proxy config for the identified proxy. Requires token= // param with the correct local ProxyToken (not ACL token). -func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http.Request) (interface{}, error) { +func (s *HTTPServer) ConnectProxyConfig(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Get the proxy ID. Note that this is the ID of a proxy's service instance. id := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/proxy/") @@ -949,12 +972,12 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http } contentHash := fmt.Sprintf("%x", hash) - reply := &structs.ConnectManageProxyResponse{ + reply := &api.ConnectProxyConfig{ ProxyServiceID: proxy.Proxy.ProxyService.ID, TargetServiceID: target.ID, TargetServiceName: target.Service, ContentHash: contentHash, - ExecMode: proxy.Proxy.ExecMode.String(), + ExecMode: api.ProxyExecMode(proxy.Proxy.ExecMode.String()), Command: proxy.Proxy.Command, Config: proxy.Proxy.Config, } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index b34ac508a..32cb6ab98 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -57,25 +57,39 @@ func TestAgent_Services(t *testing.T) { Tags: []string{"master"}, Port: 5000, } - a.State.AddService(srv1, "") + require.NoError(t, a.State.AddService(srv1, "")) + + // Add a managed proxy for that service + prxy1 := &structs.ConnectManagedProxy{ + ExecMode: structs.ProxyExecModeScript, + Command: "proxy.sh", + Config: map[string]interface{}{ + "bind_port": 1234, + "foo": "bar", + }, + TargetServiceID: "mysql", + } + _, err := 
a.State.AddProxy(prxy1, "") + require.NoError(t, err) req, _ := http.NewRequest("GET", "/v1/agent/services", nil) obj, err := a.srv.AgentServices(nil, req) if err != nil { t.Fatalf("Err: %v", err) } - val := obj.(map[string]*structs.NodeService) - if len(val) != 1 { - t.Fatalf("bad services: %v", obj) - } - if val["mysql"].Port != 5000 { - t.Fatalf("bad service: %v", obj) - } + val := obj.(map[string]*api.AgentService) + assert.Lenf(t, val, 1, "bad services: %v", obj) + assert.Equal(t, 5000, val["mysql"].Port) + assert.NotNil(t, val["mysql"].Connect) + assert.NotNil(t, val["mysql"].Connect.Proxy) + assert.Equal(t, prxy1.ExecMode.String(), string(val["mysql"].Connect.Proxy.ExecMode)) + assert.Equal(t, prxy1.Command, val["mysql"].Connect.Proxy.Command) + assert.Equal(t, prxy1.Config, val["mysql"].Connect.Proxy.Config) } // This tests that the agent services endpoint (/v1/agent/services) returns // Connect proxies. -func TestAgent_Services_ConnectProxy(t *testing.T) { +func TestAgent_Services_ExternalConnectProxy(t *testing.T) { t.Parallel() assert := assert.New(t) @@ -94,10 +108,10 @@ func TestAgent_Services_ConnectProxy(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/services", nil) obj, err := a.srv.AgentServices(nil, req) assert.Nil(err) - val := obj.(map[string]*structs.NodeService) + val := obj.(map[string]*api.AgentService) assert.Len(val, 1) actual := val["db-proxy"] - assert.Equal(structs.ServiceKindConnectProxy, actual.Kind) + assert.Equal(api.ServiceKindConnectProxy, actual.Kind) assert.Equal("db", actual.ProxyDestination) } @@ -120,7 +134,7 @@ func TestAgent_Services_ACLFilter(t *testing.T) { if err != nil { t.Fatalf("Err: %v", err) } - val := obj.(map[string]*structs.NodeService) + val := obj.(map[string]*api.AgentService) if len(val) != 0 { t.Fatalf("bad: %v", obj) } @@ -132,7 +146,7 @@ func TestAgent_Services_ACLFilter(t *testing.T) { if err != nil { t.Fatalf("Err: %v", err) } - val := obj.(map[string]*structs.NodeService) + val := 
obj.(map[string]*api.AgentService) if len(val) != 1 { t.Fatalf("bad: %v", obj) } @@ -1383,21 +1397,11 @@ func TestAgent_RegisterService_ManagedConnectProxy(t *testing.T) { // Register a proxy. Note that the destination doesn't exist here on // this agent or in the catalog at all. This is intended and part // of the design. - args := &structs.ServiceDefinition{ + args := &api.AgentServiceRegistration{ Name: "web", Port: 8000, - // This is needed just because empty check struct (not pointer) get json - // encoded as object with zero values and then decoded back to object with - // zero values _except that the header map is an empty map not a nil map_. - // So our check to see if s.Check.Empty() returns false since DeepEqual - // considers empty maps and nil maps to be different types. Then the request - // fails validation because the Check definition isn't valid... This is jank - // we should fix but it's another yak I don't want to shave right now. - Check: structs.CheckType{ - TTL: 15 * time.Second, - }, - Connect: &structs.ServiceDefinitionConnect{ - Proxy: &structs.ServiceDefinitionConnectProxy{ + Connect: &api.AgentServiceConnect{ + Proxy: &api.AgentServiceConnectProxy{ ExecMode: "script", Command: "proxy.sh", Config: map[string]interface{}{ @@ -2233,7 +2237,7 @@ func TestAgentConnectProxy(t *testing.T) { }, } - expectedResponse := &structs.ConnectManageProxyResponse{ + expectedResponse := &api.ConnectProxyConfig{ ProxyServiceID: "test-proxy", TargetServiceID: "test", TargetServiceName: "test", @@ -2254,7 +2258,7 @@ func TestAgentConnectProxy(t *testing.T) { ur, err := copystructure.Copy(expectedResponse) require.NoError(t, err) - updatedResponse := ur.(*structs.ConnectManageProxyResponse) + updatedResponse := ur.(*api.ConnectProxyConfig) updatedResponse.ContentHash = "22bc9233a52c08fd" upstreams := updatedResponse.Config["upstreams"].([]interface{}) upstreams = append(upstreams, @@ -2271,7 +2275,7 @@ func TestAgentConnectProxy(t *testing.T) { wantWait 
time.Duration wantCode int wantErr bool - wantResp *structs.ConnectManageProxyResponse + wantResp *api.ConnectProxyConfig }{ { name: "simple fetch", @@ -2338,7 +2342,7 @@ func TestAgentConnectProxy(t *testing.T) { go tt.updateFunc() } start := time.Now() - obj, err := a.srv.AgentConnectProxyConfig(resp, req) + obj, err := a.srv.ConnectProxyConfig(resp, req) elapsed := time.Now().Sub(start) if tt.wantErr { diff --git a/agent/structs/connect.go b/agent/structs/connect.go index d879718b2..5f907c1ab 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -103,16 +103,3 @@ func (p *ConnectManagedProxy) ParseConfig() (*ConnectManagedProxyConfig, error) } return &cfg, nil } - -// ConnectManageProxyResponse is the public response object we return for -// queries on local proxy config state. It's similar to ConnectManagedProxy but -// with some fields re-arranged. -type ConnectManageProxyResponse struct { - ProxyServiceID string - TargetServiceID string - TargetServiceName string - ContentHash string - ExecMode string - Command string - Config map[string]interface{} -} diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index ad77d8e3b..2ed424178 100644 --- a/agent/structs/service_definition.go +++ b/agent/structs/service_definition.go @@ -102,14 +102,14 @@ func (s *ServiceDefinition) CheckTypes() (checks CheckTypes, err error) { type ServiceDefinitionConnect struct { // TODO(banks) add way to specify that the app is connect-native // Proxy configures a connect proxy instance for the service - Proxy *ServiceDefinitionConnectProxy `json:"proxy,omitempty" hcl:"proxy" mapstructure:"proxy"` + Proxy *ServiceDefinitionConnectProxy } // ServiceDefinitionConnectProxy is the connect proxy config within a service // registration. Note this is duplicated in config.ServiceConnectProxy and needs // to be kept in sync. 
type ServiceDefinitionConnectProxy struct { - Command string `json:"command,omitempty" hcl:"command" mapstructure:"command"` - ExecMode string `json:"exec_mode,omitempty" hcl:"exec_mode" mapstructure:"exec_mode"` - Config map[string]interface{} `json:"config,omitempty" hcl:"config" mapstructure:"config"` + Command string + ExecMode string + Config map[string]interface{} } diff --git a/api/agent.go b/api/agent.go index 6b662fa2c..a81fd96f8 100644 --- a/api/agent.go +++ b/api/agent.go @@ -21,6 +21,23 @@ const ( ServiceKindConnectProxy ServiceKind = "connect-proxy" ) +// ProxyExecMode is the execution mode for a managed Connect proxy. +type ProxyExecMode string + +const ( + // ProxyExecModeDaemon indicates that the proxy command should be long-running + // and should be started and supervised by the agent until it's target service + // is deregistered. + ProxyExecModeDaemon ProxyExecMode = "daemon" + + // ProxyExecModeScript indicates that the proxy command should be invoke to + // completion on each change to the configuration of lifecycle event. The + // script typically fetches the config and certificates from the agent API and + // then configures an externally managed daemon, perhaps starting and stopping + // it if necessary. + ProxyExecModeScript ProxyExecMode = "script" +) + // AgentCheck represents a check known to the agent type AgentCheck struct { Node string @@ -47,6 +64,20 @@ type AgentService struct { CreateIndex uint64 ModifyIndex uint64 ProxyDestination string + Connect *AgentServiceConnect +} + +// AgentServiceConnect represents the Connect configuration of a service. +type AgentServiceConnect struct { + Proxy *AgentServiceConnectProxy +} + +// AgentServiceConnectProxy represents the Connect Proxy configuration of a +// service. 
+type AgentServiceConnectProxy struct { + ExecMode ProxyExecMode + Command string + Config map[string]interface{} } // AgentMember represents a cluster member known to the agent @@ -89,7 +120,8 @@ type AgentServiceRegistration struct { Meta map[string]string `json:",omitempty"` Check *AgentServiceCheck Checks AgentServiceChecks - ProxyDestination string `json:",omitempty"` + ProxyDestination string `json:",omitempty"` + Connect *AgentServiceConnect `json:",omitempty"` } // AgentCheckRegistration is used to register a new check @@ -185,6 +217,18 @@ type AgentAuthorize struct { Reason string } +// ConnectProxyConfig is the response structure for agent-local proxy +// configuration. +type ConnectProxyConfig struct { + ProxyServiceID string + TargetServiceID string + TargetServiceName string + ContentHash string + ExecMode ProxyExecMode + Command string + Config map[string]interface{} +} + // Agent can be used to query the Agent endpoints type Agent struct { c *Client @@ -286,6 +330,7 @@ func (a *Agent) Services() (map[string]*AgentService, error) { if err := decodeBody(resp, &out); err != nil { return nil, err } + return out, nil } @@ -587,6 +632,31 @@ func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *Qu return &out, qm, nil } +// ConnectProxyConfig gets the configuration for a local managed proxy instance. +// +// Note that this uses an unconventional blocking mechanism since it's +// agent-local state. That means there is no persistent raft index so we block +// based on object hash instead. 
+func (a *Agent) ConnectProxyConfig(proxyServiceID string, q *QueryOptions) (*ConnectProxyConfig, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/agent/connect/proxy/"+proxyServiceID) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ConnectProxyConfig + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + // EnableServiceMaintenance toggles service maintenance mode on // for the given service ID. func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { diff --git a/api/agent_test.go b/api/agent_test.go index 6186bffe3..01d35ae15 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -186,7 +186,64 @@ func TestAPI_AgentServices(t *testing.T) { } } -func TestAPI_AgentServices_ConnectProxy(t *testing.T) { +func TestAPI_AgentServices_ManagedConnectProxy(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: &AgentServiceCheck{ + TTL: "15s", + }, + Connect: &AgentServiceConnect{ + Proxy: &AgentServiceConnectProxy{ + ExecMode: ProxyExecModeScript, + Command: "foo.rb", + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + services, err := agent.Services() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := services["foo"]; !ok { + t.Fatalf("missing service: %v", services) + } + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + chk, ok := checks["service:foo"] + if !ok { + t.Fatalf("missing check: %v", checks) + } + + // Checks should default to critical + if chk.Status != HealthCritical { + t.Fatalf("Bad: %#v", chk) + } + + // Proxy 
config should be present in response + require.Equal(t, reg.Connect, services["foo"].Connect) + + if err := agent.ServiceDeregister("foo"); err != nil { + t.Fatalf("err: %v", err) + } +} + +func TestAPI_AgentServices_ExternalConnectProxy(t *testing.T) { t.Parallel() c, s := makeClient(t) defer s.Stop() @@ -1019,3 +1076,31 @@ func TestAPI_AgentConnectAuthorize(t *testing.T) { require.True(auth.Authorized) require.Equal(auth.Reason, "ACLs disabled, access is allowed by default") } + +func TestAPI_AgentConnectProxyConfig(t *testing.T) { + t.Parallel() + c, s := makeClient(t) + defer s.Stop() + + agent := c.Agent() + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + Check: &AgentServiceCheck{ + CheckID: "foo-ttl", + TTL: "15s", + }, + } + if err := agent.ServiceRegister(reg); err != nil { + t.Fatalf("err: %v", err) + } + + checks, err := agent.Checks() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, ok := checks["foo-ttl"]; !ok { + t.Fatalf("missing check: %v", checks) + } +} diff --git a/api/api.go b/api/api.go index 1cdc21e33..6f3034d90 100644 --- a/api/api.go +++ b/api/api.go @@ -82,6 +82,12 @@ type QueryOptions struct { // until the timeout or the next index is reached WaitIndex uint64 + // WaitHash is used by some endpoints instead of WaitIndex to perform blocking + // on state based on a hash of the response rather than a monotonic index. + // This is required when the state being blocked on is not stored in Raft, for + // example agent-local proxy configuration. + WaitHash string + // WaitTime is used to bound the duration of a wait. // Defaults to that of the Config, but can be overridden. 
WaitTime time.Duration @@ -533,6 +539,9 @@ func (r *request) setQueryOptions(q *QueryOptions) { if q.WaitTime != 0 { r.params.Set("wait", durToMsec(q.WaitTime)) } + if q.WaitHash != "" { + r.params.Set("hash", q.WaitHash) + } if q.Token != "" { r.header.Set("X-Consul-Token", q.Token) } From f7ff16669fe466c217d245fd3c6d3d80ef6643a1 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 6 Apr 2018 17:13:22 -0700 Subject: [PATCH 139/627] Add the Connect CA config to the state store --- agent/consul/state/connect_ca.go | 124 ++++++++++++++++++++++- agent/consul/state/connect_ca_test.go | 137 ++++++++++++++++++++++++++ 2 files changed, 260 insertions(+), 1 deletion(-) diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index 95e763b8b..f5962b084 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -8,9 +8,28 @@ import ( ) const ( - caRootTableName = "connect-ca-roots" + caConfigTableName = "connect-ca-config" + caRootTableName = "connect-ca-roots" ) +// caConfigTableSchema returns a new table schema used for storing +// the CA config for Connect. +func caConfigTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: caConfigTableName, + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + AllowMissing: true, + Unique: true, + Indexer: &memdb.ConditionalIndex{ + Conditional: func(obj interface{}) (bool, error) { return true, nil }, + }, + }, + }, + } +} + // caRootTableSchema returns a new table schema used for storing // CA roots for Connect. func caRootTableSchema() *memdb.TableSchema { @@ -30,9 +49,112 @@ func caRootTableSchema() *memdb.TableSchema { } func init() { + registerSchema(caConfigTableSchema) registerSchema(caRootTableSchema) } +// CAConfig is used to pull the CA config from the snapshot. 
+func (s *Snapshot) CAConfig() (*structs.CAConfiguration, error) { + c, err := s.tx.First("connect-ca-config", "id") + if err != nil { + return nil, err + } + + config, ok := c.(*structs.CAConfiguration) + if !ok { + return nil, nil + } + + return config, nil +} + +// CAConfig is used when restoring from a snapshot. +func (s *Restore) CAConfig(config *structs.CAConfiguration) error { + if err := s.tx.Insert("connect-ca-config", config); err != nil { + return fmt.Errorf("failed restoring CA config: %s", err) + } + + return nil +} + +// CAConfig is used to get the current Autopilot configuration. +func (s *Store) CAConfig() (uint64, *structs.CAConfiguration, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + // Get the autopilot config + c, err := tx.First("connect-ca-config", "id") + if err != nil { + return 0, nil, fmt.Errorf("failed CA config lookup: %s", err) + } + + config, ok := c.(*structs.CAConfiguration) + if !ok { + return 0, nil, nil + } + + return config.ModifyIndex, config, nil +} + +// CASetConfig is used to set the current Autopilot configuration. +func (s *Store) CASetConfig(idx uint64, config *structs.CAConfiguration) error { + tx := s.db.Txn(true) + defer tx.Abort() + + s.caSetConfigTxn(idx, tx, config) + + tx.Commit() + return nil +} + +// CACheckAndSetConfig is used to try updating the CA configuration with a +// given Raft index. If the CAS index specified is not equal to the last observed index +// for the config, then the call is a noop, +func (s *Store) CACheckAndSetConfig(idx, cidx uint64, config *structs.CAConfiguration) (bool, error) { + tx := s.db.Txn(true) + defer tx.Abort() + + // Check for an existing config + existing, err := tx.First("connect-ca-config", "id") + if err != nil { + return false, fmt.Errorf("failed CA config lookup: %s", err) + } + + // If the existing index does not match the provided CAS + // index arg, then we shouldn't update anything and can safely + // return early here. 
+ e, ok := existing.(*structs.CAConfiguration) + if !ok || e.ModifyIndex != cidx { + return false, nil + } + + s.caSetConfigTxn(idx, tx, config) + + tx.Commit() + return true, nil +} + +func (s *Store) caSetConfigTxn(idx uint64, tx *memdb.Txn, config *structs.CAConfiguration) error { + // Check for an existing config + existing, err := tx.First("connect-ca-config", "id") + if err != nil { + return fmt.Errorf("failed CA config lookup: %s", err) + } + + // Set the indexes. + if existing != nil { + config.CreateIndex = existing.(*structs.CAConfiguration).CreateIndex + } else { + config.CreateIndex = idx + } + config.ModifyIndex = idx + + if err := tx.Insert("connect-ca-config", config); err != nil { + return fmt.Errorf("failed updating CA config: %s", err) + } + return nil +} + // CARoots is used to pull all the CA roots for the snapshot. func (s *Snapshot) CARoots() (structs.CARoots, error) { ixns, err := s.tx.Get(caRootTableName, "id") diff --git a/agent/consul/state/connect_ca_test.go b/agent/consul/state/connect_ca_test.go index cd77eac7c..cd37f526b 100644 --- a/agent/consul/state/connect_ca_test.go +++ b/agent/consul/state/connect_ca_test.go @@ -1,14 +1,151 @@ package state import ( + "reflect" "testing" + "time" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" + "github.com/pascaldekloe/goe/verify" "github.com/stretchr/testify/assert" ) +func TestStore_CAConfig(t *testing.T) { + s := testStateStore(t) + + expected := &structs.CAConfiguration{ + Provider: "consul", + Config: map[string]interface{}{ + "PrivateKey": "asdf", + "RootCert": "qwer", + "RotationPeriod": 90 * 24 * time.Hour, + }, + } + + if err := s.CASetConfig(0, expected); err != nil { + t.Fatal(err) + } + + idx, config, err := s.CAConfig() + if err != nil { + t.Fatal(err) + } + if idx != 0 { + t.Fatalf("bad: %d", idx) + } + if !reflect.DeepEqual(expected, config) { + t.Fatalf("bad: %#v, %#v", expected, config) + } +} + +func 
TestStore_CAConfigCAS(t *testing.T) { + s := testStateStore(t) + + expected := &structs.CAConfiguration{ + Provider: "consul", + } + + if err := s.CASetConfig(0, expected); err != nil { + t.Fatal(err) + } + // Do an extra operation to move the index up by 1 for the + // check-and-set operation after this + if err := s.CASetConfig(1, expected); err != nil { + t.Fatal(err) + } + + // Do a CAS with an index lower than the entry + ok, err := s.CACheckAndSetConfig(2, 0, &structs.CAConfiguration{ + Provider: "static", + }) + if ok || err != nil { + t.Fatalf("expected (false, nil), got: (%v, %#v)", ok, err) + } + + // Check that the index is untouched and the entry + // has not been updated. + idx, config, err := s.CAConfig() + if err != nil { + t.Fatal(err) + } + if idx != 1 { + t.Fatalf("bad: %d", idx) + } + if config.Provider != "consul" { + t.Fatalf("bad: %#v", config) + } + + // Do another CAS, this time with the correct index + ok, err = s.CACheckAndSetConfig(2, 1, &structs.CAConfiguration{ + Provider: "static", + }) + if !ok || err != nil { + t.Fatalf("expected (true, nil), got: (%v, %#v)", ok, err) + } + + // Make sure the config was updated + idx, config, err = s.CAConfig() + if err != nil { + t.Fatal(err) + } + if idx != 2 { + t.Fatalf("bad: %d", idx) + } + if config.Provider != "static" { + t.Fatalf("bad: %#v", config) + } +} + +func TestStore_CAConfig_Snapshot_Restore(t *testing.T) { + s := testStateStore(t) + before := &structs.CAConfiguration{ + Provider: "consul", + Config: map[string]interface{}{ + "PrivateKey": "asdf", + "RootCert": "qwer", + "RotationPeriod": 90 * 24 * time.Hour, + }, + } + if err := s.CASetConfig(99, before); err != nil { + t.Fatal(err) + } + + snap := s.Snapshot() + defer snap.Close() + + after := &structs.CAConfiguration{ + Provider: "static", + Config: map[string]interface{}{}, + } + if err := s.CASetConfig(100, after); err != nil { + t.Fatal(err) + } + + snapped, err := snap.CAConfig() + if err != nil { + t.Fatalf("err: %s", err) + 
} + verify.Values(t, "", before, snapped) + + s2 := testStateStore(t) + restore := s2.Restore() + if err := restore.CAConfig(snapped); err != nil { + t.Fatalf("err: %s", err) + } + restore.Commit() + + idx, res, err := s2.CAConfig() + if err != nil { + t.Fatalf("err: %s", err) + } + if idx != 99 { + t.Fatalf("bad index: %d", idx) + } + verify.Values(t, "", before, res) +} + func TestStore_CARootSetList(t *testing.T) { assert := assert.New(t) s := testStateStore(t) From ebdda17a301c035e5d2baae3e46a9fdd84f85819 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 6 Apr 2018 17:58:45 -0700 Subject: [PATCH 140/627] Add CA config set to fsm operations --- agent/consul/fsm/commands_oss.go | 13 ++++- agent/consul/fsm/commands_oss_test.go | 70 ++++++++++++++++++++++++++- 2 files changed, 81 insertions(+), 2 deletions(-) diff --git a/agent/consul/fsm/commands_oss.go b/agent/consul/fsm/commands_oss.go index 2d2627748..a5ef33efc 100644 --- a/agent/consul/fsm/commands_oss.go +++ b/agent/consul/fsm/commands_oss.go @@ -283,7 +283,18 @@ func (c *FSM) applyConnectCAOperation(buf []byte, index uint64) interface{} { defer metrics.MeasureSinceWithLabels([]string{"fsm", "ca"}, time.Now(), []metrics.Label{{Name: "op", Value: string(req.Op)}}) switch req.Op { - case structs.CAOpSet: + case structs.CAOpSetConfig: + if req.Config.ModifyIndex != 0 { + act, err := c.state.CACheckAndSetConfig(index, req.Config.ModifyIndex, req.Config) + if err != nil { + return err + } + + return act + } + + return c.state.CASetConfig(index, req.Config) + case structs.CAOpSetRoots: act, err := c.state.CARootSetCAS(index, req.Index, req.Roots) if err != nil { return err diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index 81852a9c4..a6552240c 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/consul/types" "github.com/hashicorp/go-uuid" 
"github.com/hashicorp/serf/coordinate" + "github.com/mitchellh/mapstructure" "github.com/pascaldekloe/goe/verify" "github.com/stretchr/testify/assert" ) @@ -1219,6 +1220,73 @@ func TestFSM_Intention_CRUD(t *testing.T) { } } +func TestFSM_CAConfig(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + fsm, err := New(nil, os.Stderr) + assert.Nil(err) + + // Set the autopilot config using a request. + req := structs.CARequest{ + Op: structs.CAOpSetConfig, + Config: &structs.CAConfiguration{ + Provider: "consul", + Config: map[string]interface{}{ + "PrivateKey": "asdf", + "RootCert": "qwer", + "RotationPeriod": 90 * 24 * time.Hour, + }, + }, + } + buf, err := structs.Encode(structs.ConnectCARequestType, req) + assert.Nil(err) + resp := fsm.Apply(makeLog(buf)) + if _, ok := resp.(error); ok { + t.Fatalf("bad: %v", resp) + } + + // Verify key is set directly in the state store. + _, config, err := fsm.state.CAConfig() + if err != nil { + t.Fatalf("err: %v", err) + } + var conf *connect.ConsulCAProviderConfig + if err := mapstructure.WeakDecode(config.Config, &conf); err != nil { + t.Fatalf("error decoding config: %s, %v", err, config.Config) + } + if got, want := config.Provider, req.Config.Provider; got != want { + t.Fatalf("got %v, want %v", got, want) + } + if got, want := conf.PrivateKey, "asdf"; got != want { + t.Fatalf("got %v, want %v", got, want) + } + if got, want := conf.RootCert, "qwer"; got != want { + t.Fatalf("got %v, want %v", got, want) + } + if got, want := conf.RotationPeriod, 90*24*time.Hour; got != want { + t.Fatalf("got %v, want %v", got, want) + } + + // Now use CAS and provide an old index + req.Config.Provider = "static" + req.Config.ModifyIndex = config.ModifyIndex - 1 + buf, err = structs.Encode(structs.ConnectCARequestType, req) + if err != nil { + t.Fatalf("err: %v", err) + } + resp = fsm.Apply(makeLog(buf)) + if _, ok := resp.(error); ok { + t.Fatalf("bad: %v", resp) + } + + _, config, err = fsm.state.CAConfig() + assert.Nil(err) + if 
config.Provider != "static" { + t.Fatalf("bad: %v", config.Provider) + } +} + func TestFSM_CARoots(t *testing.T) { t.Parallel() @@ -1233,7 +1301,7 @@ func TestFSM_CARoots(t *testing.T) { // Create a new request. req := structs.CARequest{ - Op: structs.CAOpSet, + Op: structs.CAOpSetRoots, Roots: []*structs.CARoot{ca1, ca2}, } From 4d0713d5bb8c0580a13e49e6564a392c13bf18ee Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Sun, 8 Apr 2018 21:56:11 -0700 Subject: [PATCH 141/627] Add the CA provider interface and built-in provider --- agent/connect/ca_provider.go | 278 +++++++++++++++++++++++++++++++++++ 1 file changed, 278 insertions(+) create mode 100644 agent/connect/ca_provider.go diff --git a/agent/connect/ca_provider.go b/agent/connect/ca_provider.go new file mode 100644 index 000000000..2aa1881f8 --- /dev/null +++ b/agent/connect/ca_provider.go @@ -0,0 +1,278 @@ +package connect + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net/url" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/consul/agent/structs" + uuid "github.com/hashicorp/go-uuid" + "github.com/mitchellh/mapstructure" +) + +// CAProvider is the interface for Consul to interact with +// an external CA that provides leaf certificate signing for +// given SpiffeIDServices. 
+type CAProvider interface { + SetConfiguration(raw map[string]interface{}) error + ActiveRoot() (*structs.CARoot, error) + ActiveIntermediate() (*structs.CARoot, error) + RotateIntermediate() error + Sign(*SpiffeIDService, *x509.CertificateRequest) (*structs.IssuedCert, error) +} + +type ConsulCAProviderConfig struct { + PrivateKey string + RootCert string + RotationPeriod time.Duration +} + +type ConsulCAProvider struct { + config *ConsulCAProviderConfig + + // todo(kyhavlov): store these directly in the state store + // and pass a reference to the state to this provider instead of + // having these values here + privateKey string + caRoot *structs.CARoot + caIndex uint64 + sync.RWMutex +} + +func NewConsulCAProvider(rawConfig map[string]interface{}) (*ConsulCAProvider, error) { + provider := &ConsulCAProvider{} + provider.SetConfiguration(rawConfig) + + return provider, nil +} + +func (c *ConsulCAProvider) SetConfiguration(raw map[string]interface{}) error { + conf, err := decodeConfig(raw) + if err != nil { + return err + } + + c.config = conf + return nil +} + +func decodeConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { + var config *ConsulCAProviderConfig + if err := mapstructure.WeakDecode(raw, &config); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + return config, nil +} + +func (c *ConsulCAProvider) ActiveRoot() (*structs.CARoot, error) { + if c.privateKey == "" { + pk, err := generatePrivateKey() + if err != nil { + return nil, err + } + c.privateKey = pk + } + + if c.caRoot == nil { + ca, err := c.generateCA() + if err != nil { + return nil, err + } + c.caRoot = ca + } + + return c.caRoot, nil +} + +func (c *ConsulCAProvider) ActiveIntermediate() (*structs.CARoot, error) { + return c.ActiveRoot() +} + +func (c *ConsulCAProvider) RotateIntermediate() error { + ca, err := c.generateCA() + if err != nil { + return err + } + c.caRoot = ca + + return nil +} + +// Sign returns a new certificate valid for 
the given SpiffeIDService +// using the current CA. +func (c *ConsulCAProvider) Sign(serviceId *SpiffeIDService, csr *x509.CertificateRequest) (*structs.IssuedCert, error) { + // The serial number for the cert. + // todo(kyhavlov): increment this based on raft index once the provider uses + // the state store directly + sn, err := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) + if err != nil { + return nil, fmt.Errorf("error generating serial number: %s", err) + } + + // Create the keyId for the cert from the signing public key. + signer, err := ParseSigner(c.privateKey) + if err != nil { + return nil, err + } + if signer == nil { + return nil, fmt.Errorf("error signing cert: Consul CA not initialized yet") + } + keyId, err := KeyId(signer.Public()) + if err != nil { + return nil, err + } + + // Parse the CA cert + caCert, err := ParseCert(c.caRoot.RootCert) + if err != nil { + return nil, fmt.Errorf("error parsing CA cert: %s", err) + } + + // Cert template for generation + template := x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{CommonName: serviceId.Service}, + URIs: csr.URIs, + Signature: csr.Signature, + SignatureAlgorithm: csr.SignatureAlgorithm, + PublicKeyAlgorithm: csr.PublicKeyAlgorithm, + PublicKey: csr.PublicKey, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageDataEncipherment | + x509.KeyUsageKeyAgreement | + x509.KeyUsageDigitalSignature | + x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + NotAfter: time.Now().Add(3 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: keyId, + SubjectKeyId: keyId, + } + + // Create the certificate, PEM encode it and return that value. 
+ var buf bytes.Buffer + bs, err := x509.CreateCertificate( + rand.Reader, &template, caCert, signer.Public(), signer) + if err != nil { + return nil, fmt.Errorf("error generating certificate: %s", err) + } + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return nil, fmt.Errorf("error encoding private key: %s", err) + } + + // Set the response + return &structs.IssuedCert{ + SerialNumber: HexString(template.SerialNumber.Bytes()), + CertPEM: buf.String(), + Service: serviceId.Service, + ServiceURI: template.URIs[0].String(), + ValidAfter: template.NotBefore, + ValidBefore: template.NotAfter, + }, nil +} + +// generatePrivateKey returns a new private key +func generatePrivateKey() (string, error) { + var pk *ecdsa.PrivateKey + + // If we have no key, then create a new one. + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return "", fmt.Errorf("error generating private key: %s", err) + } + + bs, err := x509.MarshalECPrivateKey(pk) + if err != nil { + return "", fmt.Errorf("error generating private key: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) + if err != nil { + return "", fmt.Errorf("error encoding private key: %s", err) + } + + return buf.String(), nil +} + +// generateCA makes a new root CA using the given private key +func (c *ConsulCAProvider) generateCA() (*structs.CARoot, error) { + privKey, err := ParseSigner(c.privateKey) + if err != nil { + return nil, err + } + + name := fmt.Sprintf("Consul CA %d", atomic.AddUint64(&c.caIndex, 1)) + + // The serial number for the cert + sn, err := testSerialNumber() + if err != nil { + return nil, err + } + + // The URI (SPIFFE compatible) for the cert + id := &SpiffeIDSigning{ClusterID: testClusterID, Domain: "consul"} + keyId, err := KeyId(privKey.Public()) + if err != nil { + return nil, err + } + + // Create the CA cert + template := x509.Certificate{ + SerialNumber: sn, + 
Subject: pkix.Name{CommonName: name}, + URIs: []*url.URL{id.URI()}, + PermittedDNSDomainsCritical: true, + PermittedDNSDomains: []string{id.URI().Hostname()}, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign | + x509.KeyUsageCRLSign | + x509.KeyUsageDigitalSignature, + IsCA: true, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: keyId, + SubjectKeyId: keyId, + } + + bs, err := x509.CreateCertificate( + rand.Reader, &template, &template, privKey.Public(), privKey) + if err != nil { + return nil, fmt.Errorf("error generating CA certificate: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return nil, fmt.Errorf("error encoding private key: %s", err) + } + + // Generate an ID for the new intermediate + rootId, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + return &structs.CARoot{ + ID: rootId, + Name: name, + RootCert: buf.String(), + SigningKey: c.privateKey, + Active: true, + }, nil +} From e26819ed9c00aaa426b85a800fe6d45f2119caec Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Sun, 8 Apr 2018 21:56:46 -0700 Subject: [PATCH 142/627] Add the bootstrap config for the CA --- agent/consul/config.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/agent/consul/config.go b/agent/consul/config.go index 6966b5628..df4e55e42 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/consul/agent/consul/autopilot" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/lib" "github.com/hashicorp/consul/tlsutil" "github.com/hashicorp/consul/types" @@ -346,6 +347,10 @@ type Config struct { // autopilot tasks, such as promoting eligible non-voters and removing // dead servers. AutopilotInterval time.Duration + + // CAConfig is used to apply the initial Connect CA configuration when + // bootstrapping. 
+ CAConfig *structs.CAConfiguration } // CheckProtocolVersion validates the protocol version. @@ -427,6 +432,15 @@ func DefaultConfig() *Config { ServerHealthInterval: 2 * time.Second, AutopilotInterval: 10 * time.Second, + + CAConfig: &structs.CAConfiguration{ + Provider: "consul", + Config: map[string]interface{}{ + "PrivateKey": "", + "RootCert": "", + "RotationPeriod": 90 * 24 * time.Hour, + }, + }, } // Increase our reap interval to 3 days instead of 24h. From a40db26ffeea869ee9d9e6d6472e47caff28315c Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Sun, 8 Apr 2018 21:57:32 -0700 Subject: [PATCH 143/627] Add CA bootstrapping on establishing leadership --- agent/consul/leader.go | 107 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/agent/consul/leader.go b/agent/consul/leader.go index d950d71ba..516201262 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -1,12 +1,15 @@ package consul import ( + "crypto/x509" "fmt" "net" "strconv" "sync" "time" + "github.com/hashicorp/consul/agent/connect" + "github.com/armon/go-metrics" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/autopilot" @@ -210,6 +213,10 @@ func (s *Server) establishLeadership() error { s.getOrCreateAutopilotConfig() s.autopilot.Start() + + // todo(kyhavlov): start a goroutine here for handling periodic CA rotation + s.bootstrapCA() + s.setConsistentReadReady() return nil } @@ -359,6 +366,106 @@ func (s *Server) getOrCreateAutopilotConfig() *autopilot.Config { return config } +// getOrCreateCAConfig is used to get the CA config, initializing it if necessary +func (s *Server) getOrCreateCAConfig() (*structs.CAConfiguration, error) { + state := s.fsm.State() + _, config, err := state.CAConfig() + if err != nil { + return nil, err + } + if config != nil { + return config, nil + } + + config = s.config.CAConfig + req := structs.CARequest{ + Op: structs.CAOpSetConfig, + Config: config, + } + if _, err = 
s.raftApply(structs.ConnectCARequestType, req); err != nil { + return nil, err + } + + return config, nil +} + +// bootstrapCA handles the initialization of a new CA provider +func (s *Server) bootstrapCA() error { + conf, err := s.getOrCreateCAConfig() + if err != nil { + return err + } + + // Initialize the right provider based on the config + var provider connect.CAProvider + switch conf.Provider { + case structs.ConsulCAProvider: + provider, err = connect.NewConsulCAProvider(conf.Config) + if err != nil { + return err + } + default: + return fmt.Errorf("unknown CA provider %q", conf.Provider) + } + + s.caProviderLock.Lock() + s.caProvider = provider + s.caProviderLock.Unlock() + + // Get the intermediate cert from the CA + trustedCA, err := provider.ActiveIntermediate() + if err != nil { + return fmt.Errorf("error getting intermediate cert: %v", err) + } + + // Check if this CA is already initialized + state := s.fsm.State() + _, root, err := state.CARootActive(nil) + if err != nil { + return err + } + // Exit early if the root is already in the state store. 
+ if root != nil && root.ID == trustedCA.ID { + return nil + } + + // Get the highest index + idx, _, err := state.CARoots(nil) + if err != nil { + return err + } + + // Store the intermediate in raft + resp, err := s.raftApply(structs.ConnectCARequestType, &structs.CARequest{ + Op: structs.CAOpSetRoots, + Index: idx, + Roots: []*structs.CARoot{trustedCA}, + }) + if err != nil { + s.logger.Printf("[ERR] connect: Apply failed %v", err) + return err + } + if respErr, ok := resp.(error); ok { + return respErr + } + + s.logger.Printf("[INFO] connect: initialized CA with provider %q", conf.Provider) + + return nil +} + +// signConnectCert signs a cert for a service using the currently configured CA provider +func (s *Server) signConnectCert(service *connect.SpiffeIDService, csr *x509.CertificateRequest) (*structs.IssuedCert, error) { + s.caProviderLock.RLock() + defer s.caProviderLock.RUnlock() + + cert, err := s.caProvider.Sign(service, csr) + if err != nil { + return nil, err + } + return cert, nil +} + // reconcileReaped is used to reconcile nodes that have failed and been reaped // from Serf but remain in the catalog. This is done by looking for unknown nodes with serfHealth checks registered. // We generate a "reap" event to cause the node to be cleaned up. 
From fc9ef9741b267b502a48f533ebf5f1e069236b20 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Sun, 8 Apr 2018 21:58:31 -0700 Subject: [PATCH 144/627] Hook the CA RPC endpoint into the provider interface --- agent/consul/connect_ca_endpoint.go | 208 +++++++--------------------- agent/consul/server.go | 6 + agent/structs/connect_ca.go | 26 +++- 3 files changed, 78 insertions(+), 162 deletions(-) diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 4efdafc06..84cffc85d 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -1,22 +1,13 @@ package consul import ( - "bytes" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" "fmt" - "math/big" - "time" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" - "github.com/hashicorp/go-uuid" - "github.com/mitchellh/go-testing-interface" - "github.com/mitchellh/mapstructure" ) // ConnectCA manages the Connect CA. @@ -25,81 +16,54 @@ type ConnectCA struct { srv *Server } -// ConfigurationSet updates the configuration for the CA. -// -// NOTE(mitchellh): This whole implementation is temporary until the real -// CA plugin work comes in. For now, this is only used to configure a single -// static CA root. -func (s *ConnectCA) ConfigurationSet( - args *structs.CAConfiguration, - reply *interface{}) error { - // NOTE(mitchellh): This is the temporary hardcoding of a static CA - // provider. This will allow us to test agent implementations and so on - // with an incomplete CA for now. 
- if args.Provider != "static" { - return fmt.Errorf("The CA provider can only be 'static' for now") - } - - // Config is the configuration allowed for our static provider - var config struct { - Name string - CertPEM string - PrivateKeyPEM string - Generate bool - } - if err := mapstructure.Decode(args.Config, &config); err != nil { - return fmt.Errorf("error decoding config: %s", err) - } - - // Basic validation so demos aren't super jank - if config.Name == "" { - return fmt.Errorf("Name must be set") - } - if config.CertPEM == "" || config.PrivateKeyPEM == "" { - if !config.Generate { - return fmt.Errorf( - "CertPEM and PrivateKeyPEM must be set, or Generate must be true") - } - } - - // Convenience to auto-generate the cert - if config.Generate { - ca := connect.TestCA(&testing.RuntimeT{}, nil) - config.CertPEM = ca.RootCert - config.PrivateKeyPEM = ca.SigningKey - } - - // TODO(mitchellh): verify that the private key is valid for the cert - - // Generate an ID for this - id, err := uuid.GenerateUUID() - if err != nil { +// ConfigurationGet returns the configuration for the CA. +func (s *ConnectCA) ConfigurationGet( + args *structs.DCSpecificRequest, + reply *structs.CAConfiguration) error { + if done, err := s.srv.forward("ConnectCA.ConfigurationGet", args, args, reply); done { return err } - // Get the highest index + // This action requires operator read access. + rule, err := s.srv.resolveToken(args.Token) + if err != nil { + return err + } + if rule != nil && !rule.OperatorRead() { + return acl.ErrPermissionDenied + } + state := s.srv.fsm.State() - idx, _, err := state.CARoots(nil) + _, config, err := state.CAConfig() if err != nil { return err } + *reply = *config + + return nil +} + +// ConfigurationSet updates the configuration for the CA. 
+func (s *ConnectCA) ConfigurationSet( + args *structs.CARequest, + reply *interface{}) error { + if done, err := s.srv.forward("ConnectCA.ConfigurationSet", args, args, reply); done { + return err + } + + // This action requires operator read access. + rule, err := s.srv.resolveToken(args.Token) + if err != nil { + return err + } + if rule != nil && !rule.OperatorWrite() { + return acl.ErrPermissionDenied + } // Commit - resp, err := s.srv.raftApply(structs.ConnectCARequestType, &structs.CARequest{ - Op: structs.CAOpSet, - Index: idx, - Roots: []*structs.CARoot{ - &structs.CARoot{ - ID: id, - Name: config.Name, - RootCert: config.CertPEM, - SigningKey: config.PrivateKeyPEM, - Active: true, - }, - }, - }) + args.Op = structs.CAOpSetConfig + resp, err := s.srv.raftApply(structs.ConnectCARequestType, args) if err != nil { - s.srv.logger.Printf("[ERR] consul.test: Apply failed %v", err) return err } if respErr, ok := resp.(error); ok { @@ -157,13 +121,13 @@ func (s *ConnectCA) Roots( } // Sign signs a certificate for a service. -// -// NOTE(mitchellh): There is a LOT missing from this. I do next to zero -// validation of the incoming CSR, the way the cert is signed probably -// isn't right, we're not using enough of the CSR fields, etc. func (s *ConnectCA) Sign( args *structs.CASignRequest, reply *structs.IssuedCert) error { + if done, err := s.srv.forward("ConnectCA.Sign", args, args, reply); done { + return err + } + // Parse the CSR csr, err := connect.ParseCSR(args.CSR) if err != nil { @@ -180,93 +144,15 @@ func (s *ConnectCA) Sign( return fmt.Errorf("SPIFFE ID in CSR must be a service ID") } - // Get the currently active root - state := s.srv.fsm.State() - _, root, err := state.CARootActive(nil) + // todo(kyhavlov): more validation on the CSR before signing + + cert, err := s.srv.signConnectCert(serviceId, csr) if err != nil { return err } - if root == nil { - return fmt.Errorf("no active CA found") - } - - // Determine the signing certificate. 
It is the set signing cert - // unless that is empty, in which case it is identically to the public - // cert. - certPem := root.SigningCert - if certPem == "" { - certPem = root.RootCert - } - - // Parse the CA cert and signing key from the root - caCert, err := connect.ParseCert(certPem) - if err != nil { - return fmt.Errorf("error parsing CA cert: %s", err) - } - signer, err := connect.ParseSigner(root.SigningKey) - if err != nil { - return fmt.Errorf("error parsing signing key: %s", err) - } - - // The serial number for the cert. NOTE(mitchellh): in the final - // implementation this should be monotonically increasing based on - // some raft state. - sn, err := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) - if err != nil { - return fmt.Errorf("error generating serial number: %s", err) - } - - // Create the keyId for the cert from the signing public key. - keyId, err := connect.KeyId(signer.Public()) - if err != nil { - return err - } - - // Cert template for generation - template := x509.Certificate{ - SerialNumber: sn, - Subject: pkix.Name{CommonName: serviceId.Service}, - URIs: csr.URIs, - Signature: csr.Signature, - SignatureAlgorithm: csr.SignatureAlgorithm, - PublicKeyAlgorithm: csr.PublicKeyAlgorithm, - PublicKey: csr.PublicKey, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageDataEncipherment | - x509.KeyUsageKeyAgreement | - x509.KeyUsageDigitalSignature | - x509.KeyUsageKeyEncipherment, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageClientAuth, - x509.ExtKeyUsageServerAuth, - }, - NotAfter: time.Now().Add(3 * 24 * time.Hour), - NotBefore: time.Now(), - AuthorityKeyId: keyId, - SubjectKeyId: keyId, - } - - // Create the certificate, PEM encode it and return that value. 
- var buf bytes.Buffer - bs, err := x509.CreateCertificate( - rand.Reader, &template, caCert, signer.Public(), signer) - if err != nil { - return fmt.Errorf("error generating certificate: %s", err) - } - err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) - if err != nil { - return fmt.Errorf("error encoding private key: %s", err) - } // Set the response - *reply = structs.IssuedCert{ - SerialNumber: connect.HexString(template.SerialNumber.Bytes()), - CertPEM: buf.String(), - Service: serviceId.Service, - ServiceURI: template.URIs[0].String(), - ValidAfter: template.NotBefore, - ValidBefore: template.NotAfter, - } + *reply = *cert return nil } diff --git a/agent/consul/server.go b/agent/consul/server.go index 23fbf337c..fef016829 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -17,6 +17,8 @@ import ( "sync/atomic" "time" + "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/agent/consul/fsm" @@ -96,6 +98,10 @@ type Server struct { // autopilotWaitGroup is used to block until Autopilot shuts down. autopilotWaitGroup sync.WaitGroup + // caProvider is the current CA provider in use for Connect. + caProvider connect.CAProvider + caProviderLock sync.RWMutex + // Consul configuration config *Config diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 5ac8a0fc2..af8f82653 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -96,7 +96,8 @@ type IssuedCert struct { type CAOp string const ( - CAOpSet CAOp = "set" + CAOpSetRoots CAOp = "set-roots" + CAOpSetConfig CAOp = "set-config" ) // CARequest is used to modify connect CA data. This is used by the @@ -106,14 +107,33 @@ type CARequest struct { // other fields are required. Op CAOp + // Datacenter is the target for this request. + Datacenter string + // Index is used by CAOpSet for a CAS operation. 
Index uint64 // Roots is a list of roots. This is used for CAOpSet. One root must // always be active. Roots []*CARoot + + // Config is the configuration for the current CA plugin. + Config *CAConfiguration + + // WriteRequest is a common struct containing ACL tokens and other + // write-related common elements for requests. + WriteRequest } +// RequestDatacenter returns the datacenter for a given request. +func (q *CARequest) RequestDatacenter() string { + return q.Datacenter +} + +const ( + ConsulCAProvider = "consul" +) + // CAConfiguration is the configuration for the current CA plugin. type CAConfiguration struct { // Provider is the CA provider implementation to use. @@ -123,4 +143,8 @@ type CAConfiguration struct { // should only contain primitive values and containers (such as lists // and maps). Config map[string]interface{} + + // CreateIndex/ModifyIndex store the create/modify indexes of this configuration. + CreateIndex uint64 + ModifyIndex uint64 } From 9fefac745ea41bf11fe1c6d2d89b7df11305fad2 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Sun, 8 Apr 2018 21:59:08 -0700 Subject: [PATCH 145/627] Update the CA config endpoint to enable GETs --- agent/connect_ca_endpoint.go | 22 ++++++++++++++++++++-- agent/http_oss.go | 2 +- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/agent/connect_ca_endpoint.go b/agent/connect_ca_endpoint.go index 43eeb8644..979005df1 100644 --- a/agent/connect_ca_endpoint.go +++ b/agent/connect_ca_endpoint.go @@ -26,6 +26,9 @@ func (s *HTTPServer) ConnectCARoots(resp http.ResponseWriter, req *http.Request) // /v1/connect/ca/configuration func (s *HTTPServer) ConnectCAConfiguration(resp http.ResponseWriter, req *http.Request) (interface{}, error) { switch req.Method { + case "GET": + return s.ConnectCAConfigurationGet(resp, req) + case "PUT": return s.ConnectCAConfigurationSet(resp, req) @@ -34,12 +37,27 @@ func (s *HTTPServer) ConnectCAConfiguration(resp http.ResponseWriter, req *http. 
} } +// GET /v1/connect/ca/configuration +func (s *HTTPServer) ConnectCAConfigurationGet(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Method is tested in ConnectCAConfiguration + var args structs.DCSpecificRequest + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { + return nil, nil + } + + var reply structs.CAConfiguration + err := s.agent.RPC("ConnectCA.ConfigurationGet", &args, &reply) + return reply, err +} + // PUT /v1/connect/ca/configuration func (s *HTTPServer) ConnectCAConfigurationSet(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Method is tested in ConnectCAConfiguration - var args structs.CAConfiguration - if err := decodeBody(req, &args, nil); err != nil { + var args structs.CARequest + s.parseDC(req, &args.Datacenter) + s.parseToken(req, &args.Token) + if err := decodeBody(req, &args.Config, nil); err != nil { resp.WriteHeader(http.StatusBadRequest) fmt.Fprintf(resp, "Request decode failed: %v", err) return nil, nil diff --git a/agent/http_oss.go b/agent/http_oss.go index 774388ad3..124a26875 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -43,7 +43,7 @@ func init() { registerEndpoint("/v1/catalog/services", []string{"GET"}, (*HTTPServer).CatalogServices) registerEndpoint("/v1/catalog/service/", []string{"GET"}, (*HTTPServer).CatalogServiceNodes) registerEndpoint("/v1/catalog/node/", []string{"GET"}, (*HTTPServer).CatalogNodeServices) - registerEndpoint("/v1/connect/ca/configuration", []string{"PUT"}, (*HTTPServer).ConnectCAConfiguration) + registerEndpoint("/v1/connect/ca/configuration", []string{"GET", "PUT"}, (*HTTPServer).ConnectCAConfiguration) registerEndpoint("/v1/connect/ca/roots", []string{"GET"}, (*HTTPServer).ConnectCARoots) registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionEndpoint) registerEndpoint("/v1/connect/intentions/match", []string{"GET"}, (*HTTPServer).IntentionMatch) From 
80eddb0bfb25ad0396bc43ee330e253450200727 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Sun, 8 Apr 2018 21:59:59 -0700 Subject: [PATCH 146/627] Fix the testing endpoint's root set op --- agent/consul/testing_endpoint.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/consul/testing_endpoint.go b/agent/consul/testing_endpoint.go index e47e0e737..6e3cec12f 100644 --- a/agent/consul/testing_endpoint.go +++ b/agent/consul/testing_endpoint.go @@ -27,7 +27,7 @@ func (s *Test) ConnectCASetRoots( // Commit resp, err := s.srv.raftApply(structs.ConnectCARequestType, &structs.CARequest{ - Op: structs.CAOpSet, + Op: structs.CAOpSetRoots, Index: idx, Roots: args, }) From a585a0ba102dac88bd4a610239ce84f233fae16c Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 20 Apr 2018 01:30:34 -0700 Subject: [PATCH 147/627] Have the built in CA store its state in raft --- agent/connect/ca_provider.go | 262 +--------------------- agent/consul/connect_ca_endpoint.go | 1 + agent/consul/connect_ca_provider.go | 322 ++++++++++++++++++++++++++++ agent/consul/fsm/commands_oss.go | 7 + agent/consul/leader.go | 17 +- agent/consul/state/connect_ca.go | 123 +++++++++-- agent/structs/connect_ca.go | 27 ++- 7 files changed, 474 insertions(+), 285 deletions(-) create mode 100644 agent/consul/connect_ca_provider.go diff --git a/agent/connect/ca_provider.go b/agent/connect/ca_provider.go index 2aa1881f8..ca0ccf9b0 100644 --- a/agent/connect/ca_provider.go +++ b/agent/connect/ca_provider.go @@ -1,23 +1,9 @@ package connect import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "math/big" - "net/url" - "sync" - "sync/atomic" - "time" "github.com/hashicorp/consul/agent/structs" - uuid "github.com/hashicorp/go-uuid" - "github.com/mitchellh/mapstructure" ) // CAProvider is the interface for Consul to interact with @@ -27,252 +13,6 @@ type CAProvider interface { SetConfiguration(raw 
map[string]interface{}) error ActiveRoot() (*structs.CARoot, error) ActiveIntermediate() (*structs.CARoot, error) - RotateIntermediate() error + GenerateIntermediate() (*structs.CARoot, error) Sign(*SpiffeIDService, *x509.CertificateRequest) (*structs.IssuedCert, error) } - -type ConsulCAProviderConfig struct { - PrivateKey string - RootCert string - RotationPeriod time.Duration -} - -type ConsulCAProvider struct { - config *ConsulCAProviderConfig - - // todo(kyhavlov): store these directly in the state store - // and pass a reference to the state to this provider instead of - // having these values here - privateKey string - caRoot *structs.CARoot - caIndex uint64 - sync.RWMutex -} - -func NewConsulCAProvider(rawConfig map[string]interface{}) (*ConsulCAProvider, error) { - provider := &ConsulCAProvider{} - provider.SetConfiguration(rawConfig) - - return provider, nil -} - -func (c *ConsulCAProvider) SetConfiguration(raw map[string]interface{}) error { - conf, err := decodeConfig(raw) - if err != nil { - return err - } - - c.config = conf - return nil -} - -func decodeConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { - var config *ConsulCAProviderConfig - if err := mapstructure.WeakDecode(raw, &config); err != nil { - return nil, fmt.Errorf("error decoding config: %s", err) - } - - return config, nil -} - -func (c *ConsulCAProvider) ActiveRoot() (*structs.CARoot, error) { - if c.privateKey == "" { - pk, err := generatePrivateKey() - if err != nil { - return nil, err - } - c.privateKey = pk - } - - if c.caRoot == nil { - ca, err := c.generateCA() - if err != nil { - return nil, err - } - c.caRoot = ca - } - - return c.caRoot, nil -} - -func (c *ConsulCAProvider) ActiveIntermediate() (*structs.CARoot, error) { - return c.ActiveRoot() -} - -func (c *ConsulCAProvider) RotateIntermediate() error { - ca, err := c.generateCA() - if err != nil { - return err - } - c.caRoot = ca - - return nil -} - -// Sign returns a new certificate valid for the given 
SpiffeIDService -// using the current CA. -func (c *ConsulCAProvider) Sign(serviceId *SpiffeIDService, csr *x509.CertificateRequest) (*structs.IssuedCert, error) { - // The serial number for the cert. - // todo(kyhavlov): increment this based on raft index once the provider uses - // the state store directly - sn, err := rand.Int(rand.Reader, (&big.Int{}).Exp(big.NewInt(2), big.NewInt(159), nil)) - if err != nil { - return nil, fmt.Errorf("error generating serial number: %s", err) - } - - // Create the keyId for the cert from the signing public key. - signer, err := ParseSigner(c.privateKey) - if err != nil { - return nil, err - } - if signer == nil { - return nil, fmt.Errorf("error signing cert: Consul CA not initialized yet") - } - keyId, err := KeyId(signer.Public()) - if err != nil { - return nil, err - } - - // Parse the CA cert - caCert, err := ParseCert(c.caRoot.RootCert) - if err != nil { - return nil, fmt.Errorf("error parsing CA cert: %s", err) - } - - // Cert template for generation - template := x509.Certificate{ - SerialNumber: sn, - Subject: pkix.Name{CommonName: serviceId.Service}, - URIs: csr.URIs, - Signature: csr.Signature, - SignatureAlgorithm: csr.SignatureAlgorithm, - PublicKeyAlgorithm: csr.PublicKeyAlgorithm, - PublicKey: csr.PublicKey, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageDataEncipherment | - x509.KeyUsageKeyAgreement | - x509.KeyUsageDigitalSignature | - x509.KeyUsageKeyEncipherment, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageClientAuth, - x509.ExtKeyUsageServerAuth, - }, - NotAfter: time.Now().Add(3 * 24 * time.Hour), - NotBefore: time.Now(), - AuthorityKeyId: keyId, - SubjectKeyId: keyId, - } - - // Create the certificate, PEM encode it and return that value. 
- var buf bytes.Buffer - bs, err := x509.CreateCertificate( - rand.Reader, &template, caCert, signer.Public(), signer) - if err != nil { - return nil, fmt.Errorf("error generating certificate: %s", err) - } - err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) - if err != nil { - return nil, fmt.Errorf("error encoding private key: %s", err) - } - - // Set the response - return &structs.IssuedCert{ - SerialNumber: HexString(template.SerialNumber.Bytes()), - CertPEM: buf.String(), - Service: serviceId.Service, - ServiceURI: template.URIs[0].String(), - ValidAfter: template.NotBefore, - ValidBefore: template.NotAfter, - }, nil -} - -// generatePrivateKey returns a new private key -func generatePrivateKey() (string, error) { - var pk *ecdsa.PrivateKey - - // If we have no key, then create a new one. - pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return "", fmt.Errorf("error generating private key: %s", err) - } - - bs, err := x509.MarshalECPrivateKey(pk) - if err != nil { - return "", fmt.Errorf("error generating private key: %s", err) - } - - var buf bytes.Buffer - err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) - if err != nil { - return "", fmt.Errorf("error encoding private key: %s", err) - } - - return buf.String(), nil -} - -// generateCA makes a new root CA using the given private key -func (c *ConsulCAProvider) generateCA() (*structs.CARoot, error) { - privKey, err := ParseSigner(c.privateKey) - if err != nil { - return nil, err - } - - name := fmt.Sprintf("Consul CA %d", atomic.AddUint64(&c.caIndex, 1)) - - // The serial number for the cert - sn, err := testSerialNumber() - if err != nil { - return nil, err - } - - // The URI (SPIFFE compatible) for the cert - id := &SpiffeIDSigning{ClusterID: testClusterID, Domain: "consul"} - keyId, err := KeyId(privKey.Public()) - if err != nil { - return nil, err - } - - // Create the CA cert - template := x509.Certificate{ - SerialNumber: sn, - 
Subject: pkix.Name{CommonName: name}, - URIs: []*url.URL{id.URI()}, - PermittedDNSDomainsCritical: true, - PermittedDNSDomains: []string{id.URI().Hostname()}, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageCertSign | - x509.KeyUsageCRLSign | - x509.KeyUsageDigitalSignature, - IsCA: true, - NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), - NotBefore: time.Now(), - AuthorityKeyId: keyId, - SubjectKeyId: keyId, - } - - bs, err := x509.CreateCertificate( - rand.Reader, &template, &template, privKey.Public(), privKey) - if err != nil { - return nil, fmt.Errorf("error generating CA certificate: %s", err) - } - - var buf bytes.Buffer - err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) - if err != nil { - return nil, fmt.Errorf("error encoding private key: %s", err) - } - - // Generate an ID for the new intermediate - rootId, err := uuid.GenerateUUID() - if err != nil { - return nil, err - } - - return &structs.CARoot{ - ID: rootId, - Name: name, - RootCert: buf.String(), - SigningKey: c.privateKey, - Active: true, - }, nil -} diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 84cffc85d..d0c582165 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -61,6 +61,7 @@ func (s *ConnectCA) ConfigurationSet( } // Commit + // todo(kyhavlov): trigger a bootstrap here when the provider changes args.Op = structs.CAOpSetConfig resp, err := s.srv.raftApply(structs.ConnectCARequestType, args) if err != nil { diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go new file mode 100644 index 000000000..9beb6bfac --- /dev/null +++ b/agent/consul/connect_ca_provider.go @@ -0,0 +1,322 @@ +package consul + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "math/big" + "net/url" + "sync" + "time" + + "github.com/hashicorp/consul/agent/connect" + 
"github.com/hashicorp/consul/agent/structs" + uuid "github.com/hashicorp/go-uuid" + "github.com/mitchellh/mapstructure" +) + +type ConsulCAProviderConfig struct { + PrivateKey string + RootCert string + RotationPeriod time.Duration +} + +type ConsulCAProvider struct { + config *ConsulCAProviderConfig + + // todo(kyhavlov): store these directly in the state store + // and pass a reference to the state to this provider instead of + // having these values here + srv *Server + sync.RWMutex +} + +func NewConsulCAProvider(rawConfig map[string]interface{}, srv *Server) (*ConsulCAProvider, error) { + provider := &ConsulCAProvider{srv: srv} + provider.SetConfiguration(rawConfig) + + return provider, nil +} + +func (c *ConsulCAProvider) SetConfiguration(raw map[string]interface{}) error { + conf, err := decodeConfig(raw) + if err != nil { + return err + } + + c.config = conf + return nil +} + +func decodeConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { + var config *ConsulCAProviderConfig + if err := mapstructure.WeakDecode(raw, &config); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + return config, nil +} + +// Return the active root CA and generate a new one if needed +func (c *ConsulCAProvider) ActiveRoot() (*structs.CARoot, error) { + state := c.srv.fsm.State() + _, providerState, err := state.CAProviderState() + if err != nil { + return nil, err + } + + var update bool + var newState structs.CAConsulProviderState + if providerState != nil { + newState = *providerState + } + + // Generate a private key if needed + if providerState == nil || providerState.PrivateKey == "" { + pk, err := generatePrivateKey() + if err != nil { + return nil, err + } + newState.PrivateKey = pk + update = true + } + + // Generate a root CA if needed + if providerState == nil || providerState.CARoot == nil { + ca, err := c.generateCA(newState.PrivateKey, newState.RootIndex+1) + if err != nil { + return nil, err + } + newState.CARoot = ca + 
newState.RootIndex += 1 + update = true + } + + // Update the provider state if we generated a new private key/cert + if update { + args := &structs.CARequest{ + Op: structs.CAOpSetProviderState, + ProviderState: &newState, + } + resp, err := c.srv.raftApply(structs.ConnectCARequestType, args) + if err != nil { + return nil, err + } + if respErr, ok := resp.(error); ok { + return nil, respErr + } + } + return newState.CARoot, nil +} + +func (c *ConsulCAProvider) ActiveIntermediate() (*structs.CARoot, error) { + return c.ActiveRoot() +} + +func (c *ConsulCAProvider) GenerateIntermediate() (*structs.CARoot, error) { + state := c.srv.fsm.State() + _, providerState, err := state.CAProviderState() + if err != nil { + return nil, err + } + if providerState == nil { + return nil, fmt.Errorf("CA provider not yet initialized") + } + + ca, err := c.generateCA(providerState.PrivateKey, providerState.RootIndex+1) + if err != nil { + return nil, err + } + + return ca, nil +} + +// Sign returns a new certificate valid for the given SpiffeIDService +// using the current CA. +func (c *ConsulCAProvider) Sign(serviceId *connect.SpiffeIDService, csr *x509.CertificateRequest) (*structs.IssuedCert, error) { + // Get the provider state + state := c.srv.fsm.State() + _, providerState, err := state.CAProviderState() + if err != nil { + return nil, err + } + + // Create the keyId for the cert from the signing public key. 
+ signer, err := connect.ParseSigner(providerState.PrivateKey) + if err != nil { + return nil, err + } + if signer == nil { + return nil, fmt.Errorf("error signing cert: Consul CA not initialized yet") + } + keyId, err := connect.KeyId(signer.Public()) + if err != nil { + return nil, err + } + + // Parse the CA cert + caCert, err := connect.ParseCert(providerState.CARoot.RootCert) + if err != nil { + return nil, fmt.Errorf("error parsing CA cert: %s", err) + } + + // Cert template for generation + sn := &big.Int{} + sn.SetUint64(providerState.LeafIndex + 1) + template := x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{CommonName: serviceId.Service}, + URIs: csr.URIs, + Signature: csr.Signature, + SignatureAlgorithm: csr.SignatureAlgorithm, + PublicKeyAlgorithm: csr.PublicKeyAlgorithm, + PublicKey: csr.PublicKey, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageDataEncipherment | + x509.KeyUsageKeyAgreement | + x509.KeyUsageDigitalSignature | + x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + x509.ExtKeyUsageServerAuth, + }, + NotAfter: time.Now().Add(3 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: keyId, + SubjectKeyId: keyId, + } + + // Create the certificate, PEM encode it and return that value. 
+ var buf bytes.Buffer + bs, err := x509.CreateCertificate( + rand.Reader, &template, caCert, signer.Public(), signer) + if err != nil { + return nil, fmt.Errorf("error generating certificate: %s", err) + } + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return nil, fmt.Errorf("error encoding private key: %s", err) + } + + // Increment the leaf cert index + newState := *providerState + newState.LeafIndex += 1 + args := &structs.CARequest{ + Op: structs.CAOpSetProviderState, + ProviderState: &newState, + } + resp, err := c.srv.raftApply(structs.ConnectCARequestType, args) + if err != nil { + return nil, err + } + if respErr, ok := resp.(error); ok { + return nil, respErr + } + + // Set the response + return &structs.IssuedCert{ + SerialNumber: connect.HexString(template.SerialNumber.Bytes()), + CertPEM: buf.String(), + Service: serviceId.Service, + ServiceURI: template.URIs[0].String(), + ValidAfter: template.NotBefore, + ValidBefore: template.NotAfter, + }, nil +} + +// generatePrivateKey returns a new private key +func generatePrivateKey() (string, error) { + var pk *ecdsa.PrivateKey + + // If we have no key, then create a new one. 
+ pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return "", fmt.Errorf("error generating private key: %s", err) + } + + bs, err := x509.MarshalECPrivateKey(pk) + if err != nil { + return "", fmt.Errorf("error generating private key: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) + if err != nil { + return "", fmt.Errorf("error encoding private key: %s", err) + } + + return buf.String(), nil +} + +// generateCA makes a new root CA using the current private key +func (c *ConsulCAProvider) generateCA(privateKey string, sn uint64) (*structs.CARoot, error) { + state := c.srv.fsm.State() + _, config, err := state.CAConfig() + if err != nil { + return nil, err + } + + privKey, err := connect.ParseSigner(privateKey) + if err != nil { + return nil, err + } + + name := fmt.Sprintf("Consul CA %d", sn) + + // The URI (SPIFFE compatible) for the cert + id := &connect.SpiffeIDSigning{ClusterID: config.ClusterSerial, Domain: "consul"} + keyId, err := connect.KeyId(privKey.Public()) + if err != nil { + return nil, err + } + + // Create the CA cert + serialNum := &big.Int{} + serialNum.SetUint64(sn) + template := x509.Certificate{ + SerialNumber: serialNum, + Subject: pkix.Name{CommonName: name}, + URIs: []*url.URL{id.URI()}, + PermittedDNSDomainsCritical: true, + PermittedDNSDomains: []string{id.URI().Hostname()}, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign | + x509.KeyUsageCRLSign | + x509.KeyUsageDigitalSignature, + IsCA: true, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: keyId, + SubjectKeyId: keyId, + } + + bs, err := x509.CreateCertificate( + rand.Reader, &template, &template, privKey.Public(), privKey) + if err != nil { + return nil, fmt.Errorf("error generating CA certificate: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + 
return nil, fmt.Errorf("error encoding private key: %s", err) + } + + // Generate an ID for the new CA cert + rootId, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + + return &structs.CARoot{ + ID: rootId, + Name: name, + RootCert: buf.String(), + Active: true, + }, nil +} diff --git a/agent/consul/fsm/commands_oss.go b/agent/consul/fsm/commands_oss.go index a5ef33efc..99755194b 100644 --- a/agent/consul/fsm/commands_oss.go +++ b/agent/consul/fsm/commands_oss.go @@ -300,6 +300,13 @@ func (c *FSM) applyConnectCAOperation(buf []byte, index uint64) interface{} { return err } + return act + case structs.CAOpSetProviderState: + act, err := c.state.CASetProviderState(index, req.ProviderState) + if err != nil { + return err + } + return act default: c.logger.Printf("[WARN] consul.fsm: Invalid CA operation '%s'", req.Op) diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 516201262..fca3fa07f 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -9,6 +9,7 @@ import ( "time" "github.com/hashicorp/consul/agent/connect" + uuid "github.com/hashicorp/go-uuid" "github.com/armon/go-metrics" "github.com/hashicorp/consul/acl" @@ -377,7 +378,13 @@ func (s *Server) getOrCreateCAConfig() (*structs.CAConfiguration, error) { return config, nil } + sn, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + config = s.config.CAConfig + config.ClusterSerial = sn req := structs.CARequest{ Op: structs.CAOpSetConfig, Config: config, @@ -400,7 +407,7 @@ func (s *Server) bootstrapCA() error { var provider connect.CAProvider switch conf.Provider { case structs.ConsulCAProvider: - provider, err = connect.NewConsulCAProvider(conf.Config) + provider, err = NewConsulCAProvider(conf.Config, s) if err != nil { return err } @@ -412,10 +419,10 @@ func (s *Server) bootstrapCA() error { s.caProvider = provider s.caProviderLock.Unlock() - // Get the intermediate cert from the CA - trustedCA, err := provider.ActiveIntermediate() + // Get the 
active root cert from the CA + trustedCA, err := provider.ActiveRoot() if err != nil { - return fmt.Errorf("error getting intermediate cert: %v", err) + return fmt.Errorf("error getting root cert: %v", err) } // Check if this CA is already initialized @@ -435,7 +442,7 @@ func (s *Server) bootstrapCA() error { return err } - // Store the intermediate in raft + // Store the root cert in raft resp, err := s.raftApply(structs.ConnectCARequestType, &structs.CARequest{ Op: structs.CAOpSetRoots, Index: idx, diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index f5962b084..2cce8028b 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -8,8 +8,9 @@ import ( ) const ( - caConfigTableName = "connect-ca-config" - caRootTableName = "connect-ca-roots" + caConfigTableName = "connect-ca-config" + caRootTableName = "connect-ca-roots" + caProviderTableName = "connect-ca-builtin" ) // caConfigTableSchema returns a new table schema used for storing @@ -48,14 +49,34 @@ func caRootTableSchema() *memdb.TableSchema { } } +// caProviderTableSchema returns a new table schema used for storing +// the built-in CA provider's state for connect. This is only used by +// the internal Consul CA provider. +func caProviderTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: caProviderTableName, + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + AllowMissing: false, + Unique: true, + Indexer: &memdb.ConditionalIndex{ + Conditional: func(obj interface{}) (bool, error) { return true, nil }, + }, + }, + }, + } +} + func init() { registerSchema(caConfigTableSchema) registerSchema(caRootTableSchema) + registerSchema(caProviderTableSchema) } // CAConfig is used to pull the CA config from the snapshot. 
func (s *Snapshot) CAConfig() (*structs.CAConfiguration, error) { - c, err := s.tx.First("connect-ca-config", "id") + c, err := s.tx.First(caConfigTableName, "id") if err != nil { return nil, err } @@ -70,7 +91,7 @@ func (s *Snapshot) CAConfig() (*structs.CAConfiguration, error) { // CAConfig is used when restoring from a snapshot. func (s *Restore) CAConfig(config *structs.CAConfiguration) error { - if err := s.tx.Insert("connect-ca-config", config); err != nil { + if err := s.tx.Insert(caConfigTableName, config); err != nil { return fmt.Errorf("failed restoring CA config: %s", err) } @@ -83,7 +104,7 @@ func (s *Store) CAConfig() (uint64, *structs.CAConfiguration, error) { defer tx.Abort() // Get the autopilot config - c, err := tx.First("connect-ca-config", "id") + c, err := tx.First(caConfigTableName, "id") if err != nil { return 0, nil, fmt.Errorf("failed CA config lookup: %s", err) } @@ -101,7 +122,9 @@ func (s *Store) CASetConfig(idx uint64, config *structs.CAConfiguration) error { tx := s.db.Txn(true) defer tx.Abort() - s.caSetConfigTxn(idx, tx, config) + if err := s.caSetConfigTxn(idx, tx, config); err != nil { + return err + } tx.Commit() return nil @@ -115,7 +138,7 @@ func (s *Store) CACheckAndSetConfig(idx, cidx uint64, config *structs.CAConfigur defer tx.Abort() // Check for an existing config - existing, err := tx.First("connect-ca-config", "id") + existing, err := tx.First(caConfigTableName, "id") if err != nil { return false, fmt.Errorf("failed CA config lookup: %s", err) } @@ -128,7 +151,9 @@ func (s *Store) CACheckAndSetConfig(idx, cidx uint64, config *structs.CAConfigur return false, nil } - s.caSetConfigTxn(idx, tx, config) + if err := s.caSetConfigTxn(idx, tx, config); err != nil { + return false, err + } tx.Commit() return true, nil @@ -136,20 +161,22 @@ func (s *Store) CACheckAndSetConfig(idx, cidx uint64, config *structs.CAConfigur func (s *Store) caSetConfigTxn(idx uint64, tx *memdb.Txn, config *structs.CAConfiguration) error { // Check for 
an existing config - existing, err := tx.First("connect-ca-config", "id") + prev, err := tx.First(caConfigTableName, "id") if err != nil { return fmt.Errorf("failed CA config lookup: %s", err) } - // Set the indexes. - if existing != nil { - config.CreateIndex = existing.(*structs.CAConfiguration).CreateIndex + // Set the indexes, prevent the cluster ID from changing. + if prev != nil { + existing := prev.(*structs.CAConfiguration) + config.CreateIndex = existing.CreateIndex + config.ClusterSerial = existing.ClusterSerial } else { config.CreateIndex = idx } config.ModifyIndex = idx - if err := tx.Insert("connect-ca-config", config); err != nil { + if err := tx.Insert(caConfigTableName, config); err != nil { return fmt.Errorf("failed updating CA config: %s", err) } return nil @@ -289,3 +316,73 @@ func (s *Store) CARootSetCAS(idx, cidx uint64, rs []*structs.CARoot) (bool, erro tx.Commit() return true, nil } + +// CAProviderState is used to pull the built-in provider state from the snapshot. +func (s *Snapshot) CAProviderState() (*structs.CAConsulProviderState, error) { + c, err := s.tx.First(caProviderTableName, "id") + if err != nil { + return nil, err + } + + state, ok := c.(*structs.CAConsulProviderState) + if !ok { + return nil, nil + } + + return state, nil +} + +// CAProviderState is used when restoring from a snapshot. +func (s *Restore) CAProviderState(state *structs.CAConsulProviderState) error { + if err := s.tx.Insert(caProviderTableName, state); err != nil { + return fmt.Errorf("failed restoring built-in CA state: %s", err) + } + + return nil +} + +// CAProviderState is used to get the current Consul CA provider state. 
+func (s *Store) CAProviderState() (uint64, *structs.CAConsulProviderState, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + // Get the autopilot config + c, err := tx.First(caProviderTableName, "id") + if err != nil { + return 0, nil, fmt.Errorf("failed built-in CA state lookup: %s", err) + } + + state, ok := c.(*structs.CAConsulProviderState) + if !ok { + return 0, nil, nil + } + + return state.ModifyIndex, state, nil +} + +// CASetProviderState is used to set the current built-in CA provider state. +func (s *Store) CASetProviderState(idx uint64, state *structs.CAConsulProviderState) (bool, error) { + tx := s.db.Txn(true) + defer tx.Abort() + + // Check for an existing config + existing, err := tx.First(caProviderTableName, "id") + if err != nil { + return false, fmt.Errorf("failed built-in CA state lookup: %s", err) + } + + // Set the indexes. + if existing != nil { + state.CreateIndex = existing.(*structs.CAConfiguration).CreateIndex + } else { + state.CreateIndex = idx + } + state.ModifyIndex = idx + + if err := tx.Insert(caProviderTableName, state); err != nil { + return false, fmt.Errorf("failed updating built-in CA state: %s", err) + } + tx.Commit() + + return true, nil +} diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index af8f82653..a923c0361 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -96,8 +96,9 @@ type IssuedCert struct { type CAOp string const ( - CAOpSetRoots CAOp = "set-roots" - CAOpSetConfig CAOp = "set-config" + CAOpSetRoots CAOp = "set-roots" + CAOpSetConfig CAOp = "set-config" + CAOpSetProviderState CAOp = "set-provider-state" ) // CARequest is used to modify connect CA data. This is used by the @@ -110,7 +111,7 @@ type CARequest struct { // Datacenter is the target for this request. Datacenter string - // Index is used by CAOpSet for a CAS operation. + // Index is used by CAOpSetRoots and CAOpSetConfig for a CAS operation. Index uint64 // Roots is a list of roots. 
This is used for CAOpSet. One root must @@ -120,6 +121,9 @@ type CARequest struct { // Config is the configuration for the current CA plugin. Config *CAConfiguration + // ProviderState is the state for the builtin CA provider. + ProviderState *CAConsulProviderState + // WriteRequest is a common struct containing ACL tokens and other // write-related common elements for requests. WriteRequest @@ -136,6 +140,9 @@ const ( // CAConfiguration is the configuration for the current CA plugin. type CAConfiguration struct { + // Unique identifier for the cluster + ClusterSerial string `json:"-"` + // Provider is the CA provider implementation to use. Provider string @@ -144,7 +151,15 @@ type CAConfiguration struct { // and maps). Config map[string]interface{} - // CreateIndex/ModifyIndex store the create/modify indexes of this configuration. - CreateIndex uint64 - ModifyIndex uint64 + RaftIndex +} + +// CAConsulProviderState is used to track the built-in Consul CA provider's state. +type CAConsulProviderState struct { + PrivateKey string + CARoot *CARoot + RootIndex uint64 + LeafIndex uint64 + + RaftIndex } From bbfcb278e189edd48fa83c90fab9972178f50f4c Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 20 Apr 2018 18:46:02 -0700 Subject: [PATCH 148/627] Add the root rotation mechanism to the CA config endpoint --- agent/connect/ca.go | 12 ++ agent/connect/ca_provider.go | 3 +- agent/consul/connect_ca_endpoint.go | 104 +++++++++++- agent/consul/connect_ca_provider.go | 254 +++++++++++++++++----------- agent/consul/fsm/commands_oss.go | 17 ++ agent/consul/leader.go | 45 +++-- agent/consul/state/connect_ca.go | 86 ++++++++-- agent/structs/connect_ca.go | 10 +- 8 files changed, 396 insertions(+), 135 deletions(-) diff --git a/agent/connect/ca.go b/agent/connect/ca.go index bca9392d3..818af9f9f 100644 --- a/agent/connect/ca.go +++ b/agent/connect/ca.go @@ -38,6 +38,18 @@ func ParseSigner(pemValue string) (crypto.Signer, error) { case "EC PRIVATE KEY": return 
x509.ParseECPrivateKey(block.Bytes) + case "PRIVATE KEY": + signer, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + pk, ok := signer.(crypto.Signer) + if !ok { + return nil, fmt.Errorf("private key is not a valid format") + } + + return pk, nil + default: return nil, fmt.Errorf("unknown PEM block type for signing key: %s", block.Type) } diff --git a/agent/connect/ca_provider.go b/agent/connect/ca_provider.go index ca0ccf9b0..dc70c6a58 100644 --- a/agent/connect/ca_provider.go +++ b/agent/connect/ca_provider.go @@ -10,9 +10,10 @@ import ( // an external CA that provides leaf certificate signing for // given SpiffeIDServices. type CAProvider interface { - SetConfiguration(raw map[string]interface{}) error ActiveRoot() (*structs.CARoot, error) ActiveIntermediate() (*structs.CARoot, error) GenerateIntermediate() (*structs.CARoot, error) Sign(*SpiffeIDService, *x509.CertificateRequest) (*structs.IssuedCert, error) + //SignCA(*x509.CertificateRequest) (*structs.IssuedCert, error) + Teardown() error } diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index d0c582165..128c1493d 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -2,6 +2,7 @@ package consul import ( "fmt" + "reflect" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" @@ -60,9 +61,95 @@ func (s *ConnectCA) ConfigurationSet( return acl.ErrPermissionDenied } - // Commit - // todo(kyhavlov): trigger a bootstrap here when the provider changes - args.Op = structs.CAOpSetConfig + // Exit early if it's a no-op change + state := s.srv.fsm.State() + _, config, err := state.CAConfig() + if err != nil { + return err + } + if args.Config.Provider == config.Provider && reflect.DeepEqual(args.Config.Config, config.Config) { + return nil + } + + // Create a new instance of the provider described by the config + // and get the current active root CA. 
This acts as a good validation + // of the config and makes sure the provider is functioning correctly + // before we commit any changes to Raft. + newProvider, err := s.srv.createCAProvider(args.Config) + if err != nil { + return fmt.Errorf("could not initialize provider: %v", err) + } + + newActiveRoot, err := newProvider.ActiveRoot() + if err != nil { + return err + } + + // Compare the new provider's root CA ID to the current one. If they + // match, just update the existing provider with the new config. + // If they don't match, begin the root rotation process. + _, root, err := state.CARootActive(nil) + if err != nil { + return err + } + + if root != nil && root.ID == newActiveRoot.ID { + args.Op = structs.CAOpSetConfig + resp, err := s.srv.raftApply(structs.ConnectCARequestType, args) + if err != nil { + return err + } + if respErr, ok := resp.(error); ok { + return respErr + } + + // If the config has been committed, update the local provider instance + s.srv.setCAProvider(newProvider) + + s.srv.logger.Printf("[INFO] connect: provider config updated") + + return nil + } + + // At this point, we know the config change has trigged a root rotation, + // either by swapping the provider type or changing the provider's config + // to use a different root certificate. + + // If it's a config change that would trigger a rotation (different provider/root): + // -1. Create an instance of the provider described by the new config + // 2. Get the intermediate from the new provider + // 3. Generate a CSR for the new intermediate, call SignCA on the old/current provider + // to get the cross-signed intermediate + // ~4. Get the active root for the new provider, append the intermediate from step 3 + // to its list of intermediates + // -5. Update the roots and CA config in the state store at the same time, finally switching + // to the new provider + // -6. 
Call teardown on the old provider, so it can clean up whatever it needs to + + /*_, err := newProvider.ActiveIntermediate() + if err != nil { + return err + }*/ + + // Update the roots and CA config in the state store at the same time + idx, roots, err := state.CARoots(nil) + if err != nil { + return err + } + + var newRoots structs.CARoots + for _, r := range roots { + newRoot := *r + if newRoot.Active { + newRoot.Active = false + } + newRoots = append(newRoots, &newRoot) + } + newRoots = append(newRoots, newActiveRoot) + + args.Op = structs.CAOpSetRootsAndConfig + args.Index = idx + args.Roots = newRoots resp, err := s.srv.raftApply(structs.ConnectCARequestType, args) if err != nil { return err @@ -71,6 +158,17 @@ func (s *ConnectCA) ConfigurationSet( return respErr } + // If the config has been committed, update the local provider instance + // and call teardown on the old provider + oldProvider := s.srv.getCAProvider() + s.srv.setCAProvider(newProvider) + + if err := oldProvider.Teardown(); err != nil { + return err + } + + s.srv.logger.Printf("[INFO] connect: CA rotated to the new root under %q provider", args.Config.Provider) + return nil } diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index 9beb6bfac..b72a9ee36 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -29,28 +29,94 @@ type ConsulCAProviderConfig struct { type ConsulCAProvider struct { config *ConsulCAProviderConfig - // todo(kyhavlov): store these directly in the state store - // and pass a reference to the state to this provider instead of - // having these values here + id string srv *Server sync.RWMutex } +// NewConsulCAProvider returns a new instance of the Consul CA provider, +// bootstrapping its state in the state store necessary func NewConsulCAProvider(rawConfig map[string]interface{}, srv *Server) (*ConsulCAProvider, error) { - provider := &ConsulCAProvider{srv: srv} - provider.SetConfiguration(rawConfig) 
- - return provider, nil -} - -func (c *ConsulCAProvider) SetConfiguration(raw map[string]interface{}) error { - conf, err := decodeConfig(raw) + conf, err := decodeConfig(rawConfig) if err != nil { - return err + return nil, err + } + provider := &ConsulCAProvider{ + config: conf, + srv: srv, + id: fmt.Sprintf("%s,%s", conf.PrivateKey, conf.RootCert), } - c.config = conf - return nil + // Check if this configuration of the provider has already been + // initialized in the state store. + state := srv.fsm.State() + _, providerState, err := state.CAProviderState(provider.id) + if err != nil { + return nil, err + } + + // Exit early if the state store has already been populated for this config. + if providerState != nil { + return provider, nil + } + + newState := structs.CAConsulProviderState{ + ID: provider.id, + } + + // Write the initial provider state to get the index to use for the + // CA serial number. + { + args := &structs.CARequest{ + Op: structs.CAOpSetProviderState, + ProviderState: &newState, + } + resp, err := srv.raftApply(structs.ConnectCARequestType, args) + if err != nil { + return nil, err + } + if respErr, ok := resp.(error); ok { + return nil, respErr + } + } + + idx, _, err := state.CAProviderState(provider.id) + if err != nil { + return nil, err + } + + // Generate a private key if needed + if conf.PrivateKey == "" { + pk, err := generatePrivateKey() + if err != nil { + return nil, err + } + newState.PrivateKey = pk + } else { + newState.PrivateKey = conf.PrivateKey + } + + // Generate the root CA + ca, err := provider.generateCA(newState.PrivateKey, conf.RootCert, idx+1) + if err != nil { + return nil, fmt.Errorf("error generating CA: %v", err) + } + newState.CARoot = ca + + // Write the provider state + args := &structs.CARequest{ + Op: structs.CAOpSetProviderState, + ProviderState: &newState, + } + resp, err := srv.raftApply(structs.ConnectCARequestType, args) + if err != nil { + return nil, err + } + if respErr, ok := resp.(error); ok { + 
return nil, respErr + } + + return provider, nil } func decodeConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { @@ -59,59 +125,22 @@ func decodeConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { return nil, fmt.Errorf("error decoding config: %s", err) } + if config.PrivateKey == "" && config.RootCert != "" { + return nil, fmt.Errorf("must provide a private key when providing a root cert") + } + return config, nil } // Return the active root CA and generate a new one if needed func (c *ConsulCAProvider) ActiveRoot() (*structs.CARoot, error) { state := c.srv.fsm.State() - _, providerState, err := state.CAProviderState() + _, providerState, err := state.CAProviderState(c.id) if err != nil { return nil, err } - var update bool - var newState structs.CAConsulProviderState - if providerState != nil { - newState = *providerState - } - - // Generate a private key if needed - if providerState == nil || providerState.PrivateKey == "" { - pk, err := generatePrivateKey() - if err != nil { - return nil, err - } - newState.PrivateKey = pk - update = true - } - - // Generate a root CA if needed - if providerState == nil || providerState.CARoot == nil { - ca, err := c.generateCA(newState.PrivateKey, newState.RootIndex+1) - if err != nil { - return nil, err - } - newState.CARoot = ca - newState.RootIndex += 1 - update = true - } - - // Update the provider state if we generated a new private key/cert - if update { - args := &structs.CARequest{ - Op: structs.CAOpSetProviderState, - ProviderState: &newState, - } - resp, err := c.srv.raftApply(structs.ConnectCARequestType, args) - if err != nil { - return nil, err - } - if respErr, ok := resp.(error); ok { - return nil, respErr - } - } - return newState.CARoot, nil + return providerState.CARoot, nil } func (c *ConsulCAProvider) ActiveIntermediate() (*structs.CARoot, error) { @@ -120,15 +149,12 @@ func (c *ConsulCAProvider) ActiveIntermediate() (*structs.CARoot, error) { func (c *ConsulCAProvider) 
GenerateIntermediate() (*structs.CARoot, error) { state := c.srv.fsm.State() - _, providerState, err := state.CAProviderState() + idx, providerState, err := state.CAProviderState(c.id) if err != nil { return nil, err } - if providerState == nil { - return nil, fmt.Errorf("CA provider not yet initialized") - } - ca, err := c.generateCA(providerState.PrivateKey, providerState.RootIndex+1) + ca, err := c.generateCA(providerState.PrivateKey, "", idx+1) if err != nil { return nil, err } @@ -136,12 +162,34 @@ func (c *ConsulCAProvider) GenerateIntermediate() (*structs.CARoot, error) { return ca, nil } +// Remove the state store entry for this provider instance. +func (c *ConsulCAProvider) Teardown() error { + args := &structs.CARequest{ + Op: structs.CAOpDeleteProviderState, + ProviderState: &structs.CAConsulProviderState{ID: c.id}, + } + resp, err := c.srv.raftApply(structs.ConnectCARequestType, args) + if err != nil { + return err + } + if respErr, ok := resp.(error); ok { + return respErr + } + + return nil +} + // Sign returns a new certificate valid for the given SpiffeIDService // using the current CA. func (c *ConsulCAProvider) Sign(serviceId *connect.SpiffeIDService, csr *x509.CertificateRequest) (*structs.IssuedCert, error) { + // Lock during the signing so we don't use the same index twice + // for different cert serial numbers. 
+ c.Lock() + defer c.Unlock() + // Get the provider state state := c.srv.fsm.State() - _, providerState, err := state.CAProviderState() + _, providerState, err := state.CAProviderState(c.id) if err != nil { return nil, err } @@ -254,7 +302,7 @@ func generatePrivateKey() (string, error) { } // generateCA makes a new root CA using the current private key -func (c *ConsulCAProvider) generateCA(privateKey string, sn uint64) (*structs.CARoot, error) { +func (c *ConsulCAProvider) generateCA(privateKey, contents string, sn uint64) (*structs.CARoot, error) { state := c.srv.fsm.State() _, config, err := state.CAConfig() if err != nil { @@ -263,48 +311,54 @@ func (c *ConsulCAProvider) generateCA(privateKey string, sn uint64) (*structs.CA privKey, err := connect.ParseSigner(privateKey) if err != nil { - return nil, err + return nil, fmt.Errorf("error parsing private key %q: %v", privateKey, err) } name := fmt.Sprintf("Consul CA %d", sn) - // The URI (SPIFFE compatible) for the cert - id := &connect.SpiffeIDSigning{ClusterID: config.ClusterSerial, Domain: "consul"} - keyId, err := connect.KeyId(privKey.Public()) - if err != nil { - return nil, err - } + pemContents := contents - // Create the CA cert - serialNum := &big.Int{} - serialNum.SetUint64(sn) - template := x509.Certificate{ - SerialNumber: serialNum, - Subject: pkix.Name{CommonName: name}, - URIs: []*url.URL{id.URI()}, - PermittedDNSDomainsCritical: true, - PermittedDNSDomains: []string{id.URI().Hostname()}, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageCertSign | - x509.KeyUsageCRLSign | - x509.KeyUsageDigitalSignature, - IsCA: true, - NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), - NotBefore: time.Now(), - AuthorityKeyId: keyId, - SubjectKeyId: keyId, - } + if pemContents == "" { + // The URI (SPIFFE compatible) for the cert + id := &connect.SpiffeIDSigning{ClusterID: config.ClusterSerial, Domain: "consul"} + keyId, err := connect.KeyId(privKey.Public()) + if err != nil { + return nil, err + } - bs, 
err := x509.CreateCertificate( - rand.Reader, &template, &template, privKey.Public(), privKey) - if err != nil { - return nil, fmt.Errorf("error generating CA certificate: %s", err) - } + // Create the CA cert + serialNum := &big.Int{} + serialNum.SetUint64(sn) + template := x509.Certificate{ + SerialNumber: serialNum, + Subject: pkix.Name{CommonName: name}, + URIs: []*url.URL{id.URI()}, + PermittedDNSDomainsCritical: true, + PermittedDNSDomains: []string{id.URI().Hostname()}, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign | + x509.KeyUsageCRLSign | + x509.KeyUsageDigitalSignature, + IsCA: true, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: keyId, + SubjectKeyId: keyId, + } - var buf bytes.Buffer - err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) - if err != nil { - return nil, fmt.Errorf("error encoding private key: %s", err) + bs, err := x509.CreateCertificate( + rand.Reader, &template, &template, privKey.Public(), privKey) + if err != nil { + return nil, fmt.Errorf("error generating CA certificate: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return nil, fmt.Errorf("error encoding private key: %s", err) + } + + pemContents = buf.String() } // Generate an ID for the new CA cert @@ -316,7 +370,7 @@ func (c *ConsulCAProvider) generateCA(privateKey string, sn uint64) (*structs.CA return &structs.CARoot{ ID: rootId, Name: name, - RootCert: buf.String(), + RootCert: pemContents, Active: true, }, nil } diff --git a/agent/consul/fsm/commands_oss.go b/agent/consul/fsm/commands_oss.go index 99755194b..5292bd0f5 100644 --- a/agent/consul/fsm/commands_oss.go +++ b/agent/consul/fsm/commands_oss.go @@ -307,6 +307,23 @@ func (c *FSM) applyConnectCAOperation(buf []byte, index uint64) interface{} { return err } + return act + case structs.CAOpDeleteProviderState: + if err := 
c.state.CADeleteProviderState(req.ProviderState.ID); err != nil { + return err + } + + return true + case structs.CAOpSetRootsAndConfig: + act, err := c.state.CARootSetCAS(index, req.Index, req.Roots) + if err != nil { + return err + } + + if err := c.state.CASetConfig(index+1, req.Config); err != nil { + return err + } + return act default: c.logger.Printf("[WARN] consul.fsm: Invalid CA operation '%s'", req.Op) diff --git a/agent/consul/leader.go b/agent/consul/leader.go index fca3fa07f..8d62ca1aa 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -396,7 +396,7 @@ func (s *Server) getOrCreateCAConfig() (*structs.CAConfiguration, error) { return config, nil } -// bootstrapCA handles the initialization of a new CA provider +// bootstrapCA creates a CA provider from the current configuration. func (s *Server) bootstrapCA() error { conf, err := s.getOrCreateCAConfig() if err != nil { @@ -404,20 +404,12 @@ func (s *Server) bootstrapCA() error { } // Initialize the right provider based on the config - var provider connect.CAProvider - switch conf.Provider { - case structs.ConsulCAProvider: - provider, err = NewConsulCAProvider(conf.Config, s) - if err != nil { - return err - } - default: - return fmt.Errorf("unknown CA provider %q", conf.Provider) + provider, err := s.createCAProvider(conf) + if err != nil { + return err } - s.caProviderLock.Lock() - s.caProvider = provider - s.caProviderLock.Unlock() + s.setCAProvider(provider) // Get the active root cert from the CA trustedCA, err := provider.ActiveRoot() @@ -425,13 +417,14 @@ func (s *Server) bootstrapCA() error { return fmt.Errorf("error getting root cert: %v", err) } - // Check if this CA is already initialized + // Check if the CA root is already initialized and exit if it is. + // Every change to the CA after this initial bootstrapping should + // be done through the rotation process. 
state := s.fsm.State() _, root, err := state.CARootActive(nil) if err != nil { return err } - // Exit early if the root is already in the state store. if root != nil && root.ID == trustedCA.ID { return nil } @@ -461,6 +454,28 @@ func (s *Server) bootstrapCA() error { return nil } +// createProvider returns a connect CA provider from the given config. +func (s *Server) createCAProvider(conf *structs.CAConfiguration) (connect.CAProvider, error) { + switch conf.Provider { + case structs.ConsulCAProvider: + return NewConsulCAProvider(conf.Config, s) + default: + return nil, fmt.Errorf("unknown CA provider %q", conf.Provider) + } +} + +func (s *Server) getCAProvider() connect.CAProvider { + s.caProviderLock.RLock() + defer s.caProviderLock.RUnlock() + return s.caProvider +} + +func (s *Server) setCAProvider(newProvider connect.CAProvider) { + s.caProviderLock.Lock() + defer s.caProviderLock.Unlock() + s.caProvider = newProvider +} + // signConnectCert signs a cert for a service using the currently configured CA provider func (s *Server) signConnectCert(service *connect.SpiffeIDService, csr *x509.CertificateRequest) (*structs.IssuedCert, error) { s.caProviderLock.RLock() diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index 2cce8028b..17e274992 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -60,8 +60,8 @@ func caProviderTableSchema() *memdb.TableSchema { Name: "id", AllowMissing: false, Unique: true, - Indexer: &memdb.ConditionalIndex{ - Conditional: func(obj interface{}) (bool, error) { return true, nil }, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", }, }, }, @@ -98,12 +98,12 @@ func (s *Restore) CAConfig(config *structs.CAConfiguration) error { return nil } -// CAConfig is used to get the current Autopilot configuration. +// CAConfig is used to get the current CA configuration. 
func (s *Store) CAConfig() (uint64, *structs.CAConfiguration, error) { tx := s.db.Txn(false) defer tx.Abort() - // Get the autopilot config + // Get the CA config c, err := tx.First(caConfigTableName, "id") if err != nil { return 0, nil, fmt.Errorf("failed CA config lookup: %s", err) @@ -117,7 +117,7 @@ func (s *Store) CAConfig() (uint64, *structs.CAConfiguration, error) { return config.ModifyIndex, config, nil } -// CASetConfig is used to set the current Autopilot configuration. +// CASetConfig is used to set the current CA configuration. func (s *Store) CASetConfig(idx uint64, config *structs.CAConfiguration) error { tx := s.db.Txn(true) defer tx.Abort() @@ -341,13 +341,16 @@ func (s *Restore) CAProviderState(state *structs.CAConsulProviderState) error { return nil } -// CAProviderState is used to get the current Consul CA provider state. -func (s *Store) CAProviderState() (uint64, *structs.CAConsulProviderState, error) { +// CAProviderState is used to get the Consul CA provider state for the given ID. +func (s *Store) CAProviderState(id string) (uint64, *structs.CAConsulProviderState, error) { tx := s.db.Txn(false) defer tx.Abort() - // Get the autopilot config - c, err := tx.First(caProviderTableName, "id") + // Get the index + idx := maxIndexTxn(tx, caProviderTableName) + + // Get the provider config + c, err := tx.First(caProviderTableName, "id", id) if err != nil { return 0, nil, fmt.Errorf("failed built-in CA state lookup: %s", err) } @@ -357,7 +360,28 @@ func (s *Store) CAProviderState() (uint64, *structs.CAConsulProviderState, error return 0, nil, nil } - return state.ModifyIndex, state, nil + return idx, state, nil +} + +// CAProviderStates is used to get the Consul CA provider state for the given ID. 
+func (s *Store) CAProviderStates() (uint64, []*structs.CAConsulProviderState, error) { + tx := s.db.Txn(false) + defer tx.Abort() + + // Get the index + idx := maxIndexTxn(tx, caProviderTableName) + + // Get all + iter, err := tx.Get(caProviderTableName, "id") + if err != nil { + return 0, nil, fmt.Errorf("failed CA provider state lookup: %s", err) + } + + var results []*structs.CAConsulProviderState + for v := iter.Next(); v != nil; v = iter.Next() { + results = append(results, v.(*structs.CAConsulProviderState)) + } + return idx, results, nil } // CASetProviderState is used to set the current built-in CA provider state. @@ -366,14 +390,14 @@ func (s *Store) CASetProviderState(idx uint64, state *structs.CAConsulProviderSt defer tx.Abort() // Check for an existing config - existing, err := tx.First(caProviderTableName, "id") + existing, err := tx.First(caProviderTableName, "id", state.ID) if err != nil { return false, fmt.Errorf("failed built-in CA state lookup: %s", err) } // Set the indexes. if existing != nil { - state.CreateIndex = existing.(*structs.CAConfiguration).CreateIndex + state.CreateIndex = existing.(*structs.CAConsulProviderState).CreateIndex } else { state.CreateIndex = idx } @@ -382,7 +406,45 @@ func (s *Store) CASetProviderState(idx uint64, state *structs.CAConsulProviderSt if err := tx.Insert(caProviderTableName, state); err != nil { return false, fmt.Errorf("failed updating built-in CA state: %s", err) } + + // Update the index + if err := tx.Insert("index", &IndexEntry{caProviderTableName, idx}); err != nil { + return false, fmt.Errorf("failed updating index: %s", err) + } + tx.Commit() return true, nil } + +// CADeleteProviderState is used to remove the Consul CA provider state for the given ID. 
+func (s *Store) CADeleteProviderState(id string) error { + tx := s.db.Txn(true) + defer tx.Abort() + + // Get the index + idx := maxIndexTxn(tx, caProviderTableName) + + // Check for an existing config + existing, err := tx.First(caProviderTableName, "id", id) + if err != nil { + return fmt.Errorf("failed built-in CA state lookup: %s", err) + } + if existing == nil { + return nil + } + + providerState := existing.(*structs.CAConsulProviderState) + + // Do the delete and update the index + if err := tx.Delete(caProviderTableName, providerState); err != nil { + return err + } + if err := tx.Insert("index", &IndexEntry{caProviderTableName, idx}); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } + + tx.Commit() + + return nil +} diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index a923c0361..1e2959dd1 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -96,9 +96,11 @@ type IssuedCert struct { type CAOp string const ( - CAOpSetRoots CAOp = "set-roots" - CAOpSetConfig CAOp = "set-config" - CAOpSetProviderState CAOp = "set-provider-state" + CAOpSetRoots CAOp = "set-roots" + CAOpSetConfig CAOp = "set-config" + CAOpSetProviderState CAOp = "set-provider-state" + CAOpDeleteProviderState CAOp = "delete-provider-state" + CAOpSetRootsAndConfig CAOp = "set-roots-config" ) // CARequest is used to modify connect CA data. This is used by the @@ -156,9 +158,9 @@ type CAConfiguration struct { // CAConsulProviderState is used to track the built-in Consul CA provider's state. 
type CAConsulProviderState struct { + ID string PrivateKey string CARoot *CARoot - RootIndex uint64 LeafIndex uint64 RaftIndex From 43f13d5a0b1c09822ad7e60940d2c03539aab709 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 20 Apr 2018 20:39:51 -0700 Subject: [PATCH 149/627] Add cross-signing mechanism to root rotation --- agent/connect/ca_provider.go | 21 +++- agent/consul/connect_ca_endpoint.go | 40 ++++---- agent/consul/connect_ca_provider.go | 119 +++++++++++++++++++++-- agent/consul/connect_ca_provider_test.go | 34 +++++++ agent/consul/leader.go | 13 --- agent/structs/connect_ca.go | 4 + 6 files changed, 191 insertions(+), 40 deletions(-) create mode 100644 agent/consul/connect_ca_provider_test.go diff --git a/agent/connect/ca_provider.go b/agent/connect/ca_provider.go index dc70c6a58..9a53d02a0 100644 --- a/agent/connect/ca_provider.go +++ b/agent/connect/ca_provider.go @@ -10,10 +10,27 @@ import ( // an external CA that provides leaf certificate signing for // given SpiffeIDServices. type CAProvider interface { + // Active root returns the currently active root CA for this + // provider. This should be a parent of the certificate returned by + // ActiveIntermediate() ActiveRoot() (*structs.CARoot, error) + + // ActiveIntermediate returns the current signing cert used by this + // provider for generating SPIFFE leaf certs. ActiveIntermediate() (*structs.CARoot, error) - GenerateIntermediate() (*structs.CARoot, error) + + // GenerateIntermediate returns a new intermediate signing cert, a + // cross-signing CSR for it and sets it to the active intermediate. + GenerateIntermediate() (*structs.CARoot, *x509.CertificateRequest, error) + + // Sign signs a leaf certificate used by Connect proxies from a CSR. Sign(*SpiffeIDService, *x509.CertificateRequest) (*structs.IssuedCert, error) - //SignCA(*x509.CertificateRequest) (*structs.IssuedCert, error) + + // SignCA signs a CA CSR and returns the resulting cross-signed cert. 
+ SignCA(*x509.CertificateRequest) (string, error) + + // Teardown performs any necessary cleanup that should happen when the provider + // is shut down permanently, such as removing a temporary PKI backend in Vault + // created for an intermediate CA. Teardown() error } diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 128c1493d..9a3adeb99 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -116,20 +116,24 @@ func (s *ConnectCA) ConfigurationSet( // to use a different root certificate. // If it's a config change that would trigger a rotation (different provider/root): - // -1. Create an instance of the provider described by the new config - // 2. Get the intermediate from the new provider - // 3. Generate a CSR for the new intermediate, call SignCA on the old/current provider + // 1. Get the intermediate from the new provider + // 2. Generate a CSR for the new intermediate, call SignCA on the old/current provider // to get the cross-signed intermediate - // ~4. Get the active root for the new provider, append the intermediate from step 3 + // 3. Get the active root for the new provider, append the intermediate from step 3 // to its list of intermediates - // -5. Update the roots and CA config in the state store at the same time, finally switching - // to the new provider - // -6. 
Call teardown on the old provider, so it can clean up whatever it needs to - - /*_, err := newProvider.ActiveIntermediate() + _, csr, err := newProvider.GenerateIntermediate() if err != nil { return err - }*/ + } + + oldProvider := s.srv.getCAProvider() + xcCert, err := oldProvider.SignCA(csr) + if err != nil { + return err + } + + // Add the cross signed cert to the new root's intermediates + newActiveRoot.Intermediates = []string{xcCert} // Update the roots and CA config in the state store at the same time idx, roots, err := state.CARoots(nil) @@ -160,7 +164,6 @@ func (s *ConnectCA) ConfigurationSet( // If the config has been committed, update the local provider instance // and call teardown on the old provider - oldProvider := s.srv.getCAProvider() s.srv.setCAProvider(newProvider) if err := oldProvider.Teardown(); err != nil { @@ -202,11 +205,12 @@ func (s *ConnectCA) Roots( // directly to the structure in the memdb store. reply.Roots[i] = &structs.CARoot{ - ID: r.ID, - Name: r.Name, - RootCert: r.RootCert, - RaftIndex: r.RaftIndex, - Active: r.Active, + ID: r.ID, + Name: r.Name, + RootCert: r.RootCert, + Intermediates: r.Intermediates, + RaftIndex: r.RaftIndex, + Active: r.Active, } if r.Active { @@ -245,7 +249,9 @@ func (s *ConnectCA) Sign( // todo(kyhavlov): more validation on the CSR before signing - cert, err := s.srv.signConnectCert(serviceId, csr) + provider := s.srv.getCAProvider() + + cert, err := provider.Sign(serviceId, csr) if err != nil { return err } diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index b72a9ee36..6f0508ce1 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -143,23 +143,58 @@ func (c *ConsulCAProvider) ActiveRoot() (*structs.CARoot, error) { return providerState.CARoot, nil } +// We aren't maintaining separate root/intermediate CAs for the builtin +// provider, so just return the root. 
func (c *ConsulCAProvider) ActiveIntermediate() (*structs.CARoot, error) { return c.ActiveRoot() } -func (c *ConsulCAProvider) GenerateIntermediate() (*structs.CARoot, error) { +// We aren't maintaining separate root/intermediate CAs for the builtin +// provider, so just generate a CSR for the active root. +func (c *ConsulCAProvider) GenerateIntermediate() (*structs.CARoot, *x509.CertificateRequest, error) { + ca, err := c.ActiveIntermediate() + if err != nil { + return nil, nil, err + } + state := c.srv.fsm.State() - idx, providerState, err := state.CAProviderState(c.id) + _, providerState, err := state.CAProviderState(c.id) if err != nil { - return nil, err + return nil, nil, err + } + _, config, err := state.CAConfig() + if err != nil { + return nil, nil, err } - ca, err := c.generateCA(providerState.PrivateKey, "", idx+1) - if err != nil { - return nil, err + id := &connect.SpiffeIDSigning{ClusterID: config.ClusterSerial, Domain: "consul"} + template := &x509.CertificateRequest{ + URIs: []*url.URL{id.URI()}, } - return ca, nil + signer, err := connect.ParseSigner(providerState.PrivateKey) + if err != nil { + return nil, nil, err + } + + // Create the CSR itself + var csrBuf bytes.Buffer + bs, err := x509.CreateCertificateRequest(rand.Reader, template, signer) + if err != nil { + return nil, nil, fmt.Errorf("error creating CSR: %s", err) + } + + err = pem.Encode(&csrBuf, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: bs}) + if err != nil { + return nil, nil, fmt.Errorf("error encoding CSR: %s", err) + } + + csr, err := connect.ParseCSR(csrBuf.String()) + if err != nil { + return nil, nil, err + } + + return ca, csr, err } // Remove the state store entry for this provider instance. @@ -194,7 +229,7 @@ func (c *ConsulCAProvider) Sign(serviceId *connect.SpiffeIDService, csr *x509.Ce return nil, err } - // Create the keyId for the cert from the signing public key. + // Create the keyId for the cert from the signing private key. 
signer, err := connect.ParseSigner(providerState.PrivateKey) if err != nil { return nil, err @@ -277,6 +312,74 @@ func (c *ConsulCAProvider) Sign(serviceId *connect.SpiffeIDService, csr *x509.Ce }, nil } +// SignCA returns an intermediate CA cert signed by the current active root. +func (c *ConsulCAProvider) SignCA(csr *x509.CertificateRequest) (string, error) { + c.Lock() + defer c.Unlock() + + // Get the provider state + state := c.srv.fsm.State() + _, providerState, err := state.CAProviderState(c.id) + if err != nil { + return "", err + } + + privKey, err := connect.ParseSigner(providerState.PrivateKey) + if err != nil { + return "", fmt.Errorf("error parsing private key %q: %v", providerState.PrivateKey, err) + } + + name := fmt.Sprintf("Consul cross-signed CA %d", providerState.LeafIndex+1) + + // The URI (SPIFFE compatible) for the cert + _, config, err := state.CAConfig() + if err != nil { + return "", err + } + id := &connect.SpiffeIDSigning{ClusterID: config.ClusterSerial, Domain: "consul"} + keyId, err := connect.KeyId(privKey.Public()) + if err != nil { + return "", err + } + + // Create the CA cert + serialNum := &big.Int{} + serialNum.SetUint64(providerState.LeafIndex + 1) + template := x509.Certificate{ + SerialNumber: serialNum, + Subject: pkix.Name{CommonName: name}, + URIs: csr.URIs, + Signature: csr.Signature, + PublicKeyAlgorithm: csr.PublicKeyAlgorithm, + PublicKey: csr.PublicKey, + PermittedDNSDomainsCritical: true, + PermittedDNSDomains: []string{id.URI().Hostname()}, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign | + x509.KeyUsageCRLSign | + x509.KeyUsageDigitalSignature, + IsCA: true, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: keyId, + SubjectKeyId: keyId, + } + + bs, err := x509.CreateCertificate( + rand.Reader, &template, &template, privKey.Public(), privKey) + if err != nil { + return "", fmt.Errorf("error generating CA certificate: %s", err) + } + + var buf 
bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return "", fmt.Errorf("error encoding private key: %s", err) + } + + return buf.String(), nil +} + // generatePrivateKey returns a new private key func generatePrivateKey() (string, error) { var pk *ecdsa.PrivateKey diff --git a/agent/consul/connect_ca_provider_test.go b/agent/consul/connect_ca_provider_test.go new file mode 100644 index 000000000..adad3acba --- /dev/null +++ b/agent/consul/connect_ca_provider_test.go @@ -0,0 +1,34 @@ +package consul + +import ( + "os" + "testing" + + "github.com/hashicorp/consul/testrpc" + "github.com/stretchr/testify/assert" +) + +func TestCAProvider_Bootstrap(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + provider := s1.getCAProvider() + + root, err := provider.ActiveRoot() + assert.NoError(err) + + state := s1.fsm.State() + _, activeRoot, err := state.CARootActive(nil) + assert.NoError(err) + assert.Equal(root.ID, activeRoot.ID) + assert.Equal(root.Name, activeRoot.Name) + assert.Equal(root.RootCert, activeRoot.RootCert) +} diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 8d62ca1aa..91bacee2f 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -1,7 +1,6 @@ package consul import ( - "crypto/x509" "fmt" "net" "strconv" @@ -476,18 +475,6 @@ func (s *Server) setCAProvider(newProvider connect.CAProvider) { s.caProvider = newProvider } -// signConnectCert signs a cert for a service using the currently configured CA provider -func (s *Server) signConnectCert(service *connect.SpiffeIDService, csr *x509.CertificateRequest) (*structs.IssuedCert, error) { - s.caProviderLock.RLock() - defer s.caProviderLock.RUnlock() - - cert, err := s.caProvider.Sign(service, csr) - if err != nil { - return nil, err - } - return cert, nil 
-} - // reconcileReaped is used to reconcile nodes that have failed and been reaped // from Serf but remain in the catalog. This is done by looking for unknown nodes with serfHealth checks registered. // We generate a "reap" event to cause the node to be cleaned up. diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 1e2959dd1..33c355fca 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -31,6 +31,10 @@ type CARoot struct { // RootCert is the PEM-encoded public certificate. RootCert string + // Intermediates is a list of PEM-encoded intermediate certs to + // attach to any leaf certs signed by this CA. + Intermediates []string + // SigningCert is the PEM-encoded signing certificate and SigningKey // is the PEM-encoded private key for the signing certificate. These // may actually be empty if the CA plugin in use manages these for us. From 8584e9262e050975566671378b4c10f6b82e2526 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 3 Apr 2018 20:46:07 -0700 Subject: [PATCH 150/627] agent/cache: initial kind-of working cache --- agent/cache/cache.go | 239 ++++++++++++++++++++++++++++++++++++ agent/cache/cache_test.go | 200 ++++++++++++++++++++++++++++++ agent/cache/mock_RPC.go | 23 ++++ agent/cache/mock_Request.go | 37 ++++++ agent/cache/mock_Type.go | 30 +++++ agent/cache/request.go | 17 +++ agent/cache/testing.go | 84 +++++++++++++ agent/cache/type.go | 68 ++++++++++ 8 files changed, 698 insertions(+) create mode 100644 agent/cache/cache.go create mode 100644 agent/cache/cache_test.go create mode 100644 agent/cache/mock_RPC.go create mode 100644 agent/cache/mock_Request.go create mode 100644 agent/cache/mock_Type.go create mode 100644 agent/cache/request.go create mode 100644 agent/cache/testing.go create mode 100644 agent/cache/type.go diff --git a/agent/cache/cache.go b/agent/cache/cache.go new file mode 100644 index 000000000..d0172cdc0 --- /dev/null +++ b/agent/cache/cache.go @@ -0,0 +1,239 @@ +// 
Package cache provides caching features for data from a Consul server. +// +// While this is similar in some ways to the "agent/ae" package, a key +// difference is that with anti-entropy, the agent is the authoritative +// source so it resolves differences the server may have. With caching (this +// package), the server is the authoritative source and we do our best to +// balance performance and correctness, depending on the type of data being +// requested. +// +// Currently, the cache package supports only continuous, blocking query +// caching. This means that the cache update is edge-triggered by Consul +// server blocking queries. +package cache + +import ( + "fmt" + "sync" + "time" +) + +//go:generate mockery -all -inpkg + +// Pre-written options for type registration. These should not be modified. +var ( + // RegisterOptsPeriodic performs a periodic refresh of data fetched + // by the registered type. + RegisterOptsPeriodic = &RegisterOptions{ + Refresh: true, + RefreshTimer: 30 * time.Second, + RefreshTimeout: 5 * time.Minute, + } +) + +// TODO: DC-aware + +// RPC is an interface that an RPC client must implement. +type RPC interface { + RPC(method string, args interface{}, reply interface{}) error +} + +// Cache is a agent-local cache of Consul data. +type Cache struct { + // rpcClient is the RPC-client. + rpcClient RPC + + entriesLock sync.RWMutex + entries map[string]cacheEntry + + typesLock sync.RWMutex + types map[string]typeEntry +} + +type cacheEntry struct { + // Fields pertaining to the actual value + Value interface{} + Error error + Index uint64 + + // Metadata that is used for internal accounting + Valid bool + Fetching bool + Waiter chan struct{} +} + +// typeEntry is a single type that is registered with a Cache. +type typeEntry struct { + Type Type + Opts *RegisterOptions +} + +// New creates a new cache with the given RPC client and reasonable defaults. +// Further settings can be tweaked on the returned value. 
+func New(rpc RPC) *Cache { + return &Cache{ + rpcClient: rpc, + entries: make(map[string]cacheEntry), + types: make(map[string]typeEntry), + } +} + +// RegisterOptions are options that can be associated with a type being +// registered for the cache. This changes the behavior of the cache for +// this type. +type RegisterOptions struct { + // Refresh configures whether the data is actively refreshed or if + // the data is only refreshed on an explicit Get. The default (false) + // is to only request data on explicit Get. + Refresh bool + + // RefreshTimer is the time between attempting to refresh data. + // If this is zero, then data is refreshed immediately when a fetch + // is returned. + // + // RefreshTimeout determines the maximum query time for a refresh + // operation. This is specified as part of the query options and is + // expected to be implemented by the Type itself. + // + // Using these values, various "refresh" mechanisms can be implemented: + // + // * With a high timer duration and a low timeout, a timer-based + // refresh can be set that minimizes load on the Consul servers. + // + // * With a low timer and high timeout duration, a blocking-query-based + // refresh can be set so that changes in server data are recognized + // within the cache very quickly. + // + RefreshTimer time.Duration + RefreshTimeout time.Duration +} + +// RegisterType registers a cacheable type. +func (c *Cache) RegisterType(n string, typ Type, opts *RegisterOptions) { + c.typesLock.Lock() + defer c.typesLock.Unlock() + c.types[n] = typeEntry{Type: typ, Opts: opts} +} + +// Get loads the data for the given type and request. If data satisfying the +// minimum index is present in the cache, it is returned immediately. Otherwise, +// this will block until the data is available or the request timeout is +// reached. +// +// Multiple Get calls for the same Request (matching CacheKey value) will +// block on a single network request. 
+func (c *Cache) Get(t string, r Request) (interface{}, error) { + key := r.CacheKey() + idx := r.CacheMinIndex() + +RETRY_GET: + // Get the current value + c.entriesLock.RLock() + entry, ok := c.entries[key] + c.entriesLock.RUnlock() + + // If we have a current value and the index is greater than the + // currently stored index then we return that right away. If the + // index is zero and we have something in the cache we accept whatever + // we have. + if ok && entry.Valid && (idx == 0 || idx < entry.Index) { + return entry.Value, nil + } + + // At this point, we know we either don't have a value at all or the + // value we have is too old. We need to wait for new data. + waiter, err := c.fetch(t, r) + if err != nil { + return nil, err + } + + // Wait on our waiter and then retry the cache load + <-waiter + goto RETRY_GET +} + +func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { + // Get the type that we're fetching + c.typesLock.RLock() + tEntry, ok := c.types[t] + c.typesLock.RUnlock() + if !ok { + return nil, fmt.Errorf("unknown type in cache: %s", t) + } + + // The cache key is used multiple times and might be dynamically + // constructed so let's just store it once here. + key := r.CacheKey() + + c.entriesLock.Lock() + defer c.entriesLock.Unlock() + entry, ok := c.entries[key] + + // If we already have an entry and it is actively fetching, then return + // the currently active waiter. + if ok && entry.Fetching { + return entry.Waiter, nil + } + + // If we don't have an entry, then create it. The entry must be marked + // as invalid so that it isn't returned as a valid value for a zero index. + if !ok { + entry = cacheEntry{Valid: false, Waiter: make(chan struct{})} + } + + // Set that we're fetching to true, which makes it so that future + // identical calls to fetch will return the same waiter rather than + // perform multiple fetches. 
+ entry.Fetching = true + c.entries[key] = entry + + // The actual Fetch must be performed in a goroutine. + go func() { + // Start building the new entry by blocking on the fetch. + var newEntry cacheEntry + result, err := tEntry.Type.Fetch(FetchOptions{ + RPC: c.rpcClient, + MinIndex: entry.Index, + }, r) + newEntry.Value = result.Value + newEntry.Index = result.Index + newEntry.Error = err + + // This is a valid entry with a result + newEntry.Valid = true + + // Create a new waiter that will be used for the next fetch. + newEntry.Waiter = make(chan struct{}) + + // Insert + c.entriesLock.Lock() + c.entries[key] = newEntry + c.entriesLock.Unlock() + + // Trigger the waiter + close(entry.Waiter) + + // If refresh is enabled, run the refresh in due time. The refresh + // below might block, but saves us from spawning another goroutine. + if tEntry.Opts != nil && tEntry.Opts.Refresh { + c.refresh(tEntry.Opts, t, r) + } + }() + + return entry.Waiter, nil +} + +func (c *Cache) refresh(opts *RegisterOptions, t string, r Request) { + // Sanity-check, we should not schedule anything that has refresh disabled + if !opts.Refresh { + return + } + + // If we have a timer, wait for it + if opts.RefreshTimer > 0 { + time.Sleep(opts.RefreshTimer) + } + + // Trigger + c.fetch(t, r) +} diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go new file mode 100644 index 000000000..d82ded195 --- /dev/null +++ b/agent/cache/cache_test.go @@ -0,0 +1,200 @@ +package cache + +import ( + "sort" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +// Test a basic Get with no indexes (and therefore no blocking queries). 
+func TestCacheGet_noIndex(t *testing.T) { + t.Parallel() + + require := require.New(t) + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + c.RegisterType("t", typ, nil) + + // Configure the type + typ.Static(FetchResult{Value: 42}, nil).Times(1) + + // Get, should fetch + req := TestRequest(t, "hello", 0) + result, err := c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Get, should not fetch since we already have a satisfying value + result, err = c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Sleep a tiny bit just to let maybe some background calls happen + // then verify that we still only got the one call + time.Sleep(20 * time.Millisecond) + typ.AssertExpectations(t) +} + +// Test that Get blocks on the initial value +func TestCacheGet_blockingInitSameKey(t *testing.T) { + t.Parallel() + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + c.RegisterType("t", typ, nil) + + // Configure the type + triggerCh := make(chan time.Time) + typ.Static(FetchResult{Value: 42}, nil).WaitUntil(triggerCh).Times(1) + + // Perform multiple gets + getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) + getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) + + // They should block + select { + case <-getCh1: + t.Fatal("should block (ch1)") + case <-getCh2: + t.Fatal("should block (ch2)") + case <-time.After(50 * time.Millisecond): + } + + // Trigger it + close(triggerCh) + + // Should return + TestCacheGetChResult(t, getCh1, 42) + TestCacheGetChResult(t, getCh2, 42) +} + +// Test that Get with different cache keys both block on initial value +// but that the fetches were both properly called. 
+func TestCacheGet_blockingInitDiffKeys(t *testing.T) { + t.Parallel() + + require := require.New(t) + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + c.RegisterType("t", typ, nil) + + // Keep track of the keys + var keysLock sync.Mutex + var keys []string + + // Configure the type + triggerCh := make(chan time.Time) + typ.Static(FetchResult{Value: 42}, nil). + WaitUntil(triggerCh). + Times(2). + Run(func(args mock.Arguments) { + keysLock.Lock() + defer keysLock.Unlock() + keys = append(keys, args.Get(1).(Request).CacheKey()) + }) + + // Perform multiple gets + getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) + getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, "goodbye", 0)) + + // They should block + select { + case <-getCh1: + t.Fatal("should block (ch1)") + case <-getCh2: + t.Fatal("should block (ch2)") + case <-time.After(50 * time.Millisecond): + } + + // Trigger it + close(triggerCh) + + // Should return both! + TestCacheGetChResult(t, getCh1, 42) + TestCacheGetChResult(t, getCh2, 42) + + // Verify proper keys + sort.Strings(keys) + require.Equal([]string{"goodbye", "hello"}, keys) +} + +// Test a get with an index set will wait until an index that is higher +// is set in the cache. 
+func TestCacheGet_blockingIndex(t *testing.T) { + t.Parallel() + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + c.RegisterType("t", typ, nil) + + // Configure the type + triggerCh := make(chan time.Time) + typ.Static(FetchResult{Value: 1, Index: 4}, nil).Once() + typ.Static(FetchResult{Value: 12, Index: 5}, nil).Once() + typ.Static(FetchResult{Value: 42, Index: 6}, nil).WaitUntil(triggerCh) + + // Fetch should block + resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 5)) + + // Should block + select { + case <-resultCh: + t.Fatal("should block") + case <-time.After(50 * time.Millisecond): + } + + // Wait a bit + close(triggerCh) + + // Should return + TestCacheGetChResult(t, resultCh, 42) +} + +// Test that a type registered with a periodic refresh will perform +// that refresh after the timer is up. +func TestCacheGet_periodicRefresh(t *testing.T) { + t.Parallel() + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + c.RegisterType("t", typ, &RegisterOptions{ + Refresh: true, + RefreshTimer: 100 * time.Millisecond, + RefreshTimeout: 5 * time.Minute, + }) + + // This is a bit weird, but we do this to ensure that the final + // call to the Fetch (if it happens, depends on timing) just blocks. 
+ triggerCh := make(chan time.Time) + defer close(triggerCh) + + // Configure the type + typ.Static(FetchResult{Value: 1, Index: 4}, nil).Once() + typ.Static(FetchResult{Value: 12, Index: 5}, nil).Once() + typ.Static(FetchResult{Value: 12, Index: 5}, nil).WaitUntil(triggerCh) + + // Fetch should block + resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) + TestCacheGetChResult(t, resultCh, 1) + + // Fetch again almost immediately should return old result + time.Sleep(5 * time.Millisecond) + resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) + TestCacheGetChResult(t, resultCh, 1) + + // Wait for the timer + time.Sleep(200 * time.Millisecond) + resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) + TestCacheGetChResult(t, resultCh, 12) +} diff --git a/agent/cache/mock_RPC.go b/agent/cache/mock_RPC.go new file mode 100644 index 000000000..a1100d2a7 --- /dev/null +++ b/agent/cache/mock_RPC.go @@ -0,0 +1,23 @@ +// Code generated by mockery v1.0.0 +package cache + +import mock "github.com/stretchr/testify/mock" + +// MockRPC is an autogenerated mock type for the RPC type +type MockRPC struct { + mock.Mock +} + +// RPC provides a mock function with given fields: method, args, reply +func (_m *MockRPC) RPC(method string, args interface{}, reply interface{}) error { + ret := _m.Called(method, args, reply) + + var r0 error + if rf, ok := ret.Get(0).(func(string, interface{}, interface{}) error); ok { + r0 = rf(method, args, reply) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/agent/cache/mock_Request.go b/agent/cache/mock_Request.go new file mode 100644 index 000000000..157912182 --- /dev/null +++ b/agent/cache/mock_Request.go @@ -0,0 +1,37 @@ +// Code generated by mockery v1.0.0 +package cache + +import mock "github.com/stretchr/testify/mock" + +// MockRequest is an autogenerated mock type for the Request type +type MockRequest struct { + mock.Mock +} + +// CacheKey provides a mock function with given fields: +func 
(_m *MockRequest) CacheKey() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// CacheMinIndex provides a mock function with given fields: +func (_m *MockRequest) CacheMinIndex() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} diff --git a/agent/cache/mock_Type.go b/agent/cache/mock_Type.go new file mode 100644 index 000000000..110fc5787 --- /dev/null +++ b/agent/cache/mock_Type.go @@ -0,0 +1,30 @@ +// Code generated by mockery v1.0.0 +package cache + +import mock "github.com/stretchr/testify/mock" + +// MockType is an autogenerated mock type for the Type type +type MockType struct { + mock.Mock +} + +// Fetch provides a mock function with given fields: _a0, _a1 +func (_m *MockType) Fetch(_a0 FetchOptions, _a1 Request) (FetchResult, error) { + ret := _m.Called(_a0, _a1) + + var r0 FetchResult + if rf, ok := ret.Get(0).(func(FetchOptions, Request) FetchResult); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(FetchResult) + } + + var r1 error + if rf, ok := ret.Get(1).(func(FetchOptions, Request) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/agent/cache/request.go b/agent/cache/request.go new file mode 100644 index 000000000..c75c8ad84 --- /dev/null +++ b/agent/cache/request.go @@ -0,0 +1,17 @@ +package cache + +// Request is a cache-able request. +// +// This interface is typically implemented by request structures in +// the agent/structs package. +type Request interface { + // CacheKey is a unique cache key for this request. This key should + // absolutely uniquely identify this request, since any conflicting + // cache keys could result in invalid data being returned from the cache. + CacheKey() string + + // CacheMinIndex is the minimum index being queried. 
This is used to + // determine if we already have data satisfying the query or if we need + // to block until new data is available. + CacheMinIndex() uint64 +} diff --git a/agent/cache/testing.go b/agent/cache/testing.go new file mode 100644 index 000000000..7bf2bf891 --- /dev/null +++ b/agent/cache/testing.go @@ -0,0 +1,84 @@ +package cache + +import ( + "reflect" + "time" + + "github.com/mitchellh/go-testing-interface" + "github.com/stretchr/testify/mock" +) + +// TestCache returns a Cache instance configuring for testing. +func TestCache(t testing.T) *Cache { + // Simple but lets us do some fine-tuning later if we want to. + return New(TestRPC(t)) +} + +// TestCacheGetCh returns a channel that returns the result of the Get call. +// This is useful for testing timing and concurrency with Get calls. Any +// error will be logged, so the result value should always be asserted. +func TestCacheGetCh(t testing.T, c *Cache, typ string, r Request) <-chan interface{} { + resultCh := make(chan interface{}) + go func() { + result, err := c.Get(typ, r) + if err != nil { + t.Logf("Error: %s", err) + close(resultCh) + return + } + + resultCh <- result + }() + + return resultCh +} + +// TestCacheGetChResult tests that the result from TestCacheGetCh matches +// within a reasonable period of time (it expects it to be "immediate" but +// waits some milliseconds). +func TestCacheGetChResult(t testing.T, ch <-chan interface{}, expected interface{}) { + t.Helper() + + select { + case result := <-ch: + if !reflect.DeepEqual(result, expected) { + t.Fatalf("Result doesn't match!\n\n%#v\n\n%#v", result, expected) + } + case <-time.After(50 * time.Millisecond): + } +} + +// TestRequest returns a Request that returns the given cache key and index. +// The Reset method can be called to reset it for custom usage. 
+func TestRequest(t testing.T, key string, index uint64) *MockRequest { + req := &MockRequest{} + req.On("CacheKey").Return(key) + req.On("CacheMinIndex").Return(index) + return req +} + +// TestRPC returns a mock implementation of the RPC interface. +func TestRPC(t testing.T) *MockRPC { + // This function is relatively useless but this allows us to perhaps + // perform some initialization later. + return &MockRPC{} +} + +// TestType returns a MockType that can be used to setup expectations +// on data fetching. +func TestType(t testing.T) *MockType { + typ := &MockType{} + return typ +} + +// A bit weird, but we add methods to the auto-generated structs here so that +// they don't get clobbered. The helper methods are conveniences. + +// Static sets a static value to return for a call to Fetch. +func (m *MockType) Static(r FetchResult, err error) *mock.Call { + return m.Mock.On("Fetch", mock.Anything, mock.Anything).Return(r, err) +} + +func (m *MockRequest) Reset() { + m.Mock = mock.Mock{} +} diff --git a/agent/cache/type.go b/agent/cache/type.go new file mode 100644 index 000000000..fbb65761f --- /dev/null +++ b/agent/cache/type.go @@ -0,0 +1,68 @@ +package cache + +import ( + "time" +) + +// Type implement the logic to fetch certain types of data. +type Type interface { + // Fetch fetches a single unique item. + // + // The FetchOptions contain the index and timeouts for blocking queries. + // The CacheMinIndex value on the Request itself should NOT be used + // as the blocking index since a request may be reused multiple times + // as part of Refresh behavior. + // + // The return value is a FetchResult which contains information about + // the fetch. + Fetch(FetchOptions, Request) (FetchResult, error) +} + +// FetchOptions are various settable options when a Fetch is called. +type FetchOptions struct { + // RPC is the RPC client to communicate to a Consul server. + RPC RPC + + // MinIndex is the minimum index to be used for blocking queries. 
+ // If blocking queries aren't supported for data being returned, + // this value can be ignored. + MinIndex uint64 + + // Timeout is the maximum time for the query. This must be implemented + // in the Fetch itself. + Timeout time.Duration +} + +// FetchResult is the result of a Type Fetch operation and contains the +// data along with metadata gathered from that operation. +type FetchResult struct { + // Value is the result of the fetch. + Value interface{} + + // Index is the corresponding index value for this data. + Index uint64 +} + +/* +type TypeCARoot struct{} + +func (c *TypeCARoot) Fetch(delegate RPC, idx uint64, req Request) (interface{}, uint64, error) { + // The request should be a DCSpecificRequest. + reqReal, ok := req.(*structs.DCSpecificRequest) + if !ok { + return nil, 0, fmt.Errorf( + "Internal cache failure: request wrong type: %T", req) + } + + // Set the minimum query index to our current index so we block + reqReal.QueryOptions.MinQueryIndex = idx + + // Fetch + var reply structs.IndexedCARoots + if err := delegate.RPC("ConnectCA.Roots", reqReal, &reply); err != nil { + return nil, 0, err + } + + return &reply, reply.QueryMeta.Index, nil +} +*/ From c69df79e0c698917ffe162c13a7d50a8d30c81fc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 8 Apr 2018 14:30:14 +0100 Subject: [PATCH 151/627] agent/cache: blank cache key means to always fetch --- agent/cache/cache.go | 70 ++++++++++++++++++++-------------- agent/cache/cache_test.go | 32 ++++++++++++++++ agent/cache/rpc.go | 8 ++++ agent/cache/testing.go | 2 +- agent/cache/type.go | 27 ------------- agent/cache/type_connect_ca.go | 39 +++++++++++++++++++ 6 files changed, 122 insertions(+), 56 deletions(-) create mode 100644 agent/cache/rpc.go create mode 100644 agent/cache/type_connect_ca.go diff --git a/agent/cache/cache.go b/agent/cache/cache.go index d0172cdc0..1c2f316dd 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -20,29 +20,10 @@ import ( //go:generate mockery 
-all -inpkg -// Pre-written options for type registration. These should not be modified. -var ( - // RegisterOptsPeriodic performs a periodic refresh of data fetched - // by the registered type. - RegisterOptsPeriodic = &RegisterOptions{ - Refresh: true, - RefreshTimer: 30 * time.Second, - RefreshTimeout: 5 * time.Minute, - } -) - -// TODO: DC-aware - -// RPC is an interface that an RPC client must implement. -type RPC interface { - RPC(method string, args interface{}, reply interface{}) error -} +// TODO: DC-aware, ACL-aware // Cache is a agent-local cache of Consul data. type Cache struct { - // rpcClient is the RPC-client. - rpcClient RPC - entriesLock sync.RWMutex entries map[string]cacheEntry @@ -50,6 +31,7 @@ type Cache struct { types map[string]typeEntry } +// cacheEntry stores a single cache entry. type cacheEntry struct { // Fields pertaining to the actual value Value interface{} @@ -68,13 +50,17 @@ type typeEntry struct { Opts *RegisterOptions } +// Options are options for the Cache. +type Options struct { + // Nothing currently, reserved. +} + // New creates a new cache with the given RPC client and reasonable defaults. // Further settings can be tweaked on the returned value. -func New(rpc RPC) *Cache { +func New(*Options) *Cache { return &Cache{ - rpcClient: rpc, - entries: make(map[string]cacheEntry), - types: make(map[string]typeEntry), + entries: make(map[string]cacheEntry), + types: make(map[string]typeEntry), } } @@ -124,7 +110,11 @@ func (c *Cache) RegisterType(n string, typ Type, opts *RegisterOptions) { // block on a single network request. func (c *Cache) Get(t string, r Request) (interface{}, error) { key := r.CacheKey() - idx := r.CacheMinIndex() + if key == "" { + // If no key is specified, then we do not cache this request. + // Pass directly through to the backend. + return c.fetchDirect(t, r) + } RETRY_GET: // Get the current value @@ -136,8 +126,11 @@ RETRY_GET: // currently stored index then we return that right away. 
If the // index is zero and we have something in the cache we accept whatever // we have. - if ok && entry.Valid && (idx == 0 || idx < entry.Index) { - return entry.Value, nil + if ok && entry.Valid { + idx := r.CacheMinIndex() + if idx == 0 || idx < entry.Index { + return entry.Value, nil + } } // At this point, we know we either don't have a value at all or the @@ -192,7 +185,6 @@ func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { // Start building the new entry by blocking on the fetch. var newEntry cacheEntry result, err := tEntry.Type.Fetch(FetchOptions{ - RPC: c.rpcClient, MinIndex: entry.Index, }, r) newEntry.Value = result.Value @@ -223,6 +215,28 @@ func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { return entry.Waiter, nil } +// fetchDirect fetches the given request with no caching. +func (c *Cache) fetchDirect(t string, r Request) (interface{}, error) { + // Get the type that we're fetching + c.typesLock.RLock() + tEntry, ok := c.types[t] + c.typesLock.RUnlock() + if !ok { + return nil, fmt.Errorf("unknown type in cache: %s", t) + } + + // Fetch it with the min index specified directly by the request. + result, err := tEntry.Type.Fetch(FetchOptions{ + MinIndex: r.CacheMinIndex(), + }, r) + if err != nil { + return nil, err + } + + // Return the result and ignore the rest + return result.Value, nil +} + func (c *Cache) refresh(opts *RegisterOptions, t string, r Request) { // Sanity-check, we should not schedule anything that has refresh disabled if !opts.Refresh { diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index d82ded195..69f99a628 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -41,6 +41,38 @@ func TestCacheGet_noIndex(t *testing.T) { typ.AssertExpectations(t) } +// Test a Get with a request that returns a blank cache key. This should +// force a backend request and skip the cache entirely. 
+func TestCacheGet_blankCacheKey(t *testing.T) { + t.Parallel() + + require := require.New(t) + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + c.RegisterType("t", typ, nil) + + // Configure the type + typ.Static(FetchResult{Value: 42}, nil).Times(2) + + // Get, should fetch + req := TestRequest(t, "", 0) + result, err := c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Get, should not fetch since we already have a satisfying value + result, err = c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Sleep a tiny bit just to let maybe some background calls happen + // then verify that we still only got the one call + time.Sleep(20 * time.Millisecond) + typ.AssertExpectations(t) +} + // Test that Get blocks on the initial value func TestCacheGet_blockingInitSameKey(t *testing.T) { t.Parallel() diff --git a/agent/cache/rpc.go b/agent/cache/rpc.go new file mode 100644 index 000000000..98976284a --- /dev/null +++ b/agent/cache/rpc.go @@ -0,0 +1,8 @@ +package cache + +// RPC is an interface that an RPC client must implement. This is a helper +// interface that is implemented by the agent delegate so that Type +// implementations can request RPC access. +type RPC interface { + RPC(method string, args interface{}, reply interface{}) error +} diff --git a/agent/cache/testing.go b/agent/cache/testing.go index 7bf2bf891..6a094c117 100644 --- a/agent/cache/testing.go +++ b/agent/cache/testing.go @@ -11,7 +11,7 @@ import ( // TestCache returns a Cache instance configuring for testing. func TestCache(t testing.T) *Cache { // Simple but lets us do some fine-tuning later if we want to. - return New(TestRPC(t)) + return New(nil) } // TestCacheGetCh returns a channel that returns the result of the Get call. 
diff --git a/agent/cache/type.go b/agent/cache/type.go index fbb65761f..6e8edeb5f 100644 --- a/agent/cache/type.go +++ b/agent/cache/type.go @@ -20,9 +20,6 @@ type Type interface { // FetchOptions are various settable options when a Fetch is called. type FetchOptions struct { - // RPC is the RPC client to communicate to a Consul server. - RPC RPC - // MinIndex is the minimum index to be used for blocking queries. // If blocking queries aren't supported for data being returned, // this value can be ignored. @@ -42,27 +39,3 @@ type FetchResult struct { // Index is the corresponding index value for this data. Index uint64 } - -/* -type TypeCARoot struct{} - -func (c *TypeCARoot) Fetch(delegate RPC, idx uint64, req Request) (interface{}, uint64, error) { - // The request should be a DCSpecificRequest. - reqReal, ok := req.(*structs.DCSpecificRequest) - if !ok { - return nil, 0, fmt.Errorf( - "Internal cache failure: request wrong type: %T", req) - } - - // Set the minimum query index to our current index so we block - reqReal.QueryOptions.MinQueryIndex = idx - - // Fetch - var reply structs.IndexedCARoots - if err := delegate.RPC("ConnectCA.Roots", reqReal, &reply); err != nil { - return nil, 0, err - } - - return &reply, reply.QueryMeta.Index, nil -} -*/ diff --git a/agent/cache/type_connect_ca.go b/agent/cache/type_connect_ca.go new file mode 100644 index 000000000..40bda72df --- /dev/null +++ b/agent/cache/type_connect_ca.go @@ -0,0 +1,39 @@ +package cache + +/* +import ( + "fmt" + + "github.com/hashicorp/consul/agent/structs" +) + +// TypeCARoot supports fetching the Connect CA roots. +type TypeCARoot struct { + RPC RPC +} + +func (c *TypeCARoot) Fetch(opts FetchOptions, req Request) (FetchResult, error) { + var result FetchResult + + // The request should be a DCSpecificRequest. 
+ reqReal, ok := req.(*structs.DCSpecificRequest) + if !ok { + return result, fmt.Errorf( + "Internal cache failure: request wrong type: %T", req) + } + + // Set the minimum query index to our current index so we block + reqReal.QueryOptions.MinQueryIndex = opts.MinIndex + reqReal.QueryOptions.MaxQueryTime = opts.Timeout + + // Fetch + var reply structs.IndexedCARoots + if err := c.RPC.RPC("ConnectCA.Roots", reqReal, &reply); err != nil { + return result, err + } + + result.Value = &reply + result.Index = reply.QueryMeta.Index + return result, nil +} +*/ From ecc789ddb59ab7a0ceff8bdfc70b775923c73a8b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 8 Apr 2018 14:45:55 +0100 Subject: [PATCH 152/627] agent/cache: ConnectCA roots caching type --- agent/cache/type_connect_ca.go | 2 -- agent/cache/type_connect_ca_test.go | 55 +++++++++++++++++++++++++++++ agent/structs/structs.go | 20 +++++++++++ 3 files changed, 75 insertions(+), 2 deletions(-) create mode 100644 agent/cache/type_connect_ca_test.go diff --git a/agent/cache/type_connect_ca.go b/agent/cache/type_connect_ca.go index 40bda72df..6a0a6699c 100644 --- a/agent/cache/type_connect_ca.go +++ b/agent/cache/type_connect_ca.go @@ -1,6 +1,5 @@ package cache -/* import ( "fmt" @@ -36,4 +35,3 @@ func (c *TypeCARoot) Fetch(opts FetchOptions, req Request) (FetchResult, error) result.Index = reply.QueryMeta.Index return result, nil } -*/ diff --git a/agent/cache/type_connect_ca_test.go b/agent/cache/type_connect_ca_test.go new file mode 100644 index 000000000..359449d21 --- /dev/null +++ b/agent/cache/type_connect_ca_test.go @@ -0,0 +1,55 @@ +package cache + +import ( + "testing" + "time" + + "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestTypeCARoot(t *testing.T) { + require := require.New(t) + rpc := TestRPC(t) + defer rpc.AssertExpectations(t) + typ := &TypeCARoot{RPC: rpc} + + // Expect the proper RPC call. 
This also sets the expected value + // since that is return-by-pointer in the arguments. + var resp *structs.IndexedCARoots + rpc.On("RPC", "ConnectCA.Roots", mock.Anything, mock.Anything).Return(nil). + Run(func(args mock.Arguments) { + req := args.Get(1).(*structs.DCSpecificRequest) + require.Equal(uint64(24), req.QueryOptions.MinQueryIndex) + require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime) + + reply := args.Get(2).(*structs.IndexedCARoots) + reply.QueryMeta.Index = 48 + resp = reply + }) + + // Fetch + result, err := typ.Fetch(FetchOptions{ + MinIndex: 24, + Timeout: 1 * time.Second, + }, &structs.DCSpecificRequest{Datacenter: "dc1"}) + require.Nil(err) + require.Equal(FetchResult{ + Value: resp, + Index: 48, + }, result) +} + +func TestTypeCARoot_badReqType(t *testing.T) { + require := require.New(t) + rpc := TestRPC(t) + defer rpc.AssertExpectations(t) + typ := &TypeCARoot{RPC: rpc} + + // Fetch + _, err := typ.Fetch(FetchOptions{}, TestRequest(t, "foo", 64)) + require.NotNil(err) + require.Contains(err.Error(), "wrong type") + +} diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 4f25e50f0..d40c90baa 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -6,6 +6,7 @@ import ( "math/rand" "reflect" "regexp" + "strconv" "strings" "time" @@ -14,6 +15,7 @@ import ( "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/go-multierror" "github.com/hashicorp/serf/coordinate" + "github.com/mitchellh/hashstructure" ) type MessageType uint8 @@ -276,6 +278,24 @@ func (r *DCSpecificRequest) RequestDatacenter() string { return r.Datacenter } +func (r *DCSpecificRequest) CacheKey() string { + // To calculate the cache key we only hash the node filters. The + // datacenter is handled by the cache framework. The other fields are + // not, but should not be used in any cache types. + v, err := hashstructure.Hash(r.NodeMetaFilters, nil) + if err != nil { + // Empty string means do not cache. 
If we have an error we should + // just forward along to the server. + return "" + } + + return strconv.FormatUint(v, 10) +} + +func (r *DCSpecificRequest) CacheMinIndex() uint64 { + return r.QueryOptions.MinQueryIndex +} + // ServiceSpecificRequest is used to query about a specific service type ServiceSpecificRequest struct { Datacenter string From 72c82a9b29aac384dc67cb2511da09cd61567830 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 8 Apr 2018 15:08:34 +0100 Subject: [PATCH 153/627] agent/cache: Reorganize some files, RequestInfo struct, prepare for partitioning --- .../connect_ca.go} | 7 ++-- .../connect_ca_test.go} | 10 +++--- agent/{cache => cache-types}/mock_RPC.go | 2 +- agent/{cache => cache-types}/rpc.go | 4 ++- agent/cache-types/testing.go | 12 +++++++ agent/cache/cache.go | 22 ++++++------- agent/cache/cache_test.go | 23 ++++++------- agent/cache/mock_Request.go | 24 +++----------- agent/cache/request.go | 32 ++++++++++++++++--- agent/cache/testing.go | 12 ++----- agent/structs/structs.go | 18 +++++++---- 11 files changed, 94 insertions(+), 72 deletions(-) rename agent/{cache/type_connect_ca.go => cache-types/connect_ca.go} (79%) rename agent/{cache/type_connect_ca_test.go => cache-types/connect_ca_test.go} (83%) rename agent/{cache => cache-types}/mock_RPC.go (96%) rename agent/{cache => cache-types}/rpc.go (83%) create mode 100644 agent/cache-types/testing.go diff --git a/agent/cache/type_connect_ca.go b/agent/cache-types/connect_ca.go similarity index 79% rename from agent/cache/type_connect_ca.go rename to agent/cache-types/connect_ca.go index 6a0a6699c..85962b1fb 100644 --- a/agent/cache/type_connect_ca.go +++ b/agent/cache-types/connect_ca.go @@ -1,8 +1,9 @@ -package cache +package cachetype import ( "fmt" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/structs" ) @@ -11,8 +12,8 @@ type TypeCARoot struct { RPC RPC } -func (c *TypeCARoot) Fetch(opts FetchOptions, req Request) (FetchResult, error) { - var 
result FetchResult +func (c *TypeCARoot) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) { + var result cache.FetchResult // The request should be a DCSpecificRequest. reqReal, ok := req.(*structs.DCSpecificRequest) diff --git a/agent/cache/type_connect_ca_test.go b/agent/cache-types/connect_ca_test.go similarity index 83% rename from agent/cache/type_connect_ca_test.go rename to agent/cache-types/connect_ca_test.go index 359449d21..faf8317bd 100644 --- a/agent/cache/type_connect_ca_test.go +++ b/agent/cache-types/connect_ca_test.go @@ -1,9 +1,10 @@ -package cache +package cachetype import ( "testing" "time" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/structs" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -30,12 +31,12 @@ func TestTypeCARoot(t *testing.T) { }) // Fetch - result, err := typ.Fetch(FetchOptions{ + result, err := typ.Fetch(cache.FetchOptions{ MinIndex: 24, Timeout: 1 * time.Second, }, &structs.DCSpecificRequest{Datacenter: "dc1"}) require.Nil(err) - require.Equal(FetchResult{ + require.Equal(cache.FetchResult{ Value: resp, Index: 48, }, result) @@ -48,7 +49,8 @@ func TestTypeCARoot_badReqType(t *testing.T) { typ := &TypeCARoot{RPC: rpc} // Fetch - _, err := typ.Fetch(FetchOptions{}, TestRequest(t, "foo", 64)) + _, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest( + t, cache.RequestInfo{Key: "foo", MinIndex: 64})) require.NotNil(err) require.Contains(err.Error(), "wrong type") diff --git a/agent/cache/mock_RPC.go b/agent/cache-types/mock_RPC.go similarity index 96% rename from agent/cache/mock_RPC.go rename to agent/cache-types/mock_RPC.go index a1100d2a7..6f642c66b 100644 --- a/agent/cache/mock_RPC.go +++ b/agent/cache-types/mock_RPC.go @@ -1,5 +1,5 @@ // Code generated by mockery v1.0.0 -package cache +package cachetype import mock "github.com/stretchr/testify/mock" diff --git a/agent/cache/rpc.go b/agent/cache-types/rpc.go similarity index 83% rename 
from agent/cache/rpc.go rename to agent/cache-types/rpc.go index 98976284a..0aaf040f3 100644 --- a/agent/cache/rpc.go +++ b/agent/cache-types/rpc.go @@ -1,4 +1,6 @@ -package cache +package cachetype + +//go:generate mockery -all -inpkg // RPC is an interface that an RPC client must implement. This is a helper // interface that is implemented by the agent delegate so that Type diff --git a/agent/cache-types/testing.go b/agent/cache-types/testing.go new file mode 100644 index 000000000..bf68ec478 --- /dev/null +++ b/agent/cache-types/testing.go @@ -0,0 +1,12 @@ +package cachetype + +import ( + "github.com/mitchellh/go-testing-interface" +) + +// TestRPC returns a mock implementation of the RPC interface. +func TestRPC(t testing.T) *MockRPC { + // This function is relatively useless but this allows us to perhaps + // perform some initialization later. + return &MockRPC{} +} diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 1c2f316dd..04323a4c5 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -109,8 +109,8 @@ func (c *Cache) RegisterType(n string, typ Type, opts *RegisterOptions) { // Multiple Get calls for the same Request (matching CacheKey value) will // block on a single network request. func (c *Cache) Get(t string, r Request) (interface{}, error) { - key := r.CacheKey() - if key == "" { + info := r.CacheInfo() + if info.Key == "" { // If no key is specified, then we do not cache this request. // Pass directly through to the backend. return c.fetchDirect(t, r) @@ -119,7 +119,7 @@ func (c *Cache) Get(t string, r Request) (interface{}, error) { RETRY_GET: // Get the current value c.entriesLock.RLock() - entry, ok := c.entries[key] + entry, ok := c.entries[info.Key] c.entriesLock.RUnlock() // If we have a current value and the index is greater than the @@ -127,8 +127,7 @@ RETRY_GET: // index is zero and we have something in the cache we accept whatever // we have. 
if ok && entry.Valid { - idx := r.CacheMinIndex() - if idx == 0 || idx < entry.Index { + if info.MinIndex == 0 || info.MinIndex < entry.Index { return entry.Value, nil } } @@ -154,13 +153,12 @@ func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { return nil, fmt.Errorf("unknown type in cache: %s", t) } - // The cache key is used multiple times and might be dynamically - // constructed so let's just store it once here. - key := r.CacheKey() + // Grab the cache information while we're outside the lock. + info := r.CacheInfo() c.entriesLock.Lock() defer c.entriesLock.Unlock() - entry, ok := c.entries[key] + entry, ok := c.entries[info.Key] // If we already have an entry and it is actively fetching, then return // the currently active waiter. @@ -178,7 +176,7 @@ func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { // identical calls to fetch will return the same waiter rather than // perform multiple fetches. entry.Fetching = true - c.entries[key] = entry + c.entries[info.Key] = entry // The actual Fetch must be performed in a goroutine. go func() { @@ -199,7 +197,7 @@ func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { // Insert c.entriesLock.Lock() - c.entries[key] = newEntry + c.entries[info.Key] = newEntry c.entriesLock.Unlock() // Trigger the waiter @@ -227,7 +225,7 @@ func (c *Cache) fetchDirect(t string, r Request) (interface{}, error) { // Fetch it with the min index specified directly by the request. 
result, err := tEntry.Type.Fetch(FetchOptions{ - MinIndex: r.CacheMinIndex(), + MinIndex: r.CacheInfo().MinIndex, }, r) if err != nil { return nil, err diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 69f99a628..1bfed590c 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -25,7 +25,7 @@ func TestCacheGet_noIndex(t *testing.T) { typ.Static(FetchResult{Value: 42}, nil).Times(1) // Get, should fetch - req := TestRequest(t, "hello", 0) + req := TestRequest(t, RequestInfo{Key: "hello"}) result, err := c.Get("t", req) require.Nil(err) require.Equal(42, result) @@ -57,7 +57,7 @@ func TestCacheGet_blankCacheKey(t *testing.T) { typ.Static(FetchResult{Value: 42}, nil).Times(2) // Get, should fetch - req := TestRequest(t, "", 0) + req := TestRequest(t, RequestInfo{Key: ""}) result, err := c.Get("t", req) require.Nil(err) require.Equal(42, result) @@ -87,8 +87,8 @@ func TestCacheGet_blockingInitSameKey(t *testing.T) { typ.Static(FetchResult{Value: 42}, nil).WaitUntil(triggerCh).Times(1) // Perform multiple gets - getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) - getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) + getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"})) + getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"})) // They should block select { @@ -131,12 +131,12 @@ func TestCacheGet_blockingInitDiffKeys(t *testing.T) { Run(func(args mock.Arguments) { keysLock.Lock() defer keysLock.Unlock() - keys = append(keys, args.Get(1).(Request).CacheKey()) + keys = append(keys, args.Get(1).(Request).CacheInfo().Key) }) // Perform multiple gets - getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) - getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, "goodbye", 0)) + getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"})) + getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "goodbye"})) // They should block 
select { @@ -176,7 +176,8 @@ func TestCacheGet_blockingIndex(t *testing.T) { typ.Static(FetchResult{Value: 42, Index: 6}, nil).WaitUntil(triggerCh) // Fetch should block - resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 5)) + resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{ + Key: "hello", MinIndex: 5})) // Should block select { @@ -217,16 +218,16 @@ func TestCacheGet_periodicRefresh(t *testing.T) { typ.Static(FetchResult{Value: 12, Index: 5}, nil).WaitUntil(triggerCh) // Fetch should block - resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) + resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"})) TestCacheGetChResult(t, resultCh, 1) // Fetch again almost immediately should return old result time.Sleep(5 * time.Millisecond) - resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) + resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"})) TestCacheGetChResult(t, resultCh, 1) // Wait for the timer time.Sleep(200 * time.Millisecond) - resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, "hello", 0)) + resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"})) TestCacheGetChResult(t, resultCh, 12) } diff --git a/agent/cache/mock_Request.go b/agent/cache/mock_Request.go index 157912182..e3abd1515 100644 --- a/agent/cache/mock_Request.go +++ b/agent/cache/mock_Request.go @@ -8,29 +8,15 @@ type MockRequest struct { mock.Mock } -// CacheKey provides a mock function with given fields: -func (_m *MockRequest) CacheKey() string { +// CacheInfo provides a mock function with given fields: +func (_m *MockRequest) CacheInfo() RequestInfo { ret := _m.Called() - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { + var r0 RequestInfo + if rf, ok := ret.Get(0).(func() RequestInfo); ok { r0 = rf() } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// CacheMinIndex provides a mock function with given fields: -func (_m *MockRequest) 
CacheMinIndex() uint64 { - ret := _m.Called() - - var r0 uint64 - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) + r0 = ret.Get(0).(RequestInfo) } return r0 diff --git a/agent/cache/request.go b/agent/cache/request.go index c75c8ad84..b4a1b75d0 100644 --- a/agent/cache/request.go +++ b/agent/cache/request.go @@ -5,13 +5,35 @@ package cache // This interface is typically implemented by request structures in // the agent/structs package. type Request interface { - // CacheKey is a unique cache key for this request. This key should + // CacheInfo returns information used for caching this request. + CacheInfo() RequestInfo +} + +// RequestInfo represents cache information for a request. The caching +// framework uses this to control the behavior of caching and to determine +// cacheability. +type RequestInfo struct { + // Key is a unique cache key for this request. This key should // absolutely uniquely identify this request, since any conflicting // cache keys could result in invalid data being returned from the cache. - CacheKey() string + Key string - // CacheMinIndex is the minimum index being queried. This is used to + // Token is the ACL token associated with this request. + // + // Datacenter is the datacenter that the request is targeting. + // + // Both of these values are used to partition the cache. The cache framework + // today partitions data on these values to simplify behavior: by + // partitioning ACL tokens, the cache doesn't need to be smart about + // filtering results. By filtering datacenter results, the cache can + // service the multi-DC nature of Consul. This comes at the expense of + // working set size, but in general the effect is minimal. + Token string + Datacenter string + + // MinIndex is the minimum index being queried. This is used to // determine if we already have data satisfying the query or if we need - // to block until new data is available. 
- CacheMinIndex() uint64 + // to block until new data is available. If no index is available, the + // default value (zero) is acceptable. + MinIndex uint64 } diff --git a/agent/cache/testing.go b/agent/cache/testing.go index 6a094c117..365dc3b4e 100644 --- a/agent/cache/testing.go +++ b/agent/cache/testing.go @@ -50,20 +50,12 @@ func TestCacheGetChResult(t testing.T, ch <-chan interface{}, expected interface // TestRequest returns a Request that returns the given cache key and index. // The Reset method can be called to reset it for custom usage. -func TestRequest(t testing.T, key string, index uint64) *MockRequest { +func TestRequest(t testing.T, info RequestInfo) *MockRequest { req := &MockRequest{} - req.On("CacheKey").Return(key) - req.On("CacheMinIndex").Return(index) + req.On("CacheInfo").Return(info) return req } -// TestRPC returns a mock implementation of the RPC interface. -func TestRPC(t testing.T) *MockRPC { - // This function is relatively useless but this allows us to perhaps - // perform some initialization later. - return &MockRPC{} -} - // TestType returns a MockType that can be used to setup expectations // on data fetching. func TestType(t testing.T) *MockType { diff --git a/agent/structs/structs.go b/agent/structs/structs.go index d40c90baa..19a9c7313 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/types" "github.com/hashicorp/go-msgpack/codec" @@ -278,18 +279,23 @@ func (r *DCSpecificRequest) RequestDatacenter() string { return r.Datacenter } -func (r *DCSpecificRequest) CacheKey() string { +func (r *DCSpecificRequest) CacheInfo() cache.RequestInfo { + info := cache.RequestInfo{ + MinIndex: r.QueryOptions.MinQueryIndex, + } + // To calculate the cache key we only hash the node filters. The // datacenter is handled by the cache framework. 
The other fields are // not, but should not be used in any cache types. v, err := hashstructure.Hash(r.NodeMetaFilters, nil) - if err != nil { - // Empty string means do not cache. If we have an error we should - // just forward along to the server. - return "" + if err == nil { + // If there is an error, we don't set the key. A blank key forces + // no cache for this request so the request is forwarded directly + // to the server. + info.Key = strconv.FormatUint(v, 10) } - return strconv.FormatUint(v, 10) + return info } func (r *DCSpecificRequest) CacheMinIndex() uint64 { From 286217cbd847925651821ae9009e20761cab9029 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 10 Apr 2018 16:05:34 +0100 Subject: [PATCH 154/627] agent/cache: partition by DC/ACL token --- agent/cache/cache.go | 48 +++++++++++++++++++++++------------- agent/cache/cache_test.go | 52 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 17 deletions(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 04323a4c5..c512476d5 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -20,15 +20,23 @@ import ( //go:generate mockery -all -inpkg -// TODO: DC-aware, ACL-aware - // Cache is a agent-local cache of Consul data. type Cache struct { - entriesLock sync.RWMutex - entries map[string]cacheEntry - + // types stores the list of data types that the cache knows how to service. + // These can be dynamically registered with RegisterType. typesLock sync.RWMutex types map[string]typeEntry + + // entries contains the actual cache data. + // + // NOTE(mitchellh): The entry map key is currently a string in the format + // of "//" in order to properly partition + // requests to different datacenters and ACL tokens. This format has some + // big drawbacks: we can't evict by datacenter, ACL token, etc. For an + // initial implementaiton this works and the tests are agnostic to the + // internal storage format so changing this should be possible safely. 
+ entriesLock sync.RWMutex + entries map[string]cacheEntry } // cacheEntry stores a single cache entry. @@ -116,10 +124,13 @@ func (c *Cache) Get(t string, r Request) (interface{}, error) { return c.fetchDirect(t, r) } + // Get the actual key for our entry + key := c.entryKey(&info) + RETRY_GET: // Get the current value c.entriesLock.RLock() - entry, ok := c.entries[info.Key] + entry, ok := c.entries[key] c.entriesLock.RUnlock() // If we have a current value and the index is greater than the @@ -134,7 +145,7 @@ RETRY_GET: // At this point, we know we either don't have a value at all or the // value we have is too old. We need to wait for new data. - waiter, err := c.fetch(t, r) + waiter, err := c.fetch(t, key, r) if err != nil { return nil, err } @@ -144,7 +155,13 @@ RETRY_GET: goto RETRY_GET } -func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { +// entryKey returns the key for the entry in the cache. See the note +// about the entry key format in the structure docs for Cache. +func (c *Cache) entryKey(r *RequestInfo) string { + return fmt.Sprintf("%s/%s/%s", r.Datacenter, r.Token, r.Key) +} + +func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { // Get the type that we're fetching c.typesLock.RLock() tEntry, ok := c.types[t] @@ -153,12 +170,9 @@ func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { return nil, fmt.Errorf("unknown type in cache: %s", t) } - // Grab the cache information while we're outside the lock. - info := r.CacheInfo() - c.entriesLock.Lock() defer c.entriesLock.Unlock() - entry, ok := c.entries[info.Key] + entry, ok := c.entries[key] // If we already have an entry and it is actively fetching, then return // the currently active waiter. @@ -176,7 +190,7 @@ func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { // identical calls to fetch will return the same waiter rather than // perform multiple fetches. 
entry.Fetching = true - c.entries[info.Key] = entry + c.entries[key] = entry // The actual Fetch must be performed in a goroutine. go func() { @@ -197,7 +211,7 @@ func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { // Insert c.entriesLock.Lock() - c.entries[info.Key] = newEntry + c.entries[key] = newEntry c.entriesLock.Unlock() // Trigger the waiter @@ -206,7 +220,7 @@ func (c *Cache) fetch(t string, r Request) (<-chan struct{}, error) { // If refresh is enabled, run the refresh in due time. The refresh // below might block, but saves us from spawning another goroutine. if tEntry.Opts != nil && tEntry.Opts.Refresh { - c.refresh(tEntry.Opts, t, r) + c.refresh(tEntry.Opts, t, key, r) } }() @@ -235,7 +249,7 @@ func (c *Cache) fetchDirect(t string, r Request) (interface{}, error) { return result.Value, nil } -func (c *Cache) refresh(opts *RegisterOptions, t string, r Request) { +func (c *Cache) refresh(opts *RegisterOptions, t string, key string, r Request) { // Sanity-check, we should not schedule anything that has refresh disabled if !opts.Refresh { return @@ -247,5 +261,5 @@ func (c *Cache) refresh(opts *RegisterOptions, t string, r Request) { } // Trigger - c.fetch(t, r) + c.fetch(t, key, r) } diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 1bfed590c..1e75490a0 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -1,6 +1,7 @@ package cache import ( + "fmt" "sort" "sync" "testing" @@ -231,3 +232,54 @@ func TestCacheGet_periodicRefresh(t *testing.T) { resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"})) TestCacheGetChResult(t, resultCh, 12) } + +// Test that Get partitions the caches based on DC so two equivalent requests +// to different datacenters are automatically cached even if their keys are +// the same. 
+func TestCacheGet_partitionDC(t *testing.T) { + t.Parallel() + + c := TestCache(t) + c.RegisterType("t", &testPartitionType{}, nil) + + // Perform multiple gets + getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{ + Datacenter: "dc1", Key: "hello"})) + getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{ + Datacenter: "dc9", Key: "hello"})) + + // Should return both! + TestCacheGetChResult(t, getCh1, "dc1") + TestCacheGetChResult(t, getCh2, "dc9") +} + +// Test that Get partitions the caches based on token so two equivalent requests +// with different ACL tokens do not return the same result. +func TestCacheGet_partitionToken(t *testing.T) { + t.Parallel() + + c := TestCache(t) + c.RegisterType("t", &testPartitionType{}, nil) + + // Perform multiple gets + getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{ + Token: "", Key: "hello"})) + getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{ + Token: "foo", Key: "hello"})) + + // Should return both! + TestCacheGetChResult(t, getCh1, "") + TestCacheGetChResult(t, getCh2, "foo") +} + +// testPartitionType implements Type for testing that simply returns a value +// comprised of the request DC and ACL token, used for testing cache +// partitioning. 
+type testPartitionType struct{} + +func (t *testPartitionType) Fetch(opts FetchOptions, r Request) (FetchResult, error) { + info := r.CacheInfo() + return FetchResult{ + Value: fmt.Sprintf("%s%s", info.Datacenter, info.Token), + }, nil +} From 8bb4fd95a670e6eea15cc746366a44432875fb59 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 11 Apr 2018 09:52:51 +0100 Subject: [PATCH 155/627] agent: initialize the cache and cache the CA roots --- agent/agent.go | 26 ++++++++++++++++++++++++++ agent/agent_endpoint.go | 23 +++++++++++++++++++---- agent/cache-types/connect_ca.go | 9 ++++++--- agent/cache-types/connect_ca_test.go | 8 ++++---- 4 files changed, 55 insertions(+), 11 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 277bdd046..b6e923ee3 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -21,6 +21,8 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/ae" + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/consul" @@ -118,6 +120,9 @@ type Agent struct { // and the remote state. sync *ae.StateSyncer + // cache is the in-memory cache for data the Agent requests. + cache *cache.Cache + // checkReapAfter maps the check ID to a timeout after which we should // reap its associated service checkReapAfter map[types.CheckID]time.Duration @@ -290,6 +295,9 @@ func (a *Agent) Start() error { // regular and on-demand state synchronizations (anti-entropy). 
a.sync = ae.NewStateSyncer(a.State, c.AEInterval, a.shutdownCh, a.logger) + // create the cache + a.cache = cache.New(nil) + // create the config for the rpc server/client consulCfg, err := a.consulConfig() if err != nil { @@ -326,6 +334,9 @@ func (a *Agent) Start() error { a.State.Delegate = a.delegate a.State.TriggerSyncChanges = a.sync.SyncChanges.Trigger + // Register the cache + a.registerCache() + // Load checks/services/metadata. if err := a.loadServices(c); err != nil { return err @@ -2624,3 +2635,18 @@ func (a *Agent) ReloadConfig(newCfg *config.RuntimeConfig) error { return nil } + +// registerCache configures the cache and registers all the supported +// types onto the cache. This is NOT safe to call multiple times so +// care should be taken to call this exactly once after the cache +// field has been initialized. +func (a *Agent) registerCache() { + a.cache.RegisterType(cachetype.ConnectCARootName, &cachetype.ConnectCARoot{ + RPC: a.delegate, + }, &cache.RegisterOptions{ + // Maintain a blocking query, retry dropped connections quickly + Refresh: true, + RefreshTimer: 0, + RefreshTimeout: 10 * time.Minute, + }) +} diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index c1bf6fbe1..c64eb7a92 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -13,6 +13,7 @@ import ( "github.com/mitchellh/hashstructure" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/cache-types" "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/connect" @@ -885,10 +886,24 @@ func (s *HTTPServer) AgentToken(resp http.ResponseWriter, req *http.Request) (in // AgentConnectCARoots returns the trusted CA roots. func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - // NOTE(mitchellh): for now this is identical to /v1/connect/ca/roots. 
- // In the future, we're going to do some agent-local caching and the - // behavior will differ. - return s.ConnectCARoots(resp, req) + var args structs.DCSpecificRequest + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { + return nil, nil + } + + raw, err := s.agent.cache.Get(cachetype.ConnectCARootName, &args) + if err != nil { + return nil, err + } + + reply, ok := raw.(*structs.IndexedCARoots) + if !ok { + // This should never happen, but we want to protect against panics + return nil, fmt.Errorf("internal error: response type not correct") + } + defer setMeta(resp, &reply.QueryMeta) + + return *reply, nil } // AgentConnectCALeafCert returns the certificate bundle for a service diff --git a/agent/cache-types/connect_ca.go b/agent/cache-types/connect_ca.go index 85962b1fb..5b72a47a7 100644 --- a/agent/cache-types/connect_ca.go +++ b/agent/cache-types/connect_ca.go @@ -7,12 +7,15 @@ import ( "github.com/hashicorp/consul/agent/structs" ) -// TypeCARoot supports fetching the Connect CA roots. -type TypeCARoot struct { +// Recommended name for registration for ConnectCARoot +const ConnectCARootName = "connect-ca" + +// ConnectCARoot supports fetching the Connect CA roots. +type ConnectCARoot struct { RPC RPC } -func (c *TypeCARoot) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) { +func (c *ConnectCARoot) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) { var result cache.FetchResult // The request should be a DCSpecificRequest. 
diff --git a/agent/cache-types/connect_ca_test.go b/agent/cache-types/connect_ca_test.go index faf8317bd..24c37f313 100644 --- a/agent/cache-types/connect_ca_test.go +++ b/agent/cache-types/connect_ca_test.go @@ -10,11 +10,11 @@ import ( "github.com/stretchr/testify/require" ) -func TestTypeCARoot(t *testing.T) { +func TestConnectCARoot(t *testing.T) { require := require.New(t) rpc := TestRPC(t) defer rpc.AssertExpectations(t) - typ := &TypeCARoot{RPC: rpc} + typ := &ConnectCARoot{RPC: rpc} // Expect the proper RPC call. This also sets the expected value // since that is return-by-pointer in the arguments. @@ -42,11 +42,11 @@ func TestTypeCARoot(t *testing.T) { }, result) } -func TestTypeCARoot_badReqType(t *testing.T) { +func TestConnectCARoot_badReqType(t *testing.T) { require := require.New(t) rpc := TestRPC(t) defer rpc.AssertExpectations(t) - typ := &TypeCARoot{RPC: rpc} + typ := &ConnectCARoot{RPC: rpc} // Fetch _, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest( From 9e44a319d38b448588ad7c3a6d47fbd8c79f89f3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 11 Apr 2018 10:18:24 +0100 Subject: [PATCH 156/627] agent: check cache hit count to verify CA root caching, background update --- agent/agent_endpoint_test.go | 59 +++++++++++++++++++++++++++++++----- agent/cache/cache.go | 26 ++++++++++++++++ 2 files changed, 78 insertions(+), 7 deletions(-) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 32cb6ab98..2e583ec4f 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2121,32 +2121,77 @@ func TestAgentConnectCARoots_empty(t *testing.T) { func TestAgentConnectCARoots_list(t *testing.T) { t.Parallel() - assert := assert.New(t) + require := require.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() + // Grab the initial cache hit count + cacheHits := a.cache.Hits() + // Set some CAs var reply interface{} ca1 := connect.TestCA(t, nil) ca1.Active = false ca2 := connect.TestCA(t, 
nil) - assert.Nil(a.RPC("Test.ConnectCASetRoots", + require.Nil(a.RPC("Test.ConnectCASetRoots", []*structs.CARoot{ca1, ca2}, &reply)) // List req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) resp := httptest.NewRecorder() obj, err := a.srv.AgentConnectCARoots(resp, req) - assert.Nil(err) + require.Nil(err) value := obj.(structs.IndexedCARoots) - assert.Equal(value.ActiveRootID, ca2.ID) - assert.Len(value.Roots, 2) + require.Equal(value.ActiveRootID, ca2.ID) + require.Len(value.Roots, 2) // We should never have the secret information for _, r := range value.Roots { - assert.Equal("", r.SigningCert) - assert.Equal("", r.SigningKey) + require.Equal("", r.SigningCert) + require.Equal("", r.SigningKey) + } + + // That should've been a cache miss, so not hit change + require.Equal(cacheHits, a.cache.Hits()) + + // Test caching + { + // List it again + obj2, err := a.srv.AgentConnectCARoots(httptest.NewRecorder(), req) + require.Nil(err) + require.Equal(obj, obj2) + + // Should cache hit this time and not make request + require.Equal(cacheHits+1, a.cache.Hits()) + cacheHits++ + } + + // Test that caching is updated in the background + { + // Set some new CAs + var reply interface{} + ca := connect.TestCA(t, nil) + require.Nil(a.RPC("Test.ConnectCASetRoots", + []*structs.CARoot{ca}, &reply)) + + // Sleep a bit to wait for the cache to update + time.Sleep(100 * time.Millisecond) + + // List it again + obj, err := a.srv.AgentConnectCARoots(httptest.NewRecorder(), req) + require.Nil(err) + require.Equal(obj, obj) + + value := obj.(structs.IndexedCARoots) + require.Equal(value.ActiveRootID, ca.ID) + require.Len(value.Roots, 1) + + // Should be a cache hit! The data should've updated in the cache + // in the background so this should've been fetched directly from + // the cache. 
+ require.Equal(cacheHits+1, a.cache.Hits()) + cacheHits++ } } diff --git a/agent/cache/cache.go b/agent/cache/cache.go index c512476d5..a57ab8343 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -15,6 +15,7 @@ package cache import ( "fmt" "sync" + "sync/atomic" "time" ) @@ -22,6 +23,11 @@ import ( // Cache is a agent-local cache of Consul data. type Cache struct { + // Keeps track of the cache hits and misses in total. This is used by + // tests currently to verify cache behavior and is not meant for general + // analytics; for that, go-metrics emitted values are better. + hits, misses uint64 + // types stores the list of data types that the cache knows how to service. // These can be dynamically registered with RegisterType. typesLock sync.RWMutex @@ -127,6 +133,9 @@ func (c *Cache) Get(t string, r Request) (interface{}, error) { // Get the actual key for our entry key := c.entryKey(&info) + // First time through + first := true + RETRY_GET: // Get the current value c.entriesLock.RLock() @@ -139,10 +148,22 @@ RETRY_GET: // we have. if ok && entry.Valid { if info.MinIndex == 0 || info.MinIndex < entry.Index { + if first { + atomic.AddUint64(&c.hits, 1) + } + return entry.Value, nil } } + if first { + // Record the miss if its our first time through + atomic.AddUint64(&c.misses, 1) + } + + // No longer our first time through + first = false + // At this point, we know we either don't have a value at all or the // value we have is too old. We need to wait for new data. waiter, err := c.fetch(t, key, r) @@ -263,3 +284,8 @@ func (c *Cache) refresh(opts *RegisterOptions, t string, key string, r Request) // Trigger c.fetch(t, key, r) } + +// Returns the number of cache hits. Safe to call concurrently. 
+func (c *Cache) Hits() uint64 { + return atomic.LoadUint64(&c.hits) +} From e3b1c400e5474a55978d2243ec2b8aa1cecaefee Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 15 Apr 2018 22:11:04 +0200 Subject: [PATCH 157/627] agent/cache-types: got basic CA leaf caching work, major problems still --- agent/cache-types/connect_ca.go | 182 ++++++++++++++++++++++++- agent/cache-types/connect_ca_test.go | 195 +++++++++++++++++++++++++++ agent/cache-types/testing.go | 48 +++++++ 3 files changed, 422 insertions(+), 3 deletions(-) diff --git a/agent/cache-types/connect_ca.go b/agent/cache-types/connect_ca.go index 5b72a47a7..22549ed49 100644 --- a/agent/cache-types/connect_ca.go +++ b/agent/cache-types/connect_ca.go @@ -2,15 +2,27 @@ package cachetype import ( "fmt" + "sync" + "sync/atomic" + "time" "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" + + // NOTE(mitcehllh): This is temporary while certs are stubbed out. + "github.com/mitchellh/go-testing-interface" ) -// Recommended name for registration for ConnectCARoot -const ConnectCARootName = "connect-ca" +// Recommended name for registration. +const ( + ConnectCARootName = "connect-ca-root" + ConnectCALeafName = "connect-ca-leaf" +) -// ConnectCARoot supports fetching the Connect CA roots. +// ConnectCARoot supports fetching the Connect CA roots. This is a +// straightforward cache type since it only has to block on the given +// index and return the data. type ConnectCARoot struct { RPC RPC } @@ -39,3 +51,167 @@ func (c *ConnectCARoot) Fetch(opts cache.FetchOptions, req cache.Request) (cache result.Index = reply.QueryMeta.Index return result, nil } + +// ConnectCALeaf supports fetching and generating Connect leaf +// certificates. 
+type ConnectCALeaf struct { + caIndex uint64 // Current index for CA roots + + issuedCertsLock sync.RWMutex + issuedCerts map[string]*structs.IssuedCert + + RPC RPC // RPC client for remote requests + Cache *cache.Cache // Cache that has CA root certs via ConnectCARoot +} + +func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) { + var result cache.FetchResult + + // Get the correct type + reqReal, ok := req.(*ConnectCALeafRequest) + if !ok { + return result, fmt.Errorf( + "Internal cache failure: request wrong type: %T", req) + } + + // This channel watches our overall timeout. The other goroutines + // launched in this function should end all around the same time so + // they clean themselves up. + timeoutCh := time.After(opts.Timeout) + + // Kick off the goroutine that waits for new CA roots. The channel buffer + // is so that the goroutine doesn't block forever if we return for other + // reasons. + newRootCACh := make(chan error, 1) + go c.waitNewRootCA(newRootCACh, opts.Timeout) + + // Get our prior cert (if we had one) and use that to determine our + // expiration time. If no cert exists, we expire immediately since we + // need to generate. + c.issuedCertsLock.RLock() + lastCert := c.issuedCerts[reqReal.Service] + c.issuedCertsLock.RUnlock() + + var leafExpiryCh <-chan time.Time + if lastCert != nil { + // Determine how long we wait until triggering. If we've already + // expired, we trigger immediately. + if expiryDur := lastCert.ValidBefore.Sub(time.Now()); expiryDur > 0 { + leafExpiryCh = time.After(expiryDur - 1*time.Hour) + // TODO(mitchellh): 1 hour buffer is hardcoded above + } + } + + if leafExpiryCh == nil { + // If the channel is still nil then it means we need to generate + // a cert no matter what: we either don't have an existing one or + // it is expired. + leafExpiryCh = time.After(0) + } + + // Block on the events that wake us up. 
+ select { + case <-timeoutCh: + // TODO: what is the right error for a timeout? + return result, fmt.Errorf("timeout") + + case err := <-newRootCACh: + // A new root CA triggers us to refresh the leaf certificate. + // If there was an error while getting the root CA then we return. + // Otherwise, we leave the select statement and move to generation. + if err != nil { + return result, err + } + + case <-leafExpiryCh: + // The existing leaf certificate is expiring soon, so we generate a + // new cert with a healthy overlapping validity period (determined + // by the above channel). + } + + // Create a CSR. + // TODO(mitchellh): This is obviously not production ready! + csr, pk := connect.TestCSR(&testing.RuntimeT{}, &connect.SpiffeIDService{ + Host: "1234.consul", + Namespace: "default", + Datacenter: reqReal.Datacenter, + Service: reqReal.Service, + }) + + // Request signing + var reply structs.IssuedCert + args := structs.CASignRequest{CSR: csr} + if err := c.RPC.RPC("ConnectCA.Sign", &args, &reply); err != nil { + return result, err + } + reply.PrivateKeyPEM = pk + + // Lock the issued certs map so we can insert it. We only insert if + // we didn't happen to get a newer one. This should never happen since + // the Cache should ensure only one Fetch per service, but we sanity + // check just in case. + c.issuedCertsLock.Lock() + defer c.issuedCertsLock.Unlock() + lastCert = c.issuedCerts[reqReal.Service] + if lastCert == nil || lastCert.ModifyIndex < reply.ModifyIndex { + if c.issuedCerts == nil { + c.issuedCerts = make(map[string]*structs.IssuedCert) + } + c.issuedCerts[reqReal.Service] = &reply + lastCert = &reply + } + + result.Value = lastCert + result.Index = lastCert.ModifyIndex + return result, nil +} + +// waitNewRootCA blocks until a new root CA is available or the timeout is +// reached (on timeout ErrTimeout is returned on the channel). +func (c *ConnectCALeaf) waitNewRootCA(ch chan<- error, timeout time.Duration) { + // Fetch some new roots. 
This will block until our MinQueryIndex is + // matched or the timeout is reached. + rawRoots, err := c.Cache.Get(ConnectCARootName, &structs.DCSpecificRequest{ + Datacenter: "", + QueryOptions: structs.QueryOptions{ + MinQueryIndex: atomic.LoadUint64(&c.caIndex), + MaxQueryTime: timeout, + }, + }) + if err != nil { + ch <- err + return + } + + roots, ok := rawRoots.(*structs.IndexedCARoots) + if !ok { + // This should never happen but we don't want to even risk a panic + ch <- fmt.Errorf( + "internal error: CA root cache returned bad type: %T", rawRoots) + return + } + + // Set the new index + atomic.StoreUint64(&c.caIndex, roots.QueryMeta.Index) + + // Trigger the channel since we updated. + ch <- nil +} + +// ConnectCALeafRequest is the cache.Request implementation for the +// COnnectCALeaf cache type. This is implemented here and not in structs +// since this is only used for cache-related requests and not forwarded +// directly to any Consul servers. +type ConnectCALeafRequest struct { + Datacenter string + Service string // Service name, not ID + MinQueryIndex uint64 +} + +func (r *ConnectCALeafRequest) CacheInfo() cache.RequestInfo { + return cache.RequestInfo{ + Key: r.Service, + Datacenter: r.Datacenter, + MinIndex: r.MinQueryIndex, + } +} diff --git a/agent/cache-types/connect_ca_test.go b/agent/cache-types/connect_ca_test.go index 24c37f313..43953e7f8 100644 --- a/agent/cache-types/connect_ca_test.go +++ b/agent/cache-types/connect_ca_test.go @@ -1,6 +1,8 @@ package cachetype import ( + "fmt" + "sync/atomic" "testing" "time" @@ -55,3 +57,196 @@ func TestConnectCARoot_badReqType(t *testing.T) { require.Contains(err.Error(), "wrong type") } + +// Test that after an initial signing, new CA roots (new ID) will +// trigger a blocking query to execute. 
+func TestConnectCALeaf_changingRoots(t *testing.T) { + t.Parallel() + + require := require.New(t) + rpc := TestRPC(t) + defer rpc.AssertExpectations(t) + + typ, rootsCh := testCALeafType(t, rpc) + defer close(rootsCh) + rootsCh <- structs.IndexedCARoots{ + ActiveRootID: "1", + QueryMeta: structs.QueryMeta{Index: 1}, + } + + // Instrument ConnectCA.Sign to + var resp *structs.IssuedCert + var idx uint64 + rpc.On("RPC", "ConnectCA.Sign", mock.Anything, mock.Anything).Return(nil). + Run(func(args mock.Arguments) { + reply := args.Get(2).(*structs.IssuedCert) + reply.ValidBefore = time.Now().Add(12 * time.Hour) + reply.CreateIndex = atomic.AddUint64(&idx, 1) + reply.ModifyIndex = reply.CreateIndex + resp = reply + }) + + // We'll reuse the fetch options and request + opts := cache.FetchOptions{MinIndex: 0, Timeout: 10 * time.Second} + req := &ConnectCALeafRequest{Datacenter: "dc1", Service: "web"} + + // First fetch should return immediately + fetchCh := TestFetchCh(t, typ, opts, req) + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("shouldn't block waiting for fetch") + case result := <-fetchCh: + require.Equal(cache.FetchResult{ + Value: resp, + Index: 1, + }, result) + } + + // Second fetch should block with set index + fetchCh = TestFetchCh(t, typ, opts, req) + select { + case result := <-fetchCh: + t.Fatalf("should not return: %#v", result) + case <-time.After(100 * time.Millisecond): + } + + // Let's send in new roots, which should trigger the sign req + rootsCh <- structs.IndexedCARoots{ + ActiveRootID: "2", + QueryMeta: structs.QueryMeta{Index: 2}, + } + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("shouldn't block waiting for fetch") + case result := <-fetchCh: + require.Equal(cache.FetchResult{ + Value: resp, + Index: 2, + }, result) + } + + // Third fetch should block + fetchCh = TestFetchCh(t, typ, opts, req) + select { + case result := <-fetchCh: + t.Fatalf("should not return: %#v", result) + case <-time.After(100 * 
time.Millisecond): + } +} + +// Test that after an initial signing, an expiringLeaf will trigger a +// blocking query to resign. +func TestConnectCALeaf_expiringLeaf(t *testing.T) { + t.Parallel() + + require := require.New(t) + rpc := TestRPC(t) + defer rpc.AssertExpectations(t) + + typ, rootsCh := testCALeafType(t, rpc) + defer close(rootsCh) + rootsCh <- structs.IndexedCARoots{ + ActiveRootID: "1", + QueryMeta: structs.QueryMeta{Index: 1}, + } + + // Instrument ConnectCA.Sign to + var resp *structs.IssuedCert + var idx uint64 + rpc.On("RPC", "ConnectCA.Sign", mock.Anything, mock.Anything).Return(nil). + Run(func(args mock.Arguments) { + reply := args.Get(2).(*structs.IssuedCert) + reply.CreateIndex = atomic.AddUint64(&idx, 1) + reply.ModifyIndex = reply.CreateIndex + + // This sets the validity to 0 on the first call, and + // 12 hours+ on subsequent calls. This means that our first + // cert expires immediately. + reply.ValidBefore = time.Now().Add((12 * time.Hour) * + time.Duration(reply.CreateIndex-1)) + + resp = reply + }) + + // We'll reuse the fetch options and request + opts := cache.FetchOptions{MinIndex: 0, Timeout: 10 * time.Second} + req := &ConnectCALeafRequest{Datacenter: "dc1", Service: "web"} + + // First fetch should return immediately + fetchCh := TestFetchCh(t, typ, opts, req) + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("shouldn't block waiting for fetch") + case result := <-fetchCh: + require.Equal(cache.FetchResult{ + Value: resp, + Index: 1, + }, result) + } + + // Second fetch should return immediately despite there being + // no updated CA roots, because we issued an expired cert. 
+ fetchCh = TestFetchCh(t, typ, opts, req) + select { + case <-time.After(100 * time.Millisecond): + t.Fatal("shouldn't block waiting for fetch") + case result := <-fetchCh: + require.Equal(cache.FetchResult{ + Value: resp, + Index: 2, + }, result) + } + + // Third fetch should block since the cert is not expiring and + // we also didn't update CA certs. + fetchCh = TestFetchCh(t, typ, opts, req) + select { + case result := <-fetchCh: + t.Fatalf("should not return: %#v", result) + case <-time.After(100 * time.Millisecond): + } +} + +// testCALeafType returns a *ConnectCALeaf that is pre-configured to +// use the given RPC implementation for "ConnectCA.Sign" operations. +func testCALeafType(t *testing.T, rpc RPC) (*ConnectCALeaf, chan structs.IndexedCARoots) { + // This creates an RPC implementation that will block until the + // value is sent on the channel. This lets us control when the + // next values show up. + rootsCh := make(chan structs.IndexedCARoots, 10) + rootsRPC := &testGatedRootsRPC{ValueCh: rootsCh} + + // Create a cache + c := cache.TestCache(t) + c.RegisterType(ConnectCARootName, &ConnectCARoot{RPC: rootsRPC}, &cache.RegisterOptions{ + // Disable refresh so that the gated channel controls the + // request directly. Otherwise, we get background refreshes and + // it screws up the ordering of the channel reads of the + // testGatedRootsRPC implementation. + Refresh: false, + }) + + // Create the leaf type + return &ConnectCALeaf{RPC: rpc, Cache: c}, rootsCh +} + +// testGatedRootsRPC will send each subsequent value on the channel as the +// RPC response, blocking if it is waiting for a value on the channel. This +// can be used to control when background fetches are returned and what they +// return. +// +// This should be used with Refresh = false for the registration options so +// automatic refreshes don't mess up the channel read ordering. 
+type testGatedRootsRPC struct { + ValueCh chan structs.IndexedCARoots +} + +func (r *testGatedRootsRPC) RPC(method string, args interface{}, reply interface{}) error { + if method != "ConnectCA.Roots" { + return fmt.Errorf("invalid RPC method: %s", method) + } + + replyReal := reply.(*structs.IndexedCARoots) + *replyReal = <-r.ValueCh + return nil +} diff --git a/agent/cache-types/testing.go b/agent/cache-types/testing.go index bf68ec478..fcffe45a9 100644 --- a/agent/cache-types/testing.go +++ b/agent/cache-types/testing.go @@ -1,6 +1,10 @@ package cachetype import ( + "reflect" + "time" + + "github.com/hashicorp/consul/agent/cache" "github.com/mitchellh/go-testing-interface" ) @@ -10,3 +14,47 @@ func TestRPC(t testing.T) *MockRPC { // perform some initialization later. return &MockRPC{} } + +// TestFetchCh returns a channel that returns the result of the Fetch call. +// This is useful for testing timing and concurrency with Fetch calls. +// Errors will show up as an error type on the resulting channel so a +// type switch should be used. +func TestFetchCh( + t testing.T, + typ cache.Type, + opts cache.FetchOptions, + req cache.Request) <-chan interface{} { + resultCh := make(chan interface{}) + go func() { + result, err := typ.Fetch(opts, req) + if err != nil { + resultCh <- err + return + } + + resultCh <- result + }() + + return resultCh +} + +// TestFetchChResult tests that the result from TestFetchCh matches +// within a reasonable period of time (it expects it to be "immediate" but +// waits some milliseconds). 
+func TestFetchChResult(t testing.T, ch <-chan interface{}, expected interface{}) { + t.Helper() + + select { + case result := <-ch: + if err, ok := result.(error); ok { + t.Fatalf("Result was error: %s", err) + return + } + + if !reflect.DeepEqual(result, expected) { + t.Fatalf("Result doesn't match!\n\n%#v\n\n%#v", result, expected) + } + + case <-time.After(50 * time.Millisecond): + } +} From b0f70f17db2f54038d1883e62fae28a69abf3834 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 16 Apr 2018 11:31:03 +0200 Subject: [PATCH 158/627] agent/cache-types: rename to separate root and leaf cache types --- .../{connect_ca.go => connect_ca_leaf.go} | 37 +----------- ...ect_ca_test.go => connect_ca_leaf_test.go} | 46 --------------- agent/cache-types/connect_ca_root.go | 43 ++++++++++++++ agent/cache-types/connect_ca_root_test.go | 57 +++++++++++++++++++ 4 files changed, 101 insertions(+), 82 deletions(-) rename agent/cache-types/{connect_ca.go => connect_ca_leaf.go} (84%) rename agent/cache-types/{connect_ca_test.go => connect_ca_leaf_test.go} (82%) create mode 100644 agent/cache-types/connect_ca_root.go create mode 100644 agent/cache-types/connect_ca_root_test.go diff --git a/agent/cache-types/connect_ca.go b/agent/cache-types/connect_ca_leaf.go similarity index 84% rename from agent/cache-types/connect_ca.go rename to agent/cache-types/connect_ca_leaf.go index 22549ed49..d90bc19bb 100644 --- a/agent/cache-types/connect_ca.go +++ b/agent/cache-types/connect_ca_leaf.go @@ -15,42 +15,7 @@ import ( ) // Recommended name for registration. -const ( - ConnectCARootName = "connect-ca-root" - ConnectCALeafName = "connect-ca-leaf" -) - -// ConnectCARoot supports fetching the Connect CA roots. This is a -// straightforward cache type since it only has to block on the given -// index and return the data. 
-type ConnectCARoot struct { - RPC RPC -} - -func (c *ConnectCARoot) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) { - var result cache.FetchResult - - // The request should be a DCSpecificRequest. - reqReal, ok := req.(*structs.DCSpecificRequest) - if !ok { - return result, fmt.Errorf( - "Internal cache failure: request wrong type: %T", req) - } - - // Set the minimum query index to our current index so we block - reqReal.QueryOptions.MinQueryIndex = opts.MinIndex - reqReal.QueryOptions.MaxQueryTime = opts.Timeout - - // Fetch - var reply structs.IndexedCARoots - if err := c.RPC.RPC("ConnectCA.Roots", reqReal, &reply); err != nil { - return result, err - } - - result.Value = &reply - result.Index = reply.QueryMeta.Index - return result, nil -} +const ConnectCALeafName = "connect-ca-leaf" // ConnectCALeaf supports fetching and generating Connect leaf // certificates. diff --git a/agent/cache-types/connect_ca_test.go b/agent/cache-types/connect_ca_leaf_test.go similarity index 82% rename from agent/cache-types/connect_ca_test.go rename to agent/cache-types/connect_ca_leaf_test.go index 43953e7f8..0612aed21 100644 --- a/agent/cache-types/connect_ca_test.go +++ b/agent/cache-types/connect_ca_leaf_test.go @@ -12,52 +12,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestConnectCARoot(t *testing.T) { - require := require.New(t) - rpc := TestRPC(t) - defer rpc.AssertExpectations(t) - typ := &ConnectCARoot{RPC: rpc} - - // Expect the proper RPC call. This also sets the expected value - // since that is return-by-pointer in the arguments. - var resp *structs.IndexedCARoots - rpc.On("RPC", "ConnectCA.Roots", mock.Anything, mock.Anything).Return(nil). 
- Run(func(args mock.Arguments) { - req := args.Get(1).(*structs.DCSpecificRequest) - require.Equal(uint64(24), req.QueryOptions.MinQueryIndex) - require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime) - - reply := args.Get(2).(*structs.IndexedCARoots) - reply.QueryMeta.Index = 48 - resp = reply - }) - - // Fetch - result, err := typ.Fetch(cache.FetchOptions{ - MinIndex: 24, - Timeout: 1 * time.Second, - }, &structs.DCSpecificRequest{Datacenter: "dc1"}) - require.Nil(err) - require.Equal(cache.FetchResult{ - Value: resp, - Index: 48, - }, result) -} - -func TestConnectCARoot_badReqType(t *testing.T) { - require := require.New(t) - rpc := TestRPC(t) - defer rpc.AssertExpectations(t) - typ := &ConnectCARoot{RPC: rpc} - - // Fetch - _, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest( - t, cache.RequestInfo{Key: "foo", MinIndex: 64})) - require.NotNil(err) - require.Contains(err.Error(), "wrong type") - -} - // Test that after an initial signing, new CA roots (new ID) will // trigger a blocking query to execute. func TestConnectCALeaf_changingRoots(t *testing.T) { diff --git a/agent/cache-types/connect_ca_root.go b/agent/cache-types/connect_ca_root.go new file mode 100644 index 000000000..036cf53d2 --- /dev/null +++ b/agent/cache-types/connect_ca_root.go @@ -0,0 +1,43 @@ +package cachetype + +import ( + "fmt" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/structs" +) + +// Recommended name for registration. +const ConnectCARootName = "connect-ca-root" + +// ConnectCARoot supports fetching the Connect CA roots. This is a +// straightforward cache type since it only has to block on the given +// index and return the data. +type ConnectCARoot struct { + RPC RPC +} + +func (c *ConnectCARoot) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) { + var result cache.FetchResult + + // The request should be a DCSpecificRequest. 
+ reqReal, ok := req.(*structs.DCSpecificRequest) + if !ok { + return result, fmt.Errorf( + "Internal cache failure: request wrong type: %T", req) + } + + // Set the minimum query index to our current index so we block + reqReal.QueryOptions.MinQueryIndex = opts.MinIndex + reqReal.QueryOptions.MaxQueryTime = opts.Timeout + + // Fetch + var reply structs.IndexedCARoots + if err := c.RPC.RPC("ConnectCA.Roots", reqReal, &reply); err != nil { + return result, err + } + + result.Value = &reply + result.Index = reply.QueryMeta.Index + return result, nil +} diff --git a/agent/cache-types/connect_ca_root_test.go b/agent/cache-types/connect_ca_root_test.go new file mode 100644 index 000000000..24c37f313 --- /dev/null +++ b/agent/cache-types/connect_ca_root_test.go @@ -0,0 +1,57 @@ +package cachetype + +import ( + "testing" + "time" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestConnectCARoot(t *testing.T) { + require := require.New(t) + rpc := TestRPC(t) + defer rpc.AssertExpectations(t) + typ := &ConnectCARoot{RPC: rpc} + + // Expect the proper RPC call. This also sets the expected value + // since that is return-by-pointer in the arguments. + var resp *structs.IndexedCARoots + rpc.On("RPC", "ConnectCA.Roots", mock.Anything, mock.Anything).Return(nil). 
+ Run(func(args mock.Arguments) { + req := args.Get(1).(*structs.DCSpecificRequest) + require.Equal(uint64(24), req.QueryOptions.MinQueryIndex) + require.Equal(1*time.Second, req.QueryOptions.MaxQueryTime) + + reply := args.Get(2).(*structs.IndexedCARoots) + reply.QueryMeta.Index = 48 + resp = reply + }) + + // Fetch + result, err := typ.Fetch(cache.FetchOptions{ + MinIndex: 24, + Timeout: 1 * time.Second, + }, &structs.DCSpecificRequest{Datacenter: "dc1"}) + require.Nil(err) + require.Equal(cache.FetchResult{ + Value: resp, + Index: 48, + }, result) +} + +func TestConnectCARoot_badReqType(t *testing.T) { + require := require.New(t) + rpc := TestRPC(t) + defer rpc.AssertExpectations(t) + typ := &ConnectCARoot{RPC: rpc} + + // Fetch + _, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest( + t, cache.RequestInfo{Key: "foo", MinIndex: 64})) + require.NotNil(err) + require.Contains(err.Error(), "wrong type") + +} From 45095894278edaf0455eeea949d4fb54c29f5180 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 16 Apr 2018 12:06:08 +0200 Subject: [PATCH 159/627] agent/cache: support timeouts for cache reads and empty fetch results --- agent/cache/cache.go | 44 ++++++++++++++++++------ agent/cache/cache_test.go | 71 +++++++++++++++++++++++++++++++++++++++ agent/cache/request.go | 9 +++++ agent/cache/type.go | 6 ++++ 4 files changed, 120 insertions(+), 10 deletions(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index a57ab8343..a1b4570b0 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -136,6 +136,9 @@ func (c *Cache) Get(t string, r Request) (interface{}, error) { // First time through first := true + // timeoutCh for watching our tmeout + var timeoutCh <-chan time.Time + RETRY_GET: // Get the current value c.entriesLock.RLock() @@ -164,16 +167,27 @@ RETRY_GET: // No longer our first time through first = false + // Set our timeout channel if we must + if info.Timeout > 0 && timeoutCh == nil { + timeoutCh = 
time.After(info.Timeout) + } + // At this point, we know we either don't have a value at all or the // value we have is too old. We need to wait for new data. - waiter, err := c.fetch(t, key, r) + waiterCh, err := c.fetch(t, key, r) if err != nil { return nil, err } - // Wait on our waiter and then retry the cache load - <-waiter - goto RETRY_GET + select { + case <-waiterCh: + // Our fetch returned, retry the get from the cache + goto RETRY_GET + + case <-timeoutCh: + // Timeout on the cache read, just return whatever we have. + return entry.Value, nil + } } // entryKey returns the key for the entry in the cache. See the note @@ -216,16 +230,26 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { // The actual Fetch must be performed in a goroutine. go func() { // Start building the new entry by blocking on the fetch. - var newEntry cacheEntry result, err := tEntry.Type.Fetch(FetchOptions{ MinIndex: entry.Index, }, r) - newEntry.Value = result.Value - newEntry.Index = result.Index - newEntry.Error = err - // This is a valid entry with a result - newEntry.Valid = true + var newEntry cacheEntry + if result.Value == nil { + // If no value was set, then we do not change the prior entry. + // Instead, we just update the waiter to be new so that another + // Get will wait on the correct value. + newEntry = entry + newEntry.Fetching = false + } else { + // A new value was given, so we create a brand new entry. + newEntry.Value = result.Value + newEntry.Index = result.Index + newEntry.Error = err + + // This is a valid entry with a result + newEntry.Valid = true + } // Create a new waiter that will be used for the next fetch. 
newEntry.Waiter = make(chan struct{}) diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 1e75490a0..b8ca66dc4 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -194,6 +194,77 @@ func TestCacheGet_blockingIndex(t *testing.T) { TestCacheGetChResult(t, resultCh, 42) } +// Test a get with an index set will timeout if the fetch doesn't return +// anything. +func TestCacheGet_blockingIndexTimeout(t *testing.T) { + t.Parallel() + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + c.RegisterType("t", typ, nil) + + // Configure the type + triggerCh := make(chan time.Time) + typ.Static(FetchResult{Value: 1, Index: 4}, nil).Once() + typ.Static(FetchResult{Value: 12, Index: 5}, nil).Once() + typ.Static(FetchResult{Value: 42, Index: 6}, nil).WaitUntil(triggerCh) + + // Fetch should block + resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{ + Key: "hello", MinIndex: 5, Timeout: 200 * time.Millisecond})) + + // Should block + select { + case <-resultCh: + t.Fatal("should block") + case <-time.After(50 * time.Millisecond): + } + + // Should return after more of the timeout + select { + case result := <-resultCh: + require.Equal(t, 12, result) + case <-time.After(300 * time.Millisecond): + t.Fatal("should've returned") + } +} + +// Test that if a Type returns an empty value on Fetch that the previous +// value is preserved. 
+func TestCacheGet_emptyFetchResult(t *testing.T) { + t.Parallel() + + require := require.New(t) + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + c.RegisterType("t", typ, nil) + + // Configure the type + typ.Static(FetchResult{Value: 42, Index: 1}, nil).Times(1) + typ.Static(FetchResult{Value: nil}, nil) + + // Get, should fetch + req := TestRequest(t, RequestInfo{Key: "hello"}) + result, err := c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Get, should not fetch since we already have a satisfying value + req = TestRequest(t, RequestInfo{ + Key: "hello", MinIndex: 1, Timeout: 100 * time.Millisecond}) + result, err = c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Sleep a tiny bit just to let maybe some background calls happen + // then verify that we still only got the one call + time.Sleep(20 * time.Millisecond) + typ.AssertExpectations(t) +} + // Test that a type registered with a periodic refresh will perform // that refresh after the timer is up. func TestCacheGet_periodicRefresh(t *testing.T) { diff --git a/agent/cache/request.go b/agent/cache/request.go index b4a1b75d0..7beec58e8 100644 --- a/agent/cache/request.go +++ b/agent/cache/request.go @@ -1,5 +1,9 @@ package cache +import ( + "time" +) + // Request is a cache-able request. // // This interface is typically implemented by request structures in @@ -36,4 +40,9 @@ type RequestInfo struct { // to block until new data is available. If no index is available, the // default value (zero) is acceptable. MinIndex uint64 + + // Timeout is the timeout for waiting on a blocking query. When the + // timeout is reached, the last known value is returned (or maybe nil + // if there was no prior value). 
+ Timeout time.Duration } diff --git a/agent/cache/type.go b/agent/cache/type.go index 6e8edeb5f..cccb10b94 100644 --- a/agent/cache/type.go +++ b/agent/cache/type.go @@ -15,6 +15,12 @@ type Type interface { // // The return value is a FetchResult which contains information about // the fetch. + // + // On timeout, FetchResult can behave one of two ways. First, it can + // return the last known value. This is the default behavior of blocking + // RPC calls in Consul so this allows cache types to be implemented with + // no extra logic. Second, FetchResult can return an unset value and index. + // In this case, the cache will reuse the last value automatically. Fetch(FetchOptions, Request) (FetchResult, error) } From ccd7eeef1ad5f4eaf2fedbfcfd6142c1b0c7803b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 16 Apr 2018 12:25:35 +0200 Subject: [PATCH 160/627] agent/cache-types/ca-leaf: proper result for timeout, race on setting CA --- agent/cache-types/connect_ca_leaf.go | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/agent/cache-types/connect_ca_leaf.go b/agent/cache-types/connect_ca_leaf.go index d90bc19bb..70d5e3c24 100644 --- a/agent/cache-types/connect_ca_leaf.go +++ b/agent/cache-types/connect_ca_leaf.go @@ -77,8 +77,11 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache // Block on the events that wake us up. select { case <-timeoutCh: - // TODO: what is the right error for a timeout? - return result, fmt.Errorf("timeout") + // On a timeout, we just return the empty result and no error. + // It isn't an error to timeout, its just the limit of time the + // caching system wants us to block for. By returning an empty result + // the caching system will ignore. + return result, nil case err := <-newRootCACh: // A new root CA triggers us to refresh the leaf certificate. 
@@ -122,6 +125,7 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache if c.issuedCerts == nil { c.issuedCerts = make(map[string]*structs.IssuedCert) } + c.issuedCerts[reqReal.Service] = &reply lastCert = &reply } @@ -156,8 +160,24 @@ func (c *ConnectCALeaf) waitNewRootCA(ch chan<- error, timeout time.Duration) { return } - // Set the new index - atomic.StoreUint64(&c.caIndex, roots.QueryMeta.Index) + // We do a loop here because there can be multiple waitNewRootCA calls + // happening simultaneously. Each Fetch kicks off one call. These are + // multiplexed through Cache.Get which should ensure we only ever + // actually make a single RPC call. However, there is a race to set + // the caIndex field so do a basic CAS loop here. + for { + // We only set our index if its newer than what is previously set. + old := atomic.LoadUint64(&c.caIndex) + if old == roots.Index || old > roots.Index { + break + } + + // Set the new index atomically. If the caIndex value changed + // in the meantime, retry. + if atomic.CompareAndSwapUint64(&c.caIndex, old, roots.Index) { + break + } + } // Trigger the channel since we updated. 
ch <- nil From 3b6c46b7d783fd23dbb3b2b99f3f0ba8dcef9182 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 16 Apr 2018 12:28:18 +0200 Subject: [PATCH 161/627] agent/structs: DCSpecificRequest sets all the proper fields for CacheInfo --- agent/structs/structs.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/agent/structs/structs.go b/agent/structs/structs.go index 19a9c7313..d65b50639 100644 --- a/agent/structs/structs.go +++ b/agent/structs/structs.go @@ -281,7 +281,10 @@ func (r *DCSpecificRequest) RequestDatacenter() string { func (r *DCSpecificRequest) CacheInfo() cache.RequestInfo { info := cache.RequestInfo{ - MinIndex: r.QueryOptions.MinQueryIndex, + Token: r.Token, + Datacenter: r.Datacenter, + MinIndex: r.MinQueryIndex, + Timeout: r.MaxQueryTime, } // To calculate the cache key we only hash the node filters. The From 6ecc2da7ffbf5ff6f2857be0f82cec47f2ae091b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 17 Apr 2018 18:03:13 -0500 Subject: [PATCH 162/627] agent/cache: integrate go-metrics so the cache is debuggable --- agent/cache/cache.go | 44 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index a1b4570b0..db9f11a0e 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -17,6 +17,8 @@ import ( "sync" "sync/atomic" "time" + + "github.com/armon/go-metrics" ) //go:generate mockery -all -inpkg @@ -109,6 +111,9 @@ type RegisterOptions struct { } // RegisterType registers a cacheable type. +// +// This makes the type available for Get but does not automatically perform +// any prefetching. In order to populate the cache, Get must be called. 
func (c *Cache) RegisterType(n string, typ Type, opts *RegisterOptions) { c.typesLock.Lock() defer c.typesLock.Unlock() @@ -122,9 +127,18 @@ func (c *Cache) RegisterType(n string, typ Type, opts *RegisterOptions) { // // Multiple Get calls for the same Request (matching CacheKey value) will // block on a single network request. +// +// The timeout specified by the Request will be the timeout on the cache +// Get, and does not correspond to the timeout of any background data +// fetching. If the timeout is reached before data satisfying the minimum +// index is retrieved, the last known value (maybe nil) is returned. No +// error is returned on timeout. This matches the behavior of Consul blocking +// queries. func (c *Cache) Get(t string, r Request) (interface{}, error) { info := r.CacheInfo() if info.Key == "" { + metrics.IncrCounter([]string{"consul", "cache", "bypass"}, 1) + // If no key is specified, then we do not cache this request. // Pass directly through to the backend. return c.fetchDirect(t, r) @@ -152,6 +166,7 @@ RETRY_GET: if ok && entry.Valid { if info.MinIndex == 0 || info.MinIndex < entry.Index { if first { + metrics.IncrCounter([]string{"consul", "cache", t, "hit"}, 1) atomic.AddUint64(&c.hits, 1) } @@ -162,6 +177,15 @@ RETRY_GET: if first { // Record the miss if its our first time through atomic.AddUint64(&c.misses, 1) + + // We increment two different counters for cache misses depending on + // whether we're missing because we didn't have the data at all, + // or if we're missing because we're blocking on a set index. + if info.MinIndex == 0 { + metrics.IncrCounter([]string{"consul", "cache", t, "miss_new"}, 1) + } else { + metrics.IncrCounter([]string{"consul", "cache", t, "miss_block"}, 1) + } } // No longer our first time through @@ -196,6 +220,10 @@ func (c *Cache) entryKey(r *RequestInfo) string { return fmt.Sprintf("%s/%s/%s", r.Datacenter, r.Token, r.Key) } +// fetch triggers a new background fetch for the given Request. 
If a +// background fetch is already running for a matching Request, the waiter +// channel for that request is returned. The effect of this is that there +// is only ever one blocking query for any matching requests. func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { // Get the type that we're fetching c.typesLock.RLock() @@ -205,6 +233,7 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { return nil, fmt.Errorf("unknown type in cache: %s", t) } + // We acquire a write lock because we may have to set Fetching to true. c.entriesLock.Lock() defer c.entriesLock.Unlock() entry, ok := c.entries[key] @@ -226,6 +255,7 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { // perform multiple fetches. entry.Fetching = true c.entries[key] = entry + metrics.SetGauge([]string{"consul", "cache", "entries_count"}, float32(len(c.entries))) // The actual Fetch must be performed in a goroutine. go func() { @@ -234,6 +264,14 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { MinIndex: entry.Index, }, r) + if err == nil { + metrics.IncrCounter([]string{"consul", "cache", "fetch_success"}, 1) + metrics.IncrCounter([]string{"consul", "cache", t, "fetch_success"}, 1) + } else { + metrics.IncrCounter([]string{"consul", "cache", "fetch_error"}, 1) + metrics.IncrCounter([]string{"consul", "cache", t, "fetch_error"}, 1) + } + var newEntry cacheEntry if result.Value == nil { // If no value was set, then we do not change the prior entry. @@ -272,7 +310,9 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { return entry.Waiter, nil } -// fetchDirect fetches the given request with no caching. +// fetchDirect fetches the given request with no caching. Because this +// bypasses the caching entirely, multiple matching requests will result +// in multiple actual RPC calls (unlike fetch). 
func (c *Cache) fetchDirect(t string, r Request) (interface{}, error) { // Get the type that we're fetching c.typesLock.RLock() @@ -294,6 +334,8 @@ func (c *Cache) fetchDirect(t string, r Request) (interface{}, error) { return result.Value, nil } +// refresh triggers a fetch for a specific Request according to the +// registration options. func (c *Cache) refresh(opts *RegisterOptions, t string, key string, r Request) { // Sanity-check, we should not schedule anything that has refresh disabled if !opts.Refresh { From 109bb946e931885b55532961379dce3af9a96b79 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 17 Apr 2018 18:07:47 -0500 Subject: [PATCH 163/627] agent/cache: return the error as part of Get --- agent/cache/cache.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index db9f11a0e..a9727ab8e 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -170,7 +170,7 @@ RETRY_GET: atomic.AddUint64(&c.hits, 1) } - return entry.Value, nil + return entry.Value, entry.Error } } From 56774f24d0df89bc22bc1d1b0fe244f58fb71653 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 17 Apr 2018 18:18:16 -0500 Subject: [PATCH 164/627] agent/cache-types: support intention match queries --- agent/cache-types/intention_match.go | 41 ++++++++++++++++ agent/cache-types/intention_match_test.go | 57 +++++++++++++++++++++++ agent/structs/intention.go | 33 +++++++++++++ 3 files changed, 131 insertions(+) create mode 100644 agent/cache-types/intention_match.go create mode 100644 agent/cache-types/intention_match_test.go diff --git a/agent/cache-types/intention_match.go b/agent/cache-types/intention_match.go new file mode 100644 index 000000000..4c42725a1 --- /dev/null +++ b/agent/cache-types/intention_match.go @@ -0,0 +1,41 @@ +package cachetype + +import ( + "fmt" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/structs" +) + +// Recommended name for registration. 
+const IntentionMatchName = "intention-match" + +// IntentionMatch supports fetching the intentions via match queries. +type IntentionMatch struct { + RPC RPC +} + +func (c *IntentionMatch) Fetch(opts cache.FetchOptions, req cache.Request) (cache.FetchResult, error) { + var result cache.FetchResult + + // The request should be an IntentionQueryRequest. + reqReal, ok := req.(*structs.IntentionQueryRequest) + if !ok { + return result, fmt.Errorf( + "Internal cache failure: request wrong type: %T", req) + } + + // Set the minimum query index to our current index so we block + reqReal.MinQueryIndex = opts.MinIndex + reqReal.MaxQueryTime = opts.Timeout + + // Fetch + var reply structs.IndexedIntentionMatches + if err := c.RPC.RPC("Intention.Match", reqReal, &reply); err != nil { + return result, err + } + + result.Value = &reply + result.Index = reply.Index + return result, nil +} diff --git a/agent/cache-types/intention_match_test.go b/agent/cache-types/intention_match_test.go new file mode 100644 index 000000000..97b2951b3 --- /dev/null +++ b/agent/cache-types/intention_match_test.go @@ -0,0 +1,57 @@ +package cachetype + +import ( + "testing" + "time" + + "github.com/hashicorp/consul/agent/cache" + "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestIntentionMatch(t *testing.T) { + require := require.New(t) + rpc := TestRPC(t) + defer rpc.AssertExpectations(t) + typ := &IntentionMatch{RPC: rpc} + + // Expect the proper RPC call. This also sets the expected value + // since that is return-by-pointer in the arguments. + var resp *structs.IndexedIntentionMatches + rpc.On("RPC", "Intention.Match", mock.Anything, mock.Anything).Return(nil). 
+ Run(func(args mock.Arguments) { + req := args.Get(1).(*structs.IntentionQueryRequest) + require.Equal(uint64(24), req.MinQueryIndex) + require.Equal(1*time.Second, req.MaxQueryTime) + + reply := args.Get(2).(*structs.IndexedIntentionMatches) + reply.Index = 48 + resp = reply + }) + + // Fetch + result, err := typ.Fetch(cache.FetchOptions{ + MinIndex: 24, + Timeout: 1 * time.Second, + }, &structs.IntentionQueryRequest{Datacenter: "dc1"}) + require.Nil(err) + require.Equal(cache.FetchResult{ + Value: resp, + Index: 48, + }, result) +} + +func TestIntentionMatch_badReqType(t *testing.T) { + require := require.New(t) + rpc := TestRPC(t) + defer rpc.AssertExpectations(t) + typ := &IntentionMatch{RPC: rpc} + + // Fetch + _, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest( + t, cache.RequestInfo{Key: "foo", MinIndex: 64})) + require.NotNil(err) + require.Contains(err.Error(), "wrong type") + +} diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 316c9632b..6ad1a9835 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -2,10 +2,13 @@ package structs import ( "fmt" + "strconv" "strings" "time" + "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/go-multierror" + "github.com/mitchellh/hashstructure" ) const ( @@ -267,6 +270,36 @@ func (q *IntentionQueryRequest) RequestDatacenter() string { return q.Datacenter } +// cache.Request impl. +func (q *IntentionQueryRequest) CacheInfo() cache.RequestInfo { + // We only support caching Match queries, so if Match isn't set, + // then return an empty info object which will cause a pass-through + // (and likely fail). + if q.Match == nil { + return cache.RequestInfo{} + } + + info := cache.RequestInfo{ + Token: q.Token, + Datacenter: q.Datacenter, + MinIndex: q.MinQueryIndex, + Timeout: q.MaxQueryTime, + } + + // Calculate the cache key via just hashing the Match struct. 
This + // has been configured so things like ordering of entries has no + // effect (via struct tags). + v, err := hashstructure.Hash(q.Match, nil) + if err == nil { + // If there is an error, we don't set the key. A blank key forces + // no cache for this request so the request is forwarded directly + // to the server. + info.Key = strconv.FormatUint(v, 10) + } + + return info +} + // IntentionQueryMatch are the parameters for performing a match request // against the state store. type IntentionQueryMatch struct { From a1f8cb95706144bd70b12baa63901a2bf65e465e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 17 Apr 2018 18:26:58 -0500 Subject: [PATCH 165/627] agent: augment /v1/connect/authorize to cache intentions --- agent/agent.go | 9 +++++ agent/agent_endpoint.go | 10 ++++- agent/agent_endpoint_test.go | 75 ++++++++++++++++++++++++++++++++---- 3 files changed, 85 insertions(+), 9 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index b6e923ee3..610aeb64f 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2649,4 +2649,13 @@ func (a *Agent) registerCache() { RefreshTimer: 0, RefreshTimeout: 10 * time.Minute, }) + + a.cache.RegisterType(cachetype.IntentionMatchName, &cachetype.IntentionMatch{ + RPC: a.delegate, + }, &cache.RegisterOptions{ + // Maintain a blocking query, retry dropped connections quickly + Refresh: true, + RefreshTimer: 0, + RefreshTimeout: 10 * time.Minute, + }) } diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index c64eb7a92..798c370b2 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1124,10 +1124,16 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R }, } args.Token = token - var reply structs.IndexedIntentionMatches - if err := s.agent.RPC("Intention.Match", args, &reply); err != nil { + + raw, err := s.agent.cache.Get(cachetype.IntentionMatchName, args) + if err != nil { return nil, err } + + reply, ok := raw.(*structs.IndexedIntentionMatches) + if 
!ok { + return nil, fmt.Errorf("internal error: response type not correct") + } if len(reply.Matches) != 1 { return nil, fmt.Errorf("Internal error loading matches") } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 2e583ec4f..93cffa617 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2495,13 +2495,14 @@ func TestAgentConnectAuthorize_idNotService(t *testing.T) { func TestAgentConnectAuthorize_allow(t *testing.T) { t.Parallel() - assert := assert.New(t) + require := require.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() target := "db" // Create some intentions + var ixnId string { req := structs.IntentionRequest{ Datacenter: "dc1", @@ -2514,10 +2515,12 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { req.Intention.DestinationName = target req.Intention.Action = structs.IntentionActionAllow - var reply string - assert.Nil(a.RPC("Intention.Apply", &req, &reply)) + require.Nil(a.RPC("Intention.Apply", &req, &ixnId)) } + // Grab the initial cache hit count + cacheHits := a.cache.Hits() + args := &structs.ConnectAuthorizeRequest{ Target: target, ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), @@ -2525,12 +2528,70 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() respRaw, err := a.srv.AgentConnectAuthorize(resp, req) - assert.Nil(err) - assert.Equal(200, resp.Code) + require.Nil(err) + require.Equal(200, resp.Code) obj := respRaw.(*connectAuthorizeResp) - assert.True(obj.Authorized) - assert.Contains(obj.Reason, "Matched") + require.True(obj.Authorized) + require.Contains(obj.Reason, "Matched") + + // That should've been a cache miss, so not hit change + require.Equal(cacheHits, a.cache.Hits()) + + // Make the request again + { + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + 
respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + require.Nil(err) + require.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + require.True(obj.Authorized) + require.Contains(obj.Reason, "Matched") + } + + // That should've been a cache hit + require.Equal(cacheHits+1, a.cache.Hits()) + cacheHits++ + + // Change the intention + { + req := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpUpdate, + Intention: structs.TestIntention(t), + } + req.Intention.ID = ixnId + req.Intention.SourceNS = structs.IntentionDefaultNamespace + req.Intention.SourceName = "web" + req.Intention.DestinationNS = structs.IntentionDefaultNamespace + req.Intention.DestinationName = target + req.Intention.Action = structs.IntentionActionDeny + + require.Nil(a.RPC("Intention.Apply", &req, &ixnId)) + } + + // Short sleep lets the cache background refresh happen + time.Sleep(100 * time.Millisecond) + + // Make the request again + { + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + require.Nil(err) + require.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + require.False(obj.Authorized) + require.Contains(obj.Reason, "Matched") + } + + // That should've been a cache hit, too, since it updated in the + // background. 
+ require.Equal(cacheHits+1, a.cache.Hits()) + cacheHits++ } // Test when there is an intention denying the connection From e9d58ca219e55aff5d691c2a1084729154fd1228 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 17 Apr 2018 18:42:49 -0500 Subject: [PATCH 166/627] agent/cache: lots of comment/doc updates --- agent/cache/cache.go | 27 +++++++++++++++++++++++---- agent/cache/request.go | 7 +++++-- 2 files changed, 28 insertions(+), 6 deletions(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index a9727ab8e..5bf7b787d 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -7,9 +7,11 @@ // balance performance and correctness, depending on the type of data being // requested. // -// Currently, the cache package supports only continuous, blocking query -// caching. This means that the cache update is edge-triggered by Consul -// server blocking queries. +// The types of data that can be cached is configurable via the Type interface. +// This allows specialized behavior for certain types of data. Each type of +// Consul data (CA roots, leaf certs, intentions, KV, catalog, etc.) will +// have to be manually implemented. This usually is not much work, see +// the "agent/cache-types" package. package cache import ( @@ -23,7 +25,24 @@ import ( //go:generate mockery -all -inpkg -// Cache is a agent-local cache of Consul data. +// Cache is a agent-local cache of Consul data. Create a Cache using the +// New function. A zero-value Cache is not ready for usage and will result +// in a panic. +// +// The types of data to be cached must be registered via RegisterType. Then, +// calls to Get specify the type and a Request implementation. The +// implementation of Request is usually done directly on the standard RPC +// struct in agent/structs. This API makes cache usage a mostly drop-in +// replacement for non-cached RPC calls. +// +// The cache is partitioned by ACL and datacenter. 
This allows the cache +// to be safe for multi-DC queries and for queries where the data is modified +// due to ACLs all without the cache having to have any clever logic, at +// the slight expense of a less perfect cache. +// +// The Cache exposes various metrics via go-metrics. Please view the source +// searching for "metrics." to see the various metrics exposed. These can be +// used to explore the performance of the cache. type Cache struct { // Keeps track of the cache hits and misses in total. This is used by // tests currently to verify cache behavior and is not meant for general diff --git a/agent/cache/request.go b/agent/cache/request.go index 7beec58e8..7cd53df25 100644 --- a/agent/cache/request.go +++ b/agent/cache/request.go @@ -4,7 +4,7 @@ import ( "time" ) -// Request is a cache-able request. +// Request is a cacheable request. // // This interface is typically implemented by request structures in // the agent/structs package. @@ -20,6 +20,8 @@ type RequestInfo struct { // Key is a unique cache key for this request. This key should // absolutely uniquely identify this request, since any conflicting // cache keys could result in invalid data being returned from the cache. + // The Key does not need to include ACL or DC information, since the + // cache already partitions by these values prior to using this key. Key string // Token is the ACL token associated with this request. @@ -43,6 +45,7 @@ type RequestInfo struct { // Timeout is the timeout for waiting on a blocking query. When the // timeout is reached, the last known value is returned (or maybe nil - // if there was no prior value). + // if there was no prior value). This "last known value" behavior matches + // normal Consul blocking queries. 
Timeout time.Duration } From 67604503e2e0e85d007ef17a7452b27cef8d1e81 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 19 Apr 2018 08:13:57 -0700 Subject: [PATCH 167/627] Add Makefile hack for tests to run --- GNUmakefile | 1 + 1 file changed, 1 insertion(+) diff --git a/GNUmakefile b/GNUmakefile index 2c412d9e5..660a82725 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -42,6 +42,7 @@ dev: changelogfmt vendorfmt dev-build dev-build: @echo "--> TEMPORARY HACK: installing hashstructure to make CI pass until we vendor it upstream" go get github.com/mitchellh/hashstructure + go get github.com/stretchr/testify/mock @echo "--> Building consul" mkdir -p pkg/$(GOOS)_$(GOARCH)/ bin/ go install -ldflags '$(GOLDFLAGS)' -tags '$(GOTAGS)' From 257fc34e51fb33560bfaf74b335e8f24068a07bd Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 19 Apr 2018 09:19:55 -0700 Subject: [PATCH 168/627] agent/cache: on error, return from Get immediately, don't block forever --- agent/cache/cache.go | 15 +++++++++++++++ agent/cache/cache_test.go | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 5bf7b787d..d58d79729 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -193,6 +193,15 @@ RETRY_GET: } } + // If this isn't our first time through and our last value has an error, + // then we return the error. This has the behavior that we don't sit in + // a retry loop getting the same error for the entire duration of the + // timeout. Instead, we make one effort to fetch a new value, and if + // there was an error, we return. 
+ if !first && entry.Error != nil { + return entry.Value, entry.Error + } + if first { // Record the miss if its our first time through atomic.AddUint64(&c.misses, 1) @@ -308,6 +317,12 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { newEntry.Valid = true } + // If we have an error and the prior entry wasn't valid, then we + // set the error at least. + if err != nil && !newEntry.Valid { + newEntry.Error = err + } + // Create a new waiter that will be used for the next fetch. newEntry.Waiter = make(chan struct{}) diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index b8ca66dc4..e5db006e6 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -42,6 +42,38 @@ func TestCacheGet_noIndex(t *testing.T) { typ.AssertExpectations(t) } +// Test a basic Get with no index and a failed fetch. +func TestCacheGet_initError(t *testing.T) { + t.Parallel() + + require := require.New(t) + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + c.RegisterType("t", typ, nil) + + // Configure the type + fetcherr := fmt.Errorf("error") + typ.Static(FetchResult{}, fetcherr).Times(2) + + // Get, should fetch + req := TestRequest(t, RequestInfo{Key: "hello"}) + result, err := c.Get("t", req) + require.Error(err) + require.Nil(result) + + // Get, should fetch again since our last fetch was an error + result, err = c.Get("t", req) + require.Error(err) + require.Nil(result) + + // Sleep a tiny bit just to let maybe some background calls happen + // then verify that we still only got the one call + time.Sleep(20 * time.Millisecond) + typ.AssertExpectations(t) +} + // Test a Get with a request that returns a blank cache key. This should // force a backend request and skip the cache entirely. 
func TestCacheGet_blankCacheKey(t *testing.T) { From 3c6acbda5d9d02353ea385a9a77a4abb305a9f86 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 19 Apr 2018 11:36:14 -0700 Subject: [PATCH 169/627] agent/cache: send the RefreshTimeout into the backend fetch --- agent/cache/cache.go | 7 ++++++- agent/cache/cache_test.go | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index d58d79729..9296a5fb1 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -134,6 +134,10 @@ type RegisterOptions struct { // This makes the type available for Get but does not automatically perform // any prefetching. In order to populate the cache, Get must be called. func (c *Cache) RegisterType(n string, typ Type, opts *RegisterOptions) { + if opts == nil { + opts = &RegisterOptions{} + } + c.typesLock.Lock() defer c.typesLock.Unlock() c.types[n] = typeEntry{Type: typ, Opts: opts} @@ -290,6 +294,7 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { // Start building the new entry by blocking on the fetch. result, err := tEntry.Type.Fetch(FetchOptions{ MinIndex: entry.Index, + Timeout: tEntry.Opts.RefreshTimeout, }, r) if err == nil { @@ -336,7 +341,7 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { // If refresh is enabled, run the refresh in due time. The refresh // below might block, but saves us from spawning another goroutine. - if tEntry.Opts != nil && tEntry.Opts.Refresh { + if tEntry.Opts.Refresh { c.refresh(tEntry.Opts, t, key, r) } }() diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index e5db006e6..49edc6e28 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -336,6 +336,39 @@ func TestCacheGet_periodicRefresh(t *testing.T) { TestCacheGetChResult(t, resultCh, 12) } +// Test that the backend fetch sets the proper timeout. 
+func TestCacheGet_fetchTimeout(t *testing.T) { + t.Parallel() + + require := require.New(t) + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + + // Register the type with a timeout + timeout := 10 * time.Minute + c.RegisterType("t", typ, &RegisterOptions{ + RefreshTimeout: timeout, + }) + + // Configure the type + var actual time.Duration + typ.Static(FetchResult{Value: 42}, nil).Times(1).Run(func(args mock.Arguments) { + opts := args.Get(0).(FetchOptions) + actual = opts.Timeout + }) + + // Get, should fetch + req := TestRequest(t, RequestInfo{Key: "hello"}) + result, err := c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Test the timeout + require.Equal(timeout, actual) +} + // Test that Get partitions the caches based on DC so two equivalent requests // to different datacenters are automatically cached even if their keys are // the same. From 449bbd817df74d62a6a7cdaaabbef4023af981f3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 19 Apr 2018 17:31:50 -0700 Subject: [PATCH 170/627] agent/cache: initial TTL work --- agent/cache/cache.go | 155 ++++++++++++++++++++++++++++++-------- agent/cache/cache_test.go | 45 +++++++++++ agent/cache/entry.go | 103 +++++++++++++++++++++++++ agent/cache/entry_test.go | 10 +++ 4 files changed, 280 insertions(+), 33 deletions(-) create mode 100644 agent/cache/entry.go create mode 100644 agent/cache/entry_test.go diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 9296a5fb1..0d332a21e 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -15,6 +15,7 @@ package cache import ( + "container/heap" "fmt" "sync" "sync/atomic" @@ -54,7 +55,11 @@ type Cache struct { typesLock sync.RWMutex types map[string]typeEntry - // entries contains the actual cache data. + // entries contains the actual cache data. Access to entries and + // entriesExpiryHeap must be protected by entriesLock. 
+ // + // entriesExpiryHeap is a heap of *cacheEntry values ordered by + // expiry, with the soonest to expire being first in the list (index 0). // // NOTE(mitchellh): The entry map key is currently a string in the format // of "//" in order to properly partition @@ -62,21 +67,9 @@ type Cache struct { // big drawbacks: we can't evict by datacenter, ACL token, etc. For an // initial implementaiton this works and the tests are agnostic to the // internal storage format so changing this should be possible safely. - entriesLock sync.RWMutex - entries map[string]cacheEntry -} - -// cacheEntry stores a single cache entry. -type cacheEntry struct { - // Fields pertaining to the actual value - Value interface{} - Error error - Index uint64 - - // Metadata that is used for internal accounting - Valid bool - Fetching bool - Waiter chan struct{} + entriesLock sync.RWMutex + entries map[string]cacheEntry + entriesExpiryHeap *expiryHeap } // typeEntry is a single type that is registered with a Cache. @@ -93,16 +86,34 @@ type Options struct { // New creates a new cache with the given RPC client and reasonable defaults. // Further settings can be tweaked on the returned value. func New(*Options) *Cache { - return &Cache{ - entries: make(map[string]cacheEntry), - types: make(map[string]typeEntry), + // Initialize the heap. The buffer of 1 is really important because + // its possible for the expiry loop to trigger the heap to update + // itself and it'd block forever otherwise. + h := &expiryHeap{NotifyCh: make(chan struct{}, 1)} + heap.Init(h) + + c := &Cache{ + types: make(map[string]typeEntry), + entries: make(map[string]cacheEntry), + entriesExpiryHeap: h, } + + // Start the expiry watcher + go c.runExpiryLoop() + + return c } // RegisterOptions are options that can be associated with a type being // registered for the cache. This changes the behavior of the cache for // this type. 
type RegisterOptions struct { + // LastGetTTL is the time that the values returned by this type remain + // in the cache after the last get operation. If a value isn't accessed + // within this duration, the value is purged from the cache and + // background refreshing will cease. + LastGetTTL time.Duration + // Refresh configures whether the data is actively refreshed or if // the data is only refreshed on an explicit Get. The default (false) // is to only request data on explicit Get. @@ -137,6 +148,9 @@ func (c *Cache) RegisterType(n string, typ Type, opts *RegisterOptions) { if opts == nil { opts = &RegisterOptions{} } + if opts.LastGetTTL == 0 { + opts.LastGetTTL = 72 * time.Hour // reasonable default is days + } c.typesLock.Lock() defer c.typesLock.Unlock() @@ -193,6 +207,12 @@ RETRY_GET: atomic.AddUint64(&c.hits, 1) } + // Touch the expiration and fix the heap + entry.ResetExpires() + c.entriesLock.Lock() + heap.Fix(c.entriesExpiryHeap, *entry.ExpiryHeapIndex) + c.entriesLock.Unlock() + return entry.Value, entry.Error } } @@ -230,7 +250,7 @@ RETRY_GET: // At this point, we know we either don't have a value at all or the // value we have is too old. We need to wait for new data. - waiterCh, err := c.fetch(t, key, r) + waiterCh, err := c.fetch(t, key, r, true) if err != nil { return nil, err } @@ -256,7 +276,11 @@ func (c *Cache) entryKey(r *RequestInfo) string { // background fetch is already running for a matching Request, the waiter // channel for that request is returned. The effect of this is that there // is only ever one blocking query for any matching requests. -func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { +// +// If allowNew is true then the fetch should create the cache entry +// if it doesn't exist. If this is false, then fetch will do nothing +// if the entry doesn't exist. This latter case is to support refreshing. 
+func (c *Cache) fetch(t, key string, r Request, allowNew bool) (<-chan struct{}, error) { // Get the type that we're fetching c.typesLock.RLock() tEntry, ok := c.types[t] @@ -270,6 +294,15 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { defer c.entriesLock.Unlock() entry, ok := c.entries[key] + // If we aren't allowing new values and we don't have an existing value, + // return immediately. We return an immediately-closed channel so nothing + // blocks. + if !ok && !allowNew { + ch := make(chan struct{}) + close(ch) + return ch, nil + } + // If we already have an entry and it is actively fetching, then return // the currently active waiter. if ok && entry.Fetching { @@ -305,14 +338,10 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { metrics.IncrCounter([]string{"consul", "cache", t, "fetch_error"}, 1) } - var newEntry cacheEntry - if result.Value == nil { - // If no value was set, then we do not change the prior entry. - // Instead, we just update the waiter to be new so that another - // Get will wait on the correct value. - newEntry = entry - newEntry.Fetching = false - } else { + // Copy the existing entry to start. + newEntry := entry + newEntry.Fetching = false + if result.Value != nil { // A new value was given, so we create a brand new entry. newEntry.Value = result.Value newEntry.Index = result.Index @@ -331,12 +360,33 @@ func (c *Cache) fetch(t, key string, r Request) (<-chan struct{}, error) { // Create a new waiter that will be used for the next fetch. newEntry.Waiter = make(chan struct{}) - // Insert + // The key needs to always be set since this is used by the + // expiration loop to know what entry to delete. + newEntry.Key = key + + // If this is a new entry (not in the heap yet), then set the + // initial expiration TTL. 
+ if newEntry.ExpiryHeapIndex == nil { + newEntry.ExpiresTTL = tEntry.Opts.LastGetTTL + newEntry.ResetExpires() + } + + // Set our entry c.entriesLock.Lock() + if newEntry.ExpiryHeapIndex != nil { + // If we're already in the heap, just change the value in-place. + // We don't need to call heap.Fix because the expiry doesn't + // change. + c.entriesExpiryHeap.Entries[*newEntry.ExpiryHeapIndex] = &newEntry + } else { + // Add the new value + newEntry.ExpiryHeapIndex = new(int) + heap.Push(c.entriesExpiryHeap, &newEntry) + } c.entries[key] = newEntry c.entriesLock.Unlock() - // Trigger the waiter + // Trigger the old waiter close(entry.Waiter) // If refresh is enabled, run the refresh in due time. The refresh @@ -386,8 +436,47 @@ func (c *Cache) refresh(opts *RegisterOptions, t string, key string, r Request) time.Sleep(opts.RefreshTimer) } - // Trigger - c.fetch(t, key, r) + // Trigger. The "allowNew" field is false because in the time we were + // waiting to refresh we may have expired and got evicted. If that + // happened, we don't want to create a new entry. + c.fetch(t, key, r, false) +} + +// runExpiryLoop is a blocking function that watches the expiration +// heap and invalidates entries that have expired. +func (c *Cache) runExpiryLoop() { + var expiryTimer *time.Timer + for { + // If we have a previous timer, stop it. + if expiryTimer != nil { + expiryTimer.Stop() + } + + // Get the entry expiring soonest + var entry *cacheEntry + var expiryCh <-chan time.Time + c.entriesLock.RLock() + if len(c.entriesExpiryHeap.Entries) > 0 { + entry = c.entriesExpiryHeap.Entries[0] + expiryTimer = time.NewTimer(entry.Expires().Sub(time.Now())) + expiryCh = expiryTimer.C + } + c.entriesLock.RUnlock() + + select { + case <-c.entriesExpiryHeap.NotifyCh: + // Entries changed, so the heap may have changed. Restart loop. + + case <-expiryCh: + // Entry expired! Remove it. 
+ c.entriesLock.Lock() + delete(c.entries, entry.Key) + heap.Remove(c.entriesExpiryHeap, *entry.ExpiryHeapIndex) + c.entriesLock.Unlock() + + metrics.IncrCounter([]string{"consul", "cache", "evict_expired"}, 1) + } + } } // Returns the number of cache hits. Safe to call concurrently. diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 49edc6e28..7ac8213f3 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -369,6 +369,51 @@ func TestCacheGet_fetchTimeout(t *testing.T) { require.Equal(timeout, actual) } +// Test that entries expire +func TestCacheGet_expire(t *testing.T) { + t.Parallel() + + require := require.New(t) + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + + // Register the type with a timeout + c.RegisterType("t", typ, &RegisterOptions{ + LastGetTTL: 400 * time.Millisecond, + }) + + // Configure the type + typ.Static(FetchResult{Value: 42}, nil).Times(2) + + // Get, should fetch + req := TestRequest(t, RequestInfo{Key: "hello"}) + result, err := c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Get, should not fetch + req = TestRequest(t, RequestInfo{Key: "hello"}) + result, err = c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Sleep for the expiry + time.Sleep(500 * time.Millisecond) + + // Get, should fetch + req = TestRequest(t, RequestInfo{Key: "hello"}) + result, err = c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Sleep a tiny bit just to let maybe some background calls happen + // then verify that we still only got the one call + time.Sleep(20 * time.Millisecond) + typ.AssertExpectations(t) +} + // Test that Get partitions the caches based on DC so two equivalent requests // to different datacenters are automatically cached even if their keys are // the same. 
diff --git a/agent/cache/entry.go b/agent/cache/entry.go new file mode 100644 index 000000000..99636be6f --- /dev/null +++ b/agent/cache/entry.go @@ -0,0 +1,103 @@ +package cache + +import ( + "sync/atomic" + "time" +) + +// cacheEntry stores a single cache entry. +// +// Note that this isn't a very optimized structure currently. There are +// a lot of improvements that can be made here in the long term. +type cacheEntry struct { + // Fields pertaining to the actual value + Key string + Value interface{} + Error error + Index uint64 + + // Metadata that is used for internal accounting + Valid bool // True if the Value is set + Fetching bool // True if a fetch is already active + Waiter chan struct{} // Closed when this entry is invalidated + + // ExpiresRaw is the time.Time that this value expires. The time.Time + // is immune to wall clock changes since we only use APIs that + // operate on the monotonic value. The value is in an atomic.Value + // so we have an efficient way to "touch" the value while maybe being + // read without introducing complex locking. + ExpiresRaw atomic.Value + ExpiresTTL time.Duration + ExpiryHeapIndex *int +} + +// Expires is the time that this entry expires. The time.Time value returned +// has the monotonic clock preserved and should be used only with +// monotonic-safe operations to prevent wall clock changes affecting +// cache behavior. +func (e *cacheEntry) Expires() time.Time { + return e.ExpiresRaw.Load().(time.Time) +} + +// ResetExpires resets the expiration to be the ttl duration from now. +func (e *cacheEntry) ResetExpires() { + e.ExpiresRaw.Store(time.Now().Add(e.ExpiresTTL)) +} + +// expiryHeap is a heap implementation that stores information about +// when entires expire. Implements container/heap.Interface. +// +// All operations on the heap and read/write of the heap contents require +// the proper entriesLock to be held on Cache. 
+type expiryHeap struct { + Entries []*cacheEntry + + // NotifyCh is sent a value whenever the 0 index value of the heap + // changes. This can be used to detect when the earliest value + // changes. + NotifyCh chan struct{} +} + +func (h *expiryHeap) Len() int { return len(h.Entries) } + +func (h *expiryHeap) Swap(i, j int) { + h.Entries[i], h.Entries[j] = h.Entries[j], h.Entries[i] + *h.Entries[i].ExpiryHeapIndex = i + *h.Entries[j].ExpiryHeapIndex = j + + // If we're moving the 0 index, update the channel since we need + // to re-update the timer we're waiting on for the soonest expiring + // value. + if i == 0 || j == 0 { + h.NotifyCh <- struct{}{} + } +} + +func (h *expiryHeap) Less(i, j int) bool { + // The usage of Before here is important (despite being obvious): + // this function uses the monotonic time that should be available + // on the time.Time value so the heap is immune to wall clock changes. + return h.Entries[i].Expires().Before(h.Entries[j].Expires()) +} + +func (h *expiryHeap) Push(x interface{}) { + entry := x.(*cacheEntry) + + // For the first entry, we need to trigger a channel send because + // Swap won't be called; nothing to swap! We can call it right away + // because all heap operations are within a lock. 
+ if len(h.Entries) == 0 { + *entry.ExpiryHeapIndex = 0 // Set correct initial index + h.NotifyCh <- struct{}{} + } + + h.Entries = append(h.Entries, entry) +} + +func (h *expiryHeap) Pop() interface{} { + old := h.Entries + n := len(old) + x := old[n-1] + h.Entries = old[0 : n-1] + return x +} diff --git a/agent/cache/entry_test.go b/agent/cache/entry_test.go new file mode 100644 index 000000000..0ebf0682d --- /dev/null +++ b/agent/cache/entry_test.go @@ -0,0 +1,10 @@ +package cache + +import ( + "container/heap" + "testing" +) + +func TestExpiryHeap_impl(t *testing.T) { + var _ heap.Interface = new(expiryHeap) +} From b319d06276716401fba6d7aa867f33ea9df9025d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 19 Apr 2018 18:28:01 -0700 Subject: [PATCH 171/627] agent/cache: rework how expiry data is stored to be more efficient --- agent/cache/cache.go | 59 +++++++++++++++++++++------------------ agent/cache/cache_test.go | 51 +++++++++++++++++++++++++++++++++ agent/cache/entry.go | 50 ++++++++++++++++----------------- 3 files changed, 108 insertions(+), 52 deletions(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 0d332a21e..1d9b732b8 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -207,10 +207,16 @@ RETRY_GET: atomic.AddUint64(&c.hits, 1) } - // Touch the expiration and fix the heap - entry.ResetExpires() + // Touch the expiration and fix the heap. c.entriesLock.Lock() - heap.Fix(c.entriesExpiryHeap, *entry.ExpiryHeapIndex) + entry.Expiry.Reset() + idx := entry.Expiry.HeapIndex + heap.Fix(c.entriesExpiryHeap, entry.Expiry.HeapIndex) + if idx == 0 && entry.Expiry.HeapIndex == 0 { + // We didn't move and we were at the head of the heap. + // We need to let the loop know that the value changed. 
+ c.entriesExpiryHeap.Notify() + } c.entriesLock.Unlock() return entry.Value, entry.Error @@ -360,29 +366,21 @@ func (c *Cache) fetch(t, key string, r Request, allowNew bool) (<-chan struct{}, // Create a new waiter that will be used for the next fetch. newEntry.Waiter = make(chan struct{}) - // The key needs to always be set since this is used by the - // expiration loop to know what entry to delete. - newEntry.Key = key - - // If this is a new entry (not in the heap yet), then set the - // initial expiration TTL. - if newEntry.ExpiryHeapIndex == nil { - newEntry.ExpiresTTL = tEntry.Opts.LastGetTTL - newEntry.ResetExpires() - } - // Set our entry c.entriesLock.Lock() - if newEntry.ExpiryHeapIndex != nil { - // If we're already in the heap, just change the value in-place. - // We don't need to call heap.Fix because the expiry doesn't - // change. - c.entriesExpiryHeap.Entries[*newEntry.ExpiryHeapIndex] = &newEntry - } else { - // Add the new value - newEntry.ExpiryHeapIndex = new(int) - heap.Push(c.entriesExpiryHeap, &newEntry) + + // If this is a new entry (not in the heap yet), then setup the + // initial expiry information and insert. If we're already in + // the heap we do nothing since we're reusing the same entry. 
+ if newEntry.Expiry == nil || newEntry.Expiry.HeapIndex == -1 { + newEntry.Expiry = &cacheEntryExpiry{ + Key: key, + TTL: tEntry.Opts.LastGetTTL, + } + newEntry.Expiry.Reset() + heap.Push(c.entriesExpiryHeap, newEntry.Expiry) } + c.entries[key] = newEntry c.entriesLock.Unlock() @@ -453,12 +451,12 @@ func (c *Cache) runExpiryLoop() { } // Get the entry expiring soonest - var entry *cacheEntry + var entry *cacheEntryExpiry var expiryCh <-chan time.Time c.entriesLock.RLock() if len(c.entriesExpiryHeap.Entries) > 0 { entry = c.entriesExpiryHeap.Entries[0] - expiryTimer = time.NewTimer(entry.Expires().Sub(time.Now())) + expiryTimer = time.NewTimer(entry.Expires.Sub(time.Now())) expiryCh = expiryTimer.C } c.entriesLock.RUnlock() @@ -468,10 +466,17 @@ func (c *Cache) runExpiryLoop() { // Entries changed, so the heap may have changed. Restart loop. case <-expiryCh: - // Entry expired! Remove it. c.entriesLock.Lock() + + // Entry expired! Remove it. delete(c.entries, entry.Key) - heap.Remove(c.entriesExpiryHeap, *entry.ExpiryHeapIndex) + heap.Remove(c.entriesExpiryHeap, entry.HeapIndex) + + // This is subtle but important: if we race and simultaneously + // evict and fetch a new value, then we set this to -1 to + // have it treated as a new value so that the TTL is extended. 
+ entry.HeapIndex = -1 + c.entriesLock.Unlock() metrics.IncrCounter([]string{"consul", "cache", "evict_expired"}, 1) diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 7ac8213f3..82bdf7814 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -414,6 +414,57 @@ func TestCacheGet_expire(t *testing.T) { typ.AssertExpectations(t) } +// Test that entries reset their TTL on Get +func TestCacheGet_expireResetGet(t *testing.T) { + t.Parallel() + + require := require.New(t) + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + + // Register the type with a timeout + c.RegisterType("t", typ, &RegisterOptions{ + LastGetTTL: 150 * time.Millisecond, + }) + + // Configure the type + typ.Static(FetchResult{Value: 42}, nil).Times(2) + + // Get, should fetch + req := TestRequest(t, RequestInfo{Key: "hello"}) + result, err := c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Fetch multiple times, where the total time is well beyond + // the TTL. We should not trigger any fetches during this time. + for i := 0; i < 5; i++ { + // Sleep a bit + time.Sleep(50 * time.Millisecond) + + // Get, should not fetch + req = TestRequest(t, RequestInfo{Key: "hello"}) + result, err = c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + } + + time.Sleep(200 * time.Millisecond) + + // Get, should fetch + req = TestRequest(t, RequestInfo{Key: "hello"}) + result, err = c.Get("t", req) + require.Nil(err) + require.Equal(42, result) + + // Sleep a tiny bit just to let maybe some background calls happen + // then verify that we still only got the one call + time.Sleep(20 * time.Millisecond) + typ.AssertExpectations(t) +} + // Test that Get partitions the caches based on DC so two equivalent requests // to different datacenters are automatically cached even if their keys are // the same. 
diff --git a/agent/cache/entry.go b/agent/cache/entry.go index 99636be6f..b86f80ea8 100644 --- a/agent/cache/entry.go +++ b/agent/cache/entry.go @@ -1,7 +1,6 @@ package cache import ( - "sync/atomic" "time" ) @@ -11,7 +10,6 @@ import ( // a lot of improvements that can be made here in the long term. type cacheEntry struct { // Fields pertaining to the actual value - Key string Value interface{} Error error Index uint64 @@ -21,27 +19,25 @@ type cacheEntry struct { Fetching bool // True if a fetch is already active Waiter chan struct{} // Closed when this entry is invalidated - // ExpiresRaw is the time.Time that this value expires. The time.Time - // is immune to wall clock changes since we only use APIs that - // operate on the monotonic value. The value is in an atomic.Value - // so we have an efficient way to "touch" the value while maybe being - // read without introducing complex locking. - ExpiresRaw atomic.Value - ExpiresTTL time.Duration - ExpiryHeapIndex *int + // Expiry contains information about the expiration of this + // entry. This is a pointer as its shared as a value in the + // expiryHeap as well. + Expiry *cacheEntryExpiry } -// Expires is the time that this entry expires. The time.Time value returned -// has the monotonic clock preserved and should be used only with -// monotonic-safe operations to prevent wall clock changes affecting -// cache behavior. -func (e *cacheEntry) Expires() time.Time { - return e.ExpiresRaw.Load().(time.Time) +// cacheEntryExpiry contains the expiration information for a cache +// entry. Any modifications to this struct should be done only while +// the Cache entriesLock is held. +type cacheEntryExpiry struct { + Key string // Key in the cache map + Expires time.Time // Time when entry expires (monotonic clock) + TTL time.Duration // TTL for this entry to extend when resetting + HeapIndex int // Index in the heap } -// ResetExpires resets the expiration to be the ttl duration from now. 
-func (e *cacheEntry) ResetExpires() { - e.ExpiresRaw.Store(time.Now().Add(e.ExpiresTTL)) +// Reset resets the expiration to be the ttl duration from now. +func (e *cacheEntryExpiry) Reset() { + e.Expires = time.Now().Add(e.TTL) } // expiryHeap is a heap implementation that stores information about @@ -50,7 +46,7 @@ func (e *cacheEntry) ResetExpires() { // All operations on the heap and read/write of the heap contents require // the proper entriesLock to be held on Cache. type expiryHeap struct { - Entries []*cacheEntry + Entries []*cacheEntryExpiry // NotifyCh is sent a value whenever the 0 index value of the heap // changes. This can be used to detect when the earliest value @@ -62,8 +58,8 @@ func (h *expiryHeap) Len() int { return len(h.Entries) } func (h *expiryHeap) Swap(i, j int) { h.Entries[i], h.Entries[j] = h.Entries[j], h.Entries[i] - *h.Entries[i].ExpiryHeapIndex = i - *h.Entries[j].ExpiryHeapIndex = j + h.Entries[i].HeapIndex = i + h.Entries[j].HeapIndex = j // If we're moving the 0 index, update the channel since we need // to re-update the timer we're waiting on for the soonest expiring @@ -77,17 +73,17 @@ func (h *expiryHeap) Less(i, j int) bool { // The usage of Before here is important (despite being obvious): // this function uses the monotonic time that should be available // on the time.Time value so the heap is immune to wall clock changes. - return h.Entries[i].Expires().Before(h.Entries[j].Expires()) + return h.Entries[i].Expires.Before(h.Entries[j].Expires) } func (h *expiryHeap) Push(x interface{}) { - entry := x.(*cacheEntry) + entry := x.(*cacheEntryExpiry) // For the first entry, we need to trigger a channel send because // Swap won't be called; nothing to swap! We can call it right away // because all heap operations are within a lock. 
if len(h.Entries) == 0 { - *entry.ExpiryHeapIndex = 0 // Set correct initial index + entry.HeapIndex = 0 // Set correct initial index h.NotifyCh <- struct{}{} } @@ -101,3 +97,7 @@ func (h *expiryHeap) Pop() interface{} { h.Entries = old[0 : n-1] return x } + +func (h *expiryHeap) Notify() { + h.NotifyCh <- struct{}{} +} From ec559d77bd193eeca9885ca6628e05ac44b60d20 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 19 Apr 2018 18:35:10 -0700 Subject: [PATCH 172/627] agent/cache: make edge case with prev/next idx == 0 handled better --- agent/cache/cache.go | 8 +------- agent/cache/entry.go | 30 ++++++++++++++++++++++++++---- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 1d9b732b8..759d2bc1d 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -210,13 +210,7 @@ RETRY_GET: // Touch the expiration and fix the heap. c.entriesLock.Lock() entry.Expiry.Reset() - idx := entry.Expiry.HeapIndex - heap.Fix(c.entriesExpiryHeap, entry.Expiry.HeapIndex) - if idx == 0 && entry.Expiry.HeapIndex == 0 { - // We didn't move and we were at the head of the heap. - // We need to let the loop know that the value changed. - c.entriesExpiryHeap.Notify() - } + c.entriesExpiryHeap.Fix(entry.Expiry) c.entriesLock.Unlock() return entry.Value, entry.Error diff --git a/agent/cache/entry.go b/agent/cache/entry.go index b86f80ea8..8174d3f12 100644 --- a/agent/cache/entry.go +++ b/agent/cache/entry.go @@ -1,6 +1,7 @@ package cache import ( + "container/heap" "time" ) @@ -51,9 +52,34 @@ type expiryHeap struct { // NotifyCh is sent a value whenever the 0 index value of the heap // changes. This can be used to detect when the earliest value // changes. + // + // There is a single edge case where the heap will not automatically + // send a notification: if heap.Fix is called manually and the index + // changed is 0 and the change doesn't result in any moves (stays at index + // 0), then we won't detect the change. 
To work around this, please + // always call the expiryHeap.Fix method instead. NotifyCh chan struct{} } +// Identical to heap.Fix for this heap instance but will properly handle +// the edge case where idx == 0 and no heap modification is necessary, +// and still notify the NotifyCh. +// +// This is important for cache expiry since the expiry time may have been +// extended and if we don't send a message to the NotifyCh then we'll never +// reset the timer and the entry will be evicted early. +func (h *expiryHeap) Fix(entry *cacheEntryExpiry) { + idx := entry.HeapIndex + heap.Fix(h, idx) + + // This is the edge case we handle: if the prev and current index + // is zero, it means the head-of-line didn't change while the value + // changed. Notify to reset our expiry worker. + if idx == 0 && entry.HeapIndex == 0 { + h.NotifyCh <- struct{}{} + } +} + func (h *expiryHeap) Len() int { return len(h.Entries) } func (h *expiryHeap) Swap(i, j int) { @@ -97,7 +123,3 @@ func (h *expiryHeap) Pop() interface{} { h.Entries = old[0 : n-1] return x } - -func (h *expiryHeap) Notify() { - h.NotifyCh <- struct{}{} -} From 1c31e34e5b5cf86b815dbc3c907f5a883c558227 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 19 Apr 2018 18:40:12 -0700 Subject: [PATCH 173/627] agent/cache: send the total entries count on eviction to go-metrics --- agent/cache/cache.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 759d2bc1d..a5aa575d8 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -471,9 +471,11 @@ func (c *Cache) runExpiryLoop() { // have it treated as a new value so that the TTL is extended. 
entry.HeapIndex = -1 - c.entriesLock.Unlock() - + // Set some metrics metrics.IncrCounter([]string{"consul", "cache", "evict_expired"}, 1) + metrics.SetGauge([]string{"consul", "cache", "entries_count"}, float32(len(c.entries))) + + c.entriesLock.Unlock() } } } From 3f80a9f330ca68cbc7094fc5454624b9c7acff6f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 20 Apr 2018 10:20:39 -0700 Subject: [PATCH 174/627] agent/cache: unit tests for ExpiryHeap, found a bug! --- agent/cache/entry.go | 5 ++- agent/cache/entry_test.go | 81 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 1 deletion(-) diff --git a/agent/cache/entry.go b/agent/cache/entry.go index 8174d3f12..f651e6421 100644 --- a/agent/cache/entry.go +++ b/agent/cache/entry.go @@ -105,11 +105,14 @@ func (h *expiryHeap) Less(i, j int) bool { func (h *expiryHeap) Push(x interface{}) { entry := x.(*cacheEntryExpiry) + // Set initial heap index, if we're going to the end then Swap + // won't be called so we need to initialize + entry.HeapIndex = len(h.Entries) + // For the first entry, we need to trigger a channel send because // Swap won't be called; nothing to swap! We can call it right away // because all heap operations are within a lock. 
if len(h.Entries) == 0 { - entry.HeapIndex = 0 // Set correct initial index h.NotifyCh <- struct{}{} } diff --git a/agent/cache/entry_test.go b/agent/cache/entry_test.go index 0ebf0682d..fe4073363 100644 --- a/agent/cache/entry_test.go +++ b/agent/cache/entry_test.go @@ -3,8 +3,89 @@ package cache import ( "container/heap" "testing" + "time" + + "github.com/stretchr/testify/require" ) func TestExpiryHeap_impl(t *testing.T) { var _ heap.Interface = new(expiryHeap) } + +func TestExpiryHeap(t *testing.T) { + require := require.New(t) + now := time.Now() + ch := make(chan struct{}, 10) // buffered to prevent blocking in tests + h := &expiryHeap{NotifyCh: ch} + + // Init, shouldn't trigger anything + heap.Init(h) + testNoMessage(t, ch) + + // Push an initial value, expect one message + entry := &cacheEntryExpiry{Key: "foo", HeapIndex: -1, Expires: now.Add(100)} + heap.Push(h, entry) + require.Equal(0, entry.HeapIndex) + testMessage(t, ch) + testNoMessage(t, ch) // exactly one asserted above + + // Push another that goes earlier than entry + entry2 := &cacheEntryExpiry{Key: "bar", HeapIndex: -1, Expires: now.Add(50)} + heap.Push(h, entry2) + require.Equal(0, entry2.HeapIndex) + require.Equal(1, entry.HeapIndex) + testMessage(t, ch) + testNoMessage(t, ch) // exactly one asserted above + + // Push another that goes at the end + entry3 := &cacheEntryExpiry{Key: "bar", HeapIndex: -1, Expires: now.Add(1000)} + heap.Push(h, entry3) + require.Equal(2, entry3.HeapIndex) + testNoMessage(t, ch) // no notify cause index 0 stayed the same + + // Remove the first entry (not Pop, since we don't use Pop, but that works too) + remove := h.Entries[0] + heap.Remove(h, remove.HeapIndex) + require.Equal(0, entry.HeapIndex) + require.Equal(1, entry3.HeapIndex) + testMessage(t, ch) + testMessage(t, ch) // we have two because two swaps happen + testNoMessage(t, ch) + + // Let's change entry 3 to be early, and fix it + entry3.Expires = now.Add(10) + h.Fix(entry3) + require.Equal(1, 
entry.HeapIndex) + require.Equal(0, entry3.HeapIndex) + testMessage(t, ch) + testNoMessage(t, ch) + + // Let's change entry 3 again, this is an edge case where if the 0th + // element changed, we didn't trigger the channel. Our Fix func should. + entry.Expires = now.Add(20) + h.Fix(entry3) + require.Equal(1, entry.HeapIndex) // no move + require.Equal(0, entry3.HeapIndex) + testMessage(t, ch) + testNoMessage(t, ch) // one message +} + +func testNoMessage(t *testing.T, ch <-chan struct{}) { + t.Helper() + + select { + case <-ch: + t.Fatal("should not have a message") + default: + } +} + +func testMessage(t *testing.T, ch <-chan struct{}) { + t.Helper() + + select { + case <-ch: + default: + t.Fatal("should have a message") + } +} From ad3928b6bdd54e7c0eea49a91343ad23fed561d0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 20 Apr 2018 10:43:50 -0700 Subject: [PATCH 175/627] agent/cache: don't every block on NotifyCh --- agent/cache/entry.go | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/agent/cache/entry.go b/agent/cache/entry.go index f651e6421..a1af4801c 100644 --- a/agent/cache/entry.go +++ b/agent/cache/entry.go @@ -76,7 +76,7 @@ func (h *expiryHeap) Fix(entry *cacheEntryExpiry) { // is zero, it means the head-of-line didn't change while the value // changed. Notify to reset our expiry worker. if idx == 0 && entry.HeapIndex == 0 { - h.NotifyCh <- struct{}{} + h.notify() } } @@ -91,7 +91,7 @@ func (h *expiryHeap) Swap(i, j int) { // to re-update the timer we're waiting on for the soonest expiring // value. if i == 0 || j == 0 { - h.NotifyCh <- struct{}{} + h.notify() } } @@ -113,7 +113,7 @@ func (h *expiryHeap) Push(x interface{}) { // Swap won't be called; nothing to swap! We can call it right away // because all heap operations are within a lock. 
if len(h.Entries) == 0 { - h.NotifyCh <- struct{}{} + h.notify() } h.Entries = append(h.Entries, entry) @@ -126,3 +126,16 @@ func (h *expiryHeap) Pop() interface{} { h.Entries = old[0 : n-1] return x } + +func (h *expiryHeap) notify() { + select { + case h.NotifyCh <- struct{}{}: + // Good + + default: + // If the send would've blocked, we just ignore it. The reason this + // is safe is because NotifyCh should always be a buffered channel. + // If this blocks, it means that there is a pending message anyways + // so the receiver will restart regardless. + } +} From 07d878a157cea226a79c37b9af5505094c002a91 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 20 Apr 2018 12:58:23 -0700 Subject: [PATCH 176/627] agent/cache: address feedback, clarify comments --- agent/cache/cache_test.go | 30 ++++++++++++++++-------------- agent/cache/entry.go | 4 +++- 2 files changed, 19 insertions(+), 15 deletions(-) diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index 82bdf7814..cf179b2ab 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -28,12 +28,12 @@ func TestCacheGet_noIndex(t *testing.T) { // Get, should fetch req := TestRequest(t, RequestInfo{Key: "hello"}) result, err := c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) // Get, should not fetch since we already have a satisfying value result, err = c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) // Sleep a tiny bit just to let maybe some background calls happen @@ -92,12 +92,12 @@ func TestCacheGet_blankCacheKey(t *testing.T) { // Get, should fetch req := TestRequest(t, RequestInfo{Key: ""}) result, err := c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) // Get, should not fetch since we already have a satisfying value result, err = c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) // Sleep a tiny bit just to let maybe some background 
calls happen @@ -281,14 +281,14 @@ func TestCacheGet_emptyFetchResult(t *testing.T) { // Get, should fetch req := TestRequest(t, RequestInfo{Key: "hello"}) result, err := c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) // Get, should not fetch since we already have a satisfying value req = TestRequest(t, RequestInfo{ Key: "hello", MinIndex: 1, Timeout: 100 * time.Millisecond}) result, err = c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) // Sleep a tiny bit just to let maybe some background calls happen @@ -362,7 +362,7 @@ func TestCacheGet_fetchTimeout(t *testing.T) { // Get, should fetch req := TestRequest(t, RequestInfo{Key: "hello"}) result, err := c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) // Test the timeout @@ -390,14 +390,16 @@ func TestCacheGet_expire(t *testing.T) { // Get, should fetch req := TestRequest(t, RequestInfo{Key: "hello"}) result, err := c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) - // Get, should not fetch + // Get, should not fetch, verified via the mock assertions above + hits := c.Hits() req = TestRequest(t, RequestInfo{Key: "hello"}) result, err = c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) + require.Equal(hits+1, c.Hits()) // Sleep for the expiry time.Sleep(500 * time.Millisecond) @@ -405,7 +407,7 @@ func TestCacheGet_expire(t *testing.T) { // Get, should fetch req = TestRequest(t, RequestInfo{Key: "hello"}) result, err = c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) // Sleep a tiny bit just to let maybe some background calls happen @@ -435,7 +437,7 @@ func TestCacheGet_expireResetGet(t *testing.T) { // Get, should fetch req := TestRequest(t, RequestInfo{Key: "hello"}) result, err := c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) // Fetch multiple times, where the total time 
is well beyond @@ -447,7 +449,7 @@ func TestCacheGet_expireResetGet(t *testing.T) { // Get, should not fetch req = TestRequest(t, RequestInfo{Key: "hello"}) result, err = c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) } @@ -456,7 +458,7 @@ func TestCacheGet_expireResetGet(t *testing.T) { // Get, should fetch req = TestRequest(t, RequestInfo{Key: "hello"}) result, err = c.Get("t", req) - require.Nil(err) + require.NoError(err) require.Equal(42, result) // Sleep a tiny bit just to let maybe some background calls happen diff --git a/agent/cache/entry.go b/agent/cache/entry.go index a1af4801c..50c575ff7 100644 --- a/agent/cache/entry.go +++ b/agent/cache/entry.go @@ -72,7 +72,7 @@ func (h *expiryHeap) Fix(entry *cacheEntryExpiry) { idx := entry.HeapIndex heap.Fix(h, idx) - // This is the edge case we handle: if the prev and current index + // This is the edge case we handle: if the prev (idx) and current (HeapIndex) // is zero, it means the head-of-line didn't change while the value // changed. Notify to reset our expiry worker. if idx == 0 && entry.HeapIndex == 0 { @@ -102,6 +102,7 @@ func (h *expiryHeap) Less(i, j int) bool { return h.Entries[i].Expires.Before(h.Entries[j].Expires) } +// heap.Interface, this isn't expected to be called directly. func (h *expiryHeap) Push(x interface{}) { entry := x.(*cacheEntryExpiry) @@ -119,6 +120,7 @@ func (h *expiryHeap) Push(x interface{}) { h.Entries = append(h.Entries, entry) } +// heap.Interface, this isn't expected to be called directly. 
func (h *expiryHeap) Pop() interface{} { old := h.Entries n := len(old) From dcb2671d10e34e7af39dd9036b1438ecc929bf27 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 22 Apr 2018 13:52:48 -0700 Subject: [PATCH 177/627] agent/cache: address PR feedback, lots of typos --- agent/agent.go | 6 ++---- agent/cache-types/connect_ca_leaf.go | 4 +++- agent/cache-types/intention_match_test.go | 4 ++-- agent/cache/cache.go | 2 +- agent/cache/request.go | 2 +- agent/cache/type.go | 4 ++-- agent/structs/intention.go | 4 ++-- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 610aeb64f..4d0246b99 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -295,8 +295,9 @@ func (a *Agent) Start() error { // regular and on-demand state synchronizations (anti-entropy). a.sync = ae.NewStateSyncer(a.State, c.AEInterval, a.shutdownCh, a.logger) - // create the cache + // create the cache and register types a.cache = cache.New(nil) + a.registerCache() // create the config for the rpc server/client consulCfg, err := a.consulConfig() @@ -334,9 +335,6 @@ func (a *Agent) Start() error { a.State.Delegate = a.delegate a.State.TriggerSyncChanges = a.sync.SyncChanges.Trigger - // Register the cache - a.registerCache() - // Load checks/services/metadata. if err := a.loadServices(c); err != nil { return err diff --git a/agent/cache-types/connect_ca_leaf.go b/agent/cache-types/connect_ca_leaf.go index 70d5e3c24..c6a2eee73 100644 --- a/agent/cache-types/connect_ca_leaf.go +++ b/agent/cache-types/connect_ca_leaf.go @@ -98,7 +98,9 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache } // Create a CSR. - // TODO(mitchellh): This is obviously not production ready! + // TODO(mitchellh): This is obviously not production ready! The host + // needs a correct host ID, and we probably don't want to use TestCSR + // and want a non-test-specific way to create a CSR. 
csr, pk := connect.TestCSR(&testing.RuntimeT{}, &connect.SpiffeIDService{ Host: "1234.consul", Namespace: "default", diff --git a/agent/cache-types/intention_match_test.go b/agent/cache-types/intention_match_test.go index 97b2951b3..d94d7d935 100644 --- a/agent/cache-types/intention_match_test.go +++ b/agent/cache-types/intention_match_test.go @@ -35,7 +35,7 @@ func TestIntentionMatch(t *testing.T) { MinIndex: 24, Timeout: 1 * time.Second, }, &structs.IntentionQueryRequest{Datacenter: "dc1"}) - require.Nil(err) + require.NoError(err) require.Equal(cache.FetchResult{ Value: resp, Index: 48, @@ -51,7 +51,7 @@ func TestIntentionMatch_badReqType(t *testing.T) { // Fetch _, err := typ.Fetch(cache.FetchOptions{}, cache.TestRequest( t, cache.RequestInfo{Key: "foo", MinIndex: 64})) - require.NotNil(err) + require.Error(err) require.Contains(err.Error(), "wrong type") } diff --git a/agent/cache/cache.go b/agent/cache/cache.go index a5aa575d8..cdcaffc58 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -187,7 +187,7 @@ func (c *Cache) Get(t string, r Request) (interface{}, error) { // First time through first := true - // timeoutCh for watching our tmeout + // timeoutCh for watching our timeout var timeoutCh <-chan time.Time RETRY_GET: diff --git a/agent/cache/request.go b/agent/cache/request.go index 7cd53df25..6a20a9c1f 100644 --- a/agent/cache/request.go +++ b/agent/cache/request.go @@ -18,7 +18,7 @@ type Request interface { // cacheability. type RequestInfo struct { // Key is a unique cache key for this request. This key should - // absolutely uniquely identify this request, since any conflicting + // be globally unique to identify this request, since any conflicting // cache keys could result in invalid data being returned from the cache. // The Key does not need to include ACL or DC information, since the // cache already partitions by these values prior to using this key. 
diff --git a/agent/cache/type.go b/agent/cache/type.go index cccb10b94..b4f630d2b 100644 --- a/agent/cache/type.go +++ b/agent/cache/type.go @@ -4,12 +4,12 @@ import ( "time" ) -// Type implement the logic to fetch certain types of data. +// Type implements the logic to fetch certain types of data. type Type interface { // Fetch fetches a single unique item. // // The FetchOptions contain the index and timeouts for blocking queries. - // The CacheMinIndex value on the Request itself should NOT be used + // The MinIndex value on the Request itself should NOT be used // as the blocking index since a request may be reused multiple times // as part of Refresh behavior. // diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 6ad1a9835..5c6b1e991 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -270,7 +270,7 @@ func (q *IntentionQueryRequest) RequestDatacenter() string { return q.Datacenter } -// cache.Request impl. +// CacheInfo implements cache.Request func (q *IntentionQueryRequest) CacheInfo() cache.RequestInfo { // We only support caching Match queries, so if Match isn't set, // then return an empty info object which will cause a pass-through @@ -294,7 +294,7 @@ func (q *IntentionQueryRequest) CacheInfo() cache.RequestInfo { // If there is an error, we don't set the key. A blank key forces // no cache for this request so the request is forwarded directly // to the server. 
- info.Key = strconv.FormatUint(v, 10) + info.Key = strconv.FormatUint(v, 16) } return info From 73838c9afafd47a5d6350bd89d2fa9ce9383c9dc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 22 Apr 2018 14:00:32 -0700 Subject: [PATCH 178/627] agent: use helper/retry instead of timing related tests --- agent/agent.go | 7 +++++-- agent/agent_endpoint_test.go | 24 ++++++++++++++---------- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 4d0246b99..3c866bc48 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -295,9 +295,8 @@ func (a *Agent) Start() error { // regular and on-demand state synchronizations (anti-entropy). a.sync = ae.NewStateSyncer(a.State, c.AEInterval, a.shutdownCh, a.logger) - // create the cache and register types + // create the cache a.cache = cache.New(nil) - a.registerCache() // create the config for the rpc server/client consulCfg, err := a.consulConfig() @@ -335,6 +334,10 @@ func (a *Agent) Start() error { a.State.Delegate = a.delegate a.State.TriggerSyncChanges = a.sync.SyncChanges.Trigger + // Register the cache. We do this much later so the delegate is + // populated from above. + a.registerCache() + // Load checks/services/metadata. 
if err := a.loadServices(c); err != nil { return err diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 93cffa617..44dd02923 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2175,17 +2175,21 @@ func TestAgentConnectCARoots_list(t *testing.T) { require.Nil(a.RPC("Test.ConnectCASetRoots", []*structs.CARoot{ca}, &reply)) - // Sleep a bit to wait for the cache to update - time.Sleep(100 * time.Millisecond) + retry.Run(t, func(r *retry.R) { + // List it again + obj, err := a.srv.AgentConnectCARoots(httptest.NewRecorder(), req) + if err != nil { + r.Fatal(err) + } - // List it again - obj, err := a.srv.AgentConnectCARoots(httptest.NewRecorder(), req) - require.Nil(err) - require.Equal(obj, obj) - - value := obj.(structs.IndexedCARoots) - require.Equal(value.ActiveRootID, ca.ID) - require.Len(value.Roots, 1) + value := obj.(structs.IndexedCARoots) + if ca.ID != value.ActiveRootID { + r.Fatalf("%s != %s", ca.ID, value.ActiveRootID) + } + if len(value.Roots) != 1 { + r.Fatalf("bad len: %d", len(value.Roots)) + } + }) // Should be a cache hit! The data should've updated in the cache // in the background so this should've been fetched directly from From 5abd43a56701370687308fc66345ded587a60f5f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 22 Apr 2018 14:09:06 -0700 Subject: [PATCH 179/627] agent: resolve flaky test by checking cache hits increase, rather than exact --- agent/agent_endpoint_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 44dd02923..d6b1996dd 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2194,8 +2194,10 @@ func TestAgentConnectCARoots_list(t *testing.T) { // Should be a cache hit! The data should've updated in the cache // in the background so this should've been fetched directly from // the cache. 
- require.Equal(cacheHits+1, a.cache.Hits()) - cacheHits++ + if v := a.cache.Hits(); v < cacheHits+1 { + t.Fatalf("expected at least one more cache hit, still at %d", v) + } + cacheHits = a.cache.Hits() } } From 93ff59a132b4c751cfb521c4296f29550da5d51d Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Fri, 20 Apr 2018 22:26:00 +0100 Subject: [PATCH 180/627] Fix racy connect network tests that always fail in Docker due to listen races --- connect/proxy/listener.go | 35 +++++++++++++++++++++++++++++----- connect/proxy/listener_test.go | 15 +++++++++------ connect/service_test.go | 11 ++++++----- connect/testing.go | 30 +++++++++++++++++++++++------ 4 files changed, 69 insertions(+), 22 deletions(-) diff --git a/connect/proxy/listener.go b/connect/proxy/listener.go index 51ab761ca..c8e70ac31 100644 --- a/connect/proxy/listener.go +++ b/connect/proxy/listener.go @@ -25,6 +25,15 @@ type Listener struct { stopFlag int32 stopChan chan struct{} + // listeningChan is closed when listener is opened successfully. It's really + // only for use in tests where we need to coordinate wait for the Serve + // goroutine to be running before we proceed trying to connect. On my laptop + // this always works out anyway but on constrained VMs and especially docker + // containers (e.g. in CI) we often see the Dial routine win the race and get + // `connection refused`. Retry loops and sleeps are unpleasant workarounds and + // this is cheap and correct. 
+ listeningChan chan struct{} + logger *log.Logger } @@ -41,8 +50,9 @@ func NewPublicListener(svc *connect.Service, cfg PublicListenerConfig, return net.DialTimeout("tcp", cfg.LocalServiceAddress, time.Duration(cfg.LocalConnectTimeoutMs)*time.Millisecond) }, - stopChan: make(chan struct{}), - logger: logger, + stopChan: make(chan struct{}), + listeningChan: make(chan struct{}), + logger: logger, } } @@ -64,17 +74,27 @@ func NewUpstreamListener(svc *connect.Service, cfg UpstreamConfig, defer cancel() return svc.Dial(ctx, cfg.resolver) }, - stopChan: make(chan struct{}), - logger: logger, + stopChan: make(chan struct{}), + listeningChan: make(chan struct{}), + logger: logger, } } -// Serve runs the listener until it is stopped. +// Serve runs the listener until it is stopped. It is an error to call Serve +// more than once for any given Listener instance. func (l *Listener) Serve() error { + // Ensure we mark state closed if we fail before Close is called externally. + defer l.Close() + + if atomic.LoadInt32(&l.stopFlag) != 0 { + return errors.New("serve called on a closed listener") + } + listen, err := l.listenFunc() if err != nil { return err } + close(l.listeningChan) for { conn, err := listen.Accept() @@ -113,3 +133,8 @@ func (l *Listener) handleConn(src net.Conn) { func (l *Listener) Close() error { return nil } + +// Wait for the listener to be ready to accept connections. 
+func (l *Listener) Wait() { + <-l.listeningChan +} diff --git a/connect/proxy/listener_test.go b/connect/proxy/listener_test.go index ce41c81e5..8354fbe58 100644 --- a/connect/proxy/listener_test.go +++ b/connect/proxy/listener_test.go @@ -24,7 +24,7 @@ func TestPublicListener(t *testing.T) { } testApp, err := NewTestTCPServer(t, cfg.LocalServiceAddress) - require.Nil(t, err) + require.NoError(t, err) defer testApp.Close() svc := connect.TestService(t, "db", ca) @@ -34,9 +34,10 @@ func TestPublicListener(t *testing.T) { // Run proxy go func() { err := l.Serve() - require.Nil(t, err) + require.NoError(t, err) }() defer l.Close() + l.Wait() // Proxy and backend are running, play the part of a TLS client using same // cert for now. @@ -44,7 +45,7 @@ func TestPublicListener(t *testing.T) { Addr: addrs[0], CertURI: agConnect.TestSpiffeIDService(t, "db"), }) - require.Nilf(t, err, "unexpected err: %s", err) + require.NoError(t, err) TestEchoConn(t, conn, "") } @@ -56,9 +57,10 @@ func TestUpstreamListener(t *testing.T) { testSvr := connect.NewTestServer(t, "db", ca) go func() { err := testSvr.Serve() - require.Nil(t, err) + require.NoError(t, err) }() defer testSvr.Close() + <-testSvr.Listening cfg := UpstreamConfig{ DestinationType: "service", @@ -79,13 +81,14 @@ func TestUpstreamListener(t *testing.T) { // Run proxy go func() { err := l.Serve() - require.Nil(t, err) + require.NoError(t, err) }() defer l.Close() + l.Wait() // Proxy and fake remote service are running, play the part of the app // connecting to a remote connect service over TCP. 
conn, err := net.Dial("tcp", cfg.LocalBindAddress) - require.Nilf(t, err, "unexpected err: %s", err) + require.NoError(t, err) TestEchoConn(t, conn, "") } diff --git a/connect/service_test.go b/connect/service_test.go index 7bc4c97f2..20433d1f5 100644 --- a/connect/service_test.go +++ b/connect/service_test.go @@ -73,9 +73,10 @@ func TestService_Dial(t *testing.T) { if tt.accept { go func() { err := testSvr.Serve() - require.Nil(err) + require.NoError(err) }() defer testSvr.Close() + <-testSvr.Listening } // Always expect to be connecting to a "DB" @@ -95,10 +96,10 @@ func TestService_Dial(t *testing.T) { testTimer.Stop() if tt.wantErr == "" { - require.Nil(err) + require.NoError(err) require.IsType(&tls.Conn{}, conn) } else { - require.NotNil(err) + require.Error(err) require.Contains(err.Error(), tt.wantErr) } @@ -117,7 +118,6 @@ func TestService_ServerTLSConfig(t *testing.T) { } func TestService_HTTPClient(t *testing.T) { - require := require.New(t) ca := connect.TestCA(t, nil) s := TestService(t, "web", ca) @@ -129,8 +129,9 @@ func TestService_HTTPClient(t *testing.T) { err := testSvr.ServeHTTPS(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("Hello, I am Backend")) })) - require.Nil(t, err) + require.NoError(t, err) }() + <-testSvr.Listening // TODO(banks): this will talk http2 on both client and server. I hit some // compatibility issues when testing though need to make sure that the http diff --git a/connect/testing.go b/connect/testing.go index 9f6e4f781..f9a6a4850 100644 --- a/connect/testing.go +++ b/connect/testing.go @@ -105,6 +105,8 @@ type TestServer struct { // Addr is the listen address. It is set to a random free port on `localhost` // by default. Addr string + // Listening is closed when the listener is run. 
+ Listening chan struct{} l net.Listener stopFlag int32 @@ -116,11 +118,12 @@ type TestServer struct { func NewTestServer(t testing.T, service string, ca *structs.CARoot) *TestServer { ports := freeport.GetT(t, 1) return &TestServer{ - Service: service, - CA: ca, - stopChan: make(chan struct{}), - TLSCfg: TestTLSConfig(t, service, ca), - Addr: fmt.Sprintf("localhost:%d", ports[0]), + Service: service, + CA: ca, + stopChan: make(chan struct{}), + TLSCfg: TestTLSConfig(t, service, ca), + Addr: fmt.Sprintf("127.0.0.1:%d", ports[0]), + Listening: make(chan struct{}), } } @@ -132,6 +135,7 @@ func (s *TestServer) Serve() error { if err != nil { return err } + close(s.Listening) s.l = l log.Printf("test connect service listening on %s", s.Addr) @@ -172,7 +176,21 @@ func (s *TestServer) ServeHTTPS(h http.Handler) error { Handler: h, } log.Printf("starting test connect HTTPS server on %s", s.Addr) - return srv.ListenAndServeTLS("", "") + + // Use our own listener so we can signal when it's ready. 
+ l, err := net.Listen("tcp", s.Addr) + if err != nil { + return err + } + close(s.Listening) + s.l = l + log.Printf("test connect service listening on %s", s.Addr) + + err = srv.ServeTLS(l, "", "") + if atomic.LoadInt32(&s.stopFlag) == 1 { + return nil + } + return err } // Close stops a TestServer From aa10fb2f48f2e0440c9c51a7814c0386daa3b0e2 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Tue, 24 Apr 2018 11:50:31 -0700 Subject: [PATCH 181/627] Clarify some comments and names around CA bootstrapping --- agent/connect/ca_provider.go | 4 +- agent/consul/connect_ca_endpoint.go | 22 ++++---- agent/consul/connect_ca_provider.go | 8 +-- agent/consul/fsm/commands_oss_test.go | 2 +- agent/consul/leader.go | 28 +++++----- agent/consul/server.go | 6 +-- agent/consul/state/connect_ca.go | 76 ++++++++++++++------------- agent/structs/connect_ca.go | 8 +-- 8 files changed, 80 insertions(+), 74 deletions(-) diff --git a/agent/connect/ca_provider.go b/agent/connect/ca_provider.go index 9a53d02a0..cb7219669 100644 --- a/agent/connect/ca_provider.go +++ b/agent/connect/ca_provider.go @@ -29,8 +29,8 @@ type CAProvider interface { // SignCA signs a CA CSR and returns the resulting cross-signed cert. SignCA(*x509.CertificateRequest) (string, error) - // Teardown performs any necessary cleanup that should happen when the provider + // Cleanup performs any necessary cleanup that should happen when the provider // is shut down permanently, such as removing a temporary PKI backend in Vault // created for an intermediate CA. - Teardown() error + Cleanup() error } diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 9a3adeb99..5e3c2f6c7 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -52,7 +52,7 @@ func (s *ConnectCA) ConfigurationSet( return err } - // This action requires operator read access. + // This action requires operator write access. 
rule, err := s.srv.resolveToken(args.Token) if err != nil { return err @@ -133,7 +133,7 @@ func (s *ConnectCA) ConfigurationSet( } // Add the cross signed cert to the new root's intermediates - newActiveRoot.Intermediates = []string{xcCert} + newActiveRoot.IntermediateCerts = []string{xcCert} // Update the roots and CA config in the state store at the same time idx, roots, err := state.CARoots(nil) @@ -166,11 +166,11 @@ func (s *ConnectCA) ConfigurationSet( // and call teardown on the old provider s.srv.setCAProvider(newProvider) - if err := oldProvider.Teardown(); err != nil { - return err + if err := oldProvider.Cleanup(); err != nil { + s.srv.logger.Printf("[WARN] connect: failed to clean up old provider %q", config.Provider) } - s.srv.logger.Printf("[INFO] connect: CA rotated to the new root under %q provider", args.Config.Provider) + s.srv.logger.Printf("[INFO] connect: CA rotated to new root under provider %q", args.Config.Provider) return nil } @@ -205,12 +205,12 @@ func (s *ConnectCA) Roots( // directly to the structure in the memdb store. 
reply.Roots[i] = &structs.CARoot{ - ID: r.ID, - Name: r.Name, - RootCert: r.RootCert, - Intermediates: r.Intermediates, - RaftIndex: r.RaftIndex, - Active: r.Active, + ID: r.ID, + Name: r.Name, + RootCert: r.RootCert, + IntermediateCerts: r.IntermediateCerts, + RaftIndex: r.RaftIndex, + Active: r.Active, } if r.Active { diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index 6f0508ce1..8a3c81b2b 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -167,7 +167,7 @@ func (c *ConsulCAProvider) GenerateIntermediate() (*structs.CARoot, *x509.Certif return nil, nil, err } - id := &connect.SpiffeIDSigning{ClusterID: config.ClusterSerial, Domain: "consul"} + id := &connect.SpiffeIDSigning{ClusterID: config.ClusterID, Domain: "consul"} template := &x509.CertificateRequest{ URIs: []*url.URL{id.URI()}, } @@ -198,7 +198,7 @@ func (c *ConsulCAProvider) GenerateIntermediate() (*structs.CARoot, *x509.Certif } // Remove the state store entry for this provider instance. 
-func (c *ConsulCAProvider) Teardown() error { +func (c *ConsulCAProvider) Cleanup() error { args := &structs.CARequest{ Op: structs.CAOpDeleteProviderState, ProviderState: &structs.CAConsulProviderState{ID: c.id}, @@ -336,7 +336,7 @@ func (c *ConsulCAProvider) SignCA(csr *x509.CertificateRequest) (string, error) if err != nil { return "", err } - id := &connect.SpiffeIDSigning{ClusterID: config.ClusterSerial, Domain: "consul"} + id := &connect.SpiffeIDSigning{ClusterID: config.ClusterID, Domain: "consul"} keyId, err := connect.KeyId(privKey.Public()) if err != nil { return "", err @@ -423,7 +423,7 @@ func (c *ConsulCAProvider) generateCA(privateKey, contents string, sn uint64) (* if pemContents == "" { // The URI (SPIFFE compatible) for the cert - id := &connect.SpiffeIDSigning{ClusterID: config.ClusterSerial, Domain: "consul"} + id := &connect.SpiffeIDSigning{ClusterID: config.ClusterID, Domain: "consul"} keyId, err := connect.KeyId(privKey.Public()) if err != nil { return nil, err diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index a6552240c..a52e6d7b6 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -1251,7 +1251,7 @@ func TestFSM_CAConfig(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - var conf *connect.ConsulCAProviderConfig + var conf *structs.ConsulCAProviderConfig if err := mapstructure.WeakDecode(config.Config, &conf); err != nil { t.Fatalf("error decoding config: %s, %v", err, config.Config) } diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 91bacee2f..282393cd3 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -7,16 +7,15 @@ import ( "sync" "time" - "github.com/hashicorp/consul/agent/connect" - uuid "github.com/hashicorp/go-uuid" - "github.com/armon/go-metrics" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/autopilot" 
"github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/types" + uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/go-version" "github.com/hashicorp/raft" "github.com/hashicorp/serf/serf" @@ -215,7 +214,7 @@ func (s *Server) establishLeadership() error { s.autopilot.Start() // todo(kyhavlov): start a goroutine here for handling periodic CA rotation - s.bootstrapCA() + s.initializeCA() s.setConsistentReadReady() return nil @@ -366,8 +365,9 @@ func (s *Server) getOrCreateAutopilotConfig() *autopilot.Config { return config } -// getOrCreateCAConfig is used to get the CA config, initializing it if necessary -func (s *Server) getOrCreateCAConfig() (*structs.CAConfiguration, error) { +// initializeCAConfig is used to initialize the CA config if necessary +// when setting up the CA during establishLeadership +func (s *Server) initializeCAConfig() (*structs.CAConfiguration, error) { state := s.fsm.State() _, config, err := state.CAConfig() if err != nil { @@ -377,13 +377,13 @@ func (s *Server) getOrCreateCAConfig() (*structs.CAConfiguration, error) { return config, nil } - sn, err := uuid.GenerateUUID() + id, err := uuid.GenerateUUID() if err != nil { return nil, err } config = s.config.CAConfig - config.ClusterSerial = sn + config.ClusterID = id req := structs.CARequest{ Op: structs.CAOpSetConfig, Config: config, @@ -395,9 +395,10 @@ func (s *Server) getOrCreateCAConfig() (*structs.CAConfiguration, error) { return config, nil } -// bootstrapCA creates a CA provider from the current configuration. -func (s *Server) bootstrapCA() error { - conf, err := s.getOrCreateCAConfig() +// initializeCA sets up the CA provider when gaining leadership, bootstrapping +// the root in the state store if necessary. 
+func (s *Server) initializeCA() error { + conf, err := s.initializeCAConfig() if err != nil { return err } @@ -424,7 +425,10 @@ func (s *Server) bootstrapCA() error { if err != nil { return err } - if root != nil && root.ID == trustedCA.ID { + if root != nil { + if root.ID != trustedCA.ID { + s.logger.Printf("[WARN] connect: CA root %q is not the active root (%q)", trustedCA.ID, root.ID) + } return nil } diff --git a/agent/consul/server.go b/agent/consul/server.go index fef016829..e15d5f71c 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -17,9 +17,8 @@ import ( "sync/atomic" "time" - "github.com/hashicorp/consul/agent/connect" - "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/agent/consul/fsm" "github.com/hashicorp/consul/agent/consul/state" @@ -98,7 +97,8 @@ type Server struct { // autopilotWaitGroup is used to block until Autopilot shuts down. autopilotWaitGroup sync.WaitGroup - // caProvider is the current CA provider in use for Connect. + // caProvider is the current CA provider in use for Connect. This is + // only non-nil when we are the leader. caProvider connect.CAProvider caProviderLock sync.RWMutex diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index 17e274992..99986c891 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -8,17 +8,38 @@ import ( ) const ( - caConfigTableName = "connect-ca-config" - caRootTableName = "connect-ca-roots" - caProviderTableName = "connect-ca-builtin" + caBuiltinProviderTableName = "connect-ca-builtin" + caConfigTableName = "connect-ca-config" + caRootTableName = "connect-ca-roots" ) +// caBuiltinProviderTableSchema returns a new table schema used for storing +// the built-in CA provider's state for connect. This is only used by +// the internal Consul CA provider. 
+func caBuiltinProviderTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: caBuiltinProviderTableName, + Indexes: map[string]*memdb.IndexSchema{ + "id": &memdb.IndexSchema{ + Name: "id", + AllowMissing: false, + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + }, + } +} + // caConfigTableSchema returns a new table schema used for storing // the CA config for Connect. func caConfigTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ Name: caConfigTableName, Indexes: map[string]*memdb.IndexSchema{ + // This table only stores one row, so this just ignores the ID field + // and always overwrites the same config object. "id": &memdb.IndexSchema{ Name: "id", AllowMissing: true, @@ -49,29 +70,10 @@ func caRootTableSchema() *memdb.TableSchema { } } -// caProviderTableSchema returns a new table schema used for storing -// the built-in CA provider's state for connect. This is only used by -// the internal Consul CA provider. -func caProviderTableSchema() *memdb.TableSchema { - return &memdb.TableSchema{ - Name: caProviderTableName, - Indexes: map[string]*memdb.IndexSchema{ - "id": &memdb.IndexSchema{ - Name: "id", - AllowMissing: false, - Unique: true, - Indexer: &memdb.StringFieldIndex{ - Field: "ID", - }, - }, - }, - } -} - func init() { + registerSchema(caBuiltinProviderTableSchema) registerSchema(caConfigTableSchema) registerSchema(caRootTableSchema) - registerSchema(caProviderTableSchema) } // CAConfig is used to pull the CA config from the snapshot. 
@@ -170,7 +172,7 @@ func (s *Store) caSetConfigTxn(idx uint64, tx *memdb.Txn, config *structs.CAConf if prev != nil { existing := prev.(*structs.CAConfiguration) config.CreateIndex = existing.CreateIndex - config.ClusterSerial = existing.ClusterSerial + config.ClusterID = existing.ClusterID } else { config.CreateIndex = idx } @@ -319,7 +321,7 @@ func (s *Store) CARootSetCAS(idx, cidx uint64, rs []*structs.CARoot) (bool, erro // CAProviderState is used to pull the built-in provider state from the snapshot. func (s *Snapshot) CAProviderState() (*structs.CAConsulProviderState, error) { - c, err := s.tx.First(caProviderTableName, "id") + c, err := s.tx.First(caBuiltinProviderTableName, "id") if err != nil { return nil, err } @@ -334,7 +336,7 @@ func (s *Snapshot) CAProviderState() (*structs.CAConsulProviderState, error) { // CAProviderState is used when restoring from a snapshot. func (s *Restore) CAProviderState(state *structs.CAConsulProviderState) error { - if err := s.tx.Insert(caProviderTableName, state); err != nil { + if err := s.tx.Insert(caBuiltinProviderTableName, state); err != nil { return fmt.Errorf("failed restoring built-in CA state: %s", err) } @@ -347,10 +349,10 @@ func (s *Store) CAProviderState(id string) (uint64, *structs.CAConsulProviderSta defer tx.Abort() // Get the index - idx := maxIndexTxn(tx, caProviderTableName) + idx := maxIndexTxn(tx, caBuiltinProviderTableName) // Get the provider config - c, err := tx.First(caProviderTableName, "id", id) + c, err := tx.First(caBuiltinProviderTableName, "id", id) if err != nil { return 0, nil, fmt.Errorf("failed built-in CA state lookup: %s", err) } @@ -369,10 +371,10 @@ func (s *Store) CAProviderStates() (uint64, []*structs.CAConsulProviderState, er defer tx.Abort() // Get the index - idx := maxIndexTxn(tx, caProviderTableName) + idx := maxIndexTxn(tx, caBuiltinProviderTableName) // Get all - iter, err := tx.Get(caProviderTableName, "id") + iter, err := tx.Get(caBuiltinProviderTableName, "id") if err != 
nil { return 0, nil, fmt.Errorf("failed CA provider state lookup: %s", err) } @@ -390,7 +392,7 @@ func (s *Store) CASetProviderState(idx uint64, state *structs.CAConsulProviderSt defer tx.Abort() // Check for an existing config - existing, err := tx.First(caProviderTableName, "id", state.ID) + existing, err := tx.First(caBuiltinProviderTableName, "id", state.ID) if err != nil { return false, fmt.Errorf("failed built-in CA state lookup: %s", err) } @@ -403,12 +405,12 @@ func (s *Store) CASetProviderState(idx uint64, state *structs.CAConsulProviderSt } state.ModifyIndex = idx - if err := tx.Insert(caProviderTableName, state); err != nil { + if err := tx.Insert(caBuiltinProviderTableName, state); err != nil { return false, fmt.Errorf("failed updating built-in CA state: %s", err) } // Update the index - if err := tx.Insert("index", &IndexEntry{caProviderTableName, idx}); err != nil { + if err := tx.Insert("index", &IndexEntry{caBuiltinProviderTableName, idx}); err != nil { return false, fmt.Errorf("failed updating index: %s", err) } @@ -423,10 +425,10 @@ func (s *Store) CADeleteProviderState(id string) error { defer tx.Abort() // Get the index - idx := maxIndexTxn(tx, caProviderTableName) + idx := maxIndexTxn(tx, caBuiltinProviderTableName) // Check for an existing config - existing, err := tx.First(caProviderTableName, "id", id) + existing, err := tx.First(caBuiltinProviderTableName, "id", id) if err != nil { return fmt.Errorf("failed built-in CA state lookup: %s", err) } @@ -437,10 +439,10 @@ func (s *Store) CADeleteProviderState(id string) error { providerState := existing.(*structs.CAConsulProviderState) // Do the delete and update the index - if err := tx.Delete(caProviderTableName, providerState); err != nil { + if err := tx.Delete(caBuiltinProviderTableName, providerState); err != nil { return err } - if err := tx.Insert("index", &IndexEntry{caProviderTableName, idx}); err != nil { + if err := tx.Insert("index", &IndexEntry{caBuiltinProviderTableName, idx}); err 
!= nil { return fmt.Errorf("failed updating index: %s", err) } diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 33c355fca..39c46f0c4 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -31,9 +31,9 @@ type CARoot struct { // RootCert is the PEM-encoded public certificate. RootCert string - // Intermediates is a list of PEM-encoded intermediate certs to + // IntermediateCerts is a list of PEM-encoded intermediate certs to // attach to any leaf certs signed by this CA. - Intermediates []string + IntermediateCerts []string // SigningCert is the PEM-encoded signing certificate and SigningKey // is the PEM-encoded private key for the signing certificate. These @@ -146,8 +146,8 @@ const ( // CAConfiguration is the configuration for the current CA plugin. type CAConfiguration struct { - // Unique identifier for the cluster - ClusterSerial string `json:"-"` + // ClusterID is a unique identifier for the cluster + ClusterID string `json:"-"` // Provider is the CA provider implementation to use. 
Provider string From 44b30476cb0f1ecb835ff01efba7d0af2b517e38 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Tue, 24 Apr 2018 16:16:37 -0700 Subject: [PATCH 182/627] Simplify the CA provider interface by moving some logic out --- agent/connect/ca.go | 31 +++- agent/connect/ca_provider.go | 23 ++- agent/consul/connect_ca_endpoint.go | 35 +++-- agent/consul/connect_ca_provider.go | 217 +++++++++++----------------- agent/consul/leader.go | 29 +++- agent/consul/state/connect_ca.go | 2 +- agent/structs/connect_ca.go | 2 +- 7 files changed, 169 insertions(+), 170 deletions(-) diff --git a/agent/connect/ca.go b/agent/connect/ca.go index 818af9f9f..87b01994e 100644 --- a/agent/connect/ca.go +++ b/agent/connect/ca.go @@ -1,8 +1,11 @@ package connect import ( + "bytes" "crypto" "crypto/ecdsa" + "crypto/rsa" + "crypto/sha1" "crypto/sha256" "crypto/x509" "encoding/pem" @@ -25,6 +28,23 @@ func ParseCert(pemValue string) (*x509.Certificate, error) { return x509.ParseCertificate(block.Bytes) } +// ParseCertFingerprint parses the x509 certificate from a PEM-encoded value +// and returns the SHA-1 fingerprint. +func ParseCertFingerprint(pemValue string) (string, error) { + // The _ result below is not an error but the remaining PEM bytes. + block, _ := pem.Decode([]byte(pemValue)) + if block == nil { + return "", fmt.Errorf("no PEM-encoded data found") + } + + hash := sha1.Sum(block.Bytes) + hexified := make([][]byte, len(hash)) + for i, data := range hash { + hexified[i] = []byte(fmt.Sprintf("%02X", data)) + } + return string(bytes.Join(hexified, []byte(":"))), nil +} + // ParseSigner parses a crypto.Signer from a PEM-encoded key. The private key // is expected to be the first block in the PEM value. 
func ParseSigner(pemValue string) (crypto.Signer, error) { @@ -38,6 +58,9 @@ func ParseSigner(pemValue string) (crypto.Signer, error) { case "EC PRIVATE KEY": return x509.ParseECPrivateKey(block.Bytes) + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(block.Bytes) + case "PRIVATE KEY": signer, err := x509.ParsePKCS8PrivateKey(block.Bytes) if err != nil { @@ -74,15 +97,17 @@ func ParseCSR(pemValue string) (*x509.CertificateRequest, error) { // KeyId returns a x509 KeyId from the given signing key. The key must be // an *ecdsa.PublicKey currently, but may support more types in the future. func KeyId(raw interface{}) ([]byte, error) { - pub, ok := raw.(*ecdsa.PublicKey) - if !ok { + switch raw.(type) { + case *ecdsa.PublicKey: + case *rsa.PublicKey: + default: return nil, fmt.Errorf("invalid key type: %T", raw) } // This is not standard; RFC allows any unique identifier as long as they // match in subject/authority chains but suggests specific hashing of DER // bytes of public key including DER tags. - bs, err := x509.MarshalPKIXPublicKey(pub) + bs, err := x509.MarshalPKIXPublicKey(raw) if err != nil { return nil, err } diff --git a/agent/connect/ca_provider.go b/agent/connect/ca_provider.go index cb7219669..1eb5bde11 100644 --- a/agent/connect/ca_provider.go +++ b/agent/connect/ca_provider.go @@ -13,21 +13,28 @@ type CAProvider interface { // Active root returns the currently active root CA for this // provider. This should be a parent of the certificate returned by // ActiveIntermediate() - ActiveRoot() (*structs.CARoot, error) + ActiveRoot() (string, error) // ActiveIntermediate returns the current signing cert used by this // provider for generating SPIFFE leaf certs. - ActiveIntermediate() (*structs.CARoot, error) + ActiveIntermediate() (string, error) - // GenerateIntermediate returns a new intermediate signing cert, a - // cross-signing CSR for it and sets it to the active intermediate. 
- GenerateIntermediate() (*structs.CARoot, *x509.CertificateRequest, error) + // GenerateIntermediate returns a new intermediate signing cert and + // sets it to the active intermediate. + GenerateIntermediate() (string, error) // Sign signs a leaf certificate used by Connect proxies from a CSR. - Sign(*SpiffeIDService, *x509.CertificateRequest) (*structs.IssuedCert, error) + Sign(*x509.CertificateRequest) (*structs.IssuedCert, error) - // SignCA signs a CA CSR and returns the resulting cross-signed cert. - SignCA(*x509.CertificateRequest) (string, error) + // CrossSignCA must accept a CA certificate signed by another CA's key + // and cross sign it exactly as it is such that it forms a chain back the the + // CAProvider's current root. Specifically, the Distinguished Name, Subject + // Alternative Name, SubjectKeyID and other relevant extensions must be kept. + // The resulting certificate must have a distinct Serial Number and the + // AuthorityKeyID set to the CAProvider's current signing key as well as the + // Issuer related fields changed as necessary. The resulting certificate is + // returned as a PEM formatted string. 
+ CrossSignCA(*x509.Certificate) (string, error) // Cleanup performs any necessary cleanup that should happen when the provider // is shut down permanently, such as removing a temporary PKI backend in Vault diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 5e3c2f6c7..b0041423e 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -80,11 +80,22 @@ func (s *ConnectCA) ConfigurationSet( return fmt.Errorf("could not initialize provider: %v", err) } - newActiveRoot, err := newProvider.ActiveRoot() + newRootPEM, err := newProvider.ActiveRoot() if err != nil { return err } + id, err := connect.ParseCertFingerprint(newRootPEM) + if err != nil { + return fmt.Errorf("error parsing root fingerprint: %v", err) + } + newActiveRoot := &structs.CARoot{ + ID: id, + Name: fmt.Sprintf("%s CA Root Cert", config.Provider), + RootCert: newRootPEM, + Active: true, + } + // Compare the new provider's root CA ID to the current one. If they // match, just update the existing provider with the new config. // If they don't match, begin the root rotation process. @@ -121,13 +132,19 @@ func (s *ConnectCA) ConfigurationSet( // to get the cross-signed intermediate // 3. 
Get the active root for the new provider, append the intermediate from step 3 // to its list of intermediates - _, csr, err := newProvider.GenerateIntermediate() + intermediatePEM, err := newProvider.GenerateIntermediate() if err != nil { return err } + intermediateCA, err := connect.ParseCert(intermediatePEM) + if err != nil { + return err + } + + // Have the old provider cross-sign the new intermediate oldProvider := s.srv.getCAProvider() - xcCert, err := oldProvider.SignCA(csr) + xcCert, err := oldProvider.CrossSignCA(intermediateCA) if err != nil { return err } @@ -237,21 +254,11 @@ func (s *ConnectCA) Sign( return err } - // Parse the SPIFFE ID - spiffeId, err := connect.ParseCertURI(csr.URIs[0]) - if err != nil { - return err - } - serviceId, ok := spiffeId.(*connect.SpiffeIDService) - if !ok { - return fmt.Errorf("SPIFFE ID in CSR must be a service ID") - } - // todo(kyhavlov): more validation on the CSR before signing provider := s.srv.getCAProvider() - cert, err := provider.Sign(serviceId, csr) + cert, err := provider.Sign(csr) if err != nil { return err } diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index 8a3c81b2b..2f32a3d67 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" - uuid "github.com/hashicorp/go-uuid" "github.com/mitchellh/mapstructure" ) @@ -96,12 +95,16 @@ func NewConsulCAProvider(rawConfig map[string]interface{}, srv *Server) (*Consul newState.PrivateKey = conf.PrivateKey } - // Generate the root CA - ca, err := provider.generateCA(newState.PrivateKey, conf.RootCert, idx+1) - if err != nil { - return nil, fmt.Errorf("error generating CA: %v", err) + // Generate the root CA if necessary + if conf.RootCert == "" { + ca, err := provider.generateCA(newState.PrivateKey, idx+1) + if err != nil { + return nil, fmt.Errorf("error generating CA: %v", err) + 
} + newState.RootCert = ca + } else { + newState.RootCert = conf.RootCert } - newState.CARoot = ca // Write the provider state args := &structs.CARequest{ @@ -133,68 +136,33 @@ func decodeConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { } // Return the active root CA and generate a new one if needed -func (c *ConsulCAProvider) ActiveRoot() (*structs.CARoot, error) { +func (c *ConsulCAProvider) ActiveRoot() (string, error) { state := c.srv.fsm.State() _, providerState, err := state.CAProviderState(c.id) if err != nil { - return nil, err + return "", err } - return providerState.CARoot, nil + return providerState.RootCert, nil } // We aren't maintaining separate root/intermediate CAs for the builtin // provider, so just return the root. -func (c *ConsulCAProvider) ActiveIntermediate() (*structs.CARoot, error) { +func (c *ConsulCAProvider) ActiveIntermediate() (string, error) { return c.ActiveRoot() } // We aren't maintaining separate root/intermediate CAs for the builtin // provider, so just generate a CSR for the active root. 
-func (c *ConsulCAProvider) GenerateIntermediate() (*structs.CARoot, *x509.CertificateRequest, error) { +func (c *ConsulCAProvider) GenerateIntermediate() (string, error) { ca, err := c.ActiveIntermediate() if err != nil { - return nil, nil, err + return "", err } - state := c.srv.fsm.State() - _, providerState, err := state.CAProviderState(c.id) - if err != nil { - return nil, nil, err - } - _, config, err := state.CAConfig() - if err != nil { - return nil, nil, err - } + // todo(kyhavlov): make a new intermediate here - id := &connect.SpiffeIDSigning{ClusterID: config.ClusterID, Domain: "consul"} - template := &x509.CertificateRequest{ - URIs: []*url.URL{id.URI()}, - } - - signer, err := connect.ParseSigner(providerState.PrivateKey) - if err != nil { - return nil, nil, err - } - - // Create the CSR itself - var csrBuf bytes.Buffer - bs, err := x509.CreateCertificateRequest(rand.Reader, template, signer) - if err != nil { - return nil, nil, fmt.Errorf("error creating CSR: %s", err) - } - - err = pem.Encode(&csrBuf, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: bs}) - if err != nil { - return nil, nil, fmt.Errorf("error encoding CSR: %s", err) - } - - csr, err := connect.ParseCSR(csrBuf.String()) - if err != nil { - return nil, nil, err - } - - return ca, csr, err + return ca, err } // Remove the state store entry for this provider instance. @@ -216,7 +184,7 @@ func (c *ConsulCAProvider) Cleanup() error { // Sign returns a new certificate valid for the given SpiffeIDService // using the current CA. -func (c *ConsulCAProvider) Sign(serviceId *connect.SpiffeIDService, csr *x509.CertificateRequest) (*structs.IssuedCert, error) { +func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (*structs.IssuedCert, error) { // Lock during the signing so we don't use the same index twice // for different cert serial numbers. 
c.Lock() @@ -242,8 +210,18 @@ func (c *ConsulCAProvider) Sign(serviceId *connect.SpiffeIDService, csr *x509.Ce return nil, err } + // Parse the SPIFFE ID + spiffeId, err := connect.ParseCertURI(csr.URIs[0]) + if err != nil { + return nil, err + } + serviceId, ok := spiffeId.(*connect.SpiffeIDService) + if !ok { + return nil, fmt.Errorf("SPIFFE ID in CSR must be a service ID") + } + // Parse the CA cert - caCert, err := connect.ParseCert(providerState.CARoot.RootCert) + caCert, err := connect.ParseCert(providerState.RootCert) if err != nil { return nil, fmt.Errorf("error parsing CA cert: %s", err) } @@ -312,8 +290,8 @@ func (c *ConsulCAProvider) Sign(serviceId *connect.SpiffeIDService, csr *x509.Ce }, nil } -// SignCA returns an intermediate CA cert signed by the current active root. -func (c *ConsulCAProvider) SignCA(csr *x509.CertificateRequest) (string, error) { +// CrossSignCA returns the given intermediate CA cert signed by the current active root. +func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { c.Lock() defer c.Unlock() @@ -329,44 +307,28 @@ func (c *ConsulCAProvider) SignCA(csr *x509.CertificateRequest) (string, error) return "", fmt.Errorf("error parsing private key %q: %v", providerState.PrivateKey, err) } - name := fmt.Sprintf("Consul cross-signed CA %d", providerState.LeafIndex+1) - - // The URI (SPIFFE compatible) for the cert - _, config, err := state.CAConfig() + rootCA, err := connect.ParseCert(providerState.RootCert) if err != nil { return "", err } - id := &connect.SpiffeIDSigning{ClusterID: config.ClusterID, Domain: "consul"} + keyId, err := connect.KeyId(privKey.Public()) if err != nil { return "", err } - // Create the CA cert + // Create the cross-signing template from the existing root CA serialNum := &big.Int{} serialNum.SetUint64(providerState.LeafIndex + 1) - template := x509.Certificate{ - SerialNumber: serialNum, - Subject: pkix.Name{CommonName: name}, - URIs: csr.URIs, - Signature: csr.Signature, - 
PublicKeyAlgorithm: csr.PublicKeyAlgorithm, - PublicKey: csr.PublicKey, - PermittedDNSDomainsCritical: true, - PermittedDNSDomains: []string{id.URI().Hostname()}, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageCertSign | - x509.KeyUsageCRLSign | - x509.KeyUsageDigitalSignature, - IsCA: true, - NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), - NotBefore: time.Now(), - AuthorityKeyId: keyId, - SubjectKeyId: keyId, - } + template := *cert + template.SerialNumber = serialNum + template.Subject = rootCA.Subject + template.SignatureAlgorithm = rootCA.SignatureAlgorithm + template.SubjectKeyId = keyId + template.AuthorityKeyId = keyId bs, err := x509.CreateCertificate( - rand.Reader, &template, &template, privKey.Public(), privKey) + rand.Reader, &template, rootCA, cert.PublicKey, privKey) if err != nil { return "", fmt.Errorf("error generating CA certificate: %s", err) } @@ -405,75 +367,58 @@ func generatePrivateKey() (string, error) { } // generateCA makes a new root CA using the current private key -func (c *ConsulCAProvider) generateCA(privateKey, contents string, sn uint64) (*structs.CARoot, error) { +func (c *ConsulCAProvider) generateCA(privateKey string, sn uint64) (string, error) { state := c.srv.fsm.State() _, config, err := state.CAConfig() if err != nil { - return nil, err + return "", err } privKey, err := connect.ParseSigner(privateKey) if err != nil { - return nil, fmt.Errorf("error parsing private key %q: %v", privateKey, err) + return "", fmt.Errorf("error parsing private key %q: %v", privateKey, err) } name := fmt.Sprintf("Consul CA %d", sn) - pemContents := contents - - if pemContents == "" { - // The URI (SPIFFE compatible) for the cert - id := &connect.SpiffeIDSigning{ClusterID: config.ClusterID, Domain: "consul"} - keyId, err := connect.KeyId(privKey.Public()) - if err != nil { - return nil, err - } - - // Create the CA cert - serialNum := &big.Int{} - serialNum.SetUint64(sn) - template := x509.Certificate{ - SerialNumber: serialNum, - 
Subject: pkix.Name{CommonName: name}, - URIs: []*url.URL{id.URI()}, - PermittedDNSDomainsCritical: true, - PermittedDNSDomains: []string{id.URI().Hostname()}, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageCertSign | - x509.KeyUsageCRLSign | - x509.KeyUsageDigitalSignature, - IsCA: true, - NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), - NotBefore: time.Now(), - AuthorityKeyId: keyId, - SubjectKeyId: keyId, - } - - bs, err := x509.CreateCertificate( - rand.Reader, &template, &template, privKey.Public(), privKey) - if err != nil { - return nil, fmt.Errorf("error generating CA certificate: %s", err) - } - - var buf bytes.Buffer - err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) - if err != nil { - return nil, fmt.Errorf("error encoding private key: %s", err) - } - - pemContents = buf.String() - } - - // Generate an ID for the new CA cert - rootId, err := uuid.GenerateUUID() + // The URI (SPIFFE compatible) for the cert + id := &connect.SpiffeIDSigning{ClusterID: config.ClusterID, Domain: "consul"} + keyId, err := connect.KeyId(privKey.Public()) if err != nil { - return nil, err + return "", err } - return &structs.CARoot{ - ID: rootId, - Name: name, - RootCert: pemContents, - Active: true, - }, nil + // Create the CA cert + serialNum := &big.Int{} + serialNum.SetUint64(sn) + template := x509.Certificate{ + SerialNumber: serialNum, + Subject: pkix.Name{CommonName: name}, + URIs: []*url.URL{id.URI()}, + PermittedDNSDomainsCritical: true, + PermittedDNSDomains: []string{id.URI().Hostname()}, + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageCertSign | + x509.KeyUsageCRLSign | + x509.KeyUsageDigitalSignature, + IsCA: true, + NotAfter: time.Now().Add(10 * 365 * 24 * time.Hour), + NotBefore: time.Now(), + AuthorityKeyId: keyId, + SubjectKeyId: keyId, + } + + bs, err := x509.CreateCertificate( + rand.Reader, &template, &template, privKey.Public(), privKey) + if err != nil { + return "", fmt.Errorf("error generating CA certificate: %s", 
err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) + if err != nil { + return "", fmt.Errorf("error encoding private key: %s", err) + } + + return buf.String(), nil } diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 282393cd3..1670add29 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -214,7 +214,9 @@ func (s *Server) establishLeadership() error { s.autopilot.Start() // todo(kyhavlov): start a goroutine here for handling periodic CA rotation - s.initializeCA() + if err := s.initializeCA(); err != nil { + return err + } s.setConsistentReadReady() return nil @@ -232,6 +234,8 @@ func (s *Server) revokeLeadership() error { return err } + s.setCAProvider(nil) + s.resetConsistentReadReady() s.autopilot.Stop() return nil @@ -412,22 +416,33 @@ func (s *Server) initializeCA() error { s.setCAProvider(provider) // Get the active root cert from the CA - trustedCA, err := provider.ActiveRoot() + rootPEM, err := provider.ActiveRoot() if err != nil { return fmt.Errorf("error getting root cert: %v", err) } + id, err := connect.ParseCertFingerprint(rootPEM) + if err != nil { + return fmt.Errorf("error parsing root fingerprint: %v", err) + } + rootCA := &structs.CARoot{ + ID: id, + Name: fmt.Sprintf("%s CA Root Cert", conf.Provider), + RootCert: rootPEM, + Active: true, + } + // Check if the CA root is already initialized and exit if it is. // Every change to the CA after this initial bootstrapping should // be done through the rotation process. 
state := s.fsm.State() - _, root, err := state.CARootActive(nil) + _, activeRoot, err := state.CARootActive(nil) if err != nil { return err } - if root != nil { - if root.ID != trustedCA.ID { - s.logger.Printf("[WARN] connect: CA root %q is not the active root (%q)", trustedCA.ID, root.ID) + if activeRoot != nil { + if activeRoot.ID != rootCA.ID { + s.logger.Printf("[WARN] connect: CA root %q is not the active root (%q)", rootCA.ID, activeRoot.ID) } return nil } @@ -442,7 +457,7 @@ func (s *Server) initializeCA() error { resp, err := s.raftApply(structs.ConnectCARequestType, &structs.CARequest{ Op: structs.CAOpSetRoots, Index: idx, - Roots: []*structs.CARoot{trustedCA}, + Roots: []*structs.CARoot{rootCA}, }) if err != nil { s.logger.Printf("[ERR] connect: Apply failed %v", err) diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index 99986c891..7c4cea294 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -62,7 +62,7 @@ func caRootTableSchema() *memdb.TableSchema { Name: "id", AllowMissing: false, Unique: true, - Indexer: &memdb.UUIDFieldIndex{ + Indexer: &memdb.StringFieldIndex{ Field: "ID", }, }, diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 39c46f0c4..4562fc1ea 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -164,7 +164,7 @@ type CAConfiguration struct { type CAConsulProviderState struct { ID string PrivateKey string - CARoot *CARoot + RootCert string LeafIndex uint64 RaftIndex From 887cc98d7e532026a54dd1341af039afd8950fa4 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Tue, 24 Apr 2018 16:31:42 -0700 Subject: [PATCH 183/627] Simplify the CAProvider.Sign method --- agent/connect/ca_provider.go | 4 +--- agent/consul/connect_ca_endpoint.go | 28 ++++++++++++++++++++---- agent/consul/connect_ca_provider.go | 33 ++++++++++++----------------- 3 files changed, 38 insertions(+), 27 deletions(-) diff --git a/agent/connect/ca_provider.go 
b/agent/connect/ca_provider.go index 1eb5bde11..bec028851 100644 --- a/agent/connect/ca_provider.go +++ b/agent/connect/ca_provider.go @@ -2,8 +2,6 @@ package connect import ( "crypto/x509" - - "github.com/hashicorp/consul/agent/structs" ) // CAProvider is the interface for Consul to interact with @@ -24,7 +22,7 @@ type CAProvider interface { GenerateIntermediate() (string, error) // Sign signs a leaf certificate used by Connect proxies from a CSR. - Sign(*x509.CertificateRequest) (*structs.IssuedCert, error) + Sign(*x509.CertificateRequest) (string, error) // CrossSignCA must accept a CA certificate signed by another CA's key // and cross sign it exactly as it is such that it forms a chain back the the diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index b0041423e..b952c5f87 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -254,17 +254,37 @@ func (s *ConnectCA) Sign( return err } - // todo(kyhavlov): more validation on the CSR before signing - provider := s.srv.getCAProvider() - cert, err := provider.Sign(csr) + // todo(kyhavlov): more validation on the CSR before signing + pem, err := provider.Sign(csr) + if err != nil { + return err + } + + // Parse the SPIFFE ID + spiffeId, err := connect.ParseCertURI(csr.URIs[0]) + if err != nil { + return err + } + serviceId, ok := spiffeId.(*connect.SpiffeIDService) + if !ok { + return fmt.Errorf("SPIFFE ID in CSR must be a service ID") + } + cert, err := connect.ParseCert(pem) if err != nil { return err } // Set the response - *reply = *cert + reply = &structs.IssuedCert{ + SerialNumber: connect.HexString(cert.SerialNumber.Bytes()), + CertPEM: pem, + Service: serviceId.Service, + ServiceURI: cert.URIs[0].String(), + ValidAfter: cert.NotBefore, + ValidBefore: cert.NotAfter, + } return nil } diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index 2f32a3d67..d7d76f88e 100644 --- 
a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -184,7 +184,7 @@ func (c *ConsulCAProvider) Cleanup() error { // Sign returns a new certificate valid for the given SpiffeIDService // using the current CA. -func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (*structs.IssuedCert, error) { +func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { // Lock during the signing so we don't use the same index twice // for different cert serial numbers. c.Lock() @@ -194,36 +194,36 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (*structs.IssuedCe state := c.srv.fsm.State() _, providerState, err := state.CAProviderState(c.id) if err != nil { - return nil, err + return "", err } // Create the keyId for the cert from the signing private key. signer, err := connect.ParseSigner(providerState.PrivateKey) if err != nil { - return nil, err + return "", err } if signer == nil { - return nil, fmt.Errorf("error signing cert: Consul CA not initialized yet") + return "", fmt.Errorf("error signing cert: Consul CA not initialized yet") } keyId, err := connect.KeyId(signer.Public()) if err != nil { - return nil, err + return "", err } // Parse the SPIFFE ID spiffeId, err := connect.ParseCertURI(csr.URIs[0]) if err != nil { - return nil, err + return "", err } serviceId, ok := spiffeId.(*connect.SpiffeIDService) if !ok { - return nil, fmt.Errorf("SPIFFE ID in CSR must be a service ID") + return "", fmt.Errorf("SPIFFE ID in CSR must be a service ID") } // Parse the CA cert caCert, err := connect.ParseCert(providerState.RootCert) if err != nil { - return nil, fmt.Errorf("error parsing CA cert: %s", err) + return "", fmt.Errorf("error parsing CA cert: %s", err) } // Cert template for generation @@ -257,11 +257,11 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (*structs.IssuedCe bs, err := x509.CreateCertificate( rand.Reader, &template, caCert, signer.Public(), signer) if err != nil { - return 
nil, fmt.Errorf("error generating certificate: %s", err) + return "", fmt.Errorf("error generating certificate: %s", err) } err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) if err != nil { - return nil, fmt.Errorf("error encoding private key: %s", err) + return "", fmt.Errorf("error encoding private key: %s", err) } // Increment the leaf cert index @@ -273,21 +273,14 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (*structs.IssuedCe } resp, err := c.srv.raftApply(structs.ConnectCARequestType, args) if err != nil { - return nil, err + return "", err } if respErr, ok := resp.(error); ok { - return nil, respErr + return "", respErr } // Set the response - return &structs.IssuedCert{ - SerialNumber: connect.HexString(template.SerialNumber.Bytes()), - CertPEM: buf.String(), - Service: serviceId.Service, - ServiceURI: template.URIs[0].String(), - ValidAfter: template.NotBefore, - ValidBefore: template.NotAfter, - }, nil + return buf.String(), nil } // CrossSignCA returns the given intermediate CA cert signed by the current active root. 
From 02fef5f9a2302847dbf3c6ae2510686ba07306ba Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Tue, 24 Apr 2018 17:14:30 -0700 Subject: [PATCH 184/627] Move ConsulCAProviderConfig into structs package --- agent/consul/connect_ca_provider.go | 12 +++--------- agent/consul/connect_ca_provider_test.go | 4 +--- agent/structs/connect_ca.go | 6 ++++++ 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index d7d76f88e..1dbe884c7 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -19,14 +19,8 @@ import ( "github.com/mitchellh/mapstructure" ) -type ConsulCAProviderConfig struct { - PrivateKey string - RootCert string - RotationPeriod time.Duration -} - type ConsulCAProvider struct { - config *ConsulCAProviderConfig + config *structs.ConsulCAProviderConfig id string srv *Server @@ -122,8 +116,8 @@ func NewConsulCAProvider(rawConfig map[string]interface{}, srv *Server) (*Consul return provider, nil } -func decodeConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { - var config *ConsulCAProviderConfig +func decodeConfig(raw map[string]interface{}) (*structs.ConsulCAProviderConfig, error) { + var config *structs.ConsulCAProviderConfig if err := mapstructure.WeakDecode(raw, &config); err != nil { return nil, fmt.Errorf("error decoding config: %s", err) } diff --git a/agent/consul/connect_ca_provider_test.go b/agent/consul/connect_ca_provider_test.go index adad3acba..583f91722 100644 --- a/agent/consul/connect_ca_provider_test.go +++ b/agent/consul/connect_ca_provider_test.go @@ -28,7 +28,5 @@ func TestCAProvider_Bootstrap(t *testing.T) { state := s1.fsm.State() _, activeRoot, err := state.CARootActive(nil) assert.NoError(err) - assert.Equal(root.ID, activeRoot.ID) - assert.Equal(root.Name, activeRoot.Name) - assert.Equal(root.RootCert, activeRoot.RootCert) + assert.Equal(root, activeRoot.RootCert) } diff --git 
a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 4562fc1ea..c46db703a 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -160,6 +160,12 @@ type CAConfiguration struct { RaftIndex } +type ConsulCAProviderConfig struct { + PrivateKey string + RootCert string + RotationPeriod time.Duration +} + // CAConsulProviderState is used to track the built-in Consul CA provider's state. type CAConsulProviderState struct { ID string From 21677132260a3a575a7c6e0a80af80aadb37d711 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Wed, 25 Apr 2018 11:34:08 -0700 Subject: [PATCH 185/627] Add CA config to connect section of agent config --- agent/agent.go | 18 ++++++++++++++++++ agent/config/builder.go | 11 +++++++++++ agent/config/config.go | 6 ++++-- agent/config/runtime.go | 6 ++++++ agent/config/runtime_test.go | 23 +++++++++++++++++++---- agent/consul/config.go | 3 +++ agent/consul/connect_ca_endpoint.go | 18 ++++++++++++++++++ agent/consul/connect_ca_provider.go | 4 ++-- agent/consul/leader.go | 5 +++++ 9 files changed, 86 insertions(+), 8 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 3c866bc48..8f7dd9043 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -894,6 +894,24 @@ func (a *Agent) consulConfig() (*consul.Config, error) { base.TLSCipherSuites = a.config.TLSCipherSuites base.TLSPreferServerCipherSuites = a.config.TLSPreferServerCipherSuites + // Copy the Connect CA bootstrap config + if a.config.ConnectEnabled { + base.ConnectEnabled = true + + if a.config.ConnectCAProvider != "" { + base.CAConfig.Provider = a.config.ConnectCAProvider + + // Merge with the default config if it's the consul provider. 
+ if a.config.ConnectCAProvider == "consul" { + for k, v := range a.config.ConnectCAConfig { + base.CAConfig.Config[k] = v + } + } else { + base.CAConfig.Config = a.config.ConnectCAConfig + } + } + } + // Setup the user event callback base.UserEventHandler = func(e serf.UserEvent) { select { diff --git a/agent/config/builder.go b/agent/config/builder.go index ec36e9ab0..6ad6c70b5 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -522,6 +522,14 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { // Connect proxy defaults. proxyBindMinPort, proxyBindMaxPort := b.connectProxyPortRange(c.Connect) + var connectEnabled bool + var connectCAProvider string + var connectCAConfig map[string]interface{} + if c.Connect != nil { + connectEnabled = b.boolVal(c.Connect.Enabled) + connectCAProvider = b.stringVal(c.Connect.CAProvider) + connectCAConfig = c.Connect.CAConfig + } // ---------------------------------------------------------------- // build runtime config @@ -641,8 +649,11 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { CheckUpdateInterval: b.durationVal("check_update_interval", c.CheckUpdateInterval), Checks: checks, ClientAddrs: clientAddrs, + ConnectEnabled: connectEnabled, ConnectProxyBindMinPort: proxyBindMinPort, ConnectProxyBindMaxPort: proxyBindMaxPort, + ConnectCAProvider: connectCAProvider, + ConnectCAConfig: connectCAConfig, DataDir: b.stringVal(c.DataDir), Datacenter: strings.ToLower(b.stringVal(c.Datacenter)), DevMode: b.boolVal(b.Flags.DevMode), diff --git a/agent/config/config.go b/agent/config/config.go index f652c9076..5eb231472 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -368,8 +368,10 @@ type ServiceConnectProxy struct { type Connect struct { // Enabled opts the agent into connect. It should be set on all clients and // servers in a cluster for correct connect operation. TODO(banks) review that. 
- Enabled bool `json:"enabled,omitempty" hcl:"enabled" mapstructure:"enabled"` - ProxyDefaults *ConnectProxyDefaults `json:"proxy_defaults,omitempty" hcl:"proxy_defaults" mapstructure:"proxy_defaults"` + Enabled *bool `json:"enabled,omitempty" hcl:"enabled" mapstructure:"enabled"` + ProxyDefaults *ConnectProxyDefaults `json:"proxy_defaults,omitempty" hcl:"proxy_defaults" mapstructure:"proxy_defaults"` + CAProvider *string `json:"ca_provider,omitempty" hcl:"ca_provider" mapstructure:"ca_provider"` + CAConfig map[string]interface{} `json:"ca_config,omitempty" hcl:"ca_config" mapstructure:"ca_config"` } // ConnectProxyDefaults is the agent-global connect proxy configuration. diff --git a/agent/config/runtime.go b/agent/config/runtime.go index b31630d27..15a7ac2ba 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -647,6 +647,12 @@ type RuntimeConfig struct { // registration time to allow global control of defaults. ConnectProxyDefaultConfig map[string]interface{} + // ConnectCAProvider is the type of CA provider to use with Connect. + ConnectCAProvider string + + // ConnectCAConfig is the config to use for the CA provider. + ConnectCAConfig map[string]interface{} + // DNSAddrs contains the list of TCP and UDP addresses the DNS server will // bind to. If the DNS endpoint is disabled (ports.dns <= 0) the list is // empty. 
diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 773b7a036..1db5ab207 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -2354,6 +2354,11 @@ func TestFullConfig(t *testing.T) { "check_update_interval": "16507s", "client_addr": "93.83.18.19", "connect": { + "ca_provider": "b8j4ynx9", + "ca_config": { + "g4cvJyys": "IRLXE9Ds", + "hyMy9Oxn": "XeBp4Sis" + }, "enabled": true, "proxy_defaults": { "bind_min_port": 2000, @@ -2811,6 +2816,11 @@ func TestFullConfig(t *testing.T) { check_update_interval = "16507s" client_addr = "93.83.18.19" connect { + ca_provider = "b8j4ynx9" + ca_config { + "g4cvJyys" = "IRLXE9Ds" + "hyMy9Oxn" = "XeBp4Sis" + } enabled = true proxy_defaults { bind_min_port = 2000 @@ -3403,10 +3413,15 @@ func TestFullConfig(t *testing.T) { DeregisterCriticalServiceAfter: 13209 * time.Second, }, }, - CheckUpdateInterval: 16507 * time.Second, - ClientAddrs: []*net.IPAddr{ipAddr("93.83.18.19")}, - ConnectProxyBindMinPort: 2000, - ConnectProxyBindMaxPort: 3000, + CheckUpdateInterval: 16507 * time.Second, + ClientAddrs: []*net.IPAddr{ipAddr("93.83.18.19")}, + ConnectProxyBindMinPort: 2000, + ConnectProxyBindMaxPort: 3000, + ConnectCAProvider: "b8j4ynx9", + ConnectCAConfig: map[string]interface{}{ + "g4cvJyys": "IRLXE9Ds", + "hyMy9Oxn": "XeBp4Sis", + }, DNSAddrs: []net.Addr{tcpAddr("93.95.95.81:7001"), udpAddr("93.95.95.81:7001")}, DNSARecordLimit: 29907, DNSAllowStale: true, diff --git a/agent/consul/config.go b/agent/consul/config.go index df4e55e42..94c8bc06a 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -348,6 +348,9 @@ type Config struct { // dead servers. AutopilotInterval time.Duration + // ConnectEnabled is whether to enable Connect features such as the CA. + ConnectEnabled bool + // CAConfig is used to apply the initial Connect CA configuration when // bootstrapping. 
CAConfig *structs.CAConfiguration diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index b952c5f87..f52c9218e 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -1,6 +1,7 @@ package consul import ( + "errors" "fmt" "reflect" @@ -11,6 +12,8 @@ import ( "github.com/hashicorp/go-memdb" ) +var ErrConnectNotEnabled = errors.New("Connect must be enabled in order to use this endpoint") + // ConnectCA manages the Connect CA. type ConnectCA struct { // srv is a pointer back to the server. @@ -21,6 +24,11 @@ type ConnectCA struct { func (s *ConnectCA) ConfigurationGet( args *structs.DCSpecificRequest, reply *structs.CAConfiguration) error { + // Exit early if Connect hasn't been enabled. + if !s.srv.config.ConnectEnabled { + return ErrConnectNotEnabled + } + if done, err := s.srv.forward("ConnectCA.ConfigurationGet", args, args, reply); done { return err } @@ -48,6 +56,11 @@ func (s *ConnectCA) ConfigurationGet( func (s *ConnectCA) ConfigurationSet( args *structs.CARequest, reply *interface{}) error { + // Exit early if Connect hasn't been enabled. + if !s.srv.config.ConnectEnabled { + return ErrConnectNotEnabled + } + if done, err := s.srv.forward("ConnectCA.ConfigurationSet", args, args, reply); done { return err } @@ -244,6 +257,11 @@ func (s *ConnectCA) Roots( func (s *ConnectCA) Sign( args *structs.CASignRequest, reply *structs.IssuedCert) error { + // Exit early if Connect hasn't been enabled. 
+ if !s.srv.config.ConnectEnabled { + return ErrConnectNotEnabled + } + if done, err := s.srv.forward("ConnectCA.Sign", args, args, reply); done { return err } diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index 1dbe884c7..f9321138b 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -291,7 +291,7 @@ func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { privKey, err := connect.ParseSigner(providerState.PrivateKey) if err != nil { - return "", fmt.Errorf("error parsing private key %q: %v", providerState.PrivateKey, err) + return "", fmt.Errorf("error parsing private key %q: %s", providerState.PrivateKey, err) } rootCA, err := connect.ParseCert(providerState.RootCert) @@ -363,7 +363,7 @@ func (c *ConsulCAProvider) generateCA(privateKey string, sn uint64) (string, err privKey, err := connect.ParseSigner(privateKey) if err != nil { - return "", fmt.Errorf("error parsing private key %q: %v", privateKey, err) + return "", fmt.Errorf("error parsing private key %q: %s", privateKey, err) } name := fmt.Sprintf("Consul CA %d", sn) diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 1670add29..d9c3a83ea 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -402,6 +402,11 @@ func (s *Server) initializeCAConfig() (*structs.CAConfiguration, error) { // initializeCA sets up the CA provider when gaining leadership, bootstrapping // the root in the state store if necessary. func (s *Server) initializeCA() error { + // Bail if connect isn't enabled. 
+ if !s.config.ConnectEnabled { + return nil + } + conf, err := s.initializeCAConfig() if err != nil { return err From 216e74b4ad0700b94524917998339cb24acce6ce Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 5 Apr 2018 11:45:53 +0100 Subject: [PATCH 186/627] Connect verification and AuthZ --- connect/service.go | 8 +- connect/testing.go | 9 ++- connect/tls.go | 135 +++++++++++++++++++++++++++---- connect/tls_test.go | 188 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 318 insertions(+), 22 deletions(-) diff --git a/connect/service.go b/connect/service.go index 6bbda0807..51bad44f6 100644 --- a/connect/service.go +++ b/connect/service.go @@ -74,8 +74,8 @@ func NewServiceWithLogger(serviceID string, client *api.Client, client: client, logger: logger, } - s.serverTLSCfg = newReloadableTLSConfig(defaultTLSConfig(serverVerifyCerts)) - s.clientTLSCfg = newReloadableTLSConfig(defaultTLSConfig(clientVerifyCerts)) + s.serverTLSCfg = newReloadableTLSConfig(defaultTLSConfig(newServerSideVerifier(client, serviceID))) + s.clientTLSCfg = newReloadableTLSConfig(defaultTLSConfig(clientSideVerifier)) // TODO(banks) run the background certificate sync return s, nil @@ -97,9 +97,9 @@ func NewDevServiceFromCertFiles(serviceID string, client *api.Client, // Note that newReloadableTLSConfig makes a copy so we can re-use the same // base for both client and server with swapped verifiers. 
- tlsCfg.VerifyPeerCertificate = serverVerifyCerts + setVerifier(tlsCfg, newServerSideVerifier(client, serviceID)) s.serverTLSCfg = newReloadableTLSConfig(tlsCfg) - tlsCfg.VerifyPeerCertificate = clientVerifyCerts + setVerifier(tlsCfg, clientSideVerifier) s.clientTLSCfg = newReloadableTLSConfig(tlsCfg) return s, nil } diff --git a/connect/testing.go b/connect/testing.go index f9a6a4850..c23b83701 100644 --- a/connect/testing.go +++ b/connect/testing.go @@ -26,10 +26,11 @@ func TestService(t testing.T, service string, ca *structs.CARoot) *Service { t.Fatal(err) } + // verify server without AuthZ call svc.serverTLSCfg = newReloadableTLSConfig( - TestTLSConfigWithVerifier(t, service, ca, serverVerifyCerts)) + TestTLSConfigWithVerifier(t, service, ca, newServerSideVerifier(nil, service))) svc.clientTLSCfg = newReloadableTLSConfig( - TestTLSConfigWithVerifier(t, service, ca, clientVerifyCerts)) + TestTLSConfigWithVerifier(t, service, ca, clientSideVerifier)) return svc } @@ -43,9 +44,9 @@ func TestTLSConfig(t testing.T, service string, ca *structs.CARoot) *tls.Config } // TestTLSConfigWithVerifier returns a *tls.Config suitable for use during -// tests, it will use the given verifyFunc to verify tls certificates. +// tests, it will use the given verifierFunc to verify tls certificates. func TestTLSConfigWithVerifier(t testing.T, service string, ca *structs.CARoot, - verifier verifyFunc) *tls.Config { + verifier verifierFunc) *tls.Config { t.Helper() cfg := defaultTLSConfig(verifier) diff --git a/connect/tls.go b/connect/tls.go index 89d5ccb54..d23b49396 100644 --- a/connect/tls.go +++ b/connect/tls.go @@ -5,17 +5,23 @@ import ( "crypto/x509" "errors" "io/ioutil" + "log" "sync" "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/api" ) -// verifyFunc is the type of tls.Config.VerifyPeerCertificate for convenience. 
-type verifyFunc func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error +// verifierFunc is a function that can accept rawCertificate bytes from a peer +// and verify them against a given tls.Config. It's called from the +// tls.Config.VerifyPeerCertificate hook. We don't pass verifiedChains since +// that is always nil in our usage. Implementations can use the roots provided +// in the cfg to verify the certs. +type verifierFunc func(cfg *tls.Config, rawCerts [][]byte) error // defaultTLSConfig returns the standard config. -func defaultTLSConfig(verify verifyFunc) *tls.Config { - return &tls.Config{ +func defaultTLSConfig(v verifierFunc) *tls.Config { + cfg := &tls.Config{ MinVersion: tls.VersionTLS12, ClientAuth: tls.RequireAndVerifyClientCert, // We don't have access to go internals that decide if AES hardware @@ -34,12 +40,23 @@ func defaultTLSConfig(verify verifyFunc) *tls.Config { // We have to set this since otherwise Go will attempt to verify DNS names // match DNS SAN/CN which we don't want. We hook up VerifyPeerCertificate to // do our own path validation as well as Connect AuthZ. - InsecureSkipVerify: true, - VerifyPeerCertificate: verify, + InsecureSkipVerify: true, // Include h2 to allow connect http servers to automatically support http2. // See: https://github.com/golang/go/blob/917c33fe8672116b04848cf11545296789cafd3b/src/net/http/server.go#L2724-L2731 NextProtos: []string{"h2"}, } + setVerifier(cfg, v) + return cfg +} + +// setVerifier takes a *tls.Config and set's it's VerifyPeerCertificates hook to +// use the passed verifierFunc. 
+func setVerifier(cfg *tls.Config, v verifierFunc) { + if v != nil { + cfg.VerifyPeerCertificate = func(rawCerts [][]byte, chains [][]*x509.Certificate) error { + return v(cfg, rawCerts) + } + } } // reloadableTLSConfig exposes a tls.Config that can have it's certificates @@ -147,14 +164,104 @@ func verifyServerCertMatchesURI(certs []*x509.Certificate, return errors.New("peer certificate mismatch") } -// serverVerifyCerts is the verifyFunc for use on Connect servers. -func serverVerifyCerts(rawCerts [][]byte, chains [][]*x509.Certificate) error { - // TODO(banks): implement me - return nil +// newServerSideVerifier returns a verifierFunc that wraps the provided +// api.Client to verify the TLS chain and perform AuthZ for the server end of +// the connection. The service name provided is used as the target serviceID +// for the Authorization. +func newServerSideVerifier(client *api.Client, serviceID string) verifierFunc { + return func(tlsCfg *tls.Config, rawCerts [][]byte) error { + leaf, err := verifyChain(tlsCfg, rawCerts, false) + if err != nil { + return err + } + + // Check leaf is a cert we understand + if len(leaf.URIs) < 1 { + return errors.New("connect: invalid leaf certificate") + } + + certURI, err := connect.ParseCertURI(leaf.URIs[0]) + if err != nil { + return errors.New("connect: invalid leaf certificate URI") + } + + // No AuthZ if there is no client. + if client == nil { + return nil + } + + // Perform AuthZ + req := &api.AgentAuthorizeParams{ + // TODO(banks): this is jank, we have a serviceID from the Service setup + // but this needs to be a service name as the target. For now we are + // relying on them usually being the same but this will break when they + // are not. We either need to make Authorize endpoint optionally accept + // IDs somehow or rethink this as it will require fetching the service + // name sometime ahead of accepting requests (maybe along with TLS certs?) 
+ // which feels gross and will take extra plumbing to expose it to here. + Target: serviceID, + ClientCertURI: certURI.URI().String(), + ClientCertSerial: connect.HexString(leaf.SerialNumber.Bytes()), + } + resp, err := client.Agent().ConnectAuthorize(req) + if err != nil { + return errors.New("connect: authz call failed: " + err.Error()) + } + if !resp.Authorized { + return errors.New("connect: authz denied: " + resp.Reason) + } + log.Println("[DEBUG] authz result", resp) + return nil + } } -// clientVerifyCerts is the verifyFunc for use on Connect clients. -func clientVerifyCerts(rawCerts [][]byte, chains [][]*x509.Certificate) error { - // TODO(banks): implement me - return nil +// clientSideVerifier is a verifierFunc that performs verification of certificates +// on the client end of the connection. For now it is just basic TLS +// verification since the identity check needs additional state and becomes +// clunky to customise the callback for every outgoing request. That is done +// within Service.Dial for now. +func clientSideVerifier(tlsCfg *tls.Config, rawCerts [][]byte) error { + _, err := verifyChain(tlsCfg, rawCerts, true) + return err +} + +// verifyChain performs standard TLS verification without enforcing remote +// hostname matching. +func verifyChain(tlsCfg *tls.Config, rawCerts [][]byte, client bool) (*x509.Certificate, error) { + + // Fetch leaf and intermediates. This is based on code form tls handshake. 
+ if len(rawCerts) < 1 { + return nil, errors.New("tls: no certificates from peer") + } + certs := make([]*x509.Certificate, len(rawCerts)) + for i, asn1Data := range rawCerts { + cert, err := x509.ParseCertificate(asn1Data) + if err != nil { + return nil, errors.New("tls: failed to parse certificate from peer: " + err.Error()) + } + certs[i] = cert + } + + cas := tlsCfg.RootCAs + if client { + cas = tlsCfg.ClientCAs + } + + opts := x509.VerifyOptions{ + Roots: cas, + Intermediates: x509.NewCertPool(), + } + if !client { + // Server side only sets KeyUsages in tls. This defaults to ServerAuth in + // x509 lib. See + // https://github.com/golang/go/blob/ee7dd810f9ca4e63ecfc1d3044869591783b8b74/src/crypto/x509/verify.go#L866-L868 + opts.KeyUsages = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} + } + + // All but the first cert are intermediates + for _, cert := range certs[1:] { + opts.Intermediates.AddCert(cert) + } + _, err := certs[0].Verify(opts) + return certs[0], err } diff --git a/connect/tls_test.go b/connect/tls_test.go index d13b78661..82b89440f 100644 --- a/connect/tls_test.go +++ b/connect/tls_test.go @@ -1,10 +1,14 @@ package connect import ( + "crypto/tls" "crypto/x509" + "encoding/pem" "testing" + "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/api" "github.com/stretchr/testify/require" ) @@ -100,3 +104,187 @@ func Test_verifyServerCertMatchesURI(t *testing.T) { }) } } + +func testCertPEMBlock(t *testing.T, pemValue string) []byte { + t.Helper() + // The _ result below is not an error but the remaining PEM bytes. 
+ block, _ := pem.Decode([]byte(pemValue)) + require.NotNil(t, block) + require.Equal(t, "CERTIFICATE", block.Type) + return block.Bytes +} + +func TestClientSideVerifier(t *testing.T) { + ca1 := connect.TestCA(t, nil) + ca2 := connect.TestCA(t, ca1) + + webCA1PEM, _ := connect.TestLeaf(t, "web", ca1) + webCA2PEM, _ := connect.TestLeaf(t, "web", ca2) + + webCA1 := testCertPEMBlock(t, webCA1PEM) + xcCA2 := testCertPEMBlock(t, ca2.SigningCert) + webCA2 := testCertPEMBlock(t, webCA2PEM) + + tests := []struct { + name string + tlsCfg *tls.Config + rawCerts [][]byte + wantErr string + }{ + { + name: "ok service ca1", + tlsCfg: TestTLSConfig(t, "web", ca1), + rawCerts: [][]byte{webCA1}, + wantErr: "", + }, + { + name: "untrusted CA", + tlsCfg: TestTLSConfig(t, "web", ca2), // only trust ca2 + rawCerts: [][]byte{webCA1}, // present ca1 + wantErr: "unknown authority", + }, + { + name: "cross signed intermediate", + tlsCfg: TestTLSConfig(t, "web", ca1), // only trust ca1 + rawCerts: [][]byte{webCA2, xcCA2}, // present ca2 signed cert, and xc + wantErr: "", + }, + { + name: "cross signed without intermediate", + tlsCfg: TestTLSConfig(t, "web", ca1), // only trust ca1 + rawCerts: [][]byte{webCA2}, // present ca2 signed cert only + wantErr: "unknown authority", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + err := clientSideVerifier(tt.tlsCfg, tt.rawCerts) + if tt.wantErr == "" { + require.Nil(err) + } else { + require.NotNil(err) + require.Contains(err.Error(), tt.wantErr) + } + }) + } +} + +func TestServerSideVerifier(t *testing.T) { + ca1 := connect.TestCA(t, nil) + ca2 := connect.TestCA(t, ca1) + + webCA1PEM, _ := connect.TestLeaf(t, "web", ca1) + webCA2PEM, _ := connect.TestLeaf(t, "web", ca2) + + apiCA1PEM, _ := connect.TestLeaf(t, "api", ca1) + apiCA2PEM, _ := connect.TestLeaf(t, "api", ca2) + + webCA1 := testCertPEMBlock(t, webCA1PEM) + xcCA2 := testCertPEMBlock(t, ca2.SigningCert) + webCA2 := 
testCertPEMBlock(t, webCA2PEM) + + apiCA1 := testCertPEMBlock(t, apiCA1PEM) + apiCA2 := testCertPEMBlock(t, apiCA2PEM) + + // Setup a local test agent to query + agent := agent.NewTestAgent("test-consul", "") + defer agent.Shutdown() + + cfg := api.DefaultConfig() + cfg.Address = agent.HTTPAddr() + client, err := api.NewClient(cfg) + require.Nil(t, err) + + // Setup intentions to validate against. We actually default to allow so first + // setup a blanket deny rule for db, then only allow web. + connect := client.Connect() + ixn := &api.Intention{ + SourceNS: "default", + SourceName: "*", + DestinationNS: "default", + DestinationName: "db", + Action: api.IntentionActionDeny, + SourceType: api.IntentionSourceConsul, + Meta: map[string]string{}, + } + id, _, err := connect.IntentionCreate(ixn, nil) + require.Nil(t, err) + require.NotEmpty(t, id) + + ixn = &api.Intention{ + SourceNS: "default", + SourceName: "web", + DestinationNS: "default", + DestinationName: "db", + Action: api.IntentionActionAllow, + SourceType: api.IntentionSourceConsul, + Meta: map[string]string{}, + } + id, _, err = connect.IntentionCreate(ixn, nil) + require.Nil(t, err) + require.NotEmpty(t, id) + + tests := []struct { + name string + service string + tlsCfg *tls.Config + rawCerts [][]byte + wantErr string + }{ + { + name: "ok service ca1, allow", + service: "db", + tlsCfg: TestTLSConfig(t, "db", ca1), + rawCerts: [][]byte{webCA1}, + wantErr: "", + }, + { + name: "untrusted CA", + service: "db", + tlsCfg: TestTLSConfig(t, "db", ca2), // only trust ca2 + rawCerts: [][]byte{webCA1}, // present ca1 + wantErr: "unknown authority", + }, + { + name: "cross signed intermediate, allow", + service: "db", + tlsCfg: TestTLSConfig(t, "db", ca1), // only trust ca1 + rawCerts: [][]byte{webCA2, xcCA2}, // present ca2 signed cert, and xc + wantErr: "", + }, + { + name: "cross signed without intermediate", + service: "db", + tlsCfg: TestTLSConfig(t, "db", ca1), // only trust ca1 + rawCerts: [][]byte{webCA2}, 
// present ca2 signed cert only + wantErr: "unknown authority", + }, + { + name: "ok service ca1, deny", + service: "db", + tlsCfg: TestTLSConfig(t, "db", ca1), + rawCerts: [][]byte{apiCA1}, + wantErr: "denied", + }, + { + name: "cross signed intermediate, deny", + service: "db", + tlsCfg: TestTLSConfig(t, "db", ca1), // only trust ca1 + rawCerts: [][]byte{apiCA2, xcCA2}, // present ca2 signed cert, and xc + wantErr: "denied", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := newServerSideVerifier(client, tt.service) + err := v(tt.tlsCfg, tt.rawCerts) + if tt.wantErr == "" { + require.Nil(t, err) + } else { + require.NotNil(t, err) + require.Contains(t, err.Error(), tt.wantErr) + } + }) + } +} From 53dc914d21ab2d321fd4ea40c26a73fb30622935 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 5 Apr 2018 20:30:19 +0100 Subject: [PATCH 187/627] Refactor reloadableTLSConfig and verifyier shenanigans into simpler dynamicTLSConfig --- connect/service.go | 22 ++---- connect/testing.go | 22 ++---- connect/tls.go | 180 +++++++++++++++++++++++++++----------------- connect/tls_test.go | 164 ++++++++++++++++++++++++++++------------ 4 files changed, 238 insertions(+), 150 deletions(-) diff --git a/connect/service.go b/connect/service.go index 51bad44f6..f9d6591c2 100644 --- a/connect/service.go +++ b/connect/service.go @@ -42,11 +42,8 @@ type Service struct { // connections will fail to verify. client *api.Client - // serverTLSCfg is the (reloadable) TLS config we use for serving. - serverTLSCfg *reloadableTLSConfig - - // clientTLSCfg is the (reloadable) TLS config we use for dialling. - clientTLSCfg *reloadableTLSConfig + // tlsCfg is the dynamic TLS config + tlsCfg *dynamicTLSConfig // httpResolverFromAddr is a function that returns a Resolver from a string // address for HTTP clients. 
It's privately pluggable to make testing easier @@ -73,9 +70,8 @@ func NewServiceWithLogger(serviceID string, client *api.Client, serviceID: serviceID, client: client, logger: logger, + tlsCfg: newDynamicTLSConfig(defaultTLSConfig()), } - s.serverTLSCfg = newReloadableTLSConfig(defaultTLSConfig(newServerSideVerifier(client, serviceID))) - s.clientTLSCfg = newReloadableTLSConfig(defaultTLSConfig(clientSideVerifier)) // TODO(banks) run the background certificate sync return s, nil @@ -94,13 +90,7 @@ func NewDevServiceFromCertFiles(serviceID string, client *api.Client, if err != nil { return nil, err } - - // Note that newReloadableTLSConfig makes a copy so we can re-use the same - // base for both client and server with swapped verifiers. - setVerifier(tlsCfg, newServerSideVerifier(client, serviceID)) - s.serverTLSCfg = newReloadableTLSConfig(tlsCfg) - setVerifier(tlsCfg, clientSideVerifier) - s.clientTLSCfg = newReloadableTLSConfig(tlsCfg) + s.tlsCfg = newDynamicTLSConfig(tlsCfg) return s, nil } @@ -115,7 +105,7 @@ func NewDevServiceFromCertFiles(serviceID string, client *api.Client, // error during renewal. The listener will be able to accept connections again // once connectivity is restored provided the client's Token is valid. func (s *Service) ServerTLSConfig() *tls.Config { - return s.serverTLSCfg.TLSConfig() + return s.tlsCfg.Get(newServerSideVerifier(s.client, s.serviceID)) } // Dial connects to a remote Connect-enabled server. The passed Resolver is used @@ -138,7 +128,7 @@ func (s *Service) Dial(ctx context.Context, resolver Resolver) (net.Conn, error) return nil, err } - tlsConn := tls.Client(tcpConn, s.clientTLSCfg.TLSConfig()) + tlsConn := tls.Client(tcpConn, s.tlsCfg.Get(clientSideVerifier)) // Set deadline for Handshake to complete. 
deadline, ok := ctx.Deadline() if ok { diff --git a/connect/testing.go b/connect/testing.go index c23b83701..491036aaf 100644 --- a/connect/testing.go +++ b/connect/testing.go @@ -20,17 +20,15 @@ import ( func TestService(t testing.T, service string, ca *structs.CARoot) *Service { t.Helper() - // Don't need to talk to client since we are setting TLSConfig locally + // Don't need to talk to client since we are setting TLSConfig locally. This + // will cause server verification to skip AuthZ too. svc, err := NewService(service, nil) if err != nil { t.Fatal(err) } - // verify server without AuthZ call - svc.serverTLSCfg = newReloadableTLSConfig( - TestTLSConfigWithVerifier(t, service, ca, newServerSideVerifier(nil, service))) - svc.clientTLSCfg = newReloadableTLSConfig( - TestTLSConfigWithVerifier(t, service, ca, clientSideVerifier)) + // Override the tlsConfig hackily. + svc.tlsCfg = newDynamicTLSConfig(TestTLSConfig(t, service, ca)) return svc } @@ -39,17 +37,7 @@ func TestService(t testing.T, service string, ca *structs.CARoot) *Service { func TestTLSConfig(t testing.T, service string, ca *structs.CARoot) *tls.Config { t.Helper() - // Insecure default (nil verifier) - return TestTLSConfigWithVerifier(t, service, ca, nil) -} - -// TestTLSConfigWithVerifier returns a *tls.Config suitable for use during -// tests, it will use the given verifierFunc to verify tls certificates. 
-func TestTLSConfigWithVerifier(t testing.T, service string, ca *structs.CARoot, - verifier verifierFunc) *tls.Config { - t.Helper() - - cfg := defaultTLSConfig(verifier) + cfg := defaultTLSConfig() cfg.Certificates = []tls.Certificate{TestSvcKeyPair(t, service, ca)} cfg.RootCAs = TestCAPool(t, ca) cfg.ClientCAs = TestCAPool(t, ca) diff --git a/connect/tls.go b/connect/tls.go index d23b49396..f5cb95a75 100644 --- a/connect/tls.go +++ b/connect/tls.go @@ -5,7 +5,6 @@ import ( "crypto/x509" "errors" "io/ioutil" - "log" "sync" "github.com/hashicorp/consul/agent/connect" @@ -14,13 +13,18 @@ import ( // verifierFunc is a function that can accept rawCertificate bytes from a peer // and verify them against a given tls.Config. It's called from the -// tls.Config.VerifyPeerCertificate hook. We don't pass verifiedChains since -// that is always nil in our usage. Implementations can use the roots provided -// in the cfg to verify the certs. +// tls.Config.VerifyPeerCertificate hook. +// +// We don't pass verifiedChains since that is always nil in our usage. +// Implementations can use the roots provided in the cfg to verify the certs. +// +// The passed *tls.Config may have a nil VerifyPeerCertificates function but +// will have correct roots, leaf and other fields. type verifierFunc func(cfg *tls.Config, rawCerts [][]byte) error -// defaultTLSConfig returns the standard config. -func defaultTLSConfig(v verifierFunc) *tls.Config { +// defaultTLSConfig returns the standard config with no peer verifier. It is +// insecure to use it as-is. 
+func defaultTLSConfig() *tls.Config { cfg := &tls.Config{ MinVersion: tls.VersionTLS12, ClientAuth: tls.RequireAndVerifyClientCert, @@ -45,70 +49,11 @@ func defaultTLSConfig(v verifierFunc) *tls.Config { // See: https://github.com/golang/go/blob/917c33fe8672116b04848cf11545296789cafd3b/src/net/http/server.go#L2724-L2731 NextProtos: []string{"h2"}, } - setVerifier(cfg, v) return cfg } -// setVerifier takes a *tls.Config and set's it's VerifyPeerCertificates hook to -// use the passed verifierFunc. -func setVerifier(cfg *tls.Config, v verifierFunc) { - if v != nil { - cfg.VerifyPeerCertificate = func(rawCerts [][]byte, chains [][]*x509.Certificate) error { - return v(cfg, rawCerts) - } - } -} - -// reloadableTLSConfig exposes a tls.Config that can have it's certificates -// reloaded. On a server, this uses GetConfigForClient to pass the current -// tls.Config or client certificate for each acceptted connection. On a client, -// this uses GetClientCertificate to provide the current client certificate. -type reloadableTLSConfig struct { - mu sync.Mutex - - // cfg is the current config to use for new connections - cfg *tls.Config -} - -// newReloadableTLSConfig returns a reloadable config currently set to base. -func newReloadableTLSConfig(base *tls.Config) *reloadableTLSConfig { - c := &reloadableTLSConfig{} - c.SetTLSConfig(base) - return c -} - -// TLSConfig returns a *tls.Config that will dynamically load certs. It's -// suitable for use in either a client or server. -func (c *reloadableTLSConfig) TLSConfig() *tls.Config { - c.mu.Lock() - cfgCopy := c.cfg - c.mu.Unlock() - return cfgCopy -} - -// SetTLSConfig sets the config used for future connections. It is safe to call -// from any goroutine. 
-func (c *reloadableTLSConfig) SetTLSConfig(cfg *tls.Config) error { - copy := cfg.Clone() - copy.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { - current := c.TLSConfig() - if len(current.Certificates) < 1 { - return nil, errors.New("tls: no certificates configured") - } - return ¤t.Certificates[0], nil - } - copy.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { - return c.TLSConfig(), nil - } - - c.mu.Lock() - defer c.mu.Unlock() - c.cfg = copy - return nil -} - // devTLSConfigFromFiles returns a default TLS Config but with certs and CAs -// based on local files for dev. +// based on local files for dev. No verification is setup. func devTLSConfigFromFiles(caFile, certFile, keyFile string) (*tls.Config, error) { @@ -126,9 +71,7 @@ func devTLSConfigFromFiles(caFile, certFile, return nil, err } - // Insecure no verification - cfg := defaultTLSConfig(nil) - + cfg := defaultTLSConfig() cfg.Certificates = []tls.Certificate{cert} cfg.RootCAs = roots cfg.ClientCAs = roots @@ -210,7 +153,6 @@ func newServerSideVerifier(client *api.Client, serviceID string) verifierFunc { if !resp.Authorized { return errors.New("connect: authz denied: " + resp.Reason) } - log.Println("[DEBUG] authz result", resp) return nil } } @@ -265,3 +207,101 @@ func verifyChain(tlsCfg *tls.Config, rawCerts [][]byte, client bool) (*x509.Cert _, err := certs[0].Verify(opts) return certs[0], err } + +// dynamicTLSConfig represents the state for returning a tls.Config that can +// have root and leaf certificates updated dynamically with all existing clients +// and servers automatically picking up the changes. It requires initialising +// with a valid base config from which all the non-certificate and verification +// params are used. The base config passed should not be modified externally as +// it is assumed to be serialised by the embedded mutex. 
+type dynamicTLSConfig struct { + base *tls.Config + + sync.Mutex + leaf *tls.Certificate + roots *x509.CertPool +} + +// newDynamicTLSConfig returns a dynamicTLSConfig constructed from base. +// base.Certificates[0] is used as the initial leaf and base.RootCAs is used as +// the initial roots. +func newDynamicTLSConfig(base *tls.Config) *dynamicTLSConfig { + cfg := &dynamicTLSConfig{ + base: base, + } + if len(base.Certificates) > 0 { + cfg.leaf = &base.Certificates[0] + } + if base.RootCAs != nil { + cfg.roots = base.RootCAs + } + return cfg +} + +// Get fetches the lastest tls.Config with all the hooks attached to keep it +// loading the most recent roots and certs even after future changes to cfg. +// +// The verifierFunc passed will be attached to the config returned such that it +// runs with the _latest_ config object returned passed to it. That means that a +// client can use this config for a long time and will still verify against the +// latest roots even though the roots in the struct is has can't change. +func (cfg *dynamicTLSConfig) Get(v verifierFunc) *tls.Config { + cfg.Lock() + defer cfg.Unlock() + copy := cfg.base.Clone() + copy.RootCAs = cfg.roots + copy.ClientCAs = cfg.roots + if v != nil { + copy.VerifyPeerCertificate = func(rawCerts [][]byte, chains [][]*x509.Certificate) error { + return v(cfg.Get(nil), rawCerts) + } + } + copy.GetCertificate = func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + leaf := cfg.Leaf() + if leaf == nil { + return nil, errors.New("tls: no certificates configured") + } + return leaf, nil + } + copy.GetClientCertificate = func(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) { + leaf := cfg.Leaf() + if leaf == nil { + return nil, errors.New("tls: no certificates configured") + } + return leaf, nil + } + copy.GetConfigForClient = func(*tls.ClientHelloInfo) (*tls.Config, error) { + return cfg.Get(v), nil + } + return copy +} + +// SetRoots sets new roots. 
+func (cfg *dynamicTLSConfig) SetRoots(roots *x509.CertPool) error { + cfg.Lock() + defer cfg.Unlock() + cfg.roots = roots + return nil +} + +// SetLeaf sets a new leaf. +func (cfg *dynamicTLSConfig) SetLeaf(leaf *tls.Certificate) error { + cfg.Lock() + defer cfg.Unlock() + cfg.leaf = leaf + return nil +} + +// Roots returns the current CA root CertPool. +func (cfg *dynamicTLSConfig) Roots() *x509.CertPool { + cfg.Lock() + defer cfg.Unlock() + return cfg.roots +} + +// Leaf returns the current Leaf certificate. +func (cfg *dynamicTLSConfig) Leaf() *tls.Certificate { + cfg.Lock() + defer cfg.Unlock() + return cfg.leaf +} diff --git a/connect/tls_test.go b/connect/tls_test.go index 82b89440f..aa1063f3e 100644 --- a/connect/tls_test.go +++ b/connect/tls_test.go @@ -12,53 +12,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestReloadableTLSConfig(t *testing.T) { - require := require.New(t) - base := defaultTLSConfig(nil) - - c := newReloadableTLSConfig(base) - - // The dynamic config should be the one we loaded (with some different hooks) - got := c.TLSConfig() - expect := base.Clone() - // Equal and even cmp.Diff fail on tls.Config due to unexported fields in - // each. Compare a few things to prove it's returning the bits we - // specifically set. 
- require.Equal(expect.Certificates, got.Certificates) - require.Equal(expect.RootCAs, got.RootCAs) - require.Equal(expect.ClientCAs, got.ClientCAs) - require.Equal(expect.InsecureSkipVerify, got.InsecureSkipVerify) - require.Equal(expect.MinVersion, got.MinVersion) - require.Equal(expect.CipherSuites, got.CipherSuites) - require.NotNil(got.GetClientCertificate) - require.NotNil(got.GetConfigForClient) - require.Contains(got.NextProtos, "h2") - - ca := connect.TestCA(t, nil) - - // Now change the config as if we just loaded certs from Consul - new := TestTLSConfig(t, "web", ca) - err := c.SetTLSConfig(new) - require.Nil(err) - - // Change the passed config to ensure SetTLSConfig made a copy otherwise this - // is racey. - expect = new.Clone() - new.Certificates = nil - - // The dynamic config should be the one we loaded (with some different hooks) - got = c.TLSConfig() - require.Equal(expect.Certificates, got.Certificates) - require.Equal(expect.RootCAs, got.RootCAs) - require.Equal(expect.ClientCAs, got.ClientCAs) - require.Equal(expect.InsecureSkipVerify, got.InsecureSkipVerify) - require.Equal(expect.MinVersion, got.MinVersion) - require.Equal(expect.CipherSuites, got.CipherSuites) - require.NotNil(got.GetClientCertificate) - require.NotNil(got.GetConfigForClient) - require.Contains(got.NextProtos, "h2") -} - func Test_verifyServerCertMatchesURI(t *testing.T) { ca1 := connect.TestCA(t, nil) @@ -288,3 +241,120 @@ func TestServerSideVerifier(t *testing.T) { }) } } + +// requireEqualTLSConfig compares tlsConfig fields we care about. Equal and even +// cmp.Diff fail on tls.Config due to unexported fields in each. 
expectLeaf +// allows expecting a leaf cert different from the one in expect +func requireEqualTLSConfig(t *testing.T, expect, got *tls.Config) { + require := require.New(t) + require.Equal(expect.RootCAs, got.RootCAs) + require.Equal(expect.ClientCAs, got.ClientCAs) + require.Equal(expect.InsecureSkipVerify, got.InsecureSkipVerify) + require.Equal(expect.MinVersion, got.MinVersion) + require.Equal(expect.CipherSuites, got.CipherSuites) + require.NotNil(got.GetCertificate) + require.NotNil(got.GetClientCertificate) + require.NotNil(got.GetConfigForClient) + require.Contains(got.NextProtos, "h2") + + var expectLeaf *tls.Certificate + var err error + if expect.GetCertificate != nil { + expectLeaf, err = expect.GetCertificate(nil) + require.Nil(err) + } else if len(expect.Certificates) > 0 { + expectLeaf = &expect.Certificates[0] + } + + gotLeaf, err := got.GetCertificate(nil) + require.Nil(err) + require.Equal(expectLeaf, gotLeaf) + + gotLeaf, err = got.GetClientCertificate(nil) + require.Nil(err) + require.Equal(expectLeaf, gotLeaf) +} + +// requireCorrectVerifier invokes got.VerifyPeerCertificate and expects the +// tls.Config arg to be returned on the provided channel. This ensures the +// correct verifier func was attached to got. +// +// It then ensures that the tls.Config passed to the verifierFunc was actually +// the same as the expected current value. +func requireCorrectVerifier(t *testing.T, expect, got *tls.Config, + ch chan *tls.Config) { + + err := got.VerifyPeerCertificate(nil, nil) + require.Nil(t, err) + verifierCfg := <-ch + // The tls.Cfg passed to verifyFunc should be the expected (current) value. 
+ requireEqualTLSConfig(t, expect, verifierCfg) +} + +func TestDynamicTLSConfig(t *testing.T) { + require := require.New(t) + + ca1 := connect.TestCA(t, nil) + ca2 := connect.TestCA(t, nil) + baseCfg := TestTLSConfig(t, "web", ca1) + newCfg := TestTLSConfig(t, "web", ca2) + + c := newDynamicTLSConfig(baseCfg) + + // Should set them from the base config + require.Equal(c.Leaf(), &baseCfg.Certificates[0]) + require.Equal(c.Roots(), baseCfg.RootCAs) + + // Create verifiers we can assert are set and run correctly. + v1Ch := make(chan *tls.Config, 1) + v2Ch := make(chan *tls.Config, 1) + v3Ch := make(chan *tls.Config, 1) + verify1 := func(cfg *tls.Config, rawCerts [][]byte) error { + v1Ch <- cfg + return nil + } + verify2 := func(cfg *tls.Config, rawCerts [][]byte) error { + v2Ch <- cfg + return nil + } + verify3 := func(cfg *tls.Config, rawCerts [][]byte) error { + v3Ch <- cfg + return nil + } + + // The dynamic config should be the one we loaded (with some different hooks) + gotBefore := c.Get(verify1) + requireEqualTLSConfig(t, baseCfg, gotBefore) + requireCorrectVerifier(t, baseCfg, gotBefore, v1Ch) + + // Now change the roots as if we just loaded new roots from Consul + err := c.SetRoots(newCfg.RootCAs) + require.Nil(err) + + // The dynamic config should have the new roots, but old leaf + gotAfter := c.Get(verify2) + expect := newCfg.Clone() + expect.GetCertificate = func(_ *tls.ClientHelloInfo) (*tls.Certificate, error) { + return &baseCfg.Certificates[0], nil + } + requireEqualTLSConfig(t, expect, gotAfter) + requireCorrectVerifier(t, expect, gotAfter, v2Ch) + + // The old config fetched before should still call it's own verify func, but + // that verifier should be passed the new config (expect). 
+ requireCorrectVerifier(t, expect, gotBefore, v1Ch) + + // Now change the leaf + err = c.SetLeaf(&newCfg.Certificates[0]) + require.Nil(err) + + // The dynamic config should have the new roots, AND new leaf + gotAfterLeaf := c.Get(verify3) + requireEqualTLSConfig(t, newCfg, gotAfterLeaf) + requireCorrectVerifier(t, newCfg, gotAfterLeaf, v3Ch) + + // Both older configs should still call their own verify funcs, but those + // verifiers should be passed the new config. + requireCorrectVerifier(t, newCfg, gotBefore, v1Ch) + requireCorrectVerifier(t, newCfg, gotAfter, v2Ch) +} From 6f566f750e953bcaea068b8a2a8a1301a2235767 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 5 Apr 2018 17:15:43 +0100 Subject: [PATCH 188/627] Basic `watch` support for connect proxy config and certificate endpoints. - Includes some bug fixes for previous `api` work and `agent` that weren't tested - Needed somewhat pervasive changes to support hash based blocking - some TODOs left in our watch toolchain that will explicitly fail on hash-based watches. - Integration into `connect` is partially done here but still WIP --- agent/agent_endpoint.go | 2 +- agent/agent_endpoint_test.go | 10 +- agent/http_oss.go | 1 + agent/structs/connect.go | 7 +- agent/watch_handler.go | 20 ++- agent/watch_handler_test.go | 4 +- api/agent_test.go | 29 +++-- api/api.go | 19 ++- command/watch/watch.go | 13 +- connect/service.go | 99 ++++++++++++++- watch/funcs.go | 147 +++++++++++++++++----- watch/funcs_test.go | 234 +++++++++++++++++++++++++++++++++-- watch/plan.go | 34 +++-- watch/plan_test.go | 16 ++- watch/watch.go | 75 +++++++++-- 15 files changed, 615 insertions(+), 95 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 798c370b2..d500b17ba 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -942,7 +942,7 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. // // Returns the local proxy config for the identified proxy. 
Requires token= // param with the correct local ProxyToken (not ACL token). -func (s *HTTPServer) ConnectProxyConfig(resp http.ResponseWriter, req *http.Request) (interface{}, error) { +func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Get the proxy ID. Note that this is the ID of a proxy's service instance. id := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/proxy/") diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index d6b1996dd..d5ea7305a 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2292,7 +2292,7 @@ func TestAgentConnectProxy(t *testing.T) { ProxyServiceID: "test-proxy", TargetServiceID: "test", TargetServiceName: "test", - ContentHash: "a15dccb216d38a6e", + ContentHash: "84346af2031659c9", ExecMode: "daemon", Command: "", Config: map[string]interface{}{ @@ -2310,7 +2310,7 @@ func TestAgentConnectProxy(t *testing.T) { ur, err := copystructure.Copy(expectedResponse) require.NoError(t, err) updatedResponse := ur.(*api.ConnectProxyConfig) - updatedResponse.ContentHash = "22bc9233a52c08fd" + updatedResponse.ContentHash = "7d53473b0e9db5a" upstreams := updatedResponse.Config["upstreams"].([]interface{}) upstreams = append(upstreams, map[string]interface{}{ @@ -2337,7 +2337,7 @@ func TestAgentConnectProxy(t *testing.T) { }, { name: "blocking fetch timeout, no change", - url: "/v1/agent/connect/proxy/test-proxy?hash=a15dccb216d38a6e&wait=100ms", + url: "/v1/agent/connect/proxy/test-proxy?hash=" + expectedResponse.ContentHash + "&wait=100ms", wantWait: 100 * time.Millisecond, wantCode: 200, wantErr: false, @@ -2352,7 +2352,7 @@ func TestAgentConnectProxy(t *testing.T) { }, { name: "blocking fetch returns change", - url: "/v1/agent/connect/proxy/test-proxy?hash=a15dccb216d38a6e", + url: "/v1/agent/connect/proxy/test-proxy?hash=" + expectedResponse.ContentHash, updateFunc: func() { time.Sleep(100 * time.Millisecond) // Re-register with new 
proxy config @@ -2393,7 +2393,7 @@ func TestAgentConnectProxy(t *testing.T) { go tt.updateFunc() } start := time.Now() - obj, err := a.srv.ConnectProxyConfig(resp, req) + obj, err := a.srv.AgentConnectProxyConfig(resp, req) elapsed := time.Now().Sub(start) if tt.wantErr { diff --git a/agent/http_oss.go b/agent/http_oss.go index 124a26875..d9b8068ef 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -32,6 +32,7 @@ func init() { registerEndpoint("/v1/agent/connect/authorize", []string{"POST"}, (*HTTPServer).AgentConnectAuthorize) registerEndpoint("/v1/agent/connect/ca/roots", []string{"GET"}, (*HTTPServer).AgentConnectCARoots) registerEndpoint("/v1/agent/connect/ca/leaf/", []string{"GET"}, (*HTTPServer).AgentConnectCALeafCert) + registerEndpoint("/v1/agent/connect/proxy/", []string{"GET"}, (*HTTPServer).AgentConnectProxyConfig) registerEndpoint("/v1/agent/service/register", []string{"PUT"}, (*HTTPServer).AgentRegisterService) registerEndpoint("/v1/agent/service/deregister/", []string{"PUT"}, (*HTTPServer).AgentDeregisterService) registerEndpoint("/v1/agent/service/maintenance/", []string{"PUT"}, (*HTTPServer).AgentServiceMaintenance) diff --git a/agent/structs/connect.go b/agent/structs/connect.go index 5f907c1ab..20970c1bf 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -66,8 +66,11 @@ type ConnectManagedProxy struct { // ProxyService is a pointer to the local proxy's service record for // convenience. The proxies ID and name etc. can be read from there. It may be - // nil if the agent is starting up and hasn't registered the service yet. - ProxyService *NodeService + // nil if the agent is starting up and hasn't registered the service yet. We + // ignore it when calculating the hash value since the only thing that effects + // the proxy's config is the ID of the target service which is already + // represented below. + ProxyService *NodeService `hash:"ignore"` // TargetServiceID is the ID of the target service on the localhost. 
It may // not exist yet since bootstrapping is allowed to happen in either order. diff --git a/agent/watch_handler.go b/agent/watch_handler.go index 4c6a9d3f3..27c7a430e 100644 --- a/agent/watch_handler.go +++ b/agent/watch_handler.go @@ -42,7 +42,13 @@ func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFun } logger := log.New(logOutput, "", log.LstdFlags) - fn := func(idx uint64, data interface{}) { + fn := func(blockVal watch.BlockingParam, data interface{}) { + idx, ok := blockVal.(watch.WaitIndexVal) + if !ok { + logger.Printf("[ERR] agent: watch handler doesn't support non-index watches") + return + } + // Create the command var cmd *osexec.Cmd var err error @@ -58,7 +64,7 @@ func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFun } cmd.Env = append(os.Environ(), - "CONSUL_INDEX="+strconv.FormatUint(idx, 10), + "CONSUL_INDEX="+strconv.FormatUint(uint64(idx), 10), ) // Collect the output @@ -96,7 +102,13 @@ func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFun func makeHTTPWatchHandler(logOutput io.Writer, config *watch.HttpHandlerConfig) watch.HandlerFunc { logger := log.New(logOutput, "", log.LstdFlags) - fn := func(idx uint64, data interface{}) { + fn := func(blockVal watch.BlockingParam, data interface{}) { + idx, ok := blockVal.(watch.WaitIndexVal) + if !ok { + logger.Printf("[ERR] agent: watch handler doesn't support non-index watches") + return + } + trans := cleanhttp.DefaultTransport() // Skip SSL certificate verification if TLSSkipVerify is true @@ -132,7 +144,7 @@ func makeHTTPWatchHandler(logOutput io.Writer, config *watch.HttpHandlerConfig) } req = req.WithContext(ctx) req.Header.Add("Content-Type", "application/json") - req.Header.Add("X-Consul-Index", strconv.FormatUint(idx, 10)) + req.Header.Add("X-Consul-Index", strconv.FormatUint(uint64(idx), 10)) for key, values := range config.Header { for _, val := range values { req.Header.Add(key, val) diff --git 
a/agent/watch_handler_test.go b/agent/watch_handler_test.go index f7ba83b0a..6851baf71 100644 --- a/agent/watch_handler_test.go +++ b/agent/watch_handler_test.go @@ -17,7 +17,7 @@ func TestMakeWatchHandler(t *testing.T) { defer os.Remove("handler_index_out") script := "bash -c 'echo $CONSUL_INDEX >> handler_index_out && cat >> handler_out'" handler := makeWatchHandler(os.Stderr, script) - handler(100, []string{"foo", "bar", "baz"}) + handler(watch.WaitIndexVal(100), []string{"foo", "bar", "baz"}) raw, err := ioutil.ReadFile("handler_out") if err != nil { t.Fatalf("err: %v", err) @@ -62,5 +62,5 @@ func TestMakeHTTPWatchHandler(t *testing.T) { Timeout: time.Minute, } handler := makeHTTPWatchHandler(os.Stderr, &config) - handler(100, []string{"foo", "bar", "baz"}) + handler(watch.WaitIndexVal(100), []string{"foo", "bar", "baz"}) } diff --git a/api/agent_test.go b/api/agent_test.go index 01d35ae15..8cc58e012 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -1087,20 +1087,31 @@ func TestAPI_AgentConnectProxyConfig(t *testing.T) { Name: "foo", Tags: []string{"bar", "baz"}, Port: 8000, - Check: &AgentServiceCheck{ - CheckID: "foo-ttl", - TTL: "15s", + Connect: &AgentServiceConnect{ + Proxy: &AgentServiceConnectProxy{ + Config: map[string]interface{}{ + "foo": "bar", + }, + }, }, } if err := agent.ServiceRegister(reg); err != nil { t.Fatalf("err: %v", err) } - checks, err := agent.Checks() - if err != nil { - t.Fatalf("err: %v", err) - } - if _, ok := checks["foo-ttl"]; !ok { - t.Fatalf("missing check: %v", checks) + config, qm, err := agent.ConnectProxyConfig("foo-proxy", nil) + require.NoError(t, err) + expectConfig := &ConnectProxyConfig{ + ProxyServiceID: "foo-proxy", + TargetServiceID: "foo", + TargetServiceName: "foo", + ContentHash: "e662ea8600d84cf0", + ExecMode: "daemon", + Command: "", + Config: map[string]interface{}{ + "foo": "bar", + }, } + require.Equal(t, expectConfig, config) + require.Equal(t, "e662ea8600d84cf0", qm.LastContentHash) } diff --git 
a/api/api.go b/api/api.go index 6f3034d90..6d6436637 100644 --- a/api/api.go +++ b/api/api.go @@ -175,6 +175,11 @@ type QueryMeta struct { // a blocking query LastIndex uint64 + // LastContentHash. This can be used as a WaitHash to perform a blocking query + // for endpoints that support hash-based blocking. Endpoints that do not + // support it will return an empty hash. + LastContentHash string + // Time of last contact from the leader for the // server servicing the request LastContact time.Duration @@ -733,12 +738,16 @@ func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (* func parseQueryMeta(resp *http.Response, q *QueryMeta) error { header := resp.Header - // Parse the X-Consul-Index - index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + // Parse the X-Consul-Index (if it's set - hash based blocking queries don't + // set this) + if indexStr := header.Get("X-Consul-Index"); indexStr != "" { + index, err := strconv.ParseUint(indexStr, 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index } - q.LastIndex = index + q.LastContentHash = header.Get("X-Consul-ContentHash") // Parse the X-Consul-LastContact last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) diff --git a/command/watch/watch.go b/command/watch/watch.go index 3b8c67836..2286de1cc 100644 --- a/command/watch/watch.go +++ b/command/watch/watch.go @@ -154,7 +154,7 @@ func (c *cmd) Run(args []string) int { // 1: true errExit := 0 if len(c.flags.Args()) == 0 { - wp.Handler = func(idx uint64, data interface{}) { + wp.Handler = func(blockParam consulwatch.BlockingParam, data interface{}) { defer wp.Stop() buf, err := json.MarshalIndent(data, "", " ") if err != nil { @@ -164,7 +164,14 @@ func (c *cmd) Run(args []string) int { c.UI.Output(string(buf)) } } else { - wp.Handler = func(idx uint64, data 
interface{}) { + wp.Handler = func(blockVal consulwatch.BlockingParam, data interface{}) { + idx, ok := blockVal.(consulwatch.WaitIndexVal) + if !ok { + // TODO(banks): make this work for hash based watches. + c.UI.Error("Error: watch handler doesn't support non-index watches") + return + } + doneCh := make(chan struct{}) defer close(doneCh) logFn := func(err error) { @@ -185,7 +192,7 @@ func (c *cmd) Run(args []string) int { goto ERR } cmd.Env = append(os.Environ(), - "CONSUL_INDEX="+strconv.FormatUint(idx, 10), + "CONSUL_INDEX="+strconv.FormatUint(uint64(idx), 10), ) // Encode the input diff --git a/connect/service.go b/connect/service.go index f9d6591c2..4c8887745 100644 --- a/connect/service.go +++ b/connect/service.go @@ -3,6 +3,7 @@ package connect import ( "context" "crypto/tls" + "crypto/x509" "errors" "log" "net" @@ -11,6 +12,7 @@ import ( "time" "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/watch" "golang.org/x/net/http2" ) @@ -52,6 +54,9 @@ type Service struct { // TODO(banks): write the proper implementation httpResolverFromAddr func(addr string) (Resolver, error) + rootsWatch *watch.Plan + leafWatch *watch.Plan + logger *log.Logger } @@ -73,7 +78,28 @@ func NewServiceWithLogger(serviceID string, client *api.Client, tlsCfg: newDynamicTLSConfig(defaultTLSConfig()), } - // TODO(banks) run the background certificate sync + // Set up root and leaf watches + p, err := watch.Parse(map[string]interface{}{ + "type": "connect_roots", + }) + if err != nil { + return nil, err + } + s.rootsWatch = p + s.rootsWatch.Handler = s.rootsWatchHandler + + p, err = watch.Parse(map[string]interface{}{ + "type": "connect_leaf", + }) + if err != nil { + return nil, err + } + s.leafWatch = p + s.leafWatch.Handler = s.leafWatchHandler + + //go s.rootsWatch.RunWithClientAndLogger(s.client, s.logger) + //go s.leafWatch.RunWithClientAndLogger(s.client, s.logger) + return s, nil } @@ -201,6 +227,75 @@ func (s *Service) HTTPClient() *http.Client { // Close stops 
the service and frees resources. func (s *Service) Close() error { - // TODO(banks): stop background activity if started + if s.rootsWatch != nil { + s.rootsWatch.Stop() + } + if s.leafWatch != nil { + s.leafWatch.Stop() + } return nil } + +func (s *Service) rootsWatchHandler(blockParam watch.BlockingParam, raw interface{}) { + if raw == nil { + return + } + v, ok := raw.(*api.CARootList) + if !ok || v == nil { + s.logger.Println("[ERR] got invalid response from root watch") + return + } + + // Got new root certificates, update the tls.Configs. + roots := x509.NewCertPool() + for _, root := range v.Roots { + roots.AppendCertsFromPEM([]byte(root.RootCertPEM)) + } + + // Note that SetTLSConfig takes care of adding a dynamic GetConfigForClient + // hook that will fetch this updated config for new incoming connections on a + // server. That means all future connections are validated against the new + // roots. On a client, we only expose Dial and we fetch the most recent config + // each time so all future Dials (direct or via an http.Client with our dial + // hook) will grab this new config. + newCfg := s.serverTLSCfg.TLSConfig() + // Server-side verification uses ClientCAs. + newCfg.ClientCAs = roots + s.serverTLSCfg.SetTLSConfig(newCfg) + + newCfg = s.clientTLSCfg.TLSConfig() + // Client-side verification uses RootCAs. 
+ newCfg.RootCAs = roots + s.clientTLSCfg.SetTLSConfig(newCfg) +} + +func (s *Service) leafWatchHandler(blockParam watch.BlockingParam, raw interface{}) { + if raw == nil { + return // ignore + } + v, ok := raw.(*api.LeafCert) + if !ok || v == nil { + s.logger.Println("[ERR] got invalid response from root watch") + return + } + + // Got new leaf, update the tls.Configs + cert, err := tls.X509KeyPair([]byte(v.CertPEM), []byte(v.PrivateKeyPEM)) + if err != nil { + s.logger.Printf("[ERR] failed to parse new leaf cert: %s", err) + return + } + + // Note that SetTLSConfig takes care of adding a dynamic GetClientCertificate + // hook that will fetch the first cert from the Certificates slice of the + // current config for each outbound client request even if the client is using + // an old version of the config struct so all we need to do it set that and + // all existing clients will start using the new cert. + newCfg := s.serverTLSCfg.TLSConfig() + newCfg.Certificates = []tls.Certificate{cert} + s.serverTLSCfg.SetTLSConfig(newCfg) + + newCfg = s.clientTLSCfg.TLSConfig() + newCfg.Certificates = []tls.Certificate{cert} + s.clientTLSCfg.SetTLSConfig(newCfg) +} diff --git a/watch/funcs.go b/watch/funcs.go index 20265decc..8c5823633 100644 --- a/watch/funcs.go +++ b/watch/funcs.go @@ -3,6 +3,7 @@ package watch import ( "context" "fmt" + "log" consulapi "github.com/hashicorp/consul/api" ) @@ -16,13 +17,16 @@ var watchFuncFactory map[string]watchFactory func init() { watchFuncFactory = map[string]watchFactory{ - "key": keyWatch, - "keyprefix": keyPrefixWatch, - "services": servicesWatch, - "nodes": nodesWatch, - "service": serviceWatch, - "checks": checksWatch, - "event": eventWatch, + "key": keyWatch, + "keyprefix": keyPrefixWatch, + "services": servicesWatch, + "nodes": nodesWatch, + "service": serviceWatch, + "checks": checksWatch, + "event": eventWatch, + "connect_roots": connectRootsWatch, + "connect_leaf": connectLeafWatch, + "connect_proxy_config": 
connectProxyConfigWatch, } } @@ -40,18 +44,18 @@ func keyWatch(params map[string]interface{}) (WatcherFunc, error) { if key == "" { return nil, fmt.Errorf("Must specify a single key to watch") } - fn := func(p *Plan) (uint64, interface{}, error) { + fn := func(p *Plan) (BlockingParam, interface{}, error) { kv := p.client.KV() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() pair, meta, err := kv.Get(key, &opts) if err != nil { - return 0, nil, err + return nil, nil, err } if pair == nil { - return meta.LastIndex, nil, err + return WaitIndexVal(meta.LastIndex), nil, err } - return meta.LastIndex, pair, err + return WaitIndexVal(meta.LastIndex), pair, err } return fn, nil } @@ -70,15 +74,15 @@ func keyPrefixWatch(params map[string]interface{}) (WatcherFunc, error) { if prefix == "" { return nil, fmt.Errorf("Must specify a single prefix to watch") } - fn := func(p *Plan) (uint64, interface{}, error) { + fn := func(p *Plan) (BlockingParam, interface{}, error) { kv := p.client.KV() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() pairs, meta, err := kv.List(prefix, &opts) if err != nil { - return 0, nil, err + return nil, nil, err } - return meta.LastIndex, pairs, err + return WaitIndexVal(meta.LastIndex), pairs, err } return fn, nil } @@ -90,15 +94,15 @@ func servicesWatch(params map[string]interface{}) (WatcherFunc, error) { return nil, err } - fn := func(p *Plan) (uint64, interface{}, error) { + fn := func(p *Plan) (BlockingParam, interface{}, error) { catalog := p.client.Catalog() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() services, meta, err := catalog.Services(&opts) if err != nil { - return 0, nil, err + return nil, nil, err } - return meta.LastIndex, services, err + return WaitIndexVal(meta.LastIndex), services, err } return fn, nil } @@ -110,15 +114,15 @@ func nodesWatch(params map[string]interface{}) (WatcherFunc, error) { return nil, err } - fn := func(p *Plan) (uint64, interface{}, error) { + fn := 
func(p *Plan) (BlockingParam, interface{}, error) { catalog := p.client.Catalog() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() nodes, meta, err := catalog.Nodes(&opts) if err != nil { - return 0, nil, err + return nil, nil, err } - return meta.LastIndex, nodes, err + return WaitIndexVal(meta.LastIndex), nodes, err } return fn, nil } @@ -147,15 +151,15 @@ func serviceWatch(params map[string]interface{}) (WatcherFunc, error) { return nil, err } - fn := func(p *Plan) (uint64, interface{}, error) { + fn := func(p *Plan) (BlockingParam, interface{}, error) { health := p.client.Health() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() nodes, meta, err := health.Service(service, tag, passingOnly, &opts) if err != nil { - return 0, nil, err + return nil, nil, err } - return meta.LastIndex, nodes, err + return WaitIndexVal(meta.LastIndex), nodes, err } return fn, nil } @@ -181,7 +185,7 @@ func checksWatch(params map[string]interface{}) (WatcherFunc, error) { state = "any" } - fn := func(p *Plan) (uint64, interface{}, error) { + fn := func(p *Plan) (BlockingParam, interface{}, error) { health := p.client.Health() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() @@ -194,9 +198,9 @@ func checksWatch(params map[string]interface{}) (WatcherFunc, error) { checks, meta, err = health.Checks(service, &opts) } if err != nil { - return 0, nil, err + return nil, nil, err } - return meta.LastIndex, checks, err + return WaitIndexVal(meta.LastIndex), checks, err } return fn, nil } @@ -210,23 +214,98 @@ func eventWatch(params map[string]interface{}) (WatcherFunc, error) { return nil, err } - fn := func(p *Plan) (uint64, interface{}, error) { + fn := func(p *Plan) (BlockingParam, interface{}, error) { event := p.client.Event() opts := makeQueryOptionsWithContext(p, false) defer p.cancelFunc() events, meta, err := event.List(name, &opts) if err != nil { - return 0, nil, err + return nil, nil, err } // Prune to only the new events for 
i := 0; i < len(events); i++ { - if event.IDToIndex(events[i].ID) == p.lastIndex { + if WaitIndexVal(event.IDToIndex(events[i].ID)).Equal(p.lastParamVal) { events = events[i+1:] break } } - return meta.LastIndex, events, err + return WaitIndexVal(meta.LastIndex), events, err + } + return fn, nil +} + +// connectRootsWatch is used to watch for changes to Connect Root certificates. +func connectRootsWatch(params map[string]interface{}) (WatcherFunc, error) { + // We don't support stale since roots are likely to be cached locally in the + // agent anyway. + + fn := func(p *Plan) (BlockingParam, interface{}, error) { + agent := p.client.Agent() + opts := makeQueryOptionsWithContext(p, false) + defer p.cancelFunc() + + roots, meta, err := agent.ConnectCARoots(&opts) + if err != nil { + return nil, nil, err + } + + return WaitIndexVal(meta.LastIndex), roots, err + } + return fn, nil +} + +// connectLeafWatch is used to watch for changes to Connect Leaf certificates +// for given local service id. +func connectLeafWatch(params map[string]interface{}) (WatcherFunc, error) { + // We don't support stale since certs are likely to be cached locally in the + // agent anyway. + + var serviceID string + if err := assignValue(params, "service_id", &serviceID); err != nil { + return nil, err + } + + fn := func(p *Plan) (BlockingParam, interface{}, error) { + agent := p.client.Agent() + opts := makeQueryOptionsWithContext(p, false) + defer p.cancelFunc() + + leaf, meta, err := agent.ConnectCALeaf(serviceID, &opts) + if err != nil { + return nil, nil, err + } + + return WaitIndexVal(meta.LastIndex), leaf, err + } + return fn, nil +} + +// connectProxyConfigWatch is used to watch for changes to Connect managed proxy +// configuration. Note that this state is agent-local so the watch mechanism +// uses `hash` rather than `index` for deciding whether to block. 
+func connectProxyConfigWatch(params map[string]interface{}) (WatcherFunc, error) { + // We don't support consistency modes since it's agent local data + + var proxyServiceID string + if err := assignValue(params, "proxy_service_id", &proxyServiceID); err != nil { + return nil, err + } + + fn := func(p *Plan) (BlockingParam, interface{}, error) { + agent := p.client.Agent() + opts := makeQueryOptionsWithContext(p, false) + defer p.cancelFunc() + + log.Printf("DEBUG: id: %s, opts: %v", proxyServiceID, opts) + + config, _, err := agent.ConnectProxyConfig(proxyServiceID, &opts) + if err != nil { + return nil, nil, err + } + + // Return string ContentHash since we don't have Raft indexes to block on. + return WaitHashVal(config.ContentHash), config, err } return fn, nil } @@ -234,6 +313,12 @@ func eventWatch(params map[string]interface{}) (WatcherFunc, error) { func makeQueryOptionsWithContext(p *Plan, stale bool) consulapi.QueryOptions { ctx, cancel := context.WithCancel(context.Background()) p.cancelFunc = cancel - opts := consulapi.QueryOptions{AllowStale: stale, WaitIndex: p.lastIndex} + opts := consulapi.QueryOptions{AllowStale: stale} + switch param := p.lastParamVal.(type) { + case WaitIndexVal: + opts.WaitIndex = uint64(param) + case WaitHashVal: + opts.WaitHash = string(param) + } return *opts.WithContext(ctx) } diff --git a/watch/funcs_test.go b/watch/funcs_test.go index 190ae24fa..89c5a1e80 100644 --- a/watch/funcs_test.go +++ b/watch/funcs_test.go @@ -8,8 +8,10 @@ import ( "time" "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/agent/structs" consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/watch" + "github.com/stretchr/testify/require" ) var errBadContent = errors.New("bad content") @@ -30,7 +32,7 @@ func TestKeyWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"key", "key":"foo/bar/baz"}`) - plan.Handler = func(idx uint64, raw interface{}) { + plan.Handler = func(blockParam 
watch.BlockingParam, raw interface{}) { if raw == nil { return // ignore } @@ -84,7 +86,7 @@ func TestKeyWatch_With_PrefixDelete(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"key", "key":"foo/bar/baz"}`) - plan.Handler = func(idx uint64, raw interface{}) { + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { if raw == nil { return // ignore } @@ -138,7 +140,7 @@ func TestKeyPrefixWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"keyprefix", "prefix":"foo/"}`) - plan.Handler = func(idx uint64, raw interface{}) { + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { if raw == nil { return // ignore } @@ -191,7 +193,7 @@ func TestServicesWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"services"}`) - plan.Handler = func(idx uint64, raw interface{}) { + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { if raw == nil { return // ignore } @@ -245,7 +247,7 @@ func TestNodesWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"nodes"}`) - plan.Handler = func(idx uint64, raw interface{}) { + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { if raw == nil { return // ignore } @@ -296,7 +298,7 @@ func TestServiceWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"service", "service":"foo", "tag":"bar", "passingonly":true}`) - plan.Handler = func(idx uint64, raw interface{}) { + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { if raw == nil { return // ignore } @@ -352,7 +354,7 @@ func TestChecksWatch_State(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"checks", "state":"warning"}`) - plan.Handler = func(idx uint64, raw interface{}) { + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { if raw == nil { return // ignore } @@ -413,7 +415,7 @@ func TestChecksWatch_Service(t *testing.T) { invoke := 
makeInvokeCh() plan := mustParse(t, `{"type":"checks", "service":"foobar"}`) - plan.Handler = func(idx uint64, raw interface{}) { + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { if raw == nil { return // ignore } @@ -479,7 +481,7 @@ func TestEventWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"event", "name": "foo"}`) - plan.Handler = func(idx uint64, raw interface{}) { + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { if raw == nil { return } @@ -523,6 +525,220 @@ func TestEventWatch(t *testing.T) { wg.Wait() } +func TestConnectRootsWatch(t *testing.T) { + // TODO(banks) enable and make it work once this is supported. Note that this + // test actually passes currently just by busy-polling the roots endpoint + // until it changes. + t.Skip("CA and Leaf implementation don't actually support blocking yet") + t.Parallel() + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + + invoke := makeInvokeCh() + plan := mustParse(t, `{"type":"connect_roots"}`) + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + if raw == nil { + return // ignore + } + v, ok := raw.(*consulapi.CARootList) + if !ok || v == nil { + return // ignore + } + // TODO(banks): verify the right roots came back. + invoke <- nil + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + time.Sleep(20 * time.Millisecond) + // TODO(banks): this is a hack since CA config is in flux. We _did_ expose a + // temporary agent endpoint for PUTing config, but didn't expose it in `api` + // package intentionally. If we are going to hack around with temporary API, + // we can might as well drop right down to the RPC level... 
+ args := structs.CAConfiguration{ + Provider: "static", + Config: map[string]interface{}{ + "Name": "test-1", + "Generate": true, + }, + } + var reply interface{} + if err := a.RPC("ConnectCA.ConfigurationSet", &args, &reply); err != nil { + t.Fatalf("err: %v", err) + } + + }() + + wg.Add(1) + go func() { + defer wg.Done() + if err := plan.Run(a.HTTPAddr()); err != nil { + t.Fatalf("err: %v", err) + } + }() + + if err := <-invoke; err != nil { + t.Fatalf("err: %v", err) + } + + plan.Stop() + wg.Wait() +} + +func TestConnectLeafWatch(t *testing.T) { + // TODO(banks) enable and make it work once this is supported. + t.Skip("CA and Leaf implementation don't actually support blocking yet") + t.Parallel() + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + + // Register a web service to get certs for + { + agent := a.Client().Agent() + reg := consulapi.AgentServiceRegistration{ + ID: "web", + Name: "web", + Port: 9090, + } + err := agent.ServiceRegister(®) + require.Nil(t, err) + } + + // Setup a new generated CA + // + // TODO(banks): this is a hack since CA config is in flux. We _did_ expose a + // temporary agent endpoint for PUTing config, but didn't expose it in `api` + // package intentionally. If we are going to hack around with temporary API, + // we can might as well drop right down to the RPC level... + args := structs.CAConfiguration{ + Provider: "static", + Config: map[string]interface{}{ + "Name": "test-1", + "Generate": true, + }, + } + var reply interface{} + if err := a.RPC("ConnectCA.ConfigurationSet", &args, &reply); err != nil { + t.Fatalf("err: %v", err) + } + + invoke := makeInvokeCh() + plan := mustParse(t, `{"type":"connect_leaf", "service_id":"web"}`) + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + if raw == nil { + return // ignore + } + v, ok := raw.(*consulapi.LeafCert) + if !ok || v == nil { + return // ignore + } + // TODO(banks): verify the right leaf came back. 
+ invoke <- nil + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + time.Sleep(20 * time.Millisecond) + + // Change the CA which should eventually trigger a leaf change but probably + // won't now so this test has no way to succeed yet. + args := structs.CAConfiguration{ + Provider: "static", + Config: map[string]interface{}{ + "Name": "test-2", + "Generate": true, + }, + } + var reply interface{} + if err := a.RPC("ConnectCA.ConfigurationSet", &args, &reply); err != nil { + t.Fatalf("err: %v", err) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + if err := plan.Run(a.HTTPAddr()); err != nil { + t.Fatalf("err: %v", err) + } + }() + + if err := <-invoke; err != nil { + t.Fatalf("err: %v", err) + } + + plan.Stop() + wg.Wait() +} + +func TestConnectProxyConfigWatch(t *testing.T) { + t.Parallel() + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + + // Register a local agent service with a managed proxy + reg := &consulapi.AgentServiceRegistration{ + Name: "web", + Port: 8080, + Connect: &consulapi.AgentServiceConnect{ + Proxy: &consulapi.AgentServiceConnectProxy{ + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + } + client := a.Client() + agent := client.Agent() + err := agent.ServiceRegister(reg) + require.NoError(t, err) + + invoke := makeInvokeCh() + plan := mustParse(t, `{"type":"connect_proxy_config", "proxy_service_id":"web-proxy"}`) + plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + if raw == nil { + return // ignore + } + v, ok := raw.(*consulapi.ConnectProxyConfig) + if !ok || v == nil { + return // ignore + } + invoke <- nil + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + time.Sleep(20 * time.Millisecond) + + // Change the proxy's config + reg.Connect.Proxy.Config["foo"] = "buzz" + reg.Connect.Proxy.Config["baz"] = "qux" + err := agent.ServiceRegister(reg) + require.NoError(t, err) + }() + + wg.Add(1) + go func() { + defer wg.Done() + if err := 
plan.Run(a.HTTPAddr()); err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ }()
+
+ if err := <-invoke; err != nil {
+ t.Fatalf("err: %v", err)
+ }
+
+ plan.Stop()
+ wg.Wait()
+}
+
 func mustParse(t *testing.T, q string) *watch.Plan {
 var params map[string]interface{}
 if err := json.Unmarshal([]byte(q), &params); err != nil {
diff --git a/watch/plan.go b/watch/plan.go
index fff9da7c7..6292c19a4 100644
--- a/watch/plan.go
+++ b/watch/plan.go
@@ -37,7 +37,6 @@ func (p *Plan) RunWithConfig(address string, conf *consulapi.Config) error {
 if err != nil {
 return fmt.Errorf("Failed to connect to agent: %v", err)
 }
- p.client = client
 // Create the logger
 output := p.LogOutput
@@ -46,12 +45,24 @@ func (p *Plan) RunWithConfig(address string, conf *consulapi.Config) error {
 }
 logger := log.New(output, "", log.LstdFlags)
+ return p.RunWithClientAndLogger(client, logger)
+}
+
+// RunWithClientAndLogger runs a watch plan using an external client and
+// log.Logger instance. Using this, the plan's Datacenter, Token and LogOutput
+// fields are ignored and the passed client is expected to be configured as
+// needed.
+func (p *Plan) RunWithClientAndLogger(client *consulapi.Client, + logger *log.Logger) error { + + p.client = client + // Loop until we are canceled failures := 0 OUTER: for !p.shouldStop() { // Invoke the handler - index, result, err := p.Watcher(p) + blockParamVal, result, err := p.Watcher(p) // Check if we should terminate since the function // could have blocked for a while @@ -63,7 +74,11 @@ OUTER: if err != nil { // Perform an exponential backoff failures++ - p.lastIndex = 0 + if blockParamVal == nil { + p.lastParamVal = nil + } else { + p.lastParamVal = blockParamVal.Next(p.lastParamVal) + } retry := retryInterval * time.Duration(failures*failures) if retry > maxBackoffTime { retry = maxBackoffTime @@ -82,24 +97,21 @@ OUTER: failures = 0 // If the index is unchanged do nothing - if index == p.lastIndex { + if p.lastParamVal != nil && p.lastParamVal.Equal(blockParamVal) { continue } // Update the index, look for change - oldIndex := p.lastIndex - p.lastIndex = index - if oldIndex != 0 && reflect.DeepEqual(p.lastResult, result) { + oldParamVal := p.lastParamVal + p.lastParamVal = blockParamVal.Next(oldParamVal) + if oldParamVal != nil && reflect.DeepEqual(p.lastResult, result) { continue } - if p.lastIndex < oldIndex { - p.lastIndex = 0 - } // Handle the updated result p.lastResult = result if p.Handler != nil { - p.Handler(index, result) + p.Handler(blockParamVal, result) } } return nil diff --git a/watch/plan_test.go b/watch/plan_test.go index 16e4cfbc2..6099dc294 100644 --- a/watch/plan_test.go +++ b/watch/plan_test.go @@ -10,9 +10,12 @@ func init() { } func noopWatch(params map[string]interface{}) (WatcherFunc, error) { - fn := func(p *Plan) (uint64, interface{}, error) { - idx := p.lastIndex + 1 - return idx, idx, nil + fn := func(p *Plan) (BlockingParam, interface{}, error) { + idx := WaitIndexVal(0) + if i, ok := p.lastParamVal.(WaitIndexVal); ok { + idx = i + } + return idx + 1, uint64(idx + 1), nil } return fn, nil } @@ -32,7 +35,12 @@ func 
TestRun_Stop(t *testing.T) { var expect uint64 = 1 doneCh := make(chan struct{}) - plan.Handler = func(idx uint64, val interface{}) { + plan.Handler = func(blockParamVal BlockingParam, val interface{}) { + idxVal, ok := blockParamVal.(WaitIndexVal) + if !ok { + t.Fatalf("Expected index-based watch") + } + idx := uint64(idxVal) if idx != expect { t.Fatalf("Bad: %d %d", expect, idx) } diff --git a/watch/watch.go b/watch/watch.go index cdf534296..b520d702e 100644 --- a/watch/watch.go +++ b/watch/watch.go @@ -28,10 +28,10 @@ type Plan struct { Handler HandlerFunc LogOutput io.Writer - address string - client *consulapi.Client - lastIndex uint64 - lastResult interface{} + address string + client *consulapi.Client + lastParamVal BlockingParam + lastResult interface{} stop bool stopCh chan struct{} @@ -48,11 +48,72 @@ type HttpHandlerConfig struct { TLSSkipVerify bool `mapstructure:"tls_skip_verify"` } -// WatcherFunc is used to watch for a diff -type WatcherFunc func(*Plan) (uint64, interface{}, error) +// BlockingParam is an interface representing the common operations needed for +// different styles of blocking. It's used to abstract the core watch plan from +// whether we are performing index-based or hash-based blocking. +type BlockingParam interface { + // Equal returns whether the other param value should be considered equal + // (i.e. representing no change in the watched resource). Equal must not panic + // if other is nil. + Equal(other BlockingParam) bool + + // Next is called when deciding which value to use on the next blocking call. + // It assumes the BlockingParam value it is called on is the most recent one + // returned and passes the previous one which may be nil as context. This + // allows types to customise logic around ordering without assuming there is + // an order. For example WaitIndexVal can check that the index didn't go + // backwards and if it did then reset to 0. 
Most other cases should just + // return themselves (the most recent value) to be used in the next request. + Next(previous BlockingParam) BlockingParam +} + +// WaitIndexVal is a type representing a Consul index that implements +// BlockingParam. +type WaitIndexVal uint64 + +// Equal implements BlockingParam +func (idx WaitIndexVal) Equal(other BlockingParam) bool { + if otherIdx, ok := other.(WaitIndexVal); ok { + return idx == otherIdx + } + return false +} + +// Next implements BlockingParam +func (idx WaitIndexVal) Next(previous BlockingParam) BlockingParam { + if previous == nil { + return idx + } + prevIdx, ok := previous.(WaitIndexVal) + if ok && prevIdx > idx { + // This value is smaller than the previous index, reset. + return WaitIndexVal(0) + } + return idx +} + +// WaitHashVal is a type representing a Consul content hash that implements +// BlockingParam. +type WaitHashVal string + +// Equal implements BlockingParam +func (h WaitHashVal) Equal(other BlockingParam) bool { + if otherHash, ok := other.(WaitHashVal); ok { + return h == otherHash + } + return false +} + +// Next implements BlockingParam +func (h WaitHashVal) Next(previous BlockingParam) BlockingParam { + return h +} + +// WatcherFunc is used to watch for a diff. 
+type WatcherFunc func(*Plan) (BlockingParam, interface{}, error) // HandlerFunc is used to handle new data -type HandlerFunc func(uint64, interface{}) +type HandlerFunc func(BlockingParam, interface{}) // Parse takes a watch query and compiles it into a WatchPlan or an error func Parse(params map[string]interface{}) (*Plan, error) { From eca94dcc9245a79c3312e5db0f2732e05d44cc5d Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 25 Apr 2018 14:53:30 +0100 Subject: [PATCH 189/627] Working proxy config reload tests --- connect/proxy/config.go | 133 +++++++++++++++--- connect/proxy/config_test.go | 125 ++++++++++++++++ connect/proxy/testdata/config-kitchensink.hcl | 3 +- key.pem | 0 watch/funcs.go | 3 - 5 files changed, 242 insertions(+), 22 deletions(-) create mode 100644 key.pem diff --git a/connect/proxy/config.go b/connect/proxy/config.go index a8f83d22c..3bd4db38b 100644 --- a/connect/proxy/config.go +++ b/connect/proxy/config.go @@ -5,8 +5,11 @@ import ( "io/ioutil" "log" + "github.com/mitchellh/mapstructure" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/connect" + "github.com/hashicorp/consul/watch" "github.com/hashicorp/hcl" ) @@ -59,21 +62,23 @@ type Config struct { // PublicListenerConfig contains the parameters needed for the incoming mTLS // listener. type PublicListenerConfig struct { - // BindAddress is the host:port the public mTLS listener will bind to. - BindAddress string `json:"bind_address" hcl:"bind_address"` + // BindAddress is the host/IP the public mTLS listener will bind to. + BindAddress string `json:"bind_address" hcl:"bind_address" mapstructure:"bind_address"` + + BindPort string `json:"bind_port" hcl:"bind_port" mapstructure:"bind_port"` // LocalServiceAddress is the host:port for the proxied application. This // should be on loopback or otherwise protected as it's plain TCP. 
- LocalServiceAddress string `json:"local_service_address" hcl:"local_service_address"` + LocalServiceAddress string `json:"local_service_address" hcl:"local_service_address" mapstructure:"local_service_address"` // LocalConnectTimeout is the timeout for establishing connections with the // local backend. Defaults to 1000 (1s). - LocalConnectTimeoutMs int `json:"local_connect_timeout_ms" hcl:"local_connect_timeout_ms"` + LocalConnectTimeoutMs int `json:"local_connect_timeout_ms" hcl:"local_connect_timeout_ms" mapstructure:"local_connect_timeout_ms"` // HandshakeTimeout is the timeout for incoming mTLS clients to complete a // handshake. Setting this low avoids DOS by malicious clients holding // resources open. Defaults to 10000 (10s). - HandshakeTimeoutMs int `json:"handshake_timeout_ms" hcl:"handshake_timeout_ms"` + HandshakeTimeoutMs int `json:"handshake_timeout_ms" hcl:"handshake_timeout_ms" mapstructure:"handshake_timeout_ms"` } // applyDefaults sets zero-valued params to a sane default. @@ -88,26 +93,28 @@ func (plc *PublicListenerConfig) applyDefaults() { // UpstreamConfig configures an upstream (outgoing) listener. type UpstreamConfig struct { - // LocalAddress is the host:port to listen on for local app connections. - LocalBindAddress string `json:"local_bind_address" hcl:"local_bind_address,attr"` + // LocalAddress is the host/ip to listen on for local app connections. Defaults to 127.0.0.1. + LocalBindAddress string `json:"local_bind_address" hcl:"local_bind_address,attr" mapstructure:"local_bind_address"` + + LocalBindPort int `json:"local_bind_port" hcl:"local_bind_port,attr" mapstructure:"local_bind_port"` // DestinationName is the service name of the destination. - DestinationName string `json:"destination_name" hcl:"destination_name,attr"` + DestinationName string `json:"destination_name" hcl:"destination_name,attr" mapstructure:"destination_name"` // DestinationNamespace is the namespace of the destination. 
- DestinationNamespace string `json:"destination_namespace" hcl:"destination_namespace,attr"` + DestinationNamespace string `json:"destination_namespace" hcl:"destination_namespace,attr" mapstructure:"destination_namespace"` // DestinationType determines which service discovery method is used to find a // candidate instance to connect to. - DestinationType string `json:"destination_type" hcl:"destination_type,attr"` + DestinationType string `json:"destination_type" hcl:"destination_type,attr" mapstructure:"destination_type"` // DestinationDatacenter is the datacenter the destination is in. If empty, // defaults to discovery within the same datacenter. - DestinationDatacenter string `json:"destination_datacenter" hcl:"destination_datacenter,attr"` + DestinationDatacenter string `json:"destination_datacenter" hcl:"destination_datacenter,attr" mapstructure:"destination_datacenter"` // ConnectTimeout is the timeout for establishing connections with the remote // service instance. Defaults to 10,000 (10s). - ConnectTimeoutMs int `json:"connect_timeout_ms" hcl:"connect_timeout_ms,attr"` + ConnectTimeoutMs int `json:"connect_timeout_ms" hcl:"connect_timeout_ms,attr" mapstructure:"connect_timeout_ms"` // resolver is used to plug in the service discover mechanism. It can be used // in tests to bypass discovery. In real usage it is used to inject the @@ -121,13 +128,22 @@ func (uc *UpstreamConfig) applyDefaults() { if uc.ConnectTimeoutMs == 0 { uc.ConnectTimeoutMs = 10000 } + if uc.DestinationType == "" { + uc.DestinationType = "service" + } + if uc.DestinationNamespace == "" { + uc.DestinationNamespace = "default" + } + if uc.LocalBindAddress == "" { + uc.LocalBindAddress = "127.0.0.1" + } } // String returns a string that uniquely identifies the Upstream. Used for // identifying the upstream in log output and map keys. 
func (uc *UpstreamConfig) String() string { - return fmt.Sprintf("%s->%s:%s/%s", uc.LocalBindAddress, uc.DestinationType, - uc.DestinationNamespace, uc.DestinationName) + return fmt.Sprintf("%s:%d->%s:%s/%s", uc.LocalBindAddress, uc.LocalBindPort, + uc.DestinationType, uc.DestinationNamespace, uc.DestinationName) } // UpstreamResolverFromClient returns a ConsulResolver that can resolve the @@ -212,12 +228,93 @@ type AgentConfigWatcher struct { client *api.Client proxyID string logger *log.Logger + ch chan *Config + plan *watch.Plan +} + +// NewAgentConfigWatcher creates an AgentConfigWatcher. +func NewAgentConfigWatcher(client *api.Client, proxyID string, + logger *log.Logger) (*AgentConfigWatcher, error) { + w := &AgentConfigWatcher{ + client: client, + proxyID: proxyID, + logger: logger, + ch: make(chan *Config), + } + + // Setup watch plan for config + plan, err := watch.Parse(map[string]interface{}{ + "type": "connect_proxy_config", + "proxy_service_id": w.proxyID, + }) + if err != nil { + return nil, err + } + w.plan = plan + w.plan.Handler = w.handler + go w.plan.RunWithClientAndLogger(w.client, w.logger) + return w, nil +} + +func (w *AgentConfigWatcher) handler(blockVal watch.BlockingParam, + val interface{}) { + log.Printf("DEBUG: got hash %s", blockVal.(watch.WaitHashVal)) + + resp, ok := val.(*api.ConnectProxyConfig) + if !ok { + w.logger.Printf("[WARN] proxy config watch returned bad response: %v", val) + return + } + + // Setup Service instance now we know target ID etc + service, err := connect.NewService(resp.TargetServiceID, w.client) + if err != nil { + w.logger.Printf("[WARN] proxy config watch failed to initialize"+ + " service: %s", err) + return + } + + // Create proxy config from the response + cfg := &Config{ + ProxyID: w.proxyID, + // Token should be already setup in the client + ProxiedServiceID: resp.TargetServiceID, + ProxiedServiceNamespace: "default", + service: service, + } + + // Unmarshal configs + err = 
mapstructure.Decode(resp.Config, &cfg.PublicListener) + if err != nil { + w.logger.Printf("[ERR] proxy config watch public listener config "+ + "couldn't be parsed: %s", err) + return + } + cfg.PublicListener.applyDefaults() + + err = mapstructure.Decode(resp.Config["upstreams"], &cfg.Upstreams) + if err != nil { + w.logger.Printf("[ERR] proxy config watch upstream listener config "+ + "couldn't be parsed: %s", err) + return + } + for i := range cfg.Upstreams { + cfg.Upstreams[i].applyDefaults() + } + + // Parsed config OK, deliver it! + w.ch <- cfg } // Watch implements ConfigWatcher. func (w *AgentConfigWatcher) Watch() <-chan *Config { - watch := make(chan *Config) - // TODO implement me, note we need to discover the Service instance to use and - // set it on the Config we return. - return watch + return w.ch +} + +// Close frees watcher resources and implements io.Closer +func (w *AgentConfigWatcher) Close() error { + if w.plan != nil { + w.plan.Stop() + } + return nil } diff --git a/connect/proxy/config_test.go b/connect/proxy/config_test.go index 96782b12e..855eaddf1 100644 --- a/connect/proxy/config_test.go +++ b/connect/proxy/config_test.go @@ -1,8 +1,15 @@ package proxy import ( + "log" + "os" "testing" + "time" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/connect" "github.com/stretchr/testify/require" ) @@ -106,3 +113,121 @@ func TestUpstreamResolverFromClient(t *testing.T) { }) } } + +func TestAgentConfigWatcher(t *testing.T) { + a := agent.NewTestAgent("agent_smith", "") + + client := a.Client() + agent := client.Agent() + + // Register a service with a proxy + // Register a local agent service with a managed proxy + reg := &api.AgentServiceRegistration{ + Name: "web", + Port: 8080, + Connect: &api.AgentServiceConnect{ + Proxy: &api.AgentServiceConnectProxy{ + Config: map[string]interface{}{ + "bind_address": "10.10.10.10", + "bind_port": "1010", + 
"local_service_address": "127.0.0.1:5000", + "handshake_timeout_ms": 999, + "upstreams": []interface{}{ + map[string]interface{}{ + "destination_name": "db", + "local_bind_port": 9191, + }, + }, + }, + }, + }, + } + err := agent.ServiceRegister(reg) + require.NoError(t, err) + + w, err := NewAgentConfigWatcher(client, "web-proxy", + log.New(os.Stderr, "", log.LstdFlags)) + require.NoError(t, err) + + cfg := testGetConfigValTimeout(t, w, 500*time.Millisecond) + + expectCfg := &Config{ + ProxyID: w.proxyID, + ProxiedServiceID: "web", + ProxiedServiceNamespace: "default", + PublicListener: PublicListenerConfig{ + BindAddress: "10.10.10.10", + BindPort: "1010", + LocalServiceAddress: "127.0.0.1:5000", + HandshakeTimeoutMs: 999, + LocalConnectTimeoutMs: 1000, // from applyDefaults + }, + Upstreams: []UpstreamConfig{ + { + DestinationName: "db", + DestinationNamespace: "default", + DestinationType: "service", + LocalBindPort: 9191, + LocalBindAddress: "127.0.0.1", + ConnectTimeoutMs: 10000, // from applyDefaults + }, + }, + } + + // nil this out as comparisons are problematic, we'll explicitly sanity check + // it's reasonable later. + assert.NotNil(t, cfg.service) + cfg.service = nil + + assert.Equal(t, expectCfg, cfg) + + // TODO(banks): Sanity check the service is viable and gets TLS certs eventually from + // the agent. + + // Now keep watching and update the config. 
+ go func() { + // Wait for watcher to be watching + time.Sleep(20 * time.Millisecond) + upstreams := reg.Connect.Proxy.Config["upstreams"].([]interface{}) + upstreams = append(upstreams, map[string]interface{}{ + "destination_name": "cache", + "local_bind_port": 9292, + "local_bind_address": "127.10.10.10", + }) + reg.Connect.Proxy.Config["upstreams"] = upstreams + reg.Connect.Proxy.Config["local_connect_timeout_ms"] = 444 + err := agent.ServiceRegister(reg) + require.NoError(t, err) + }() + + cfg = testGetConfigValTimeout(t, w, 2*time.Second) + + expectCfg.Upstreams = append(expectCfg.Upstreams, UpstreamConfig{ + DestinationName: "cache", + DestinationNamespace: "default", + DestinationType: "service", + ConnectTimeoutMs: 10000, // from applyDefaults + LocalBindPort: 9292, + LocalBindAddress: "127.10.10.10", + }) + expectCfg.PublicListener.LocalConnectTimeoutMs = 444 + + // nil this out as comparisons are problematic, we'll explicitly sanity check + // it's reasonable later. + assert.NotNil(t, cfg.service) + cfg.service = nil + + assert.Equal(t, expectCfg, cfg) +} + +func testGetConfigValTimeout(t *testing.T, w ConfigWatcher, + timeout time.Duration) *Config { + t.Helper() + select { + case cfg := <-w.Watch(): + return cfg + case <-time.After(timeout): + t.Fatalf("timeout after %s waiting for config update", timeout) + return nil + } +} diff --git a/connect/proxy/testdata/config-kitchensink.hcl b/connect/proxy/testdata/config-kitchensink.hcl index 2bda99791..fccfdffd0 100644 --- a/connect/proxy/testdata/config-kitchensink.hcl +++ b/connect/proxy/testdata/config-kitchensink.hcl @@ -12,7 +12,8 @@ dev_service_cert_file = "connect/testdata/ca1-svc-web.cert.pem" dev_service_key_file = "connect/testdata/ca1-svc-web.key.pem" public_listener { - bind_address = ":9999" + bind_address = "127.0.0.1" + bind_port= "9999" local_service_address = "127.0.0.1:5000" } diff --git a/key.pem b/key.pem new file mode 100644 index 000000000..e69de29bb diff --git a/watch/funcs.go 
b/watch/funcs.go index 8c5823633..3ad7f4f68 100644 --- a/watch/funcs.go +++ b/watch/funcs.go @@ -3,7 +3,6 @@ package watch import ( "context" "fmt" - "log" consulapi "github.com/hashicorp/consul/api" ) @@ -297,8 +296,6 @@ func connectProxyConfigWatch(params map[string]interface{}) (WatcherFunc, error) opts := makeQueryOptionsWithContext(p, false) defer p.cancelFunc() - log.Printf("DEBUG: id: %s, opts: %v", proxyServiceID, opts) - config, _, err := agent.ConnectProxyConfig(proxyServiceID, &opts) if err != nil { return nil, nil, err From 072b2a79cacd0155d2cd6d9c84088952b5329e29 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 25 Apr 2018 20:41:26 +0100 Subject: [PATCH 190/627] Support legacy watch.HandlerFunc type for backward compat reduces impact of change --- agent/watch_handler.go | 20 +++----------- agent/watch_handler_test.go | 4 +-- command/watch/watch.go | 13 +++------ connect/proxy/config.go | 2 +- connect/service.go | 4 +-- watch/funcs.go | 20 +++++++------- watch/funcs_test.go | 24 ++++++++--------- watch/plan.go | 12 +++++++-- watch/plan_test.go | 53 ++++++++++++++++++++++++++++++++++--- watch/watch.go | 53 ++++++++++++++++++++++--------------- 10 files changed, 125 insertions(+), 80 deletions(-) diff --git a/agent/watch_handler.go b/agent/watch_handler.go index 27c7a430e..4c6a9d3f3 100644 --- a/agent/watch_handler.go +++ b/agent/watch_handler.go @@ -42,13 +42,7 @@ func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFun } logger := log.New(logOutput, "", log.LstdFlags) - fn := func(blockVal watch.BlockingParam, data interface{}) { - idx, ok := blockVal.(watch.WaitIndexVal) - if !ok { - logger.Printf("[ERR] agent: watch handler doesn't support non-index watches") - return - } - + fn := func(idx uint64, data interface{}) { // Create the command var cmd *osexec.Cmd var err error @@ -64,7 +58,7 @@ func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFun } cmd.Env = append(os.Environ(), - 
"CONSUL_INDEX="+strconv.FormatUint(uint64(idx), 10), + "CONSUL_INDEX="+strconv.FormatUint(idx, 10), ) // Collect the output @@ -102,13 +96,7 @@ func makeWatchHandler(logOutput io.Writer, handler interface{}) watch.HandlerFun func makeHTTPWatchHandler(logOutput io.Writer, config *watch.HttpHandlerConfig) watch.HandlerFunc { logger := log.New(logOutput, "", log.LstdFlags) - fn := func(blockVal watch.BlockingParam, data interface{}) { - idx, ok := blockVal.(watch.WaitIndexVal) - if !ok { - logger.Printf("[ERR] agent: watch handler doesn't support non-index watches") - return - } - + fn := func(idx uint64, data interface{}) { trans := cleanhttp.DefaultTransport() // Skip SSL certificate verification if TLSSkipVerify is true @@ -144,7 +132,7 @@ func makeHTTPWatchHandler(logOutput io.Writer, config *watch.HttpHandlerConfig) } req = req.WithContext(ctx) req.Header.Add("Content-Type", "application/json") - req.Header.Add("X-Consul-Index", strconv.FormatUint(uint64(idx), 10)) + req.Header.Add("X-Consul-Index", strconv.FormatUint(idx, 10)) for key, values := range config.Header { for _, val := range values { req.Header.Add(key, val) diff --git a/agent/watch_handler_test.go b/agent/watch_handler_test.go index 6851baf71..f7ba83b0a 100644 --- a/agent/watch_handler_test.go +++ b/agent/watch_handler_test.go @@ -17,7 +17,7 @@ func TestMakeWatchHandler(t *testing.T) { defer os.Remove("handler_index_out") script := "bash -c 'echo $CONSUL_INDEX >> handler_index_out && cat >> handler_out'" handler := makeWatchHandler(os.Stderr, script) - handler(watch.WaitIndexVal(100), []string{"foo", "bar", "baz"}) + handler(100, []string{"foo", "bar", "baz"}) raw, err := ioutil.ReadFile("handler_out") if err != nil { t.Fatalf("err: %v", err) @@ -62,5 +62,5 @@ func TestMakeHTTPWatchHandler(t *testing.T) { Timeout: time.Minute, } handler := makeHTTPWatchHandler(os.Stderr, &config) - handler(watch.WaitIndexVal(100), []string{"foo", "bar", "baz"}) + handler(100, []string{"foo", "bar", "baz"}) } diff 
--git a/command/watch/watch.go b/command/watch/watch.go index 2286de1cc..3b8c67836 100644 --- a/command/watch/watch.go +++ b/command/watch/watch.go @@ -154,7 +154,7 @@ func (c *cmd) Run(args []string) int { // 1: true errExit := 0 if len(c.flags.Args()) == 0 { - wp.Handler = func(blockParam consulwatch.BlockingParam, data interface{}) { + wp.Handler = func(idx uint64, data interface{}) { defer wp.Stop() buf, err := json.MarshalIndent(data, "", " ") if err != nil { @@ -164,14 +164,7 @@ func (c *cmd) Run(args []string) int { c.UI.Output(string(buf)) } } else { - wp.Handler = func(blockVal consulwatch.BlockingParam, data interface{}) { - idx, ok := blockVal.(consulwatch.WaitIndexVal) - if !ok { - // TODO(banks): make this work for hash based watches. - c.UI.Error("Error: watch handler doesn't support non-index watches") - return - } - + wp.Handler = func(idx uint64, data interface{}) { doneCh := make(chan struct{}) defer close(doneCh) logFn := func(err error) { @@ -192,7 +185,7 @@ func (c *cmd) Run(args []string) int { goto ERR } cmd.Env = append(os.Environ(), - "CONSUL_INDEX="+strconv.FormatUint(uint64(idx), 10), + "CONSUL_INDEX="+strconv.FormatUint(idx, 10), ) // Encode the input diff --git a/connect/proxy/config.go b/connect/proxy/config.go index 3bd4db38b..b5a8c6bb4 100644 --- a/connect/proxy/config.go +++ b/connect/proxy/config.go @@ -256,7 +256,7 @@ func NewAgentConfigWatcher(client *api.Client, proxyID string, return w, nil } -func (w *AgentConfigWatcher) handler(blockVal watch.BlockingParam, +func (w *AgentConfigWatcher) handler(blockVal watch.BlockingParamVal, val interface{}) { log.Printf("DEBUG: got hash %s", blockVal.(watch.WaitHashVal)) diff --git a/connect/service.go b/connect/service.go index 4c8887745..a614f227f 100644 --- a/connect/service.go +++ b/connect/service.go @@ -236,7 +236,7 @@ func (s *Service) Close() error { return nil } -func (s *Service) rootsWatchHandler(blockParam watch.BlockingParam, raw interface{}) { +func (s *Service) 
rootsWatchHandler(blockParam watch.BlockingParamVal, raw interface{}) { if raw == nil { return } @@ -269,7 +269,7 @@ func (s *Service) rootsWatchHandler(blockParam watch.BlockingParam, raw interfac s.clientTLSCfg.SetTLSConfig(newCfg) } -func (s *Service) leafWatchHandler(blockParam watch.BlockingParam, raw interface{}) { +func (s *Service) leafWatchHandler(blockParam watch.BlockingParamVal, raw interface{}) { if raw == nil { return // ignore } diff --git a/watch/funcs.go b/watch/funcs.go index 3ad7f4f68..5e72e40a6 100644 --- a/watch/funcs.go +++ b/watch/funcs.go @@ -43,7 +43,7 @@ func keyWatch(params map[string]interface{}) (WatcherFunc, error) { if key == "" { return nil, fmt.Errorf("Must specify a single key to watch") } - fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { kv := p.client.KV() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() @@ -73,7 +73,7 @@ func keyPrefixWatch(params map[string]interface{}) (WatcherFunc, error) { if prefix == "" { return nil, fmt.Errorf("Must specify a single prefix to watch") } - fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { kv := p.client.KV() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() @@ -93,7 +93,7 @@ func servicesWatch(params map[string]interface{}) (WatcherFunc, error) { return nil, err } - fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { catalog := p.client.Catalog() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() @@ -113,7 +113,7 @@ func nodesWatch(params map[string]interface{}) (WatcherFunc, error) { return nil, err } - fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { catalog := p.client.Catalog() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() @@ -150,7 
+150,7 @@ func serviceWatch(params map[string]interface{}) (WatcherFunc, error) { return nil, err } - fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { health := p.client.Health() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() @@ -184,7 +184,7 @@ func checksWatch(params map[string]interface{}) (WatcherFunc, error) { state = "any" } - fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { health := p.client.Health() opts := makeQueryOptionsWithContext(p, stale) defer p.cancelFunc() @@ -213,7 +213,7 @@ func eventWatch(params map[string]interface{}) (WatcherFunc, error) { return nil, err } - fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { event := p.client.Event() opts := makeQueryOptionsWithContext(p, false) defer p.cancelFunc() @@ -239,7 +239,7 @@ func connectRootsWatch(params map[string]interface{}) (WatcherFunc, error) { // We don't support stale since roots are likely to be cached locally in the // agent anyway. 
- fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { agent := p.client.Agent() opts := makeQueryOptionsWithContext(p, false) defer p.cancelFunc() @@ -265,7 +265,7 @@ func connectLeafWatch(params map[string]interface{}) (WatcherFunc, error) { return nil, err } - fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { agent := p.client.Agent() opts := makeQueryOptionsWithContext(p, false) defer p.cancelFunc() @@ -291,7 +291,7 @@ func connectProxyConfigWatch(params map[string]interface{}) (WatcherFunc, error) return nil, err } - fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { agent := p.client.Agent() opts := makeQueryOptionsWithContext(p, false) defer p.cancelFunc() diff --git a/watch/funcs_test.go b/watch/funcs_test.go index 89c5a1e80..d5253de44 100644 --- a/watch/funcs_test.go +++ b/watch/funcs_test.go @@ -32,7 +32,7 @@ func TestKeyWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"key", "key":"foo/bar/baz"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return // ignore } @@ -86,7 +86,7 @@ func TestKeyWatch_With_PrefixDelete(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"key", "key":"foo/bar/baz"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return // ignore } @@ -140,7 +140,7 @@ func TestKeyPrefixWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"keyprefix", "prefix":"foo/"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return // ignore } @@ -193,7 +193,7 @@ func TestServicesWatch(t 
*testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"services"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return // ignore } @@ -247,7 +247,7 @@ func TestNodesWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"nodes"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return // ignore } @@ -298,7 +298,7 @@ func TestServiceWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"service", "service":"foo", "tag":"bar", "passingonly":true}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return // ignore } @@ -354,7 +354,7 @@ func TestChecksWatch_State(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"checks", "state":"warning"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return // ignore } @@ -415,7 +415,7 @@ func TestChecksWatch_Service(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"checks", "service":"foobar"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return // ignore } @@ -481,7 +481,7 @@ func TestEventWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"event", "name": "foo"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return } @@ -536,7 +536,7 @@ func TestConnectRootsWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"connect_roots"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler 
= func(idx uint64, raw interface{}) { if raw == nil { return // ignore } @@ -626,7 +626,7 @@ func TestConnectLeafWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"connect_leaf", "service_id":"web"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return // ignore } @@ -699,7 +699,7 @@ func TestConnectProxyConfigWatch(t *testing.T) { invoke := makeInvokeCh() plan := mustParse(t, `{"type":"connect_proxy_config", "proxy_service_id":"web-proxy"}`) - plan.Handler = func(blockParam watch.BlockingParam, raw interface{}) { + plan.HybridHandler = func(blockParamVal watch.BlockingParamVal, raw interface{}) { if raw == nil { return // ignore } diff --git a/watch/plan.go b/watch/plan.go index 6292c19a4..1e34e4eac 100644 --- a/watch/plan.go +++ b/watch/plan.go @@ -110,8 +110,16 @@ OUTER: // Handle the updated result p.lastResult = result - if p.Handler != nil { - p.Handler(blockParamVal, result) + // If a hybrid handler exists use that + if p.HybridHandler != nil { + p.HybridHandler(blockParamVal, result) + } else if p.Handler != nil { + idx, ok := blockParamVal.(WaitIndexVal) + if !ok { + logger.Printf("[ERR] consul.watch: Handler only supports index-based " + + " watches but non index-based watch run. 
Skipping Handler.") + } + p.Handler(uint64(idx), result) } } return nil diff --git a/watch/plan_test.go b/watch/plan_test.go index 6099dc294..0ac648508 100644 --- a/watch/plan_test.go +++ b/watch/plan_test.go @@ -10,7 +10,7 @@ func init() { } func noopWatch(params map[string]interface{}) (WatcherFunc, error) { - fn := func(p *Plan) (BlockingParam, interface{}, error) { + fn := func(p *Plan) (BlockingParamVal, interface{}, error) { idx := WaitIndexVal(0) if i, ok := p.lastParamVal.(WaitIndexVal); ok { idx = i @@ -35,10 +35,57 @@ func TestRun_Stop(t *testing.T) { var expect uint64 = 1 doneCh := make(chan struct{}) - plan.Handler = func(blockParamVal BlockingParam, val interface{}) { + plan.Handler = func(idx uint64, val interface{}) { + if idx != expect { + t.Fatalf("Bad: %d %d", expect, idx) + } + if val != expect { + t.Fatalf("Bad: %d %d", expect, val) + } + if expect == 1 { + close(doneCh) + } + expect++ + } + + errCh := make(chan error, 1) + go func() { + errCh <- plan.Run("127.0.0.1:8500") + }() + + select { + case <-doneCh: + plan.Stop() + + case <-time.After(1 * time.Second): + t.Fatalf("handler never ran") + } + + select { + case err := <-errCh: + if err != nil { + t.Fatalf("err: %v", err) + } + + case <-time.After(1 * time.Second): + t.Fatalf("watcher didn't exit") + } + + if expect == 1 { + t.Fatalf("Bad: %d", expect) + } +} + +func TestRun_Stop_Hybrid(t *testing.T) { + t.Parallel() + plan := mustParse(t, `{"type":"noop"}`) + + var expect uint64 = 1 + doneCh := make(chan struct{}) + plan.HybridHandler = func(blockParamVal BlockingParamVal, val interface{}) { idxVal, ok := blockParamVal.(WaitIndexVal) if !ok { - t.Fatalf("Expected index-based watch") + t.Fatalf("expected index-based watch") } idx := uint64(idxVal) if idx != expect { diff --git a/watch/watch.go b/watch/watch.go index b520d702e..1bc3d0ae6 100644 --- a/watch/watch.go +++ b/watch/watch.go @@ -24,13 +24,16 @@ type Plan struct { HandlerType string Exempt map[string]interface{} - Watcher 
WatcherFunc - Handler HandlerFunc - LogOutput io.Writer + Watcher WatcherFunc + // Handler is kept for backward compatibility but only supports watches based + // on index param. To support hash based watches, set HybridHandler instead. + Handler HandlerFunc + HybridHandler HybridHandlerFunc + LogOutput io.Writer address string client *consulapi.Client - lastParamVal BlockingParam + lastParamVal BlockingParamVal lastResult interface{} stop bool @@ -48,39 +51,39 @@ type HttpHandlerConfig struct { TLSSkipVerify bool `mapstructure:"tls_skip_verify"` } -// BlockingParam is an interface representing the common operations needed for +// BlockingParamVal is an interface representing the common operations needed for // different styles of blocking. It's used to abstract the core watch plan from // whether we are performing index-based or hash-based blocking. -type BlockingParam interface { +type BlockingParamVal interface { // Equal returns whether the other param value should be considered equal // (i.e. representing no change in the watched resource). Equal must not panic // if other is nil. - Equal(other BlockingParam) bool + Equal(other BlockingParamVal) bool // Next is called when deciding which value to use on the next blocking call. - // It assumes the BlockingParam value it is called on is the most recent one + // It assumes the BlockingParamVal value it is called on is the most recent one // returned and passes the previous one which may be nil as context. This // allows types to customise logic around ordering without assuming there is // an order. For example WaitIndexVal can check that the index didn't go // backwards and if it did then reset to 0. Most other cases should just // return themselves (the most recent value) to be used in the next request. - Next(previous BlockingParam) BlockingParam + Next(previous BlockingParamVal) BlockingParamVal } // WaitIndexVal is a type representing a Consul index that implements -// BlockingParam. +// BlockingParamVal. 
type WaitIndexVal uint64 -// Equal implements BlockingParam -func (idx WaitIndexVal) Equal(other BlockingParam) bool { +// Equal implements BlockingParamVal +func (idx WaitIndexVal) Equal(other BlockingParamVal) bool { if otherIdx, ok := other.(WaitIndexVal); ok { return idx == otherIdx } return false } -// Next implements BlockingParam -func (idx WaitIndexVal) Next(previous BlockingParam) BlockingParam { +// Next implements BlockingParamVal +func (idx WaitIndexVal) Next(previous BlockingParamVal) BlockingParamVal { if previous == nil { return idx } @@ -93,27 +96,33 @@ func (idx WaitIndexVal) Next(previous BlockingParam) BlockingParam { } // WaitHashVal is a type representing a Consul content hash that implements -// BlockingParam. +// BlockingParamVal. type WaitHashVal string -// Equal implements BlockingParam -func (h WaitHashVal) Equal(other BlockingParam) bool { +// Equal implements BlockingParamVal +func (h WaitHashVal) Equal(other BlockingParamVal) bool { if otherHash, ok := other.(WaitHashVal); ok { return h == otherHash } return false } -// Next implements BlockingParam -func (h WaitHashVal) Next(previous BlockingParam) BlockingParam { +// Next implements BlockingParamVal +func (h WaitHashVal) Next(previous BlockingParamVal) BlockingParamVal { return h } // WatcherFunc is used to watch for a diff. -type WatcherFunc func(*Plan) (BlockingParam, interface{}, error) +type WatcherFunc func(*Plan) (BlockingParamVal, interface{}, error) -// HandlerFunc is used to handle new data -type HandlerFunc func(BlockingParam, interface{}) +// HandlerFunc is used to handle new data. It only works for index-based watches +// (which is almost all end points currently) and is kept for backwards +// compatibility until more places can make use of hash-based watches too. +type HandlerFunc func(uint64, interface{}) + +// HybridHandlerFunc is used to handle new data. It can support either +// index-based or hash-based watches via the BlockingParamVal. 
+type HybridHandlerFunc func(BlockingParamVal, interface{}) // Parse takes a watch query and compiles it into a WatchPlan or an error func Parse(params map[string]interface{}) (*Plan, error) { From 2b1660fdf729f9b76e5837e1ead42abf8ff79b12 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 25 Apr 2018 21:22:31 +0100 Subject: [PATCH 191/627] Fix tests and listeners to work with Config changes (splitting host and port fields) --- connect/proxy/config.go | 4 ++-- connect/proxy/config_test.go | 7 ++++--- connect/proxy/listener.go | 8 ++++++-- connect/proxy/listener_test.go | 19 ++++++++++++------- connect/proxy/testing.go | 13 +++---------- connect/service.go | 4 ++-- 6 files changed, 29 insertions(+), 26 deletions(-) diff --git a/connect/proxy/config.go b/connect/proxy/config.go index b5a8c6bb4..6fad0bd55 100644 --- a/connect/proxy/config.go +++ b/connect/proxy/config.go @@ -65,7 +65,7 @@ type PublicListenerConfig struct { // BindAddress is the host/IP the public mTLS listener will bind to. BindAddress string `json:"bind_address" hcl:"bind_address" mapstructure:"bind_address"` - BindPort string `json:"bind_port" hcl:"bind_port" mapstructure:"bind_port"` + BindPort int `json:"bind_port" hcl:"bind_port" mapstructure:"bind_port"` // LocalServiceAddress is the host:port for the proxied application. This // should be on loopback or otherwise protected as it's plain TCP. 
@@ -251,7 +251,7 @@ func NewAgentConfigWatcher(client *api.Client, proxyID string, return nil, err } w.plan = plan - w.plan.Handler = w.handler + w.plan.HybridHandler = w.handler go w.plan.RunWithClientAndLogger(w.client, w.logger) return w, nil } diff --git a/connect/proxy/config_test.go b/connect/proxy/config_test.go index 855eaddf1..e576d5f82 100644 --- a/connect/proxy/config_test.go +++ b/connect/proxy/config_test.go @@ -24,7 +24,8 @@ func TestParseConfigFile(t *testing.T) { ProxiedServiceID: "web", ProxiedServiceNamespace: "default", PublicListener: PublicListenerConfig{ - BindAddress: ":9999", + BindAddress: "127.0.0.1", + BindPort: 9999, LocalServiceAddress: "127.0.0.1:5000", LocalConnectTimeoutMs: 1000, HandshakeTimeoutMs: 10000, // From defaults @@ -129,7 +130,7 @@ func TestAgentConfigWatcher(t *testing.T) { Proxy: &api.AgentServiceConnectProxy{ Config: map[string]interface{}{ "bind_address": "10.10.10.10", - "bind_port": "1010", + "bind_port": 1010, "local_service_address": "127.0.0.1:5000", "handshake_timeout_ms": 999, "upstreams": []interface{}{ @@ -157,7 +158,7 @@ func TestAgentConfigWatcher(t *testing.T) { ProxiedServiceNamespace: "default", PublicListener: PublicListenerConfig{ BindAddress: "10.10.10.10", - BindPort: "1010", + BindPort: 1010, LocalServiceAddress: "127.0.0.1:5000", HandshakeTimeoutMs: 999, LocalConnectTimeoutMs: 1000, // from applyDefaults diff --git a/connect/proxy/listener.go b/connect/proxy/listener.go index c8e70ac31..12134f840 100644 --- a/connect/proxy/listener.go +++ b/connect/proxy/listener.go @@ -4,6 +4,7 @@ import ( "context" "crypto/tls" "errors" + "fmt" "log" "net" "sync/atomic" @@ -44,7 +45,9 @@ func NewPublicListener(svc *connect.Service, cfg PublicListenerConfig, return &Listener{ Service: svc, listenFunc: func() (net.Listener, error) { - return tls.Listen("tcp", cfg.BindAddress, svc.ServerTLSConfig()) + return tls.Listen("tcp", + fmt.Sprintf("%s:%d", cfg.BindAddress, cfg.BindPort), + svc.ServerTLSConfig()) }, dialFunc: 
func() (net.Conn, error) { return net.DialTimeout("tcp", cfg.LocalServiceAddress, @@ -63,7 +66,8 @@ func NewUpstreamListener(svc *connect.Service, cfg UpstreamConfig, return &Listener{ Service: svc, listenFunc: func() (net.Listener, error) { - return net.Listen("tcp", cfg.LocalBindAddress) + return net.Listen("tcp", + fmt.Sprintf("%s:%d", cfg.LocalBindAddress, cfg.LocalBindPort)) }, dialFunc: func() (net.Conn, error) { if cfg.resolver == nil { diff --git a/connect/proxy/listener_test.go b/connect/proxy/listener_test.go index 8354fbe58..a0bc640d7 100644 --- a/connect/proxy/listener_test.go +++ b/connect/proxy/listener_test.go @@ -2,6 +2,7 @@ package proxy import ( "context" + "fmt" "log" "net" "os" @@ -9,16 +10,18 @@ import ( agConnect "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/connect" + "github.com/hashicorp/consul/lib/freeport" "github.com/stretchr/testify/require" ) func TestPublicListener(t *testing.T) { ca := agConnect.TestCA(t, nil) - addrs := TestLocalBindAddrs(t, 2) + ports := freeport.GetT(t, 2) cfg := PublicListenerConfig{ - BindAddress: addrs[0], - LocalServiceAddress: addrs[1], + BindAddress: "127.0.0.1", + BindPort: ports[0], + LocalServiceAddress: TestLocalAddr(ports[1]), HandshakeTimeoutMs: 100, LocalConnectTimeoutMs: 100, } @@ -42,7 +45,7 @@ func TestPublicListener(t *testing.T) { // Proxy and backend are running, play the part of a TLS client using same // cert for now. conn, err := svc.Dial(context.Background(), &connect.StaticResolver{ - Addr: addrs[0], + Addr: TestLocalAddr(ports[0]), CertURI: agConnect.TestSpiffeIDService(t, "db"), }) require.NoError(t, err) @@ -51,7 +54,7 @@ func TestPublicListener(t *testing.T) { func TestUpstreamListener(t *testing.T) { ca := agConnect.TestCA(t, nil) - addrs := TestLocalBindAddrs(t, 1) + ports := freeport.GetT(t, 1) // Run a test server that we can dial. 
testSvr := connect.NewTestServer(t, "db", ca) @@ -67,7 +70,8 @@ func TestUpstreamListener(t *testing.T) { DestinationNamespace: "default", DestinationName: "db", ConnectTimeoutMs: 100, - LocalBindAddress: addrs[0], + LocalBindAddress: "localhost", + LocalBindPort: ports[0], resolver: &connect.StaticResolver{ Addr: testSvr.Addr, CertURI: agConnect.TestSpiffeIDService(t, "db"), @@ -88,7 +92,8 @@ func TestUpstreamListener(t *testing.T) { // Proxy and fake remote service are running, play the part of the app // connecting to a remote connect service over TCP. - conn, err := net.Dial("tcp", cfg.LocalBindAddress) + conn, err := net.Dial("tcp", + fmt.Sprintf("%s:%d", cfg.LocalBindAddress, cfg.LocalBindPort)) require.NoError(t, err) TestEchoConn(t, conn, "") } diff --git a/connect/proxy/testing.go b/connect/proxy/testing.go index 9ed8c41c4..f986cfe50 100644 --- a/connect/proxy/testing.go +++ b/connect/proxy/testing.go @@ -7,20 +7,13 @@ import ( "net" "sync/atomic" - "github.com/hashicorp/consul/lib/freeport" "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" ) -// TestLocalBindAddrs returns n localhost address:port strings with free ports -// for binding test listeners to. -func TestLocalBindAddrs(t testing.T, n int) []string { - ports := freeport.GetT(t, n) - addrs := make([]string, n) - for i, p := range ports { - addrs[i] = fmt.Sprintf("localhost:%d", p) - } - return addrs +// TestLocalAddr makes a localhost address on the given port +func TestLocalAddr(port int) string { + return fmt.Sprintf("localhost:%d", port) } // TestTCPServer is a simple TCP echo server for use during tests. 
diff --git a/connect/service.go b/connect/service.go index a614f227f..18e6dd89e 100644 --- a/connect/service.go +++ b/connect/service.go @@ -86,7 +86,7 @@ func NewServiceWithLogger(serviceID string, client *api.Client, return nil, err } s.rootsWatch = p - s.rootsWatch.Handler = s.rootsWatchHandler + s.rootsWatch.HybridHandler = s.rootsWatchHandler p, err = watch.Parse(map[string]interface{}{ "type": "connect_leaf", @@ -95,7 +95,7 @@ func NewServiceWithLogger(serviceID string, client *api.Client, return nil, err } s.leafWatch = p - s.leafWatch.Handler = s.leafWatchHandler + s.leafWatch.HybridHandler = s.leafWatchHandler //go s.rootsWatch.RunWithClientAndLogger(s.client, s.logger) //go s.leafWatch.RunWithClientAndLogger(s.client, s.logger) From 153808db7cfb8a0a20a206778c28a4c7f9e1400f Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 26 Apr 2018 18:06:26 +0100 Subject: [PATCH 192/627] Don't allow connect watches in agent/cli yet --- agent/agent.go | 10 ++++++++++ agent/agent_test.go | 12 ++++++++++++ command/watch/watch.go | 5 +++++ command/watch/watch_test.go | 20 ++++++++++++++++++++ 4 files changed, 47 insertions(+) diff --git a/agent/agent.go b/agent/agent.go index 8f7dd9043..f62495a01 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -621,6 +621,16 @@ func (a *Agent) reloadWatches(cfg *config.RuntimeConfig) error { return fmt.Errorf("Handler type '%s' not recognized", params["handler_type"]) } + // Don't let people use connect watches via this mechanism for now as it + // needs thought about how to do securely and shouldn't be necessary. Note + // that if the type assertion fails an type is not a string then + // ParseExample below will error so we don't need to handle that case. 
+ if typ, ok := params["type"].(string); ok { + if strings.HasPrefix(typ, "connect_") { + return fmt.Errorf("Watch type %s is not allowed in agent config", typ) + } + } + // Parse the watches, excluding 'handler' and 'args' wp, err := watch.ParseExempt(params, []string{"handler", "args"}) if err != nil { diff --git a/agent/agent_test.go b/agent/agent_test.go index c22ce56ba..caa76a28d 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -2259,6 +2259,18 @@ func TestAgent_reloadWatches(t *testing.T) { t.Fatalf("bad: %s", err) } + // Should fail to reload with connect watches + newConf.Watches = []map[string]interface{}{ + { + "type": "connect_roots", + "key": "asdf", + "args": []interface{}{"ls"}, + }, + } + if err := a.reloadWatches(&newConf); err == nil || !strings.Contains(err.Error(), "not allowed in agent config") { + t.Fatalf("bad: %s", err) + } + // Should still succeed with only HTTPS addresses newConf.HTTPSAddrs = newConf.HTTPAddrs newConf.HTTPAddrs = make([]net.Addr, 0) diff --git a/command/watch/watch.go b/command/watch/watch.go index 3b8c67836..bf4691457 100644 --- a/command/watch/watch.go +++ b/command/watch/watch.go @@ -135,6 +135,11 @@ func (c *cmd) Run(args []string) int { return 1 } + if strings.HasPrefix(wp.Type, "connect_") { + c.UI.Error(fmt.Sprintf("Type %s is not supported in the CLI tool", wp.Type)) + return 1 + } + // Create and test the HTTP client client, err := c.http.APIClient() if err != nil { diff --git a/command/watch/watch_test.go b/command/watch/watch_test.go index 153377f65..b1fed48c9 100644 --- a/command/watch/watch_test.go +++ b/command/watch/watch_test.go @@ -33,3 +33,23 @@ func TestWatchCommand(t *testing.T) { t.Fatalf("bad: %#v", ui.OutputWriter.String()) } } + +func TestWatchCommandNoConnect(t *testing.T) { + t.Parallel() + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + + ui := cli.NewMockUi() + c := New(ui, nil) + args := []string{"-http-addr=" + a.HTTPAddr(), "-type=connect_leaf"} + + code := 
c.Run(args) + if code != 1 { + t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) + } + + if !strings.Contains(ui.ErrorWriter.String(), + "Type connect_leaf is not supported in the CLI tool") { + t.Fatalf("bad: %#v", ui.ErrorWriter.String()) + } +} From a29f3c6b96922dd038ad8a721c9e9c1203fce7c0 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 26 Apr 2018 20:14:37 -0700 Subject: [PATCH 193/627] Fix some inconsistencies around the CA provider code --- agent/connect/ca.go | 17 ++++++------ agent/consul/connect_ca_endpoint.go | 2 +- agent/consul/connect_ca_provider.go | 42 +++++++++++++++++------------ agent/consul/leader.go | 2 +- agent/structs/connect_ca.go | 8 +++--- 5 files changed, 39 insertions(+), 32 deletions(-) diff --git a/agent/connect/ca.go b/agent/connect/ca.go index 87b01994e..ff0f0813d 100644 --- a/agent/connect/ca.go +++ b/agent/connect/ca.go @@ -1,7 +1,6 @@ package connect import ( - "bytes" "crypto" "crypto/ecdsa" "crypto/rsa" @@ -28,21 +27,21 @@ func ParseCert(pemValue string) (*x509.Certificate, error) { return x509.ParseCertificate(block.Bytes) } -// ParseCertFingerprint parses the x509 certificate from a PEM-encoded value -// and returns the SHA-1 fingerprint. -func ParseCertFingerprint(pemValue string) (string, error) { +// CalculateCertFingerprint parses the x509 certificate from a PEM-encoded value +// and calculates the SHA-1 fingerprint. +func CalculateCertFingerprint(pemValue string) (string, error) { // The _ result below is not an error but the remaining PEM bytes. 
block, _ := pem.Decode([]byte(pemValue)) if block == nil { return "", fmt.Errorf("no PEM-encoded data found") } - hash := sha1.Sum(block.Bytes) - hexified := make([][]byte, len(hash)) - for i, data := range hash { - hexified[i] = []byte(fmt.Sprintf("%02X", data)) + if block.Type != "CERTIFICATE" { + return "", fmt.Errorf("first PEM-block should be CERTIFICATE type") } - return string(bytes.Join(hexified, []byte(":"))), nil + + hash := sha1.Sum(block.Bytes) + return HexString(hash[:]), nil } // ParseSigner parses a crypto.Signer from a PEM-encoded key. The private key diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index f52c9218e..72ac2adbc 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -98,7 +98,7 @@ func (s *ConnectCA) ConfigurationSet( return err } - id, err := connect.ParseCertFingerprint(newRootPEM) + id, err := connect.CalculateCertFingerprint(newRootPEM) if err != nil { return fmt.Errorf("error parsing root fingerprint: %v", err) } diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index f9321138b..afb74fe78 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -222,7 +222,7 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { // Cert template for generation sn := &big.Int{} - sn.SetUint64(providerState.LeafIndex + 1) + sn.SetUint64(providerState.SerialIndex + 1) template := x509.Certificate{ SerialNumber: sn, Subject: pkix.Name{CommonName: serviceId.Service}, @@ -240,6 +240,7 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, }, + // todo(kyhavlov): add a way to set the cert lifetime here from the CA config NotAfter: time.Now().Add(3 * 24 * time.Hour), NotBefore: time.Now(), AuthorityKeyId: keyId, @@ -258,20 +259,7 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) 
(string, error) { return "", fmt.Errorf("error encoding private key: %s", err) } - // Increment the leaf cert index - newState := *providerState - newState.LeafIndex += 1 - args := &structs.CARequest{ - Op: structs.CAOpSetProviderState, - ProviderState: &newState, - } - resp, err := c.srv.raftApply(structs.ConnectCARequestType, args) - if err != nil { - return "", err - } - if respErr, ok := resp.(error); ok { - return "", respErr - } + c.incrementSerialIndex(providerState) // Set the response return buf.String(), nil @@ -306,10 +294,9 @@ func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { // Create the cross-signing template from the existing root CA serialNum := &big.Int{} - serialNum.SetUint64(providerState.LeafIndex + 1) + serialNum.SetUint64(providerState.SerialIndex + 1) template := *cert template.SerialNumber = serialNum - template.Subject = rootCA.Subject template.SignatureAlgorithm = rootCA.SignatureAlgorithm template.SubjectKeyId = keyId template.AuthorityKeyId = keyId @@ -326,9 +313,30 @@ func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { return "", fmt.Errorf("error encoding private key: %s", err) } + c.incrementSerialIndex(providerState) + return buf.String(), nil } +// incrementSerialIndex increments the cert serial number index in the provider state +func (c *ConsulCAProvider) incrementSerialIndex(providerState *structs.CAConsulProviderState) error { + newState := *providerState + newState.SerialIndex += 1 + args := &structs.CARequest{ + Op: structs.CAOpSetProviderState, + ProviderState: &newState, + } + resp, err := c.srv.raftApply(structs.ConnectCARequestType, args) + if err != nil { + return err + } + if respErr, ok := resp.(error); ok { + return respErr + } + + return nil +} + // generatePrivateKey returns a new private key func generatePrivateKey() (string, error) { var pk *ecdsa.PrivateKey diff --git a/agent/consul/leader.go b/agent/consul/leader.go index d9c3a83ea..2f01b8833 100644 --- 
a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -426,7 +426,7 @@ func (s *Server) initializeCA() error { return fmt.Errorf("error getting root cert: %v", err) } - id, err := connect.ParseCertFingerprint(rootPEM) + id, err := connect.CalculateCertFingerprint(rootPEM) if err != nil { return fmt.Errorf("error parsing root fingerprint: %v", err) } diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index c46db703a..0570057b6 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -168,10 +168,10 @@ type ConsulCAProviderConfig struct { // CAConsulProviderState is used to track the built-in Consul CA provider's state. type CAConsulProviderState struct { - ID string - PrivateKey string - RootCert string - LeafIndex uint64 + ID string + PrivateKey string + RootCert string + SerialIndex uint64 RaftIndex } From 19b9399f2fbef438fbbf4252da140507c0a12a7b Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 26 Apr 2018 23:02:18 -0700 Subject: [PATCH 194/627] Add more tests for built-in provider --- agent/consul/connect_ca_provider.go | 1 - agent/consul/connect_ca_provider_test.go | 150 +++++++++++++++++++++++ agent/consul/server_test.go | 2 + 3 files changed, 152 insertions(+), 1 deletion(-) diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index afb74fe78..1c509e2b0 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -341,7 +341,6 @@ func (c *ConsulCAProvider) incrementSerialIndex(providerState *structs.CAConsulP func generatePrivateKey() (string, error) { var pk *ecdsa.PrivateKey - // If we have no key, then create a new one. 
pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { return "", fmt.Errorf("error generating private key: %s", err) diff --git a/agent/consul/connect_ca_provider_test.go b/agent/consul/connect_ca_provider_test.go index 583f91722..ead41309f 100644 --- a/agent/consul/connect_ca_provider_test.go +++ b/agent/consul/connect_ca_provider_test.go @@ -3,7 +3,9 @@ package consul import ( "os" "testing" + "time" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/testrpc" "github.com/stretchr/testify/assert" ) @@ -25,8 +27,156 @@ func TestCAProvider_Bootstrap(t *testing.T) { root, err := provider.ActiveRoot() assert.NoError(err) + // Intermediate should be the same cert. + inter, err := provider.ActiveIntermediate() + assert.NoError(err) + + // Make sure we initialize without errors and that the + // root cert gets set to the active cert. state := s1.fsm.State() _, activeRoot, err := state.CARootActive(nil) assert.NoError(err) assert.Equal(root, activeRoot.RootCert) + assert.Equal(inter, activeRoot.RootCert) +} + +func TestCAProvider_Bootstrap_WithCert(t *testing.T) { + t.Parallel() + + // Make sure setting a custom private key/root cert works. + assert := assert.New(t) + rootCA := connect.TestCA(t, nil) + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.CAConfig.Config["PrivateKey"] = rootCA.SigningKey + c.CAConfig.Config["RootCert"] = rootCA.RootCert + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + provider := s1.getCAProvider() + + root, err := provider.ActiveRoot() + assert.NoError(err) + + // Make sure we initialize without errors and that the + // root cert we provided gets set to the active cert. 
+ state := s1.fsm.State() + _, activeRoot, err := state.CARootActive(nil) + assert.NoError(err) + assert.Equal(root, activeRoot.RootCert) + assert.Equal(rootCA.RootCert, activeRoot.RootCert) +} + +func TestCAProvider_SignLeaf(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + provider := s1.getCAProvider() + + spiffeService := &connect.SpiffeIDService{ + Host: s1.config.NodeName, + Namespace: "default", + Datacenter: s1.config.Datacenter, + Service: "foo", + } + + // Generate a leaf cert for the service. + { + raw, _ := connect.TestCSR(t, spiffeService) + + csr, err := connect.ParseCSR(raw) + assert.NoError(err) + + cert, err := provider.Sign(csr) + assert.NoError(err) + + parsed, err := connect.ParseCert(cert) + assert.NoError(err) + assert.Equal(parsed.URIs[0], spiffeService.URI()) + assert.Equal(parsed.Subject.CommonName, "foo") + assert.Equal(parsed.SerialNumber.Uint64(), uint64(1)) + + // Ensure the cert is valid now and expires within the correct limit. + assert.True(parsed.NotAfter.Sub(time.Now()) < 3*24*time.Hour) + assert.True(parsed.NotBefore.Before(time.Now())) + } + + // Generate a new cert for another service and make sure + // the serial number is incremented. + spiffeService.Service = "bar" + { + raw, _ := connect.TestCSR(t, spiffeService) + + csr, err := connect.ParseCSR(raw) + assert.NoError(err) + + cert, err := provider.Sign(csr) + assert.NoError(err) + + parsed, err := connect.ParseCert(cert) + assert.NoError(err) + assert.Equal(parsed.URIs[0], spiffeService.URI()) + assert.Equal(parsed.Subject.CommonName, "bar") + assert.Equal(parsed.SerialNumber.Uint64(), uint64(2)) + + // Ensure the cert is valid now and expires within the correct limit. 
+ assert.True(parsed.NotAfter.Sub(time.Now()) < 3*24*time.Hour) + assert.True(parsed.NotBefore.Before(time.Now())) + } +} + +func TestCAProvider_CrossSignCA(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + + // Make sure setting a custom private key/root cert works. + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + provider := s1.getCAProvider() + + rootCA := connect.TestCA(t, nil) + rootPEM, err := provider.ActiveRoot() + assert.NoError(err) + root, err := connect.ParseCert(rootPEM) + assert.NoError(err) + + // Have the provider cross sign our new CA cert. + cert, err := connect.ParseCert(rootCA.RootCert) + assert.NoError(err) + oldSubject := cert.Subject.CommonName + xcPEM, err := provider.CrossSignCA(cert) + assert.NoError(err) + + xc, err := connect.ParseCert(xcPEM) + assert.NoError(err) + + // AuthorityKeyID and SubjectKeyID should be the signing root's. + assert.Equal(root.AuthorityKeyId, xc.AuthorityKeyId) + assert.Equal(root.SubjectKeyId, xc.SubjectKeyId) + + // Subject name should not have changed. + assert.NotEqual(root.Subject.CommonName, xc.Subject.CommonName) + assert.Equal(oldSubject, xc.Subject.CommonName) + + // Issuer should be the signing root. + assert.Equal(root.Issuer.CommonName, xc.Issuer.CommonName) } diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 3afbb6f07..84ec6743a 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -91,6 +91,8 @@ func testServerConfig(t *testing.T) (string, *Config) { // looks like several depend on it. 
config.RPCHoldTimeout = 5 * time.Second + config.ConnectEnabled = true + return dir, config } From 7c0976208d0b4881de41530861916fb831ce0ea0 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 26 Apr 2018 23:28:27 -0700 Subject: [PATCH 195/627] Add tests for the built in CA's state store table --- agent/consul/fsm/commands_oss_test.go | 39 ++++++++++ agent/consul/state/connect_ca.go | 41 +++------- agent/consul/state/connect_ca_test.go | 103 ++++++++++++++++++++++++++ 3 files changed, 154 insertions(+), 29 deletions(-) diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index a52e6d7b6..280bf5b38 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -1318,3 +1318,42 @@ func TestFSM_CARoots(t *testing.T) { assert.Len(roots, 2) } } + +func TestFSM_CABuiltinProvider(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + fsm, err := New(nil, os.Stderr) + assert.Nil(err) + + // Provider state. + expected := &structs.CAConsulProviderState{ + ID: "foo", + PrivateKey: "a", + RootCert: "b", + SerialIndex: 2, + RaftIndex: structs.RaftIndex{ + CreateIndex: 1, + ModifyIndex: 1, + }, + } + + // Create a new request. + req := structs.CARequest{ + Op: structs.CAOpSetProviderState, + ProviderState: expected, + } + + { + buf, err := structs.Encode(structs.ConnectCARequestType, req) + assert.Nil(err) + assert.True(fsm.Apply(makeLog(buf)).(bool)) + } + + // Verify it's in the state store. + { + _, state, err := fsm.state.CAProviderState("foo") + assert.Nil(err) + assert.Equal(expected, state) + } +} diff --git a/agent/consul/state/connect_ca.go b/agent/consul/state/connect_ca.go index 7c4cea294..a7f51a52a 100644 --- a/agent/consul/state/connect_ca.go +++ b/agent/consul/state/connect_ca.go @@ -319,19 +319,19 @@ func (s *Store) CARootSetCAS(idx, cidx uint64, rs []*structs.CARoot) (bool, erro return true, nil } -// CAProviderState is used to pull the built-in provider state from the snapshot. 
-func (s *Snapshot) CAProviderState() (*structs.CAConsulProviderState, error) { - c, err := s.tx.First(caBuiltinProviderTableName, "id") +// CAProviderState is used to pull the built-in provider states from the snapshot. +func (s *Snapshot) CAProviderState() ([]*structs.CAConsulProviderState, error) { + ixns, err := s.tx.Get(caBuiltinProviderTableName, "id") if err != nil { return nil, err } - state, ok := c.(*structs.CAConsulProviderState) - if !ok { - return nil, nil + var ret []*structs.CAConsulProviderState + for wrapped := ixns.Next(); wrapped != nil; wrapped = ixns.Next() { + ret = append(ret, wrapped.(*structs.CAConsulProviderState)) } - return state, nil + return ret, nil } // CAProviderState is used when restoring from a snapshot. @@ -339,6 +339,9 @@ func (s *Restore) CAProviderState(state *structs.CAConsulProviderState) error { if err := s.tx.Insert(caBuiltinProviderTableName, state); err != nil { return fmt.Errorf("failed restoring built-in CA state: %s", err) } + if err := indexUpdateMaxTxn(s.tx, state.ModifyIndex, caBuiltinProviderTableName); err != nil { + return fmt.Errorf("failed updating index: %s", err) + } return nil } @@ -365,27 +368,6 @@ func (s *Store) CAProviderState(id string) (uint64, *structs.CAConsulProviderSta return idx, state, nil } -// CAProviderStates is used to get the Consul CA provider state for the given ID. 
-func (s *Store) CAProviderStates() (uint64, []*structs.CAConsulProviderState, error) { - tx := s.db.Txn(false) - defer tx.Abort() - - // Get the index - idx := maxIndexTxn(tx, caBuiltinProviderTableName) - - // Get all - iter, err := tx.Get(caBuiltinProviderTableName, "id") - if err != nil { - return 0, nil, fmt.Errorf("failed CA provider state lookup: %s", err) - } - - var results []*structs.CAConsulProviderState - for v := iter.Next(); v != nil; v = iter.Next() { - results = append(results, v.(*structs.CAConsulProviderState)) - } - return idx, results, nil -} - // CASetProviderState is used to set the current built-in CA provider state. func (s *Store) CASetProviderState(idx uint64, state *structs.CAConsulProviderState) (bool, error) { tx := s.db.Txn(true) @@ -419,7 +401,8 @@ func (s *Store) CASetProviderState(idx uint64, state *structs.CAConsulProviderSt return true, nil } -// CADeleteProviderState is used to remove the Consul CA provider state for the given ID. +// CADeleteProviderState is used to remove the built-in Consul CA provider +// state for the given ID. 
func (s *Store) CADeleteProviderState(id string) error { tx := s.db.Txn(true) defer tx.Abort() diff --git a/agent/consul/state/connect_ca_test.go b/agent/consul/state/connect_ca_test.go index cd37f526b..4639c7f5a 100644 --- a/agent/consul/state/connect_ca_test.go +++ b/agent/consul/state/connect_ca_test.go @@ -349,3 +349,106 @@ func TestStore_CARoot_Snapshot_Restore(t *testing.T) { assert.Equal(roots, actual) }() } + +func TestStore_CABuiltinProvider(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + { + expected := &structs.CAConsulProviderState{ + ID: "foo", + PrivateKey: "a", + RootCert: "b", + SerialIndex: 1, + } + + ok, err := s.CASetProviderState(0, expected) + assert.NoError(err) + assert.True(ok) + + idx, state, err := s.CAProviderState(expected.ID) + assert.NoError(err) + assert.Equal(idx, uint64(0)) + assert.Equal(expected, state) + } + + { + expected := &structs.CAConsulProviderState{ + ID: "bar", + PrivateKey: "c", + RootCert: "d", + SerialIndex: 2, + } + + ok, err := s.CASetProviderState(1, expected) + assert.NoError(err) + assert.True(ok) + + idx, state, err := s.CAProviderState(expected.ID) + assert.NoError(err) + assert.Equal(idx, uint64(1)) + assert.Equal(expected, state) + } +} + +func TestStore_CABuiltinProvider_Snapshot_Restore(t *testing.T) { + assert := assert.New(t) + s := testStateStore(t) + + // Create multiple state entries. + before := []*structs.CAConsulProviderState{ + { + ID: "bar", + PrivateKey: "y", + RootCert: "z", + SerialIndex: 2, + }, + { + ID: "foo", + PrivateKey: "a", + RootCert: "b", + SerialIndex: 1, + }, + } + + for i, state := range before { + ok, err := s.CASetProviderState(uint64(98+i), state) + assert.NoError(err) + assert.True(ok) + } + + // Take a snapshot. + snap := s.Snapshot() + defer snap.Close() + + // Modify the state store. 
+ after := &structs.CAConsulProviderState{ + ID: "foo", + PrivateKey: "c", + RootCert: "d", + SerialIndex: 1, + } + ok, err := s.CASetProviderState(100, after) + assert.NoError(err) + assert.True(ok) + + snapped, err := snap.CAProviderState() + assert.NoError(err) + assert.Equal(before, snapped) + + // Restore onto a new state store. + s2 := testStateStore(t) + restore := s2.Restore() + for _, entry := range snapped { + assert.NoError(restore.CAProviderState(entry)) + } + restore.Commit() + + // Verify the restored values match those from before the snapshot. + for _, state := range before { + idx, res, err := s2.CAProviderState(state.ID) + assert.NoError(err) + assert.Equal(idx, uint64(99)) + assert.Equal(state, res) + } +} From 0e184f3f5b848bb37f6dd1aa95aaac225a60ca06 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 27 Apr 2018 20:10:15 -0700 Subject: [PATCH 196/627] Fix config tests --- agent/config/runtime_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 1db5ab207..9dff7733b 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -3415,6 +3415,7 @@ func TestFullConfig(t *testing.T) { }, CheckUpdateInterval: 16507 * time.Second, ClientAddrs: []*net.IPAddr{ipAddr("93.83.18.19")}, + ConnectEnabled: true, ConnectProxyBindMinPort: 2000, ConnectProxyBindMaxPort: 3000, ConnectCAProvider: "b8j4ynx9", @@ -4092,6 +4093,8 @@ func TestSanitize(t *testing.T) { } ], "ClientAddrs": [], + "ConnectCAConfig": {}, + "ConnectCAProvider": "", "ConnectEnabled": false, "ConnectProxyBindMaxPort": 0, "ConnectProxyBindMinPort": 0, From b28e11fdd318c7754a625b291a2f9f8997d416f1 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Sun, 29 Apr 2018 20:44:40 -0700 Subject: [PATCH 197/627] Fill out connect CA rpc endpoint tests --- agent/consul/connect_ca_endpoint.go | 4 +- agent/consul/connect_ca_endpoint_test.go | 207 +++++++++++++++++++++-- agent/consul/connect_ca_provider.go | 4 +- 
agent/testagent.go | 3 + 4 files changed, 196 insertions(+), 22 deletions(-) diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 72ac2adbc..35dbe46e8 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -130,7 +130,7 @@ func (s *ConnectCA) ConfigurationSet( // If the config has been committed, update the local provider instance s.srv.setCAProvider(newProvider) - s.srv.logger.Printf("[INFO] connect: provider config updated") + s.srv.logger.Printf("[INFO] connect: CA provider config updated") return nil } @@ -295,7 +295,7 @@ func (s *ConnectCA) Sign( } // Set the response - reply = &structs.IssuedCert{ + *reply = structs.IssuedCert{ SerialNumber: connect.HexString(cert.SerialNumber.Bytes()), CertPEM: pem, Service: serviceId.Service, diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index f2404eb4c..321bcfcb4 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -4,6 +4,7 @@ import ( "crypto/x509" "os" "testing" + "time" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" @@ -12,6 +13,14 @@ import ( "github.com/stretchr/testify/assert" ) +func testParseCert(t *testing.T, pemValue string) *x509.Certificate { + cert, err := connect.ParseCert(pemValue) + if err != nil { + t.Fatal(err) + } + return cert +} + // Test listing root CAs. 
func TestConnectCARoots(t *testing.T) { t.Parallel() @@ -30,16 +39,18 @@ func TestConnectCARoots(t *testing.T) { ca1 := connect.TestCA(t, nil) ca2 := connect.TestCA(t, nil) ca2.Active = false - ok, err := state.CARootSetCAS(1, 0, []*structs.CARoot{ca1, ca2}) + idx, _, err := state.CARoots(nil) + assert.NoError(err) + ok, err := state.CARootSetCAS(idx, idx, []*structs.CARoot{ca1, ca2}) assert.True(ok) - assert.Nil(err) + assert.NoError(err) // Request args := &structs.DCSpecificRequest{ Datacenter: "dc1", } var reply structs.IndexedCARoots - assert.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply)) + assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply)) // Verify assert.Equal(ca1.ID, reply.ActiveRootID) @@ -51,11 +62,173 @@ func TestConnectCARoots(t *testing.T) { } } +func TestConnectCAConfig_GetSet(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Get the starting config + { + args := &structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var reply structs.CAConfiguration + assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)) + + actual, err := ParseConsulCAConfig(reply.Config) + assert.NoError(err) + expected, err := ParseConsulCAConfig(s1.config.CAConfig.Config) + assert.NoError(err) + assert.Equal(reply.Provider, s1.config.CAConfig.Provider) + assert.Equal(actual, expected) + } + + // Update a config value + newConfig := &structs.CAConfiguration{ + Provider: "consul", + Config: map[string]interface{}{ + "PrivateKey": "", + "RootCert": "", + "RotationPeriod": 180 * 24 * time.Hour, + }, + } + { + args := &structs.CARequest{ + Datacenter: "dc1", + Config: newConfig, + } + var reply interface{} + + assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply)) + } + + // 
Verify the new config was set + { + args := &structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var reply structs.CAConfiguration + assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)) + + actual, err := ParseConsulCAConfig(reply.Config) + assert.NoError(err) + expected, err := ParseConsulCAConfig(newConfig.Config) + assert.NoError(err) + assert.Equal(reply.Provider, newConfig.Provider) + assert.Equal(actual, expected) + } +} + +func TestConnectCAConfig_TriggerRotation(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Store the current root + rootReq := &structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var rootList structs.IndexedCARoots + assert.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", rootReq, &rootList)) + assert.Len(rootList.Roots, 1) + oldRoot := rootList.Roots[0] + + // Update the provider config to use a new private key, which should + // cause a rotation. + newKey, err := generatePrivateKey() + assert.NoError(err) + newConfig := &structs.CAConfiguration{ + Provider: "consul", + Config: map[string]interface{}{ + "PrivateKey": newKey, + "RootCert": "", + "RotationPeriod": 90 * 24 * time.Hour, + }, + } + { + args := &structs.CARequest{ + Datacenter: "dc1", + Config: newConfig, + } + var reply interface{} + + assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationSet", args, &reply)) + } + + // Make sure the new root has been added along with an intermediate + // cross-signed by the old root. 
+ { + args := &structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var reply structs.IndexedCARoots + assert.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply)) + assert.Len(reply.Roots, 2) + + for _, r := range reply.Roots { + if r.ID == oldRoot.ID { + // The old root should no longer be marked as the active root, + // and none of its other fields should have changed. + assert.False(r.Active) + assert.Equal(r.Name, oldRoot.Name) + assert.Equal(r.RootCert, oldRoot.RootCert) + assert.Equal(r.SigningCert, oldRoot.SigningCert) + assert.Equal(r.IntermediateCerts, oldRoot.IntermediateCerts) + } else { + // The new root should have a valid cross-signed cert from the old + // root as an intermediate. + assert.True(r.Active) + assert.Len(r.IntermediateCerts, 1) + + xc := testParseCert(t, r.IntermediateCerts[0]) + oldRootCert := testParseCert(t, oldRoot.RootCert) + newRootCert := testParseCert(t, r.RootCert) + + // Should have the authority/subject key IDs and signature algo of the + // (old) signing CA. + assert.Equal(xc.AuthorityKeyId, oldRootCert.AuthorityKeyId) + assert.Equal(xc.SubjectKeyId, oldRootCert.SubjectKeyId) + assert.Equal(xc.SignatureAlgorithm, oldRootCert.SignatureAlgorithm) + + // The common name and SAN should not have changed. + assert.Equal(xc.Subject.CommonName, newRootCert.Subject.CommonName) + assert.Equal(xc.URIs, newRootCert.URIs) + } + } + } + + // Verify the new config was set. 
+ { + args := &structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var reply structs.CAConfiguration + assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)) + + actual, err := ParseConsulCAConfig(reply.Config) + assert.NoError(err) + expected, err := ParseConsulCAConfig(newConfig.Config) + assert.NoError(err) + assert.Equal(reply.Provider, newConfig.Provider) + assert.Equal(actual, expected) + } +} + // Test CA signing -// -// NOTE(mitchellh): Just testing the happy path and not all the other validation -// issues because the internals of this method will probably be gutted for the -// CA plugins then we can just test mocks. func TestConnectCASign(t *testing.T) { t.Parallel() @@ -68,32 +241,30 @@ func TestConnectCASign(t *testing.T) { testrpc.WaitForLeader(t, s1.RPC, "dc1") - // Insert a CA - state := s1.fsm.State() - ca := connect.TestCA(t, nil) - ok, err := state.CARootSetCAS(1, 0, []*structs.CARoot{ca}) - assert.True(ok) - assert.Nil(err) - // Generate a CSR and request signing spiffeId := connect.TestSpiffeIDService(t, "web") csr, _ := connect.TestCSR(t, spiffeId) args := &structs.CASignRequest{ - Datacenter: "dc01", + Datacenter: "dc1", CSR: csr, } var reply structs.IssuedCert - assert.Nil(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply)) + assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply)) + + // Get the current CA + state := s1.fsm.State() + _, ca, err := state.CARootActive(nil) + assert.NoError(err) // Verify that the cert is signed by the CA roots := x509.NewCertPool() assert.True(roots.AppendCertsFromPEM([]byte(ca.RootCert))) leaf, err := connect.ParseCert(reply.CertPEM) - assert.Nil(err) + assert.NoError(err) _, err = leaf.Verify(x509.VerifyOptions{ Roots: roots, }) - assert.Nil(err) + assert.NoError(err) // Verify other fields assert.Equal("web", reply.Service) diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index 
1c509e2b0..cb2bcad57 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -30,7 +30,7 @@ type ConsulCAProvider struct { // NewConsulCAProvider returns a new instance of the Consul CA provider, // bootstrapping its state in the state store necessary func NewConsulCAProvider(rawConfig map[string]interface{}, srv *Server) (*ConsulCAProvider, error) { - conf, err := decodeConfig(rawConfig) + conf, err := ParseConsulCAConfig(rawConfig) if err != nil { return nil, err } @@ -116,7 +116,7 @@ func NewConsulCAProvider(rawConfig map[string]interface{}, srv *Server) (*Consul return provider, nil } -func decodeConfig(raw map[string]interface{}) (*structs.ConsulCAProviderConfig, error) { +func ParseConsulCAConfig(raw map[string]interface{}) (*structs.ConsulCAProviderConfig, error) { var config *structs.ConsulCAProviderConfig if err := mapstructure.WeakDecode(raw, &config); err != nil { return nil, fmt.Errorf("error decoding config: %s", err) diff --git a/agent/testagent.go b/agent/testagent.go index 581143016..c2e4ddf01 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -334,6 +334,9 @@ func TestConfig(sources ...config.Source) *config.RuntimeConfig { server = true node_id = "` + nodeID + `" node_name = "Node ` + nodeID + `" + connect { + enabled = true + } performance { raft_multiplier = 1 } From 4c1b82834b48cf800368caff36e2db876daf1492 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Mon, 30 Apr 2018 17:35:02 +0100 Subject: [PATCH 198/627] Add support for measuring tx/rx packets through proxied connections. --- connect/proxy/conn.go | 45 ++++++++++++++++++++++++++++++++++---- connect/proxy/conn_test.go | 9 ++++++++ 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/connect/proxy/conn.go b/connect/proxy/conn.go index 70019e55c..fe52853f0 100644 --- a/connect/proxy/conn.go +++ b/connect/proxy/conn.go @@ -8,8 +8,9 @@ import ( // Conn represents a single proxied TCP connection. 
type Conn struct { - src, dst net.Conn - stopping int32 + src, dst net.Conn + srcW, dstW countWriter + stopping int32 } // NewConn returns a conn joining the two given net.Conn @@ -17,6 +18,8 @@ func NewConn(src, dst net.Conn) *Conn { return &Conn{ src: src, dst: dst, + srcW: countWriter{w: src}, + dstW: countWriter{w: dst}, stopping: 0, } } @@ -47,10 +50,10 @@ func (c *Conn) CopyBytes() error { // causing this goroutine to exit but not the outer one. See // TestConnSrcClosing which will fail if you comment the defer below. defer c.Close() - io.Copy(c.dst, c.src) + io.Copy(&c.dstW, c.src) }() - _, err := io.Copy(c.src, c.dst) + _, err := io.Copy(&c.srcW, c.dst) // Note that we don't wait for the other goroutine to finish because it either // already has due to it's src conn closing, or it will once our defer fires // and closes the source conn. No need for the extra coordination. @@ -59,3 +62,37 @@ func (c *Conn) CopyBytes() error { } return err } + +// Stats returns number of bytes transmitted and recieved. Transmit means bytes +// written to dst, receive means bytes written to src. +func (c *Conn) Stats() (txBytes, rxBytes uint64) { + return c.srcW.Written(), c.dstW.Written() +} + +// countWriter is an io.Writer that counts the number of bytes being written +// before passing them through. We use it to gather metrics for bytes +// sent/received. Note that since we are always copying between a net.TCPConn +// and a tls.Conn, none of the optimisations using syscalls like splice and +// ReaderTo/WriterFrom can be used anyway and io.Copy falls back to a generic +// buffered read/write loop. +// +// We use atomic updates to synchronize reads and writes here. It's the cheapest +// uncontended option based on +// https://gist.github.com/banks/e76b40c0cc4b01503f0a0e4e0af231d5. Further +// optimization can be made when if/when identified as a real overhead. 
+type countWriter struct { + written uint64 + w io.Writer +} + +// Write implements io.Writer +func (cw *countWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + atomic.AddUint64(&cw.written, uint64(n)) + return +} + +// Written returns how many bytes have been written to w. +func (cw *countWriter) Written() uint64 { + return atomic.LoadUint64(&cw.written) +} diff --git a/connect/proxy/conn_test.go b/connect/proxy/conn_test.go index a37720ea0..4de428ad0 100644 --- a/connect/proxy/conn_test.go +++ b/connect/proxy/conn_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -88,6 +89,10 @@ func TestConn(t *testing.T) { require.Nil(t, err) require.Equal(t, "ping 2\n", got) + tx, rx := c.Stats() + assert.Equal(t, uint64(7), tx) + assert.Equal(t, uint64(7), rx) + _, err = src.Write([]byte("pong 1\n")) require.Nil(t, err) _, err = dst.Write([]byte("pong 2\n")) @@ -101,6 +106,10 @@ func TestConn(t *testing.T) { require.Nil(t, err) require.Equal(t, "pong 2\n", got) + tx, rx = c.Stats() + assert.Equal(t, uint64(14), tx) + assert.Equal(t, uint64(14), rx) + c.Close() ret := <-retCh From 8b38cdaba190303f434238fb9692aefb2f9955f7 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Mon, 30 Apr 2018 18:17:39 +0100 Subject: [PATCH 199/627] Add TODO for false-sharing --- connect/proxy/conn.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/connect/proxy/conn.go b/connect/proxy/conn.go index fe52853f0..d55e861bf 100644 --- a/connect/proxy/conn.go +++ b/connect/proxy/conn.go @@ -8,7 +8,10 @@ import ( // Conn represents a single proxied TCP connection. type Conn struct { - src, dst net.Conn + src, dst net.Conn + // TODO(banks): benchmark and consider adding _ [8]uint64 padding between + // these to prevent false sharing between the rx and tx goroutines when + // running on separate cores. 
srcW, dstW countWriter stopping int32 } From 554f367dad772da7c18e506018908ce480f9a76e Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Mon, 30 Apr 2018 22:27:46 +0100 Subject: [PATCH 200/627] Fix build error introduced in bad merge of TLS stuff --- connect/service.go | 29 ++--------------------------- 1 file changed, 2 insertions(+), 27 deletions(-) diff --git a/connect/service.go b/connect/service.go index 18e6dd89e..4f38558a3 100644 --- a/connect/service.go +++ b/connect/service.go @@ -252,21 +252,7 @@ func (s *Service) rootsWatchHandler(blockParam watch.BlockingParamVal, raw inter roots.AppendCertsFromPEM([]byte(root.RootCertPEM)) } - // Note that SetTLSConfig takes care of adding a dynamic GetConfigForClient - // hook that will fetch this updated config for new incoming connections on a - // server. That means all future connections are validated against the new - // roots. On a client, we only expose Dial and we fetch the most recent config - // each time so all future Dials (direct or via an http.Client with our dial - // hook) will grab this new config. - newCfg := s.serverTLSCfg.TLSConfig() - // Server-side verification uses ClientCAs. - newCfg.ClientCAs = roots - s.serverTLSCfg.SetTLSConfig(newCfg) - - newCfg = s.clientTLSCfg.TLSConfig() - // Client-side verification uses RootCAs. - newCfg.RootCAs = roots - s.clientTLSCfg.SetTLSConfig(newCfg) + s.tlsCfg.SetRoots(roots) } func (s *Service) leafWatchHandler(blockParam watch.BlockingParamVal, raw interface{}) { @@ -286,16 +272,5 @@ func (s *Service) leafWatchHandler(blockParam watch.BlockingParamVal, raw interf return } - // Note that SetTLSConfig takes care of adding a dynamic GetClientCertificate - // hook that will fetch the first cert from the Certificates slice of the - // current config for each outbound client request even if the client is using - // an old version of the config struct so all we need to do it set that and - // all existing clients will start using the new cert. 
- newCfg := s.serverTLSCfg.TLSConfig() - newCfg.Certificates = []tls.Certificate{cert} - s.serverTLSCfg.SetTLSConfig(newCfg) - - newCfg = s.clientTLSCfg.TLSConfig() - newCfg.Certificates = []tls.Certificate{cert} - s.clientTLSCfg.SetTLSConfig(newCfg) + s.tlsCfg.SetLeaf(&cert) } From dcd277de8a49be524fa9801b8c0893b821ee14cf Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Mon, 30 Apr 2018 22:23:49 +0100 Subject: [PATCH 201/627] Wire up agent leaf endpoint to cache framework to support blocking. --- agent/agent.go | 10 +++ agent/agent_endpoint.go | 45 ++++++---- agent/agent_endpoint_test.go | 116 +++++++++++++++++++------- agent/agent_test.go | 9 -- agent/cache-types/connect_ca_leaf.go | 14 ++-- agent/connect/testing_ca.go | 49 ++++++++++- agent/connect_ca_endpoint_test.go | 12 +-- agent/consul/connect_ca_endpoint.go | 33 ++++++-- agent/consul/connect_ca_provider.go | 17 ++-- agent/consul/testing.go | 26 ------ agent/consul/testing_endpoint.go | 43 ---------- agent/consul/testing_endpoint_test.go | 42 ---------- agent/consul/testing_test.go | 13 --- 13 files changed, 225 insertions(+), 204 deletions(-) delete mode 100644 agent/consul/testing.go delete mode 100644 agent/consul/testing_endpoint.go delete mode 100644 agent/consul/testing_endpoint_test.go delete mode 100644 agent/consul/testing_test.go diff --git a/agent/agent.go b/agent/agent.go index f62495a01..9dfe2abea 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2679,6 +2679,16 @@ func (a *Agent) registerCache() { RefreshTimeout: 10 * time.Minute, }) + a.cache.RegisterType(cachetype.ConnectCALeafName, &cachetype.ConnectCALeaf{ + RPC: a.delegate, + Cache: a.cache, + }, &cache.RegisterOptions{ + // Maintain a blocking query, retry dropped connections quickly + Refresh: true, + RefreshTimer: 0, + RefreshTimeout: 10 * time.Minute, + }) + a.cache.RegisterType(cachetype.IntentionMatchName, &cachetype.IntentionMatch{ RPC: a.delegate, }, &cache.RegisterOptions{ diff --git a/agent/agent_endpoint.go 
b/agent/agent_endpoint.go index d500b17ba..b13e6d076 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -28,9 +28,7 @@ import ( "github.com/hashicorp/serf/serf" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - // NOTE(mitcehllh): This is temporary while certs are stubbed out. - "github.com/mitchellh/go-testing-interface" ) type Self struct { @@ -918,24 +916,39 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. return nil, fmt.Errorf("unknown service ID: %s", id) } - // Create a CSR. - // TODO(mitchellh): This is obviously not production ready! - csr, pk := connect.TestCSR(&testing.RuntimeT{}, &connect.SpiffeIDService{ - Host: "1234.consul", - Namespace: "default", - Datacenter: s.agent.config.Datacenter, - Service: service.Service, - }) + args := cachetype.ConnectCALeafRequest{ + Service: service.Service, // Need name not ID + } + var qOpts structs.QueryOptions + // Store DC in the ConnectCALeafRequest but query opts separately + if done := s.parse(resp, req, &args.Datacenter, &qOpts); done { + return nil, nil + } + args.MinQueryIndex = qOpts.MinQueryIndex - // Request signing - var reply structs.IssuedCert - args := structs.CASignRequest{CSR: csr} - if err := s.agent.RPC("ConnectCA.Sign", &args, &reply); err != nil { + // Validate token + // TODO(banks): support correct proxy token checking too + rule, err := s.agent.resolveToken(qOpts.Token) + if err != nil { return nil, err } - reply.PrivateKeyPEM = pk + if rule != nil && !rule.ServiceWrite(service.Service, nil) { + return nil, acl.ErrPermissionDenied + } - return &reply, nil + raw, err := s.agent.cache.Get(cachetype.ConnectCALeafName, &args) + if err != nil { + return nil, err + } + + reply, ok := raw.(*structs.IssuedCert) + if !ok { + // This should never happen, but we want to protect against panics + return nil, fmt.Errorf("internal error: response type not correct") + } + setIndex(resp, 
reply.ModifyIndex) + + return reply, nil } // GET /v1/agent/connect/proxy/:proxy_service_id diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index d5ea7305a..fad92cb9a 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2,6 +2,7 @@ package agent import ( "bytes" + "crypto/tls" "crypto/x509" "fmt" "io" @@ -2105,7 +2106,7 @@ func TestAgentConnectCARoots_empty(t *testing.T) { t.Parallel() assert := assert.New(t) - a := NewTestAgent(t.Name(), "") + a := NewTestAgent(t.Name(), "connect { enabled = false }") defer a.Shutdown() req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) @@ -2128,13 +2129,9 @@ func TestAgentConnectCARoots_list(t *testing.T) { // Grab the initial cache hit count cacheHits := a.cache.Hits() - // Set some CAs - var reply interface{} - ca1 := connect.TestCA(t, nil) - ca1.Active = false - ca2 := connect.TestCA(t, nil) - require.Nil(a.RPC("Test.ConnectCASetRoots", - []*structs.CARoot{ca1, ca2}, &reply)) + // Set some CAs. Note that NewTestAgent already bootstraps one CA so this just + // adds a second and makes it active. 
+ ca2 := connect.TestCAConfigSet(t, a, nil) // List req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) @@ -2152,7 +2149,7 @@ func TestAgentConnectCARoots_list(t *testing.T) { require.Equal("", r.SigningKey) } - // That should've been a cache miss, so not hit change + // That should've been a cache miss, so no hit change require.Equal(cacheHits, a.cache.Hits()) // Test caching @@ -2169,24 +2166,21 @@ func TestAgentConnectCARoots_list(t *testing.T) { // Test that caching is updated in the background { - // Set some new CAs - var reply interface{} - ca := connect.TestCA(t, nil) - require.Nil(a.RPC("Test.ConnectCASetRoots", - []*structs.CARoot{ca}, &reply)) + // Set a new CA + ca := connect.TestCAConfigSet(t, a, nil) retry.Run(t, func(r *retry.R) { // List it again obj, err := a.srv.AgentConnectCARoots(httptest.NewRecorder(), req) - if err != nil { - r.Fatal(err) - } + r.Check(err) value := obj.(structs.IndexedCARoots) if ca.ID != value.ActiveRootID { r.Fatalf("%s != %s", ca.ID, value.ActiveRootID) } - if len(value.Roots) != 1 { + // There are now 3 CAs because we didn't complete rotation on the original + // 2 + if len(value.Roots) != 3 { r.Fatalf("bad len: %d", len(value.Roots)) } }) @@ -2205,13 +2199,16 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { t.Parallel() assert := assert.New(t) + require := require.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() - // Set CAs - var reply interface{} - ca1 := connect.TestCA(t, nil) - assert.Nil(a.RPC("Test.ConnectCASetRoots", []*structs.CARoot{ca1}, &reply)) + // CA already setup by default by NewTestAgent but force a new one so we can + // verify it was signed easily. 
+ ca1 := connect.TestCAConfigSet(t, a, nil) + + // Grab the initial cache hit count + cacheHits := a.cache.Hits() { // Register a local service @@ -2227,7 +2224,7 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(args)) resp := httptest.NewRecorder() _, err := a.srv.AgentRegisterService(resp, req) - assert.Nil(err) + require.NoError(err) if !assert.Equal(200, resp.Code) { t.Log("Body: ", resp.Body.String()) } @@ -2237,23 +2234,86 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/foo", nil) resp := httptest.NewRecorder() obj, err := a.srv.AgentConnectCALeafCert(resp, req) - assert.Nil(err) + require.NoError(err) // Get the issued cert issued, ok := obj.(*structs.IssuedCert) assert.True(ok) // Verify that the cert is signed by the CA + requireLeafValidUnderCA(t, issued, ca1) + + // Verify blocking index + assert.True(issued.ModifyIndex > 0) + assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex), + resp.Header().Get("X-Consul-Index")) + + // That should've been a cache miss, so no hit change + require.Equal(cacheHits, a.cache.Hits()) + + // Test caching + { + // Fetch it again + obj2, err := a.srv.AgentConnectCALeafCert(httptest.NewRecorder(), req) + require.NoError(err) + require.Equal(obj, obj2) + + // Should cache hit this time and not make request + require.Equal(cacheHits+1, a.cache.Hits()) + cacheHits++ + } + + // Test that caching is updated in the background + { + // Set a new CA + ca := connect.TestCAConfigSet(t, a, nil) + + retry.Run(t, func(r *retry.R) { + // Try and sign again (note no index/wait arg since cache should update in + // background even if we aren't actively blocking) + obj, err := a.srv.AgentConnectCALeafCert(httptest.NewRecorder(), req) + r.Check(err) + + issued2 := obj.(*structs.IssuedCert) + if issued.CertPEM == issued2.CertPEM { + r.Fatalf("leaf has not updated") + } + + // Got a new leaf. 
Sanity check it's a whole new key as well as differnt + // cert. + if issued.PrivateKeyPEM == issued2.PrivateKeyPEM { + r.Fatalf("new leaf has same private key as before") + } + + // Verify that the cert is signed by the new CA + requireLeafValidUnderCA(t, issued2, ca) + }) + + // Should be a cache hit! The data should've updated in the cache + // in the background so this should've been fetched directly from + // the cache. + if v := a.cache.Hits(); v < cacheHits+1 { + t.Fatalf("expected at least one more cache hit, still at %d", v) + } + cacheHits = a.cache.Hits() + } +} + +func requireLeafValidUnderCA(t *testing.T, issued *structs.IssuedCert, + ca *structs.CARoot) { + roots := x509.NewCertPool() - assert.True(roots.AppendCertsFromPEM([]byte(ca1.RootCert))) + require.True(t, roots.AppendCertsFromPEM([]byte(ca.RootCert))) leaf, err := connect.ParseCert(issued.CertPEM) - assert.Nil(err) + require.NoError(t, err) _, err = leaf.Verify(x509.VerifyOptions{ Roots: roots, }) - assert.Nil(err) + require.NoError(t, err) - // TODO(mitchellh): verify the private key matches the cert + // Verify the private key matches. tls.LoadX509Keypair does this for us! + _, err = tls.X509KeyPair([]byte(issued.CertPEM), []byte(issued.PrivateKeyPEM)) + require.NoError(t, err) } func TestAgentConnectProxy(t *testing.T) { diff --git a/agent/agent_test.go b/agent/agent_test.go index caa76a28d..730c10bc9 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -18,7 +18,6 @@ import ( "github.com/stretchr/testify/require" "github.com/hashicorp/consul/agent/checks" - "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/testutil" @@ -28,14 +27,6 @@ import ( "github.com/pascaldekloe/goe/verify" ) -// TestMain is the main entrypoint for `go test`. 
-func TestMain(m *testing.M) { - // Enable the test RPC endpoints - consul.TestEndpoint() - - os.Exit(m.Run()) -} - func externalIP() (string, error) { addrs, err := net.InterfaceAddrs() if err != nil { diff --git a/agent/cache-types/connect_ca_leaf.go b/agent/cache-types/connect_ca_leaf.go index c6a2eee73..2c1cd156a 100644 --- a/agent/cache-types/connect_ca_leaf.go +++ b/agent/cache-types/connect_ca_leaf.go @@ -48,7 +48,7 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache // is so that the goroutine doesn't block forever if we return for other // reasons. newRootCACh := make(chan error, 1) - go c.waitNewRootCA(newRootCACh, opts.Timeout) + go c.waitNewRootCA(reqReal.Datacenter, newRootCACh, opts.Timeout) // Get our prior cert (if we had one) and use that to determine our // expiration time. If no cert exists, we expire immediately since we @@ -110,7 +110,10 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache // Request signing var reply structs.IssuedCert - args := structs.CASignRequest{CSR: csr} + args := structs.CASignRequest{ + Datacenter: reqReal.Datacenter, + CSR: csr, + } if err := c.RPC.RPC("ConnectCA.Sign", &args, &reply); err != nil { return result, err } @@ -139,11 +142,12 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache // waitNewRootCA blocks until a new root CA is available or the timeout is // reached (on timeout ErrTimeout is returned on the channel). -func (c *ConnectCALeaf) waitNewRootCA(ch chan<- error, timeout time.Duration) { +func (c *ConnectCALeaf) waitNewRootCA(datacenter string, ch chan<- error, + timeout time.Duration) { // Fetch some new roots. This will block until our MinQueryIndex is // matched or the timeout is reached. 
rawRoots, err := c.Cache.Get(ConnectCARootName, &structs.DCSpecificRequest{ - Datacenter: "", + Datacenter: datacenter, QueryOptions: structs.QueryOptions{ MinQueryIndex: atomic.LoadUint64(&c.caIndex), MaxQueryTime: timeout, @@ -186,7 +190,7 @@ func (c *ConnectCALeaf) waitNewRootCA(ch chan<- error, timeout time.Duration) { } // ConnectCALeafRequest is the cache.Request implementation for the -// COnnectCALeaf cache type. This is implemented here and not in structs +// ConnectCALeaf cache type. This is implemented here and not in structs // since this is only used for cache-related requests and not forwarded // directly to any Consul servers. type ConnectCALeafRequest struct { diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index e12372589..fbb5eed49 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -39,7 +39,6 @@ var testCACounter uint64 // SigningCert. func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { var result structs.CARoot - result.ID = testUUID(t) result.Active = true result.Name = fmt.Sprintf("Test CA %d", atomic.AddUint64(&testCACounter, 1)) @@ -86,6 +85,10 @@ func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { t.Fatalf("error encoding private key: %s", err) } result.RootCert = buf.String() + result.ID, err = CalculateCertFingerprint(result.RootCert) + if err != nil { + t.Fatalf("error generating CA ID fingerprint: %s", err) + } // If there is a prior CA to cross-sign with, then we need to create that // and set it as the signing cert. @@ -286,3 +289,47 @@ func testUUID(t testing.T) string { return ret } + +// TestAgentRPC is an interface that an RPC client must implement. This is a +// helper interface that is implemented by the agent delegate so that test +// helpers can make RPCs without introducing an import cycle on `agent`. 
+type TestAgentRPC interface { + RPC(method string, args interface{}, reply interface{}) error +} + +// TestCAConfigSet sets a CARoot returned by TestCA into the TestAgent state. It +// requires that TestAgent had connect enabled in it's config. If ca is nil, a +// new CA is created. +// +// It returns the CARoot passed or created. +// +// Note that we have to use an interface for the TestAgent.RPC method since we +// can't introduce an import cycle by importing `agent.TestAgent` here directly. +// It also means this will work in a few other places we mock that method. +func TestCAConfigSet(t testing.T, a TestAgentRPC, + ca *structs.CARoot) *structs.CARoot { + t.Helper() + + if ca == nil { + ca = TestCA(t, nil) + } + newConfig := &structs.CAConfiguration{ + Provider: "consul", + Config: map[string]interface{}{ + "PrivateKey": ca.SigningKey, + "RootCert": ca.RootCert, + "RotationPeriod": 180 * 24 * time.Hour, + }, + } + args := &structs.CARequest{ + Datacenter: "dc1", + Config: newConfig, + } + var reply interface{} + + err := a.RPC("ConnectCA.ConfigurationSet", args, &reply) + if err != nil { + t.Fatalf("failed to set test CA config: %s", err) + } + return ca +} diff --git a/agent/connect_ca_endpoint_test.go b/agent/connect_ca_endpoint_test.go index bcf209ffe..a9b355e0d 100644 --- a/agent/connect_ca_endpoint_test.go +++ b/agent/connect_ca_endpoint_test.go @@ -14,7 +14,7 @@ func TestConnectCARoots_empty(t *testing.T) { t.Parallel() assert := assert.New(t) - a := NewTestAgent(t.Name(), "") + a := NewTestAgent(t.Name(), "connect { enabled = false }") defer a.Shutdown() req, _ := http.NewRequest("GET", "/v1/connect/ca/roots", nil) @@ -34,13 +34,9 @@ func TestConnectCARoots_list(t *testing.T) { a := NewTestAgent(t.Name(), "") defer a.Shutdown() - // Set some CAs - var reply interface{} - ca1 := connect.TestCA(t, nil) - ca1.Active = false - ca2 := connect.TestCA(t, nil) - assert.Nil(a.RPC("Test.ConnectCASetRoots", - []*structs.CARoot{ca1, ca2}, &reply)) + // Set some 
CAs. Note that NewTestAgent already bootstraps one CA so this just + // adds a second and makes it active. + ca2 := connect.TestCAConfigSet(t, a, nil) // List req, _ := http.NewRequest("GET", "/v1/connect/ca/roots", nil) diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 35dbe46e8..136cbcb49 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -272,14 +272,6 @@ func (s *ConnectCA) Sign( return err } - provider := s.srv.getCAProvider() - - // todo(kyhavlov): more validation on the CSR before signing - pem, err := provider.Sign(csr) - if err != nil { - return err - } - // Parse the SPIFFE ID spiffeId, err := connect.ParseCertURI(csr.URIs[0]) if err != nil { @@ -289,6 +281,27 @@ func (s *ConnectCA) Sign( if !ok { return fmt.Errorf("SPIFFE ID in CSR must be a service ID") } + + provider := s.srv.getCAProvider() + + // todo(kyhavlov): more validation on the CSR before signing + pem, err := provider.Sign(csr) + if err != nil { + return err + } + + // TODO(banks): when we implement IssuedCerts table we can use the insert to + // that as the raft index to return in response. Right now we can rely on only + // the built-in provider being supported and the implementation detail that we + // have to write a SerialIndex update to the provider config table for every + // cert issued so in all cases this index will be higher than any previous + // sign response. This has to happen after the provider.Sign call to observe + // the index update. 
+ modIdx, _, err := s.srv.fsm.State().CAConfig() + if err != nil { + return err + } + cert, err := connect.ParseCert(pem) if err != nil { return err @@ -302,6 +315,10 @@ func (s *ConnectCA) Sign( ServiceURI: cert.URIs[0].String(), ValidAfter: cert.NotBefore, ValidBefore: cert.NotAfter, + RaftIndex: structs.RaftIndex{ + ModifyIndex: modIdx, + CreateIndex: modIdx, + }, } return nil diff --git a/agent/consul/connect_ca_provider.go b/agent/consul/connect_ca_provider.go index cb2bcad57..0d7d851b0 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/consul/connect_ca_provider.go @@ -250,7 +250,7 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { // Create the certificate, PEM encode it and return that value. var buf bytes.Buffer bs, err := x509.CreateCertificate( - rand.Reader, &template, caCert, signer.Public(), signer) + rand.Reader, &template, caCert, csr.PublicKey, signer) if err != nil { return "", fmt.Errorf("error generating certificate: %s", err) } @@ -259,7 +259,10 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { return "", fmt.Errorf("error encoding private key: %s", err) } - c.incrementSerialIndex(providerState) + err = c.incrementSerialIndex(providerState) + if err != nil { + return "", err + } // Set the response return buf.String(), nil @@ -313,15 +316,19 @@ func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { return "", fmt.Errorf("error encoding private key: %s", err) } - c.incrementSerialIndex(providerState) + err = c.incrementSerialIndex(providerState) + if err != nil { + return "", err + } return buf.String(), nil } -// incrementSerialIndex increments the cert serial number index in the provider state +// incrementSerialIndex increments the cert serial number index in the provider +// state. 
func (c *ConsulCAProvider) incrementSerialIndex(providerState *structs.CAConsulProviderState) error { newState := *providerState - newState.SerialIndex += 1 + newState.SerialIndex++ args := &structs.CARequest{ Op: structs.CAOpSetProviderState, ProviderState: &newState, diff --git a/agent/consul/testing.go b/agent/consul/testing.go deleted file mode 100644 index afae7c1a1..000000000 --- a/agent/consul/testing.go +++ /dev/null @@ -1,26 +0,0 @@ -package consul - -import ( - "sync" -) - -// testEndpointsOnce ensures that endpoints for testing are registered once. -var testEndpointsOnce sync.Once - -// TestEndpoints registers RPC endpoints specifically for testing. These -// endpoints enable some internal data access that we normally disallow, but -// are useful for modifying server state. -// -// To use this, modify TestMain to call this function prior to running tests. -// -// These should NEVER be registered outside of tests. -// -// NOTE(mitchellh): This was created so that the downstream agent tests can -// modify internal Connect CA state. When the CA plugin work comes in with -// a more complete CA API, this may no longer be necessary and we can remove it. -// That would be ideal. -func TestEndpoint() { - testEndpointsOnce.Do(func() { - registerEndpoint(func(s *Server) interface{} { return &Test{s} }) - }) -} diff --git a/agent/consul/testing_endpoint.go b/agent/consul/testing_endpoint.go deleted file mode 100644 index 6e3cec12f..000000000 --- a/agent/consul/testing_endpoint.go +++ /dev/null @@ -1,43 +0,0 @@ -package consul - -import ( - "github.com/hashicorp/consul/agent/structs" -) - -// Test is an RPC endpoint that is only available during `go test` when -// `TestEndpoint` is called. This is not and must not ever be available -// during a real running Consul agent, since it this endpoint bypasses -// critical ACL checks. -type Test struct { - // srv is a pointer back to the server. - srv *Server -} - -// ConnectCASetRoots sets the current CA roots state. 
-func (s *Test) ConnectCASetRoots( - args []*structs.CARoot, - reply *interface{}) error { - - // Get the highest index - state := s.srv.fsm.State() - idx, _, err := state.CARoots(nil) - if err != nil { - return err - } - - // Commit - resp, err := s.srv.raftApply(structs.ConnectCARequestType, &structs.CARequest{ - Op: structs.CAOpSetRoots, - Index: idx, - Roots: args, - }) - if err != nil { - s.srv.logger.Printf("[ERR] consul.test: Apply failed %v", err) - return err - } - if respErr, ok := resp.(error); ok { - return respErr - } - - return nil -} diff --git a/agent/consul/testing_endpoint_test.go b/agent/consul/testing_endpoint_test.go deleted file mode 100644 index e20213695..000000000 --- a/agent/consul/testing_endpoint_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package consul - -import ( - "os" - "testing" - - "github.com/hashicorp/consul/agent/connect" - "github.com/hashicorp/consul/agent/structs" - "github.com/hashicorp/consul/testrpc" - "github.com/hashicorp/net-rpc-msgpackrpc" - "github.com/stretchr/testify/assert" -) - -// Test setting the CAs -func TestTestConnectCASetRoots(t *testing.T) { - t.Parallel() - - assert := assert.New(t) - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - codec := rpcClient(t, s1) - defer codec.Close() - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - // Prepare - ca1 := connect.TestCA(t, nil) - ca2 := connect.TestCA(t, nil) - ca2.Active = false - - // Request - args := []*structs.CARoot{ca1, ca2} - var reply interface{} - assert.Nil(msgpackrpc.CallWithCodec(codec, "Test.ConnectCASetRoots", args, &reply)) - - // Verify they're there - state := s1.fsm.State() - _, actual, err := state.CARoots(nil) - assert.Nil(err) - assert.Len(actual, 2) -} diff --git a/agent/consul/testing_test.go b/agent/consul/testing_test.go deleted file mode 100644 index 98e8dd743..000000000 --- a/agent/consul/testing_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package consul - -import ( - "os" - "testing" -) - -func TestMain(m 
*testing.M) { - // Register the test RPC endpoint - TestEndpoint() - - os.Exit(m.Run()) -} From 02ab461dae45a213255334396b8a81f79765939a Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 26 Apr 2018 14:01:20 +0100 Subject: [PATCH 202/627] TLS watching integrated into Service with some basic tests. There are also a lot of small bug fixes found when testing lots of things end-to-end for the first time and some cleanup now it's integrated with real CA code. --- agent/agent_endpoint.go | 69 +++++++- agent/agent_endpoint_test.go | 242 +++++++++++++++++++++++++++- agent/config/builder.go | 203 ++++++++++++----------- agent/config/runtime.go | 6 +- agent/config/runtime_test.go | 18 ++- agent/connect/testing_ca.go | 2 +- agent/connect/testing_spiffe.go | 2 +- agent/http_oss.go | 2 +- agent/local/state.go | 7 +- agent/local/state_test.go | 15 ++ agent/structs/connect.go | 7 +- agent/structs/service_definition.go | 5 +- api/agent.go | 3 - api/agent_test.go | 75 ++++++++- connect/proxy/config.go | 20 +-- connect/proxy/config_test.go | 10 -- connect/proxy/listener.go | 18 ++- connect/proxy/proxy.go | 65 +++++--- connect/resolver.go | 8 +- connect/service.go | 51 ++++-- connect/service_test.go | 94 ++++++++++- connect/testing.go | 11 +- connect/tls.go | 61 ++++++- connect/tls_test.go | 42 +++++ testutil/server.go | 3 + watch/funcs.go | 6 +- watch/funcs_test.go | 93 ++++------- 27 files changed, 868 insertions(+), 270 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index b13e6d076..fde7ca5e2 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -28,7 +28,6 @@ import ( "github.com/hashicorp/serf/serf" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - // NOTE(mitcehllh): This is temporary while certs are stubbed out. 
) type Self struct { @@ -1000,14 +999,71 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http } contentHash := fmt.Sprintf("%x", hash) + // Merge globals defaults + config := make(map[string]interface{}) + for k, v := range s.agent.config.ConnectProxyDefaultConfig { + if _, ok := config[k]; !ok { + config[k] = v + } + } + + execMode := "daemon" + // If there is a global default mode use that instead + if s.agent.config.ConnectProxyDefaultExecMode != "" { + execMode = s.agent.config.ConnectProxyDefaultExecMode + } + // If it's actually set though, use the one set + if proxy.Proxy.ExecMode != structs.ProxyExecModeUnspecified { + execMode = proxy.Proxy.ExecMode.String() + } + + // TODO(banks): default the binary to current binary. Probably needs to be + // done deeper though as it will be needed for actually managing proxy + // lifecycle. + command := proxy.Proxy.Command + if command == "" { + if execMode == "daemon" { + command = s.agent.config.ConnectProxyDefaultDaemonCommand + } + if execMode == "script" { + command = s.agent.config.ConnectProxyDefaultScriptCommand + } + } + // No global defaults set either... + if command == "" { + command = "consul connect proxy" + } + + // Set defaults for anything that is still not specified but required. + // Note that these are not included in the content hash. Since we expect + // them to be static in general but some like the default target service + // port might not be. In that edge case services can set that explicitly + // when they re-register which will be caught though. + for k, v := range proxy.Proxy.Config { + config[k] = v + } + if _, ok := config["bind_port"]; !ok { + config["bind_port"] = proxy.Proxy.ProxyService.Port + } + if _, ok := config["bind_address"]; !ok { + // Default to binding to the same address the agent is configured to + // bind to. 
+ config["bind_address"] = s.agent.config.BindAddr.String() + } + if _, ok := config["local_service_address"]; !ok { + // Default to localhost and the port the service registered with + config["local_service_address"] = fmt.Sprintf("127.0.0.1:%d", + target.Port) + } + reply := &api.ConnectProxyConfig{ ProxyServiceID: proxy.Proxy.ProxyService.ID, TargetServiceID: target.ID, TargetServiceName: target.Service, ContentHash: contentHash, - ExecMode: api.ProxyExecMode(proxy.Proxy.ExecMode.String()), - Command: proxy.Proxy.Command, - Config: proxy.Proxy.Config, + ExecMode: api.ProxyExecMode(execMode), + Command: command, + Config: config, } return contentHash, reply, nil }) @@ -1040,10 +1096,13 @@ func (s *HTTPServer) agentLocalBlockingQuery(resp http.ResponseWriter, hash stri // Apply a small amount of jitter to the request. wait += lib.RandomStagger(wait / 16) timeout = time.NewTimer(wait) - ws = memdb.NewWatchSet() } for { + // Must reset this every loop in case the Watch set is already closed but + // hash remains same. In that case we'll need to re-block on ws.Watch() + // again. 
+ ws = memdb.NewWatchSet() curHash, curResp, err := fn(ws) if err != nil { return curResp, err diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index fad92cb9a..be97bf5a4 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2316,7 +2316,7 @@ func requireLeafValidUnderCA(t *testing.T, issued *structs.IssuedCert, require.NoError(t, err) } -func TestAgentConnectProxy(t *testing.T) { +func TestAgentConnectProxyConfig_Blocking(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") @@ -2354,7 +2354,7 @@ func TestAgentConnectProxy(t *testing.T) { TargetServiceName: "test", ContentHash: "84346af2031659c9", ExecMode: "daemon", - Command: "", + Command: "consul connect proxy", Config: map[string]interface{}{ "upstreams": []interface{}{ map[string]interface{}{ @@ -2362,15 +2362,17 @@ func TestAgentConnectProxy(t *testing.T) { "local_port": float64(3131), }, }, - "bind_port": float64(1234), - "connect_timeout_ms": float64(500), + "bind_address": "127.0.0.1", + "local_service_address": "127.0.0.1:8000", + "bind_port": float64(1234), + "connect_timeout_ms": float64(500), }, } ur, err := copystructure.Copy(expectedResponse) require.NoError(t, err) updatedResponse := ur.(*api.ConnectProxyConfig) - updatedResponse.ContentHash = "7d53473b0e9db5a" + updatedResponse.ContentHash = "e1e3395f0d00cd41" upstreams := updatedResponse.Config["upstreams"].([]interface{}) upstreams = append(upstreams, map[string]interface{}{ @@ -2431,6 +2433,41 @@ func TestAgentConnectProxy(t *testing.T) { wantErr: false, wantResp: updatedResponse, }, + { + // This test exercises a case that caused a busy loop to eat CPU for the + // entire duration of the blocking query. If a service gets re-registered + // wth same proxy config then the old proxy config chan is closed causing + // blocked watchset.Watch to return false indicating a change. 
But since + // the hash is the same when the blocking fn is re-called we should just + // keep blocking on the next iteration. The bug hit was that the WatchSet + // ws was not being reset in the loop and so when you try to `Watch` it + // the second time it just returns immediately making the blocking loop + // into a busy-poll! + // + // This test though doesn't catch that because busy poll still has the + // correct external behaviour. I don't want to instrument the loop to + // assert it's not executing too fast here as I can't think of a clean way + // and the issue is fixed now so this test doesn't actually catch the + // error, but does provide an easy way to verify the behaviour by hand: + // 1. Make this test fail e.g. change wantErr to true + // 2. Add a log.Println or similar into the blocking loop/function + // 3. See whether it's called just once or many times in a tight loop. + name: "blocking fetch interrupted with no change (same hash)", + url: "/v1/agent/connect/proxy/test-proxy?wait=200ms&hash=" + expectedResponse.ContentHash, + updateFunc: func() { + time.Sleep(100 * time.Millisecond) + // Re-register with _same_ proxy config + req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err = a.srv.AgentRegisterService(resp, req) + require.NoError(t, err) + require.Equal(t, 200, resp.Code, "body: %s", resp.Body.String()) + }, + wantWait: 200 * time.Millisecond, + wantCode: 200, + wantErr: false, + wantResp: expectedResponse, + }, } for _, tt := range tests { @@ -2479,6 +2516,201 @@ func TestAgentConnectProxy(t *testing.T) { } } +func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { + t.Parallel() + + // Define a local service with a managed proxy. It's registered in the test + // loop to make sure agent state is predictable whatever order tests execute + // since some alter this service config. 
+ reg := &structs.ServiceDefinition{ + ID: "test-id", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{}, + } + + tests := []struct { + name string + globalConfig string + proxy structs.ServiceDefinitionConnectProxy + wantMode api.ProxyExecMode + wantCommand string + wantConfig map[string]interface{} + }{ + { + name: "defaults", + globalConfig: ` + bind_addr = "0.0.0.0" + connect { + enabled = true + proxy_defaults = { + bind_min_port = 10000 + bind_max_port = 10000 + } + } + `, + proxy: structs.ServiceDefinitionConnectProxy{}, + wantMode: api.ProxyExecModeDaemon, + wantCommand: "consul connect proxy", + wantConfig: map[string]interface{}{ + "bind_address": "0.0.0.0", + "bind_port": 10000, // "randomly" chosen from our range of 1 + "local_service_address": "127.0.0.1:8000", // port from service reg + }, + }, + { + name: "global defaults - script", + globalConfig: ` + bind_addr = "0.0.0.0" + connect { + enabled = true + proxy_defaults = { + bind_min_port = 10000 + bind_max_port = 10000 + exec_mode = "script" + script_command = "script.sh" + } + } + `, + proxy: structs.ServiceDefinitionConnectProxy{}, + wantMode: api.ProxyExecModeScript, + wantCommand: "script.sh", + wantConfig: map[string]interface{}{ + "bind_address": "0.0.0.0", + "bind_port": 10000, // "randomly" chosen from our range of 1 + "local_service_address": "127.0.0.1:8000", // port from service reg + }, + }, + { + name: "global defaults - daemon", + globalConfig: ` + bind_addr = "0.0.0.0" + connect { + enabled = true + proxy_defaults = { + bind_min_port = 10000 + bind_max_port = 10000 + exec_mode = "daemon" + daemon_command = "daemon.sh" + } + } + `, + proxy: structs.ServiceDefinitionConnectProxy{}, + wantMode: api.ProxyExecModeDaemon, + wantCommand: "daemon.sh", + wantConfig: map[string]interface{}{ + "bind_address": "0.0.0.0", + "bind_port": 10000, // "randomly" chosen from our range of 1 + 
"local_service_address": "127.0.0.1:8000", // port from service reg + }, + }, + { + name: "global default config merge", + globalConfig: ` + bind_addr = "0.0.0.0" + connect { + enabled = true + proxy_defaults = { + bind_min_port = 10000 + bind_max_port = 10000 + config = { + connect_timeout_ms = 1000 + } + } + } + `, + proxy: structs.ServiceDefinitionConnectProxy{ + Config: map[string]interface{}{ + "foo": "bar", + }, + }, + wantMode: api.ProxyExecModeDaemon, + wantCommand: "consul connect proxy", + wantConfig: map[string]interface{}{ + "bind_address": "0.0.0.0", + "bind_port": 10000, // "randomly" chosen from our range of 1 + "local_service_address": "127.0.0.1:8000", // port from service reg + "connect_timeout_ms": 1000, + "foo": "bar", + }, + }, + { + name: "overrides in reg", + globalConfig: ` + bind_addr = "0.0.0.0" + connect { + enabled = true + proxy_defaults = { + bind_min_port = 10000 + bind_max_port = 10000 + exec_mode = "daemon" + daemon_command = "daemon.sh" + script_command = "script.sh" + config = { + connect_timeout_ms = 1000 + } + } + } + `, + proxy: structs.ServiceDefinitionConnectProxy{ + ExecMode: "script", + Command: "foo.sh", + Config: map[string]interface{}{ + "connect_timeout_ms": 2000, + "bind_address": "127.0.0.1", + "bind_port": 1024, + "local_service_address": "127.0.0.1:9191", + }, + }, + wantMode: api.ProxyExecModeScript, + wantCommand: "foo.sh", + wantConfig: map[string]interface{}{ + "bind_address": "127.0.0.1", + "bind_port": float64(1024), + "local_service_address": "127.0.0.1:9191", + "connect_timeout_ms": float64(2000), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + a := NewTestAgent(t.Name(), tt.globalConfig) + defer a.Shutdown() + + // Register the basic service with the required config + { + reg.Connect.Proxy = &tt.proxy + req, _ := http.NewRequest("PUT", "/v1/agent/service/register", jsonReader(reg)) + resp := httptest.NewRecorder() + 
_, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + req, _ := http.NewRequest("GET", "/v1/agent/connect/proxy/test-id-proxy", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentConnectProxyConfig(resp, req) + require.NoError(err) + + proxyCfg := obj.(*api.ConnectProxyConfig) + assert.Equal("test-id-proxy", proxyCfg.ProxyServiceID) + assert.Equal("test-id", proxyCfg.TargetServiceID) + assert.Equal("test", proxyCfg.TargetServiceName) + assert.Equal(tt.wantMode, proxyCfg.ExecMode) + assert.Equal(tt.wantCommand, proxyCfg.Command) + require.Equal(tt.wantConfig, proxyCfg.Config) + }) + } +} + func TestAgentConnectAuthorize_badBody(t *testing.T) { t.Parallel() diff --git a/agent/config/builder.go b/agent/config/builder.go index 6ad6c70b5..3d9818adc 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -531,6 +531,17 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { connectCAConfig = c.Connect.CAConfig } + proxyDefaultExecMode := "" + proxyDefaultDaemonCommand := "" + proxyDefaultScriptCommand := "" + proxyDefaultConfig := make(map[string]interface{}) + if c.Connect != nil && c.Connect.ProxyDefaults != nil { + proxyDefaultExecMode = b.stringVal(c.Connect.ProxyDefaults.ExecMode) + proxyDefaultDaemonCommand = b.stringVal(c.Connect.ProxyDefaults.DaemonCommand) + proxyDefaultScriptCommand = b.stringVal(c.Connect.ProxyDefaults.ScriptCommand) + proxyDefaultConfig = c.Connect.ProxyDefaults.Config + } + // ---------------------------------------------------------------- // build runtime config // @@ -638,100 +649,104 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { TelemetryStatsiteAddr: b.stringVal(c.Telemetry.StatsiteAddr), // Agent - AdvertiseAddrLAN: advertiseAddrLAN, - AdvertiseAddrWAN: advertiseAddrWAN, - BindAddr: bindAddr, - Bootstrap: b.boolVal(c.Bootstrap), - BootstrapExpect: b.intVal(c.BootstrapExpect), - CAFile: 
b.stringVal(c.CAFile), - CAPath: b.stringVal(c.CAPath), - CertFile: b.stringVal(c.CertFile), - CheckUpdateInterval: b.durationVal("check_update_interval", c.CheckUpdateInterval), - Checks: checks, - ClientAddrs: clientAddrs, - ConnectEnabled: connectEnabled, - ConnectProxyBindMinPort: proxyBindMinPort, - ConnectProxyBindMaxPort: proxyBindMaxPort, - ConnectCAProvider: connectCAProvider, - ConnectCAConfig: connectCAConfig, - DataDir: b.stringVal(c.DataDir), - Datacenter: strings.ToLower(b.stringVal(c.Datacenter)), - DevMode: b.boolVal(b.Flags.DevMode), - DisableAnonymousSignature: b.boolVal(c.DisableAnonymousSignature), - DisableCoordinates: b.boolVal(c.DisableCoordinates), - DisableHostNodeID: b.boolVal(c.DisableHostNodeID), - DisableKeyringFile: b.boolVal(c.DisableKeyringFile), - DisableRemoteExec: b.boolVal(c.DisableRemoteExec), - DisableUpdateCheck: b.boolVal(c.DisableUpdateCheck), - DiscardCheckOutput: b.boolVal(c.DiscardCheckOutput), - DiscoveryMaxStale: b.durationVal("discovery_max_stale", c.DiscoveryMaxStale), - EnableAgentTLSForChecks: b.boolVal(c.EnableAgentTLSForChecks), - EnableDebug: b.boolVal(c.EnableDebug), - EnableScriptChecks: b.boolVal(c.EnableScriptChecks), - EnableSyslog: b.boolVal(c.EnableSyslog), - EnableUI: b.boolVal(c.UI), - EncryptKey: b.stringVal(c.EncryptKey), - EncryptVerifyIncoming: b.boolVal(c.EncryptVerifyIncoming), - EncryptVerifyOutgoing: b.boolVal(c.EncryptVerifyOutgoing), - KeyFile: b.stringVal(c.KeyFile), - LeaveDrainTime: b.durationVal("performance.leave_drain_time", c.Performance.LeaveDrainTime), - LeaveOnTerm: leaveOnTerm, - LogLevel: b.stringVal(c.LogLevel), - NodeID: types.NodeID(b.stringVal(c.NodeID)), - NodeMeta: c.NodeMeta, - NodeName: b.nodeName(c.NodeName), - NonVotingServer: b.boolVal(c.NonVotingServer), - PidFile: b.stringVal(c.PidFile), - RPCAdvertiseAddr: rpcAdvertiseAddr, - RPCBindAddr: rpcBindAddr, - RPCHoldTimeout: b.durationVal("performance.rpc_hold_timeout", c.Performance.RPCHoldTimeout), - RPCMaxBurst: 
b.intVal(c.Limits.RPCMaxBurst), - RPCProtocol: b.intVal(c.RPCProtocol), - RPCRateLimit: rate.Limit(b.float64Val(c.Limits.RPCRate)), - RaftProtocol: b.intVal(c.RaftProtocol), - RaftSnapshotThreshold: b.intVal(c.RaftSnapshotThreshold), - RaftSnapshotInterval: b.durationVal("raft_snapshot_interval", c.RaftSnapshotInterval), - ReconnectTimeoutLAN: b.durationVal("reconnect_timeout", c.ReconnectTimeoutLAN), - ReconnectTimeoutWAN: b.durationVal("reconnect_timeout_wan", c.ReconnectTimeoutWAN), - RejoinAfterLeave: b.boolVal(c.RejoinAfterLeave), - RetryJoinIntervalLAN: b.durationVal("retry_interval", c.RetryJoinIntervalLAN), - RetryJoinIntervalWAN: b.durationVal("retry_interval_wan", c.RetryJoinIntervalWAN), - RetryJoinLAN: b.expandAllOptionalAddrs("retry_join", c.RetryJoinLAN), - RetryJoinMaxAttemptsLAN: b.intVal(c.RetryJoinMaxAttemptsLAN), - RetryJoinMaxAttemptsWAN: b.intVal(c.RetryJoinMaxAttemptsWAN), - RetryJoinWAN: b.expandAllOptionalAddrs("retry_join_wan", c.RetryJoinWAN), - SegmentName: b.stringVal(c.SegmentName), - Segments: segments, - SerfAdvertiseAddrLAN: serfAdvertiseAddrLAN, - SerfAdvertiseAddrWAN: serfAdvertiseAddrWAN, - SerfBindAddrLAN: serfBindAddrLAN, - SerfBindAddrWAN: serfBindAddrWAN, - SerfPortLAN: serfPortLAN, - SerfPortWAN: serfPortWAN, - ServerMode: b.boolVal(c.ServerMode), - ServerName: b.stringVal(c.ServerName), - ServerPort: serverPort, - Services: services, - SessionTTLMin: b.durationVal("session_ttl_min", c.SessionTTLMin), - SkipLeaveOnInt: skipLeaveOnInt, - StartJoinAddrsLAN: b.expandAllOptionalAddrs("start_join", c.StartJoinAddrsLAN), - StartJoinAddrsWAN: b.expandAllOptionalAddrs("start_join_wan", c.StartJoinAddrsWAN), - SyslogFacility: b.stringVal(c.SyslogFacility), - TLSCipherSuites: b.tlsCipherSuites("tls_cipher_suites", c.TLSCipherSuites), - TLSMinVersion: b.stringVal(c.TLSMinVersion), - TLSPreferServerCipherSuites: b.boolVal(c.TLSPreferServerCipherSuites), - TaggedAddresses: c.TaggedAddresses, - TranslateWANAddrs: 
b.boolVal(c.TranslateWANAddrs), - UIDir: b.stringVal(c.UIDir), - UnixSocketGroup: b.stringVal(c.UnixSocket.Group), - UnixSocketMode: b.stringVal(c.UnixSocket.Mode), - UnixSocketUser: b.stringVal(c.UnixSocket.User), - VerifyIncoming: b.boolVal(c.VerifyIncoming), - VerifyIncomingHTTPS: b.boolVal(c.VerifyIncomingHTTPS), - VerifyIncomingRPC: b.boolVal(c.VerifyIncomingRPC), - VerifyOutgoing: b.boolVal(c.VerifyOutgoing), - VerifyServerHostname: b.boolVal(c.VerifyServerHostname), - Watches: c.Watches, + AdvertiseAddrLAN: advertiseAddrLAN, + AdvertiseAddrWAN: advertiseAddrWAN, + BindAddr: bindAddr, + Bootstrap: b.boolVal(c.Bootstrap), + BootstrapExpect: b.intVal(c.BootstrapExpect), + CAFile: b.stringVal(c.CAFile), + CAPath: b.stringVal(c.CAPath), + CertFile: b.stringVal(c.CertFile), + CheckUpdateInterval: b.durationVal("check_update_interval", c.CheckUpdateInterval), + Checks: checks, + ClientAddrs: clientAddrs, + ConnectEnabled: connectEnabled, + ConnectCAProvider: connectCAProvider, + ConnectCAConfig: connectCAConfig, + ConnectProxyBindMinPort: proxyBindMinPort, + ConnectProxyBindMaxPort: proxyBindMaxPort, + ConnectProxyDefaultExecMode: proxyDefaultExecMode, + ConnectProxyDefaultDaemonCommand: proxyDefaultDaemonCommand, + ConnectProxyDefaultScriptCommand: proxyDefaultScriptCommand, + ConnectProxyDefaultConfig: proxyDefaultConfig, + DataDir: b.stringVal(c.DataDir), + Datacenter: strings.ToLower(b.stringVal(c.Datacenter)), + DevMode: b.boolVal(b.Flags.DevMode), + DisableAnonymousSignature: b.boolVal(c.DisableAnonymousSignature), + DisableCoordinates: b.boolVal(c.DisableCoordinates), + DisableHostNodeID: b.boolVal(c.DisableHostNodeID), + DisableKeyringFile: b.boolVal(c.DisableKeyringFile), + DisableRemoteExec: b.boolVal(c.DisableRemoteExec), + DisableUpdateCheck: b.boolVal(c.DisableUpdateCheck), + DiscardCheckOutput: b.boolVal(c.DiscardCheckOutput), + DiscoveryMaxStale: b.durationVal("discovery_max_stale", c.DiscoveryMaxStale), + EnableAgentTLSForChecks: 
b.boolVal(c.EnableAgentTLSForChecks), + EnableDebug: b.boolVal(c.EnableDebug), + EnableScriptChecks: b.boolVal(c.EnableScriptChecks), + EnableSyslog: b.boolVal(c.EnableSyslog), + EnableUI: b.boolVal(c.UI), + EncryptKey: b.stringVal(c.EncryptKey), + EncryptVerifyIncoming: b.boolVal(c.EncryptVerifyIncoming), + EncryptVerifyOutgoing: b.boolVal(c.EncryptVerifyOutgoing), + KeyFile: b.stringVal(c.KeyFile), + LeaveDrainTime: b.durationVal("performance.leave_drain_time", c.Performance.LeaveDrainTime), + LeaveOnTerm: leaveOnTerm, + LogLevel: b.stringVal(c.LogLevel), + NodeID: types.NodeID(b.stringVal(c.NodeID)), + NodeMeta: c.NodeMeta, + NodeName: b.nodeName(c.NodeName), + NonVotingServer: b.boolVal(c.NonVotingServer), + PidFile: b.stringVal(c.PidFile), + RPCAdvertiseAddr: rpcAdvertiseAddr, + RPCBindAddr: rpcBindAddr, + RPCHoldTimeout: b.durationVal("performance.rpc_hold_timeout", c.Performance.RPCHoldTimeout), + RPCMaxBurst: b.intVal(c.Limits.RPCMaxBurst), + RPCProtocol: b.intVal(c.RPCProtocol), + RPCRateLimit: rate.Limit(b.float64Val(c.Limits.RPCRate)), + RaftProtocol: b.intVal(c.RaftProtocol), + RaftSnapshotThreshold: b.intVal(c.RaftSnapshotThreshold), + RaftSnapshotInterval: b.durationVal("raft_snapshot_interval", c.RaftSnapshotInterval), + ReconnectTimeoutLAN: b.durationVal("reconnect_timeout", c.ReconnectTimeoutLAN), + ReconnectTimeoutWAN: b.durationVal("reconnect_timeout_wan", c.ReconnectTimeoutWAN), + RejoinAfterLeave: b.boolVal(c.RejoinAfterLeave), + RetryJoinIntervalLAN: b.durationVal("retry_interval", c.RetryJoinIntervalLAN), + RetryJoinIntervalWAN: b.durationVal("retry_interval_wan", c.RetryJoinIntervalWAN), + RetryJoinLAN: b.expandAllOptionalAddrs("retry_join", c.RetryJoinLAN), + RetryJoinMaxAttemptsLAN: b.intVal(c.RetryJoinMaxAttemptsLAN), + RetryJoinMaxAttemptsWAN: b.intVal(c.RetryJoinMaxAttemptsWAN), + RetryJoinWAN: b.expandAllOptionalAddrs("retry_join_wan", c.RetryJoinWAN), + SegmentName: b.stringVal(c.SegmentName), + Segments: segments, + 
SerfAdvertiseAddrLAN: serfAdvertiseAddrLAN, + SerfAdvertiseAddrWAN: serfAdvertiseAddrWAN, + SerfBindAddrLAN: serfBindAddrLAN, + SerfBindAddrWAN: serfBindAddrWAN, + SerfPortLAN: serfPortLAN, + SerfPortWAN: serfPortWAN, + ServerMode: b.boolVal(c.ServerMode), + ServerName: b.stringVal(c.ServerName), + ServerPort: serverPort, + Services: services, + SessionTTLMin: b.durationVal("session_ttl_min", c.SessionTTLMin), + SkipLeaveOnInt: skipLeaveOnInt, + StartJoinAddrsLAN: b.expandAllOptionalAddrs("start_join", c.StartJoinAddrsLAN), + StartJoinAddrsWAN: b.expandAllOptionalAddrs("start_join_wan", c.StartJoinAddrsWAN), + SyslogFacility: b.stringVal(c.SyslogFacility), + TLSCipherSuites: b.tlsCipherSuites("tls_cipher_suites", c.TLSCipherSuites), + TLSMinVersion: b.stringVal(c.TLSMinVersion), + TLSPreferServerCipherSuites: b.boolVal(c.TLSPreferServerCipherSuites), + TaggedAddresses: c.TaggedAddresses, + TranslateWANAddrs: b.boolVal(c.TranslateWANAddrs), + UIDir: b.stringVal(c.UIDir), + UnixSocketGroup: b.stringVal(c.UnixSocket.Group), + UnixSocketMode: b.stringVal(c.UnixSocket.Mode), + UnixSocketUser: b.stringVal(c.UnixSocket.User), + VerifyIncoming: b.boolVal(c.VerifyIncoming), + VerifyIncomingHTTPS: b.boolVal(c.VerifyIncomingHTTPS), + VerifyIncomingRPC: b.boolVal(c.VerifyIncomingRPC), + VerifyOutgoing: b.boolVal(c.VerifyOutgoing), + VerifyServerHostname: b.boolVal(c.VerifyServerHostname), + Watches: c.Watches, } if rt.BootstrapExpect == 1 { diff --git a/agent/config/runtime.go b/agent/config/runtime.go index 15a7ac2ba..ea04d5aa0 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -633,15 +633,15 @@ type RuntimeConfig struct { // ConnectProxyDefaultExecMode is used where a registration doesn't include an // exec_mode. Defaults to daemon. - ConnectProxyDefaultExecMode *string + ConnectProxyDefaultExecMode string // ConnectProxyDefaultDaemonCommand is used to start proxy in exec_mode = // daemon if not specified at registration time. 
- ConnectProxyDefaultDaemonCommand *string + ConnectProxyDefaultDaemonCommand string // ConnectProxyDefaultScriptCommand is used to start proxy in exec_mode = // script if not specified at registration time. - ConnectProxyDefaultScriptCommand *string + ConnectProxyDefaultScriptCommand string // ConnectProxyDefaultConfig is merged with any config specified at // registration time to allow global control of defaults. diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 9dff7733b..36fffe16a 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -2830,7 +2830,9 @@ func TestFullConfig(t *testing.T) { script_command = "proxyctl.sh" config = { foo = "bar" - connect_timeout_ms = 1000 + # hack float since json parses numbers as float and we have to + # assert against the same thing + connect_timeout_ms = 1000.0 pedantic_mode = true } } @@ -3423,6 +3425,14 @@ func TestFullConfig(t *testing.T) { "g4cvJyys": "IRLXE9Ds", "hyMy9Oxn": "XeBp4Sis", }, + ConnectProxyDefaultExecMode: "script", + ConnectProxyDefaultDaemonCommand: "consul connect proxy", + ConnectProxyDefaultScriptCommand: "proxyctl.sh", + ConnectProxyDefaultConfig: map[string]interface{}{ + "foo": "bar", + "connect_timeout_ms": float64(1000), + "pedantic_mode": true, + }, DNSAddrs: []net.Addr{tcpAddr("93.95.95.81:7001"), udpAddr("93.95.95.81:7001")}, DNSARecordLimit: 29907, DNSAllowStale: true, @@ -4099,9 +4109,9 @@ func TestSanitize(t *testing.T) { "ConnectProxyBindMaxPort": 0, "ConnectProxyBindMinPort": 0, "ConnectProxyDefaultConfig": {}, - "ConnectProxyDefaultDaemonCommand": null, - "ConnectProxyDefaultExecMode": null, - "ConnectProxyDefaultScriptCommand": null, + "ConnectProxyDefaultDaemonCommand": "", + "ConnectProxyDefaultExecMode": "", + "ConnectProxyDefaultScriptCommand": "", "ConsulCoordinateUpdateBatchSize": 0, "ConsulCoordinateUpdateMaxBatches": 0, "ConsulCoordinateUpdatePeriod": "15s", diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go 
index fbb5eed49..552c57535 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -150,7 +150,7 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) (string, string spiffeId := &SpiffeIDService{ Host: fmt.Sprintf("%s.consul", testClusterID), Namespace: "default", - Datacenter: "dc01", + Datacenter: "dc1", Service: service, } diff --git a/agent/connect/testing_spiffe.go b/agent/connect/testing_spiffe.go index e2e7a470f..d6a70cb81 100644 --- a/agent/connect/testing_spiffe.go +++ b/agent/connect/testing_spiffe.go @@ -9,7 +9,7 @@ func TestSpiffeIDService(t testing.T, service string) *SpiffeIDService { return &SpiffeIDService{ Host: testClusterID + ".consul", Namespace: "default", - Datacenter: "dc01", + Datacenter: "dc1", Service: service, } } diff --git a/agent/http_oss.go b/agent/http_oss.go index d9b8068ef..9b9857e40 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -48,7 +48,7 @@ func init() { registerEndpoint("/v1/connect/ca/roots", []string{"GET"}, (*HTTPServer).ConnectCARoots) registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionEndpoint) registerEndpoint("/v1/connect/intentions/match", []string{"GET"}, (*HTTPServer).IntentionMatch) - registerEndpoint("/v1/connect/intentions/", []string{"GET"}, (*HTTPServer).IntentionSpecific) + registerEndpoint("/v1/connect/intentions/", []string{"GET", "PUT", "DELETE"}, (*HTTPServer).IntentionSpecific) registerEndpoint("/v1/coordinate/datacenters", []string{"GET"}, (*HTTPServer).CoordinateDatacenters) registerEndpoint("/v1/coordinate/nodes", []string{"GET"}, (*HTTPServer).CoordinateNodes) registerEndpoint("/v1/coordinate/node/", []string{"GET"}, (*HTTPServer).CoordinateNode) diff --git a/agent/local/state.go b/agent/local/state.go index 839b3cdb2..8df600b32 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -608,7 +608,12 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*str l.Lock() defer l.Unlock() - 
// Allocate port if needed (min and max inclusive) + // Does this proxy instance allready exist? + if existing, ok := l.managedProxies[svc.ID]; ok { + svc.Port = existing.Proxy.ProxyService.Port + } + + // Allocate port if needed (min and max inclusive). rangeLen := l.config.ProxyBindMaxPort - l.config.ProxyBindMinPort + 1 if svc.Port < 1 && l.config.ProxyBindMinPort > 0 && rangeLen > 0 { // This should be a really short list so don't bother optimising lookup yet. diff --git a/agent/local/state_test.go b/agent/local/state_test.go index 16975d963..dd887ccb1 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -1721,6 +1721,21 @@ func TestStateProxyManagement(t *testing.T) { // Port is non-deterministic but could be either of 20000 or 20001 assert.Contains([]int{20000, 20001}, svc.Port) + { + // Re-registering same proxy again should not pick a random port but re-use + // the assigned one. + svcDup, err := state.AddProxy(&p1, "fake-token") + require.NoError(err) + + assert.Equal("web-proxy", svcDup.ID) + assert.Equal("web-proxy", svcDup.Service) + assert.Equal(structs.ServiceKindConnectProxy, svcDup.Kind) + assert.Equal("web", svcDup.ProxyDestination) + assert.Equal("", svcDup.Address, "should have empty address by default") + // Port must be same as before + assert.Equal(svc.Port, svcDup.Port) + } + // Second proxy should claim other port p2 := p1 p2.TargetServiceID = "cache" diff --git a/agent/structs/connect.go b/agent/structs/connect.go index 20970c1bf..90513ae8c 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -24,8 +24,11 @@ type ConnectAuthorizeRequest struct { type ProxyExecMode int const ( + // ProxyExecModeUnspecified uses the global default proxy mode. + ProxyExecModeUnspecified ProxyExecMode = iota + // ProxyExecModeDaemon executes a proxy process as a supervised daemon. 
- ProxyExecModeDaemon ProxyExecMode = iota + ProxyExecModeDaemon // ProxyExecModeScript executes a proxy config script on each change to it's // config. @@ -35,6 +38,8 @@ const ( // String implements Stringer func (m ProxyExecMode) String() string { switch m { + case ProxyExecModeUnspecified: + return "global_default" case ProxyExecModeDaemon: return "daemon" case ProxyExecModeScript: diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index 2ed424178..d4dc21414 100644 --- a/agent/structs/service_definition.go +++ b/agent/structs/service_definition.go @@ -55,10 +55,11 @@ func (s *ServiceDefinition) ConnectManagedProxy() (*ConnectManagedProxy, error) // which we shouldn't hard code ourselves here... ns := s.NodeService() - execMode := ProxyExecModeDaemon + execMode := ProxyExecModeUnspecified switch s.Connect.Proxy.ExecMode { case "": - execMode = ProxyExecModeDaemon + // Use default + break case "daemon": execMode = ProxyExecModeDaemon case "script": diff --git a/api/agent.go b/api/agent.go index a81fd96f8..b8125c91e 100644 --- a/api/agent.go +++ b/api/agent.go @@ -609,9 +609,6 @@ func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) } // ConnectCALeaf gets the leaf certificate for the given service ID. -// -// TODO(mitchellh): we need to test this better once we have a way to -// configure CAs from the API package (when the CA work is done). 
func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) { r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID) r.setQueryOptions(q) diff --git a/api/agent_test.go b/api/agent_test.go index 8cc58e012..1f816c23a 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -1049,17 +1049,71 @@ func TestAPI_AgentConnectCARoots_empty(t *testing.T) { agent := c.Agent() list, meta, err := agent.ConnectCARoots(nil) - require.Nil(err) + require.NoError(err) require.Equal(uint64(0), meta.LastIndex) require.Len(list.Roots, 0) } +func TestAPI_AgentConnectCARoots_list(t *testing.T) { + t.Parallel() + + require := require.New(t) + c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) { + // Force auto port range to 1 port so we have deterministic response. + c.Connect = map[string]interface{}{ + "enabled": true, + } + }) + defer s.Stop() + + agent := c.Agent() + list, meta, err := agent.ConnectCARoots(nil) + require.NoError(err) + require.True(meta.LastIndex > 0) + require.Len(list.Roots, 1) +} + +func TestAPI_AgentConnectCALeaf(t *testing.T) { + t.Parallel() + + require := require.New(t) + c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) { + // Force auto port range to 1 port so we have deterministic response. + c.Connect = map[string]interface{}{ + "enabled": true, + } + }) + defer s.Stop() + + agent := c.Agent() + // Setup service + reg := &AgentServiceRegistration{ + Name: "foo", + Tags: []string{"bar", "baz"}, + Port: 8000, + } + require.NoError(agent.ServiceRegister(reg)) + + leaf, meta, err := agent.ConnectCALeaf("foo", nil) + require.NoError(err) + require.True(meta.LastIndex > 0) + // Sanity checks here as we have actual certificate validation checks at many + // other levels. 
+ require.NotEmpty(leaf.SerialNumber) + require.NotEmpty(leaf.CertPEM) + require.NotEmpty(leaf.PrivateKeyPEM) + require.Equal("foo", leaf.Service) + require.True(strings.HasSuffix(leaf.ServiceURI, "/svc/foo")) + require.True(leaf.ModifyIndex > 0) + require.True(leaf.ValidAfter.Before(time.Now())) + require.True(leaf.ValidBefore.After(time.Now())) +} + // TODO(banks): once we have CA stuff setup properly we can probably make this // much more complete. This is just a sanity check that the agent code basically // works. func TestAPI_AgentConnectAuthorize(t *testing.T) { t.Parallel() - require := require.New(t) c, s := makeClient(t) defer s.Stop() @@ -1079,7 +1133,15 @@ func TestAPI_AgentConnectAuthorize(t *testing.T) { func TestAPI_AgentConnectProxyConfig(t *testing.T) { t.Parallel() - c, s := makeClient(t) + c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) { + // Force auto port range to 1 port so we have deterministic response. + c.Connect = map[string]interface{}{ + "proxy_defaults": map[string]interface{}{ + "bind_min_port": 20000, + "bind_max_port": 20000, + }, + } + }) defer s.Stop() agent := c.Agent() @@ -1107,9 +1169,12 @@ func TestAPI_AgentConnectProxyConfig(t *testing.T) { TargetServiceName: "foo", ContentHash: "e662ea8600d84cf0", ExecMode: "daemon", - Command: "", + Command: "consul connect proxy", Config: map[string]interface{}{ - "foo": "bar", + "bind_address": "127.0.0.1", + "bind_port": float64(20000), + "foo": "bar", + "local_service_address": "127.0.0.1:8000", }, } require.Equal(t, expectConfig, config) diff --git a/connect/proxy/config.go b/connect/proxy/config.go index 6fad0bd55..840afa896 100644 --- a/connect/proxy/config.go +++ b/connect/proxy/config.go @@ -52,11 +52,6 @@ type Config struct { // private key to be used in development instead of the ones supplied by // Connect. 
DevServiceKeyFile string `json:"dev_service_key_file" hcl:"dev_service_key_file"` - - // service is a connect.Service instance representing the proxied service. It - // is created internally by the code responsible for setting up config as it - // may depend on other external dependencies - service *connect.Service } // PublicListenerConfig contains the parameters needed for the incoming mTLS @@ -89,6 +84,9 @@ func (plc *PublicListenerConfig) applyDefaults() { if plc.HandshakeTimeoutMs == 0 { plc.HandshakeTimeoutMs = 10000 } + if plc.BindAddress == "" { + plc.BindAddress = "0.0.0.0" + } } // UpstreamConfig configures an upstream (outgoing) listener. @@ -258,7 +256,6 @@ func NewAgentConfigWatcher(client *api.Client, proxyID string, func (w *AgentConfigWatcher) handler(blockVal watch.BlockingParamVal, val interface{}) { - log.Printf("DEBUG: got hash %s", blockVal.(watch.WaitHashVal)) resp, ok := val.(*api.ConnectProxyConfig) if !ok { @@ -266,25 +263,16 @@ func (w *AgentConfigWatcher) handler(blockVal watch.BlockingParamVal, return } - // Setup Service instance now we know target ID etc - service, err := connect.NewService(resp.TargetServiceID, w.client) - if err != nil { - w.logger.Printf("[WARN] proxy config watch failed to initialize"+ - " service: %s", err) - return - } - // Create proxy config from the response cfg := &Config{ ProxyID: w.proxyID, // Token should be already setup in the client ProxiedServiceID: resp.TargetServiceID, ProxiedServiceNamespace: "default", - service: service, } // Unmarshal configs - err = mapstructure.Decode(resp.Config, &cfg.PublicListener) + err := mapstructure.Decode(resp.Config, &cfg.PublicListener) if err != nil { w.logger.Printf("[ERR] proxy config watch public listener config "+ "couldn't be parsed: %s", err) diff --git a/connect/proxy/config_test.go b/connect/proxy/config_test.go index e576d5f82..1473e8fea 100644 --- a/connect/proxy/config_test.go +++ b/connect/proxy/config_test.go @@ -175,11 +175,6 @@ func 
TestAgentConfigWatcher(t *testing.T) { }, } - // nil this out as comparisons are problematic, we'll explicitly sanity check - // it's reasonable later. - assert.NotNil(t, cfg.service) - cfg.service = nil - assert.Equal(t, expectCfg, cfg) // TODO(banks): Sanity check the service is viable and gets TLS certs eventually from @@ -213,11 +208,6 @@ func TestAgentConfigWatcher(t *testing.T) { }) expectCfg.PublicListener.LocalConnectTimeoutMs = 444 - // nil this out as comparisons are problematic, we'll explicitly sanity check - // it's reasonable later. - assert.NotNil(t, cfg.service) - cfg.service = nil - assert.Equal(t, expectCfg, cfg) } diff --git a/connect/proxy/listener.go b/connect/proxy/listener.go index 12134f840..33f1f5292 100644 --- a/connect/proxy/listener.go +++ b/connect/proxy/listener.go @@ -20,8 +20,10 @@ type Listener struct { // Service is the connect service instance to use. Service *connect.Service + // listenFunc, dialFunc and bindAddr are set by type-specific constructors listenFunc func() (net.Listener, error) dialFunc func() (net.Conn, error) + bindAddr string stopFlag int32 stopChan chan struct{} @@ -42,17 +44,17 @@ type Listener struct { // connections and proxy them to the configured local application over TCP. 
func NewPublicListener(svc *connect.Service, cfg PublicListenerConfig, logger *log.Logger) *Listener { + bindAddr := fmt.Sprintf("%s:%d", cfg.BindAddress, cfg.BindPort) return &Listener{ Service: svc, listenFunc: func() (net.Listener, error) { - return tls.Listen("tcp", - fmt.Sprintf("%s:%d", cfg.BindAddress, cfg.BindPort), - svc.ServerTLSConfig()) + return tls.Listen("tcp", bindAddr, svc.ServerTLSConfig()) }, dialFunc: func() (net.Conn, error) { return net.DialTimeout("tcp", cfg.LocalServiceAddress, time.Duration(cfg.LocalConnectTimeoutMs)*time.Millisecond) }, + bindAddr: bindAddr, stopChan: make(chan struct{}), listeningChan: make(chan struct{}), logger: logger, @@ -63,11 +65,11 @@ func NewPublicListener(svc *connect.Service, cfg PublicListenerConfig, // connections that are proxied to a discovered Connect service instance. func NewUpstreamListener(svc *connect.Service, cfg UpstreamConfig, logger *log.Logger) *Listener { + bindAddr := fmt.Sprintf("%s:%d", cfg.LocalBindAddress, cfg.LocalBindPort) return &Listener{ Service: svc, listenFunc: func() (net.Listener, error) { - return net.Listen("tcp", - fmt.Sprintf("%s:%d", cfg.LocalBindAddress, cfg.LocalBindPort)) + return net.Listen("tcp", bindAddr) }, dialFunc: func() (net.Conn, error) { if cfg.resolver == nil { @@ -78,6 +80,7 @@ func NewUpstreamListener(svc *connect.Service, cfg UpstreamConfig, defer cancel() return svc.Dial(ctx, cfg.resolver) }, + bindAddr: bindAddr, stopChan: make(chan struct{}), listeningChan: make(chan struct{}), logger: logger, @@ -142,3 +145,8 @@ func (l *Listener) Close() error { func (l *Listener) Wait() { <-l.listeningChan } + +// BindAddr returns the address the listen is bound to. 
+func (l *Listener) BindAddr() string { + return l.bindAddr +} diff --git a/connect/proxy/proxy.go b/connect/proxy/proxy.go index bda6f3afb..717d45ae6 100644 --- a/connect/proxy/proxy.go +++ b/connect/proxy/proxy.go @@ -1,6 +1,8 @@ package proxy import ( + "bytes" + "crypto/x509" "log" "github.com/hashicorp/consul/api" @@ -14,6 +16,7 @@ type Proxy struct { cfgWatcher ConfigWatcher stopChan chan struct{} logger *log.Logger + service *connect.Service } // NewFromConfigFile returns a Proxy instance configured just from a local file. @@ -27,12 +30,11 @@ func NewFromConfigFile(client *api.Client, filename string, } service, err := connect.NewDevServiceFromCertFiles(cfg.ProxiedServiceID, - client, logger, cfg.DevCAFile, cfg.DevServiceCertFile, + logger, cfg.DevCAFile, cfg.DevServiceCertFile, cfg.DevServiceKeyFile) if err != nil { return nil, err } - cfg.service = service p := &Proxy{ proxyID: cfg.ProxyID, @@ -40,6 +42,7 @@ func NewFromConfigFile(client *api.Client, filename string, cfgWatcher: NewStaticConfigWatcher(cfg), stopChan: make(chan struct{}), logger: logger, + service: service, } return p, nil } @@ -47,16 +50,18 @@ func NewFromConfigFile(client *api.Client, filename string, // New returns a Proxy with the given id, consuming the provided (configured) // agent. It is ready to Run(). func New(client *api.Client, proxyID string, logger *log.Logger) (*Proxy, error) { + cw, err := NewAgentConfigWatcher(client, proxyID, logger) + if err != nil { + return nil, err + } p := &Proxy{ - proxyID: proxyID, - client: client, - cfgWatcher: &AgentConfigWatcher{ - client: client, - proxyID: proxyID, - logger: logger, - }, - stopChan: make(chan struct{}), - logger: logger, + proxyID: proxyID, + client: client, + cfgWatcher: cw, + stopChan: make(chan struct{}), + logger: logger, + // Can't load service yet as we only have the proxy's ID not the service's + // until initial config fetch happens. 
} return p, nil } @@ -71,16 +76,29 @@ func (p *Proxy) Serve() error { select { case newCfg := <-p.cfgWatcher.Watch(): p.logger.Printf("[DEBUG] got new config") - if newCfg.service == nil { - p.logger.Printf("[ERR] new config has nil service") - continue - } + if cfg == nil { // Initial setup + // Setup Service instance now we know target ID etc + service, err := connect.NewService(newCfg.ProxiedServiceID, p.client) + if err != nil { + return err + } + p.service = service + + go func() { + <-service.ReadyWait() + p.logger.Printf("[INFO] proxy loaded config and ready to serve") + tcfg := service.ServerTLSConfig() + cert, _ := tcfg.GetCertificate(nil) + leaf, _ := x509.ParseCertificate(cert.Certificate[0]) + p.logger.Printf("[DEBUG] leaf: %s roots: %s", leaf.URIs[0], bytes.Join(tcfg.RootCAs.Subjects(), []byte(","))) + }() + newCfg.PublicListener.applyDefaults() - l := NewPublicListener(newCfg.service, newCfg.PublicListener, p.logger) - err := p.startListener("public listener", l) + l := NewPublicListener(p.service, newCfg.PublicListener, p.logger) + err = p.startListener("public listener", l) if err != nil { return err } @@ -93,7 +111,13 @@ func (p *Proxy) Serve() error { uc.applyDefaults() uc.resolver = UpstreamResolverFromClient(p.client, uc) - l := NewUpstreamListener(newCfg.service, uc, p.logger) + if uc.LocalBindPort < 1 { + p.logger.Printf("[ERR] upstream %s has no local_bind_port. 
"+ + "Can't start upstream.", uc.String()) + continue + } + + l := NewUpstreamListener(p.service, uc, p.logger) err := p.startListener(uc.String(), l) if err != nil { p.logger.Printf("[ERR] failed to start upstream %s: %s", uc.String(), @@ -110,6 +134,7 @@ func (p *Proxy) Serve() error { // startPublicListener is run from the internal state machine loop func (p *Proxy) startListener(name string, l *Listener) error { + p.logger.Printf("[INFO] %s starting on %s", name, l.BindAddr()) go func() { err := l.Serve() if err != nil { @@ -122,6 +147,7 @@ func (p *Proxy) startListener(name string, l *Listener) error { go func() { <-p.stopChan l.Close() + }() return nil @@ -131,4 +157,7 @@ func (p *Proxy) startListener(name string, l *Listener) error { // called only once. func (p *Proxy) Close() { close(p.stopChan) + if p.service != nil { + p.service.Close() + } } diff --git a/connect/resolver.go b/connect/resolver.go index 9873fcdf1..98d8c88d3 100644 --- a/connect/resolver.go +++ b/connect/resolver.go @@ -7,7 +7,6 @@ import ( "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/api" - testing "github.com/mitchellh/go-testing-interface" ) // Resolver is the interface implemented by a service discovery mechanism to get @@ -122,7 +121,12 @@ func (cr *ConsulResolver) resolveService(ctx context.Context) (string, connect.C // propagating these trust domains we need to actually fetch the trust domain // somehow. We also need to implement namespaces. Use of test function here is // temporary pending the work on trust domains. 
- certURI := connect.TestSpiffeIDService(&testing.RuntimeT{}, cr.Name) + certURI := &connect.SpiffeIDService{ + Host: "11111111-2222-3333-4444-555555555555.consul", + Namespace: "default", + Datacenter: svcs[idx].Node.Datacenter, + Service: svcs[idx].Service.ProxyDestination, + } return fmt.Sprintf("%s:%d", addr, port), certURI, nil } diff --git a/connect/service.go b/connect/service.go index 4f38558a3..af9fbfcb7 100644 --- a/connect/service.go +++ b/connect/service.go @@ -41,7 +41,8 @@ type Service struct { // fetch certificates and print a loud error message. It will not Close() or // kill the process since that could lead to a crash loop in every service if // ACL token was revoked. All attempts to dial will error and any incoming - // connections will fail to verify. + // connections will fail to verify. It may be nil if the Service is being + // configured from local files for development or testing. client *api.Client // tlsCfg is the dynamic TLS config @@ -63,6 +64,10 @@ type Service struct { // NewService creates and starts a Service. The caller must close the returned // service to free resources and allow the program to exit normally. This is // typically called in a signal handler. +// +// Caller must provide client which is already configured to speak to the local +// Consul agent, and with an ACL token that has `service:write` privileges for +// the serviceID specified. 
func NewService(serviceID string, client *api.Client) (*Service, error) { return NewServiceWithLogger(serviceID, client, log.New(os.Stderr, "", log.LstdFlags)) @@ -89,7 +94,8 @@ func NewServiceWithLogger(serviceID string, client *api.Client, s.rootsWatch.HybridHandler = s.rootsWatchHandler p, err = watch.Parse(map[string]interface{}{ - "type": "connect_leaf", + "type": "connect_leaf", + "service_id": s.serviceID, }) if err != nil { return nil, err @@ -97,26 +103,33 @@ func NewServiceWithLogger(serviceID string, client *api.Client, s.leafWatch = p s.leafWatch.HybridHandler = s.leafWatchHandler - //go s.rootsWatch.RunWithClientAndLogger(s.client, s.logger) - //go s.leafWatch.RunWithClientAndLogger(s.client, s.logger) + go s.rootsWatch.RunWithClientAndLogger(client, s.logger) + go s.leafWatch.RunWithClientAndLogger(client, s.logger) return s, nil } // NewDevServiceFromCertFiles creates a Service using certificate and key files // passed instead of fetching them from the client. -func NewDevServiceFromCertFiles(serviceID string, client *api.Client, - logger *log.Logger, caFile, certFile, keyFile string) (*Service, error) { - s := &Service{ - serviceID: serviceID, - client: client, - logger: logger, - } +func NewDevServiceFromCertFiles(serviceID string, logger *log.Logger, + caFile, certFile, keyFile string) (*Service, error) { + tlsCfg, err := devTLSConfigFromFiles(caFile, certFile, keyFile) if err != nil { return nil, err } - s.tlsCfg = newDynamicTLSConfig(tlsCfg) + return NewDevServiceWithTLSConfig(serviceID, logger, tlsCfg) +} + +// NewDevServiceWithTLSConfig creates a Service using static TLS config passed. +// It's mostly useful for testing. 
+func NewDevServiceWithTLSConfig(serviceID string, logger *log.Logger, + tlsCfg *tls.Config) (*Service, error) { + s := &Service{ + serviceID: serviceID, + logger: logger, + tlsCfg: newDynamicTLSConfig(tlsCfg), + } return s, nil } @@ -274,3 +287,17 @@ func (s *Service) leafWatchHandler(blockParam watch.BlockingParamVal, raw interf s.tlsCfg.SetLeaf(&cert) } + +// Ready returns whether or not both roots and a leaf certificate are +// configured. If both are non-nil, they are assumed to be valid and usable. +func (s *Service) Ready() bool { + return s.tlsCfg.Ready() +} + +// ReadyWait returns a chan that is closed when the the Service becomes ready +// for use. Note that if the Service is ready when it is called it returns a nil +// chan. Ready means that it has root and leaf certificates configured which we +// assume are valid. +func (s *Service) ReadyWait() <-chan struct{} { + return s.tlsCfg.ReadyWait() +} diff --git a/connect/service_test.go b/connect/service_test.go index 20433d1f5..64ca28fc7 100644 --- a/connect/service_test.go +++ b/connect/service_test.go @@ -1,16 +1,21 @@ package connect import ( + "bytes" "context" "crypto/tls" + "crypto/x509" "fmt" "io" "io/ioutil" "net/http" + "strings" "testing" "time" + "github.com/hashicorp/consul/agent" "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/testutil/retry" "github.com/stretchr/testify/require" ) @@ -111,10 +116,91 @@ func TestService_Dial(t *testing.T) { } func TestService_ServerTLSConfig(t *testing.T) { - // TODO(banks): it's mostly meaningless to test this now since we directly set - // the tlsCfg in our TestService helper which is all we'd be asserting on here - // not the actual implementation. Once agent tls fetching is built, it becomes - // more meaningful to actually verify it's returning the correct config. 
+ require := require.New(t) + + a := agent.NewTestAgent("007", "") + defer a.Shutdown() + client := a.Client() + agent := client.Agent() + + // NewTestAgent setup a CA already by default + + // Register a local agent service with a managed proxy + reg := &api.AgentServiceRegistration{ + Name: "web", + Port: 8080, + } + err := agent.ServiceRegister(reg) + require.NoError(err) + + // Now we should be able to create a service that will eventually get it's TLS + // all by itself! + service, err := NewService("web", client) + require.NoError(err) + + // Wait for it to be ready + select { + case <-service.ReadyWait(): + // continue with test case below + case <-time.After(1 * time.Second): + t.Fatalf("timeout waiting for Service.ReadyWait after 1s") + } + + tlsCfg := service.ServerTLSConfig() + + // Sanity check it has a leaf with the right ServiceID and that validates with + // the given roots. + require.NotNil(tlsCfg.GetCertificate) + leaf, err := tlsCfg.GetCertificate(&tls.ClientHelloInfo{}) + require.NoError(err) + cert, err := x509.ParseCertificate(leaf.Certificate[0]) + require.NoError(err) + require.Len(cert.URIs, 1) + require.True(strings.HasSuffix(cert.URIs[0].String(), "/svc/web")) + + // Verify it as a client would + err = clientSideVerifier(tlsCfg, leaf.Certificate) + require.NoError(err) + + // Now test that rotating the root updates + { + // Setup a new generated CA + connect.TestCAConfigSet(t, a, nil) + } + + // After some time, both root and leaves should be different but both should + // still be correct. 
+ oldRootSubjects := bytes.Join(tlsCfg.RootCAs.Subjects(), []byte(", ")) + //oldLeafSerial := connect.HexString(cert.SerialNumber.Bytes()) + oldLeafKeyID := connect.HexString(cert.SubjectKeyId) + retry.Run(t, func(r *retry.R) { + updatedCfg := service.ServerTLSConfig() + + // Wait until roots are different + rootSubjects := bytes.Join(updatedCfg.RootCAs.Subjects(), []byte(", ")) + if bytes.Equal(oldRootSubjects, rootSubjects) { + r.Fatalf("root certificates should have changed, got %s", + rootSubjects) + } + + leaf, err := updatedCfg.GetCertificate(&tls.ClientHelloInfo{}) + r.Check(err) + cert, err := x509.ParseCertificate(leaf.Certificate[0]) + r.Check(err) + + // TODO(banks): Current CA implementation resets the serial index when CA + // config changes which means same serial is issued by new CA config failing + // this test. Re-enable once the CA is changed to fix that. + + // if oldLeafSerial == connect.HexString(cert.SerialNumber.Bytes()) { + // r.Fatalf("leaf certificate should have changed, got serial %s", + // oldLeafSerial) + // } + if oldLeafKeyID == connect.HexString(cert.SubjectKeyId) { + r.Fatalf("leaf should have a different key, got matching SubjectKeyID = %s", + oldLeafKeyID) + } + }) } func TestService_HTTPClient(t *testing.T) { diff --git a/connect/testing.go b/connect/testing.go index 491036aaf..073134b12 100644 --- a/connect/testing.go +++ b/connect/testing.go @@ -8,6 +8,7 @@ import ( "log" "net" "net/http" + "os" "sync/atomic" "github.com/hashicorp/consul/agent/connect" @@ -20,16 +21,12 @@ import ( func TestService(t testing.T, service string, ca *structs.CARoot) *Service { t.Helper() - // Don't need to talk to client since we are setting TLSConfig locally. This - // will cause server verification to skip AuthZ too. 
- svc, err := NewService(service, nil) + // Don't need to talk to client since we are setting TLSConfig locally + svc, err := NewDevServiceWithTLSConfig(service, + log.New(os.Stderr, "", log.LstdFlags), TestTLSConfig(t, service, ca)) if err != nil { t.Fatal(err) } - - // Override the tlsConfig hackily. - svc.tlsCfg = newDynamicTLSConfig(TestTLSConfig(t, service, ca)) - return svc } diff --git a/connect/tls.go b/connect/tls.go index f5cb95a75..6f14cd787 100644 --- a/connect/tls.go +++ b/connect/tls.go @@ -4,7 +4,9 @@ import ( "crypto/tls" "crypto/x509" "errors" + "fmt" "io/ioutil" + "log" "sync" "github.com/hashicorp/consul/agent/connect" @@ -104,7 +106,8 @@ func verifyServerCertMatchesURI(certs []*x509.Certificate, if cert.URIs[0].String() == expectedStr { return nil } - return errors.New("peer certificate mismatch") + return fmt.Errorf("peer certificate mismatch got %s, want %s", + cert.URIs[0].String(), expectedStr) } // newServerSideVerifier returns a verifierFunc that wraps the provided @@ -115,21 +118,25 @@ func newServerSideVerifier(client *api.Client, serviceID string) verifierFunc { return func(tlsCfg *tls.Config, rawCerts [][]byte) error { leaf, err := verifyChain(tlsCfg, rawCerts, false) if err != nil { + log.Printf("connect: failed TLS verification: %s", err) return err } // Check leaf is a cert we understand if len(leaf.URIs) < 1 { + log.Printf("connect: invalid leaf certificate") return errors.New("connect: invalid leaf certificate") } certURI, err := connect.ParseCertURI(leaf.URIs[0]) if err != nil { + log.Printf("connect: invalid leaf certificate URI") return errors.New("connect: invalid leaf certificate URI") } // No AuthZ if there is no client. 
if client == nil { + log.Printf("connect: nil client") return nil } @@ -148,9 +155,11 @@ func newServerSideVerifier(client *api.Client, serviceID string) verifierFunc { } resp, err := client.Agent().ConnectAuthorize(req) if err != nil { + log.Printf("connect: authz call failed: %s", err) return errors.New("connect: authz call failed: " + err.Error()) } if !resp.Authorized { + log.Printf("connect: authz call denied: %s", resp.Reason) return errors.New("connect: authz denied: " + resp.Reason) } return nil @@ -217,9 +226,17 @@ func verifyChain(tlsCfg *tls.Config, rawCerts [][]byte, client bool) (*x509.Cert type dynamicTLSConfig struct { base *tls.Config - sync.Mutex + sync.RWMutex leaf *tls.Certificate roots *x509.CertPool + // readyCh is closed when the config first gets both leaf and roots set. + // Watchers can wait on this via ReadyWait. + readyCh chan struct{} +} + +type tlsCfgUpdate struct { + ch chan struct{} + next *tlsCfgUpdate } // newDynamicTLSConfig returns a dynamicTLSConfig constructed from base. @@ -235,6 +252,9 @@ func newDynamicTLSConfig(base *tls.Config) *dynamicTLSConfig { if base.RootCAs != nil { cfg.roots = base.RootCAs } + if !cfg.Ready() { + cfg.readyCh = make(chan struct{}) + } return cfg } @@ -246,8 +266,8 @@ func newDynamicTLSConfig(base *tls.Config) *dynamicTLSConfig { // client can use this config for a long time and will still verify against the // latest roots even though the roots in the struct is has can't change. 
func (cfg *dynamicTLSConfig) Get(v verifierFunc) *tls.Config { - cfg.Lock() - defer cfg.Unlock() + cfg.RLock() + defer cfg.RUnlock() copy := cfg.base.Clone() copy.RootCAs = cfg.roots copy.ClientCAs = cfg.roots @@ -281,6 +301,7 @@ func (cfg *dynamicTLSConfig) SetRoots(roots *x509.CertPool) error { cfg.Lock() defer cfg.Unlock() cfg.roots = roots + cfg.notify() return nil } @@ -289,19 +310,43 @@ func (cfg *dynamicTLSConfig) SetLeaf(leaf *tls.Certificate) error { cfg.Lock() defer cfg.Unlock() cfg.leaf = leaf + cfg.notify() return nil } +// notify is called under lock during an update to check if we are now ready. +func (cfg *dynamicTLSConfig) notify() { + if cfg.readyCh != nil && cfg.leaf != nil && cfg.roots != nil { + close(cfg.readyCh) + cfg.readyCh = nil + } +} + // Roots returns the current CA root CertPool. func (cfg *dynamicTLSConfig) Roots() *x509.CertPool { - cfg.Lock() - defer cfg.Unlock() + cfg.RLock() + defer cfg.RUnlock() return cfg.roots } // Leaf returns the current Leaf certificate. func (cfg *dynamicTLSConfig) Leaf() *tls.Certificate { - cfg.Lock() - defer cfg.Unlock() + cfg.RLock() + defer cfg.RUnlock() return cfg.leaf } + +// Ready returns whether or not both roots and a leaf certificate are +// configured. If both are non-nil, they are assumed to be valid and usable. +func (cfg *dynamicTLSConfig) Ready() bool { + cfg.RLock() + defer cfg.RUnlock() + return cfg.leaf != nil && cfg.roots != nil +} + +// ReadyWait returns a chan that is closed when the the tlsConfig becomes Ready +// for use. Note that if the config is ready when it is called it returns a nil +// chan. 
+func (cfg *dynamicTLSConfig) ReadyWait() <-chan struct{} { + return cfg.readyCh +} diff --git a/connect/tls_test.go b/connect/tls_test.go index aa1063f3e..a9fd6fe8c 100644 --- a/connect/tls_test.go +++ b/connect/tls_test.go @@ -358,3 +358,45 @@ func TestDynamicTLSConfig(t *testing.T) { requireCorrectVerifier(t, newCfg, gotBefore, v1Ch) requireCorrectVerifier(t, newCfg, gotAfter, v2Ch) } + +func TestDynamicTLSConfig_Ready(t *testing.T) { + require := require.New(t) + + ca1 := connect.TestCA(t, nil) + baseCfg := TestTLSConfig(t, "web", ca1) + + c := newDynamicTLSConfig(defaultTLSConfig()) + readyCh := c.ReadyWait() + assertBlocked(t, readyCh) + require.False(c.Ready(), "no roots or leaf, should not be ready") + + err := c.SetLeaf(&baseCfg.Certificates[0]) + require.NoError(err) + assertBlocked(t, readyCh) + require.False(c.Ready(), "no roots, should not be ready") + + err = c.SetRoots(baseCfg.RootCAs) + require.NoError(err) + assertNotBlocked(t, readyCh) + require.True(c.Ready(), "should be ready") +} + +func assertBlocked(t *testing.T, ch <-chan struct{}) { + t.Helper() + select { + case <-ch: + t.Fatalf("want blocked chan") + default: + return + } +} + +func assertNotBlocked(t *testing.T, ch <-chan struct{}) { + t.Helper() + select { + case <-ch: + return + default: + t.Fatalf("want unblocked chan but it blocked") + } +} diff --git a/testutil/server.go b/testutil/server.go index 06c0fdfd2..f188079d7 100644 --- a/testutil/server.go +++ b/testutil/server.go @@ -17,6 +17,7 @@ import ( "fmt" "io" "io/ioutil" + "log" "net" "net/http" "os" @@ -94,6 +95,7 @@ type TestServerConfig struct { VerifyIncomingHTTPS bool `json:"verify_incoming_https,omitempty"` VerifyOutgoing bool `json:"verify_outgoing,omitempty"` EnableScriptChecks bool `json:"enable_script_checks,omitempty"` + Connect map[string]interface{} `json:"connect,omitempty"` ReadyTimeout time.Duration `json:"-"` Stdout, Stderr io.Writer `json:"-"` Args []string `json:"-"` @@ -211,6 +213,7 @@ func 
newTestServerConfigT(t *testing.T, cb ServerConfigCallback) (*TestServer, e return nil, errors.Wrap(err, "failed marshaling json") } + log.Printf("CONFIG JSON: %s", string(b)) configFile := filepath.Join(tmpdir, "config.json") if err := ioutil.WriteFile(configFile, b, 0644); err != nil { defer os.RemoveAll(tmpdir) diff --git a/watch/funcs.go b/watch/funcs.go index 5e72e40a6..3b1b854ed 100644 --- a/watch/funcs.go +++ b/watch/funcs.go @@ -236,8 +236,7 @@ func eventWatch(params map[string]interface{}) (WatcherFunc, error) { // connectRootsWatch is used to watch for changes to Connect Root certificates. func connectRootsWatch(params map[string]interface{}) (WatcherFunc, error) { - // We don't support stale since roots are likely to be cached locally in the - // agent anyway. + // We don't support stale since roots are cached locally in the agent. fn := func(p *Plan) (BlockingParamVal, interface{}, error) { agent := p.client.Agent() @@ -257,8 +256,7 @@ func connectRootsWatch(params map[string]interface{}) (WatcherFunc, error) { // connectLeafWatch is used to watch for changes to Connect Leaf certificates // for given local service id. func connectLeafWatch(params map[string]interface{}) (WatcherFunc, error) { - // We don't support stale since certs are likely to be cached locally in the - // agent anyway. + // We don't support stale since certs are cached locally in the agent. 
var serviceID string if err := assignValue(params, "service_id", &serviceID); err != nil { diff --git a/watch/funcs_test.go b/watch/funcs_test.go index d5253de44..b304a803f 100644 --- a/watch/funcs_test.go +++ b/watch/funcs_test.go @@ -7,8 +7,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/hashicorp/consul/agent" - "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/agent/connect" consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/watch" "github.com/stretchr/testify/require" @@ -526,14 +528,12 @@ func TestEventWatch(t *testing.T) { } func TestConnectRootsWatch(t *testing.T) { - // TODO(banks) enable and make it work once this is supported. Note that this - // test actually passes currently just by busy-polling the roots endpoint - // until it changes. - t.Skip("CA and Leaf implementation don't actually support blocking yet") t.Parallel() - a := agent.NewTestAgent(t.Name(), ``) + // NewTestAgent will bootstrap a new CA + a := agent.NewTestAgent(t.Name(), "") defer a.Shutdown() + var originalCAID string invoke := makeInvokeCh() plan := mustParse(t, `{"type":"connect_roots"}`) plan.Handler = func(idx uint64, raw interface{}) { @@ -544,7 +544,14 @@ func TestConnectRootsWatch(t *testing.T) { if !ok || v == nil { return // ignore } - // TODO(banks): verify the right roots came back. + // Only 1 CA is the bootstrapped state (i.e. first response). Ignore this + // state and wait for the new CA to show up too. + if len(v.Roots) == 1 { + originalCAID = v.ActiveRootID + return + } + assert.NotEmpty(t, originalCAID) + assert.NotEqual(t, originalCAID, v.ActiveRootID) invoke <- nil } @@ -553,22 +560,8 @@ func TestConnectRootsWatch(t *testing.T) { go func() { defer wg.Done() time.Sleep(20 * time.Millisecond) - // TODO(banks): this is a hack since CA config is in flux. We _did_ expose a - // temporary agent endpoint for PUTing config, but didn't expose it in `api` - // package intentionally. 
If we are going to hack around with temporary API, - // we can might as well drop right down to the RPC level... - args := structs.CAConfiguration{ - Provider: "static", - Config: map[string]interface{}{ - "Name": "test-1", - "Generate": true, - }, - } - var reply interface{} - if err := a.RPC("ConnectCA.ConfigurationSet", &args, &reply); err != nil { - t.Fatalf("err: %v", err) - } - + // Set a new CA + connect.TestCAConfigSet(t, a, nil) }() wg.Add(1) @@ -588,9 +581,8 @@ func TestConnectRootsWatch(t *testing.T) { } func TestConnectLeafWatch(t *testing.T) { - // TODO(banks) enable and make it work once this is supported. - t.Skip("CA and Leaf implementation don't actually support blocking yet") t.Parallel() + // NewTestAgent will bootstrap a new CA a := agent.NewTestAgent(t.Name(), ``) defer a.Shutdown() @@ -606,25 +598,10 @@ func TestConnectLeafWatch(t *testing.T) { require.Nil(t, err) } - // Setup a new generated CA - // - // TODO(banks): this is a hack since CA config is in flux. We _did_ expose a - // temporary agent endpoint for PUTing config, but didn't expose it in `api` - // package intentionally. If we are going to hack around with temporary API, - // we can might as well drop right down to the RPC level... - args := structs.CAConfiguration{ - Provider: "static", - Config: map[string]interface{}{ - "Name": "test-1", - "Generate": true, - }, - } - var reply interface{} - if err := a.RPC("ConnectCA.ConfigurationSet", &args, &reply); err != nil { - t.Fatalf("err: %v", err) - } + var lastCert *consulapi.LeafCert - invoke := makeInvokeCh() + //invoke := makeInvokeCh() + invoke := make(chan error) plan := mustParse(t, `{"type":"connect_leaf", "service_id":"web"}`) plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { @@ -634,7 +611,18 @@ func TestConnectLeafWatch(t *testing.T) { if !ok || v == nil { return // ignore } - // TODO(banks): verify the right leaf came back. 
+ if lastCert == nil { + // Initial fetch, just store the cert and return + lastCert = v + return + } + // TODO(banks): right now the root rotation actually causes Serial numbers + // to reset so these end up all being the same. That needs fixing but it's + // a bigger task than I want to bite off for this PR. + //assert.NotEqual(t, lastCert.SerialNumber, v.SerialNumber) + assert.NotEqual(t, lastCert.CertPEM, v.CertPEM) + assert.NotEqual(t, lastCert.PrivateKeyPEM, v.PrivateKeyPEM) + assert.NotEqual(t, lastCert.ModifyIndex, v.ModifyIndex) invoke <- nil } @@ -643,20 +631,8 @@ func TestConnectLeafWatch(t *testing.T) { go func() { defer wg.Done() time.Sleep(20 * time.Millisecond) - - // Change the CA which should eventually trigger a leaf change but probably - // won't now so this test has no way to succeed yet. - args := structs.CAConfiguration{ - Provider: "static", - Config: map[string]interface{}{ - "Name": "test-2", - "Generate": true, - }, - } - var reply interface{} - if err := a.RPC("ConnectCA.ConfigurationSet", &args, &reply); err != nil { - t.Fatalf("err: %v", err) - } + // Change the CA to trigger a leaf change + connect.TestCAConfigSet(t, a, nil) }() wg.Add(1) @@ -740,6 +716,7 @@ func TestConnectProxyConfigWatch(t *testing.T) { } func mustParse(t *testing.T, q string) *watch.Plan { + t.Helper() var params map[string]interface{} if err := json.Unmarshal([]byte(q), ¶ms); err != nil { t.Fatal(err) From c47ad68f25f594e98396a5d487633051fe9d9e9c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 25 Apr 2018 14:21:03 -0700 Subject: [PATCH 203/627] wip --- agent/proxy/daemon.go | 24 ++++++++++++++++++++++++ agent/proxy/manager.go | 1 + agent/proxy/proxy.go | 12 ++++++++++++ 3 files changed, 37 insertions(+) create mode 100644 agent/proxy/daemon.go create mode 100644 agent/proxy/manager.go create mode 100644 agent/proxy/proxy.go diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go new file mode 100644 index 000000000..f432271b6 --- /dev/null +++ 
b/agent/proxy/daemon.go @@ -0,0 +1,24 @@ +package proxy + +import ( + "os/exec" +) + +// Daemon is a long-running proxy process. It is expected to keep running +// and to use blocking queries to detect changes in configuration, certs, +// and more. +// +// Consul will ensure that if the daemon crashes, that it is restarted. +type Daemon struct { + // Command is the command to execute to start this daemon. This must + // be a Cmd that isn't yet started. + Command *exec.Cmd + + // ProxyToken is the special local-only ACL token that allows a proxy + // to communicate to the Connect-specific endpoints. + ProxyToken string +} + +// Start starts the daemon and keeps it running. +func (p *Daemon) Start() error { +} diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go new file mode 100644 index 000000000..943b369ff --- /dev/null +++ b/agent/proxy/manager.go @@ -0,0 +1 @@ +package proxy diff --git a/agent/proxy/proxy.go b/agent/proxy/proxy.go new file mode 100644 index 000000000..d42ec3903 --- /dev/null +++ b/agent/proxy/proxy.go @@ -0,0 +1,12 @@ +// Package proxy contains logic for agent interaction with proxies, +// primarily "managed" proxies. Managed proxies are proxy processes for +// Connect-compatible endpoints that Consul owns and controls the lifecycle +// for. +// +// This package does not contain the built-in proxy for Connect. The source +// for that is available in the "connect/proxy" package. +package proxy + +// EnvProxyToken is the name of the environment variable that is passed +// to managed proxies containing the proxy token. 
+const EnvProxyToken = "CONNECT_PROXY_TOKEN" From c2f50f1688b1c00ee27185e7d53364dfce100b82 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 25 Apr 2018 16:54:00 -0700 Subject: [PATCH 204/627] agent/proxy: Daemon works, tests cover it too --- agent/proxy/daemon.go | 128 +++++++++++++++++++++++++++++++++++++ agent/proxy/daemon_test.go | 91 ++++++++++++++++++++++++++ agent/proxy/proxy_test.go | 116 +++++++++++++++++++++++++++++++++ 3 files changed, 335 insertions(+) create mode 100644 agent/proxy/daemon_test.go create mode 100644 agent/proxy/proxy_test.go diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index f432271b6..74fa62d44 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -1,7 +1,11 @@ package proxy import ( + "fmt" + "log" + "os" "os/exec" + "sync" ) // Daemon is a long-running proxy process. It is expected to keep running @@ -17,8 +21,132 @@ type Daemon struct { // ProxyToken is the special local-only ACL token that allows a proxy // to communicate to the Connect-specific endpoints. ProxyToken string + + // Logger is where logs will be sent around the management of this + // daemon. The actual logs for the daemon itself will be sent to + // a file. + Logger *log.Logger + + // process is the started process + lock sync.Mutex + process *os.Process + stopCh chan struct{} } // Start starts the daemon and keeps it running. +// +// This function returns after the process is successfully started. func (p *Daemon) Start() error { + p.lock.Lock() + defer p.lock.Unlock() + + // If the daemon is already started, return no error. 
+ if p.stopCh != nil { + return nil + } + + // Start it for the first time + process, err := p.start() + if err != nil { + return err + } + + // Create the stop channel we use to notify when we've gracefully stopped + stopCh := make(chan struct{}) + p.stopCh = stopCh + + // Store the process so that we can signal it later + p.process = process + + go p.keepAlive(stopCh) + + return nil +} + +func (p *Daemon) keepAlive(stopCh chan struct{}) { + p.lock.Lock() + process := p.process + p.lock.Unlock() + + for { + if process == nil { + p.lock.Lock() + + // If we gracefully stopped (stopCh is closed) then don't restart. We + // check stopCh and not p.stopCh because the latter could reference + // a new process. + select { + case <-stopCh: + p.lock.Unlock() + return + default: + } + + // Process isn't started currently. We're restarting. + var err error + process, err = p.start() + if err != nil { + p.Logger.Printf("[ERR] agent/proxy: error restarting daemon: %s", err) + } + + p.process = process + p.lock.Unlock() + } + + _, err := process.Wait() + process = nil + p.Logger.Printf("[INFO] agent/proxy: daemon exited: %s", err) + } +} + +// start starts and returns the process. This will create a copy of the +// configured *exec.Command with the modifications documented on Daemon +// such as setting the proxy token environmental variable. +func (p *Daemon) start() (*os.Process, error) { + cmd := *p.Command + + // Add the proxy token to the environment. We first copy the env because + // it is a slice and therefore the "copy" above will only copy the slice + // reference. We allocate an exactly sized slice. + cmd.Env = make([]string, len(p.Command.Env), len(p.Command.Env)+1) + copy(cmd.Env, p.Command.Env) + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvProxyToken, p.ProxyToken)) + + // Start it + err := cmd.Start() + return cmd.Process, err +} + +// Stop stops the daemon. +// +// This will attempt a graceful stop (SIGINT) before force killing the +// process (SIGKILL). 
In either case, the process won't be automatically +// restarted unless Start is called again. +// +// This is safe to call multiple times. If the daemon is already stopped, +// then this returns no error. +func (p *Daemon) Stop() error { + p.lock.Lock() + defer p.lock.Unlock() + + // If we don't have a stopCh then we never even started yet. + if p.stopCh == nil { + return nil + } + + // If stopCh is closed, then we're already stopped + select { + case <-p.stopCh: + return nil + default: + } + + err := p.process.Signal(os.Interrupt) + + // This signals that we've stopped and therefore don't want to restart + close(p.stopCh) + p.stopCh = nil + + return err + //return p.Command.Process.Kill() } diff --git a/agent/proxy/daemon_test.go b/agent/proxy/daemon_test.go new file mode 100644 index 000000000..0af971b93 --- /dev/null +++ b/agent/proxy/daemon_test.go @@ -0,0 +1,91 @@ +package proxy + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/hashicorp/consul/testutil/retry" + "github.com/hashicorp/go-uuid" + "github.com/stretchr/testify/require" +) + +func TestDaemonStartStop(t *testing.T) { + require := require.New(t) + td, closer := testTempDir(t) + defer closer() + + path := filepath.Join(td, "file") + uuid, err := uuid.GenerateUUID() + require.NoError(err) + + d := &Daemon{ + Command: helperProcess("start-stop", path), + ProxyToken: uuid, + Logger: testLogger, + } + require.NoError(d.Start()) + + // Wait for the file to exist + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + + r.Fatalf("error: %s", err) + }) + + // Verify that the contents of the file is the token. This verifies + // that we properly passed the token as an env var. + data, err := ioutil.ReadFile(path) + require.NoError(err) + require.Equal(uuid, string(data)) + + // Stop the process + require.NoError(d.Stop()) + + // File should no longer exist. 
+ retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if os.IsNotExist(err) { + return + } + + // err might be nil here but that's okay + r.Fatalf("should not exist: %s", err) + }) +} + +func TestDaemonRestart(t *testing.T) { + require := require.New(t) + td, closer := testTempDir(t) + defer closer() + path := filepath.Join(td, "file") + + d := &Daemon{ + Command: helperProcess("restart", path), + Logger: testLogger, + } + require.NoError(d.Start()) + defer d.Stop() + + // Wait for the file to exist. We save the func so we can reuse the test. + waitFile := func() { + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + r.Fatalf("error waiting for path: %s", err) + }) + } + waitFile() + + // Delete the file + require.NoError(os.Remove(path)) + + // File should re-appear because the process is restart + waitFile() +} diff --git a/agent/proxy/proxy_test.go b/agent/proxy/proxy_test.go new file mode 100644 index 000000000..fa8eef128 --- /dev/null +++ b/agent/proxy/proxy_test.go @@ -0,0 +1,116 @@ +package proxy + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "os/signal" + "testing" + "time" +) + +// testLogger is a logger that can be used by tests that require a +// *log.Logger instance. +var testLogger = log.New(os.Stderr, "logger: ", log.LstdFlags) + +// testTempDir returns a temporary directory and a cleanup function. +func testTempDir(t *testing.T) (string, func()) { + t.Helper() + + td, err := ioutil.TempDir("", "test-agent-proxy") + if err != nil { + t.Fatalf("err: %s", err) + } + + return td, func() { + if err := os.RemoveAll(td); err != nil { + t.Fatalf("err: %s", err) + } + } +} + +// helperProcess returns an *exec.Cmd that can be used to execute the +// TestHelperProcess function below. This can be used to test multi-process +// interactions. +func helperProcess(s ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcess", "--"} + cs = append(cs, s...) 
+ env := []string{"GO_WANT_HELPER_PROCESS=1"} + + cmd := exec.Command(os.Args[0], cs...) + cmd.Env = append(env, os.Environ()...) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd +} + +// This is not a real test. This is just a helper process kicked off by tests +// using the helperProcess helper function. +func TestHelperProcess(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + defer os.Exit(0) + + args := os.Args + for len(args) > 0 { + if args[0] == "--" { + args = args[1:] + break + } + + args = args[1:] + } + + if len(args) == 0 { + fmt.Fprintf(os.Stderr, "No command\n") + os.Exit(2) + } + + cmd, args := args[0], args[1:] + switch cmd { + // While running, this creates a file in the given directory (args[0]) + // and deletes it only whe nit is stopped. + case "start-stop": + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + defer signal.Stop(ch) + + path := args[0] + data := []byte(os.Getenv(EnvProxyToken)) + + if err := ioutil.WriteFile(path, data, 0644); err != nil { + t.Fatalf("err: %s", err) + } + defer os.Remove(path) + + <-ch + + // Restart writes to a file and keeps running while that file still + // exists. When that file is removed, this process exits. This can be + // used to test restarting. + case "restart": + // Write the file + path := args[0] + if err := ioutil.WriteFile(path, []byte("hello"), 0644); err != nil { + fmt.Fprintf(os.Stderr, "Error: %s\n", err) + os.Exit(1) + } + + // While the file still exists, do nothing. When the file no longer + // exists, we exit. 
+ for { + time.Sleep(25 * time.Millisecond) + if _, err := os.Stat(path); os.IsNotExist(err) { + break + } + } + + default: + fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd) + os.Exit(2) + } +} From 659ab7ee2d00e4c7e16bdcea9cd7337c798ac91c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 25 Apr 2018 17:39:32 -0700 Subject: [PATCH 205/627] agent/proxy: exponential backoff on restarts --- agent/proxy/daemon.go | 40 ++++++++++++++++++++++++++++++++++++++++ agent/proxy/manager.go | 1 - 2 files changed, 40 insertions(+), 1 deletion(-) delete mode 100644 agent/proxy/manager.go diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index 74fa62d44..3a8c1b11b 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -6,6 +6,16 @@ import ( "os" "os/exec" "sync" + "time" +) + +// Constants related to restart timers with the daemon mode proxies. At some +// point we will probably want to expose these knobs to an end user, but +// reasonable defaults are chosen. +const ( + DaemonRestartHealthy = 10 * time.Second // time before considering healthy + DaemonRestartBackoffMin = 3 // 3 attempts before backing off + DaemonRestartMaxWait = 1 * time.Minute // maximum backoff wait time ) // Daemon is a long-running proxy process. It is expected to keep running @@ -68,8 +78,38 @@ func (p *Daemon) keepAlive(stopCh chan struct{}) { process := p.process p.lock.Unlock() + // attemptsDeadline is the time at which we consider the daemon to have + // been alive long enough that we can reset the attempt counter. + // + // attempts keeps track of the number of restart attempts we've had and + // is used to calculate the wait time using an exponential backoff. 
+ var attemptsDeadline time.Time + var attempts uint + for { if process == nil { + // If we're passed the attempt deadline then reset the attempts + if !attemptsDeadline.IsZero() && time.Now().After(attemptsDeadline) { + attempts = 0 + } + attemptsDeadline = time.Now().Add(DaemonRestartHealthy) + attempts++ + + // Calculate the exponential backoff and wait if we have to + if attempts > DaemonRestartBackoffMin { + waitTime := (1 << (attempts - DaemonRestartBackoffMin)) * time.Second + if waitTime > DaemonRestartMaxWait { + waitTime = DaemonRestartMaxWait + } + + if waitTime > 0 { + p.Logger.Printf( + "[WARN] agent/proxy: waiting %s before restarting daemon", + waitTime) + time.Sleep(waitTime) + } + } + p.lock.Lock() // If we gracefully stopped (stopCh is closed) then don't restart. We diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go deleted file mode 100644 index 943b369ff..000000000 --- a/agent/proxy/manager.go +++ /dev/null @@ -1 +0,0 @@ -package proxy From 76c6849ffee86423108997fb4c46e839f3bcff46 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 26 Apr 2018 21:11:56 -0700 Subject: [PATCH 206/627] agent/local: store proxy on local state, wip, not working yet --- agent/local/proxy.go | 26 ++++++++++++++++++++++++++ agent/local/state.go | 19 ++++++++++++++++--- agent/proxy/daemon_test.go | 4 ++++ agent/proxy/proxy.go | 16 ++++++++++++++++ 4 files changed, 62 insertions(+), 3 deletions(-) create mode 100644 agent/local/proxy.go diff --git a/agent/local/proxy.go b/agent/local/proxy.go new file mode 100644 index 000000000..7f004a7ab --- /dev/null +++ b/agent/local/proxy.go @@ -0,0 +1,26 @@ +package local + +import ( + "fmt" + "os/exec" + + "github.com/hashicorp/consul/agent/proxy" + "github.com/hashicorp/consul/agent/structs" +) + +// newProxyProcess returns the proxy.Proxy for the given ManagedProxy +// state entry. proxy.Proxy is the actual managed process. The returned value +// is the initialized struct but isn't explicitly started. 
+func (s *State) newProxyProcess(p *structs.ConnectManagedProxy, pToken string) (proxy.Proxy, error) { + switch p.ExecMode { + case structs.ProxyExecModeDaemon: + return &proxy.Daemon{ + Command: exec.Command(p.Command), + ProxyToken: pToken, + Logger: s.logger, + }, nil + + default: + return nil, fmt.Errorf("unsupported managed proxy type: %q", p.ExecMode) + } +} diff --git a/agent/local/state.go b/agent/local/state.go index 8df600b32..ccb4d77e1 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/go-uuid" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/proxy" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/api" @@ -126,6 +127,9 @@ type ManagedProxy struct { // use service-scoped ACL tokens distributed externally. ProxyToken string + // ManagedProxy is the managed proxy itself that is running. + ManagedProxy proxy.Proxy + // WatchCh is a close-only chan that is closed when the proxy is removed or // updated. WatchCh chan struct{} @@ -603,6 +607,14 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*str return nil, err } + // Initialize the managed proxy process. This doesn't start anything, + // it only sets up the structures we'll use. To start the proxy, the + // caller should call Proxy and use the returned ManagedProxy instance. + proxyProcess, err := l.newProxyProcess(proxy, pToken) + if err != nil { + return nil, err + } + // Lock now. We can't lock earlier as l.Service would deadlock and shouldn't // anyway to minimise the critical section. 
l.Lock() @@ -650,9 +662,10 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*str close(old.WatchCh) } l.managedProxies[svc.ID] = &ManagedProxy{ - Proxy: proxy, - ProxyToken: pToken, - WatchCh: make(chan struct{}), + Proxy: proxy, + ProxyToken: pToken, + ManagedProxy: proxyProcess, + WatchCh: make(chan struct{}), } // No need to trigger sync as proxy state is local only. diff --git a/agent/proxy/daemon_test.go b/agent/proxy/daemon_test.go index 0af971b93..dbd2099bc 100644 --- a/agent/proxy/daemon_test.go +++ b/agent/proxy/daemon_test.go @@ -11,6 +11,10 @@ import ( "github.com/stretchr/testify/require" ) +func TestDaemon_impl(t *testing.T) { + var _ Proxy = new(Daemon) +} + func TestDaemonStartStop(t *testing.T) { require := require.New(t) td, closer := testTempDir(t) diff --git a/agent/proxy/proxy.go b/agent/proxy/proxy.go index d42ec3903..44228b521 100644 --- a/agent/proxy/proxy.go +++ b/agent/proxy/proxy.go @@ -10,3 +10,19 @@ package proxy // EnvProxyToken is the name of the environment variable that is passed // to managed proxies containing the proxy token. const EnvProxyToken = "CONNECT_PROXY_TOKEN" + +// Proxy is the interface implemented by all types of managed proxies. +// +// Calls to all the functions on this interface must be concurrency safe. +// Please read the documentation carefully on top of each function for expected +// behavior. +type Proxy interface { + // Start starts the proxy. If an error is returned then the managed + // proxy registration is rejected. Therefore, this should only fail if + // the configuration of the proxy itself is irrecoverable, and should + // retry starting for other failures. + Start() error + + // Stop stops the proxy. 
+ Stop() error +} From 536f31571b2436258930b5114ac9da1d3aff985d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 26 Apr 2018 22:16:21 -0700 Subject: [PATCH 207/627] agent: change connect command paths to be slices, not strings This matches other executable configuration and allows us to cleanly separate executable from arguments without trying to emulate shell parsing. --- agent/agent_endpoint_test.go | 6 +++--- agent/agent_test.go | 5 +++-- agent/config/builder.go | 10 +++++----- agent/config/config.go | 6 +++--- agent/config/runtime.go | 4 ++-- agent/config/runtime_test.go | 24 ++++++++++++------------ agent/structs/connect.go | 2 +- agent/structs/service_definition.go | 2 +- api/agent.go | 4 ++-- 9 files changed, 32 insertions(+), 31 deletions(-) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index be97bf5a4..6cff0fa59 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -63,7 +63,7 @@ func TestAgent_Services(t *testing.T) { // Add a managed proxy for that service prxy1 := &structs.ConnectManagedProxy{ ExecMode: structs.ProxyExecModeScript, - Command: "proxy.sh", + Command: []string{"proxy.sh"}, Config: map[string]interface{}{ "bind_port": 1234, "foo": "bar", @@ -1404,7 +1404,7 @@ func TestAgent_RegisterService_ManagedConnectProxy(t *testing.T) { Connect: &api.AgentServiceConnect{ Proxy: &api.AgentServiceConnectProxy{ ExecMode: "script", - Command: "proxy.sh", + Command: []string{"proxy.sh"}, Config: map[string]interface{}{ "foo": "bar", }, @@ -2354,7 +2354,7 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) { TargetServiceName: "test", ContentHash: "84346af2031659c9", ExecMode: "daemon", - Command: "consul connect proxy", + Command: nil, Config: map[string]interface{}{ "upstreams": []interface{}{ map[string]interface{}{ diff --git a/agent/agent_test.go b/agent/agent_test.go index 730c10bc9..022c25f8e 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -2333,7 +2333,7 @@ func 
TestAgent_AddProxy(t *testing.T) { desc: "basic proxy adding, unregistered service", proxy: &structs.ConnectManagedProxy{ ExecMode: structs.ProxyExecModeDaemon, - Command: "consul connect proxy", + Command: []string{"consul", "connect", "proxy"}, Config: map[string]interface{}{ "foo": "bar", }, @@ -2346,7 +2346,7 @@ func TestAgent_AddProxy(t *testing.T) { desc: "basic proxy adding, unregistered service", proxy: &structs.ConnectManagedProxy{ ExecMode: structs.ProxyExecModeDaemon, - Command: "consul connect proxy", + Command: []string{"consul", "connect", "proxy"}, Config: map[string]interface{}{ "foo": "bar", }, @@ -2392,6 +2392,7 @@ func TestAgent_RemoveProxy(t *testing.T) { // Add a proxy for web pReg := &structs.ConnectManagedProxy{ TargetServiceID: "web", + Command: []string{"foo"}, } require.NoError(a.AddProxy(pReg, false)) diff --git a/agent/config/builder.go b/agent/config/builder.go index 3d9818adc..cd851aaf3 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -532,13 +532,13 @@ func (b *Builder) Build() (rt RuntimeConfig, err error) { } proxyDefaultExecMode := "" - proxyDefaultDaemonCommand := "" - proxyDefaultScriptCommand := "" + var proxyDefaultDaemonCommand []string + var proxyDefaultScriptCommand []string proxyDefaultConfig := make(map[string]interface{}) if c.Connect != nil && c.Connect.ProxyDefaults != nil { proxyDefaultExecMode = b.stringVal(c.Connect.ProxyDefaults.ExecMode) - proxyDefaultDaemonCommand = b.stringVal(c.Connect.ProxyDefaults.DaemonCommand) - proxyDefaultScriptCommand = b.stringVal(c.Connect.ProxyDefaults.ScriptCommand) + proxyDefaultDaemonCommand = c.Connect.ProxyDefaults.DaemonCommand + proxyDefaultScriptCommand = c.Connect.ProxyDefaults.ScriptCommand proxyDefaultConfig = c.Connect.ProxyDefaults.Config } @@ -1051,7 +1051,7 @@ func (b *Builder) serviceConnectVal(v *ServiceConnect) *structs.ServiceDefinitio if v.Proxy != nil { proxy = &structs.ServiceDefinitionConnectProxy{ ExecMode: b.stringVal(v.Proxy.ExecMode), - 
Command: b.stringVal(v.Proxy.Command), + Command: v.Proxy.Command, Config: v.Proxy.Config, } } diff --git a/agent/config/config.go b/agent/config/config.go index 5eb231472..6dc652aed 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -359,7 +359,7 @@ type ServiceConnect struct { } type ServiceConnectProxy struct { - Command *string `json:"command,omitempty" hcl:"command" mapstructure:"command"` + Command []string `json:"command,omitempty" hcl:"command" mapstructure:"command"` ExecMode *string `json:"exec_mode,omitempty" hcl:"exec_mode" mapstructure:"exec_mode"` Config map[string]interface{} `json:"config,omitempty" hcl:"config" mapstructure:"config"` } @@ -386,10 +386,10 @@ type ConnectProxyDefaults struct { ExecMode *string `json:"exec_mode,omitempty" hcl:"exec_mode" mapstructure:"exec_mode"` // DaemonCommand is used to start proxy in exec_mode = daemon if not specified // at registration time. - DaemonCommand *string `json:"daemon_command,omitempty" hcl:"daemon_command" mapstructure:"daemon_command"` + DaemonCommand []string `json:"daemon_command,omitempty" hcl:"daemon_command" mapstructure:"daemon_command"` // ScriptCommand is used to start proxy in exec_mode = script if not specified // at registration time. - ScriptCommand *string `json:"script_command,omitempty" hcl:"script_command" mapstructure:"script_command"` + ScriptCommand []string `json:"script_command,omitempty" hcl:"script_command" mapstructure:"script_command"` // Config is merged into an Config specified at registration time. Config map[string]interface{} `json:"config,omitempty" hcl:"config" mapstructure:"config"` } diff --git a/agent/config/runtime.go b/agent/config/runtime.go index ea04d5aa0..8baf02ab2 100644 --- a/agent/config/runtime.go +++ b/agent/config/runtime.go @@ -637,11 +637,11 @@ type RuntimeConfig struct { // ConnectProxyDefaultDaemonCommand is used to start proxy in exec_mode = // daemon if not specified at registration time. 
- ConnectProxyDefaultDaemonCommand string + ConnectProxyDefaultDaemonCommand []string // ConnectProxyDefaultScriptCommand is used to start proxy in exec_mode = // script if not specified at registration time. - ConnectProxyDefaultScriptCommand string + ConnectProxyDefaultScriptCommand []string // ConnectProxyDefaultConfig is merged with any config specified at // registration time to allow global control of defaults. diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 36fffe16a..3268c6c70 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -2364,8 +2364,8 @@ func TestFullConfig(t *testing.T) { "bind_min_port": 2000, "bind_max_port": 3000, "exec_mode": "script", - "daemon_command": "consul connect proxy", - "script_command": "proxyctl.sh", + "daemon_command": ["consul", "connect", "proxy"], + "script_command": ["proxyctl.sh"], "config": { "foo": "bar", "connect_timeout_ms": 1000, @@ -2637,7 +2637,7 @@ func TestFullConfig(t *testing.T) { "connect": { "proxy": { "exec_mode": "daemon", - "command": "awesome-proxy", + "command": ["awesome-proxy"], "config": { "foo": "qux" } @@ -2826,13 +2826,13 @@ func TestFullConfig(t *testing.T) { bind_min_port = 2000 bind_max_port = 3000 exec_mode = "script" - daemon_command = "consul connect proxy" - script_command = "proxyctl.sh" + daemon_command = ["consul", "connect", "proxy"] + script_command = ["proxyctl.sh"] config = { foo = "bar" # hack float since json parses numbers as float and we have to # assert against the same thing - connect_timeout_ms = 1000.0 + connect_timeout_ms = 1000.0 pedantic_mode = true } } @@ -3101,7 +3101,7 @@ func TestFullConfig(t *testing.T) { connect { proxy { exec_mode = "daemon" - command = "awesome-proxy" + command = ["awesome-proxy"] config = { foo = "qux" } @@ -3426,8 +3426,8 @@ func TestFullConfig(t *testing.T) { "hyMy9Oxn": "XeBp4Sis", }, ConnectProxyDefaultExecMode: "script", - ConnectProxyDefaultDaemonCommand: "consul connect proxy", - 
ConnectProxyDefaultScriptCommand: "proxyctl.sh", + ConnectProxyDefaultDaemonCommand: []string{"consul", "connect", "proxy"}, + ConnectProxyDefaultScriptCommand: []string{"proxyctl.sh"}, ConnectProxyDefaultConfig: map[string]interface{}{ "foo": "bar", "connect_timeout_ms": float64(1000), @@ -3608,7 +3608,7 @@ func TestFullConfig(t *testing.T) { Connect: &structs.ServiceDefinitionConnect{ Proxy: &structs.ServiceDefinitionConnectProxy{ ExecMode: "daemon", - Command: "awesome-proxy", + Command: []string{"awesome-proxy"}, Config: map[string]interface{}{ "foo": "qux", }, @@ -4109,9 +4109,9 @@ func TestSanitize(t *testing.T) { "ConnectProxyBindMaxPort": 0, "ConnectProxyBindMinPort": 0, "ConnectProxyDefaultConfig": {}, - "ConnectProxyDefaultDaemonCommand": "", + "ConnectProxyDefaultDaemonCommand": [], "ConnectProxyDefaultExecMode": "", - "ConnectProxyDefaultScriptCommand": "", + "ConnectProxyDefaultScriptCommand": [], "ConsulCoordinateUpdateBatchSize": 0, "ConsulCoordinateUpdateMaxBatches": 0, "ConsulCoordinateUpdatePeriod": "15s", diff --git a/agent/structs/connect.go b/agent/structs/connect.go index 90513ae8c..29330a652 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -64,7 +64,7 @@ type ConnectManagedProxy struct { // Command is the command to execute. Empty defaults to self-invoking the same // consul binary with proxy subcomand for ProxyExecModeDaemon and is an error // for ProxyExecModeScript. - Command string + Command []string // Config is the arbitrary configuration data provided with the registration. Config map[string]interface{} diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index d4dc21414..7163b5549 100644 --- a/agent/structs/service_definition.go +++ b/agent/structs/service_definition.go @@ -110,7 +110,7 @@ type ServiceDefinitionConnect struct { // registration. Note this is duplicated in config.ServiceConnectProxy and needs // to be kept in sync. 
type ServiceDefinitionConnectProxy struct { - Command string + Command []string ExecMode string Config map[string]interface{} } diff --git a/api/agent.go b/api/agent.go index b8125c91e..16241c6f9 100644 --- a/api/agent.go +++ b/api/agent.go @@ -76,7 +76,7 @@ type AgentServiceConnect struct { // service. type AgentServiceConnectProxy struct { ExecMode ProxyExecMode - Command string + Command []string Config map[string]interface{} } @@ -225,7 +225,7 @@ type ConnectProxyConfig struct { TargetServiceName string ContentHash string ExecMode ProxyExecMode - Command string + Command []string Config map[string]interface{} } From 93cdd3f2061de88ed86e2b4c13b115c63f4e8d35 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 27 Apr 2018 10:44:16 -0700 Subject: [PATCH 208/627] agent/proxy: clean up usage, can't be restarted --- agent/proxy/daemon.go | 80 ++++++++++++++++++++++++------------------- agent/proxy/proxy.go | 8 ++++- 2 files changed, 51 insertions(+), 37 deletions(-) diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index 3a8c1b11b..231f25cb8 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -39,8 +39,9 @@ type Daemon struct { // process is the started process lock sync.Mutex - process *os.Process + stopped bool stopCh chan struct{} + process *os.Process } // Start starts the daemon and keeps it running. @@ -50,30 +51,29 @@ func (p *Daemon) Start() error { p.lock.Lock() defer p.lock.Unlock() - // If the daemon is already started, return no error. 
- if p.stopCh != nil { + // A stopped proxy cannot be restarted + if p.stopped { + return fmt.Errorf("stopped") + } + + // If we're already running, that is okay + if p.process != nil { return nil } - // Start it for the first time - process, err := p.start() - if err != nil { - return err - } - - // Create the stop channel we use to notify when we've gracefully stopped + // Setup our stop channel stopCh := make(chan struct{}) p.stopCh = stopCh - // Store the process so that we can signal it later - p.process = process - + // Start the loop. go p.keepAlive(stopCh) return nil } -func (p *Daemon) keepAlive(stopCh chan struct{}) { +// keepAlive starts and keeps the configured process alive until it +// is stopped via Stop. +func (p *Daemon) keepAlive(stopCh <-chan struct{}) { p.lock.Lock() process := p.process p.lock.Unlock() @@ -106,31 +106,43 @@ func (p *Daemon) keepAlive(stopCh chan struct{}) { p.Logger.Printf( "[WARN] agent/proxy: waiting %s before restarting daemon", waitTime) - time.Sleep(waitTime) + + timer := time.NewTimer(waitTime) + select { + case <-timer.C: + // Timer is up, good! + + case <-stopCh: + // During our backoff wait, we've been signalled to + // quit, so just quit. + timer.Stop() + return + } } } p.lock.Lock() - // If we gracefully stopped (stopCh is closed) then don't restart. We - // check stopCh and not p.stopCh because the latter could reference - // a new process. - select { - case <-stopCh: + // If we gracefully stopped then don't restart. + if p.stopped { p.lock.Unlock() return - default: } - // Process isn't started currently. We're restarting. + // Process isn't started currently. We're restarting. Start it + // and save the process if we have it. 
var err error process, err = p.start() + if err == nil { + p.process = process + } + p.lock.Unlock() + if err != nil { p.Logger.Printf("[ERR] agent/proxy: error restarting daemon: %s", err) + continue } - p.process = process - p.lock.Unlock() } _, err := process.Wait() @@ -169,24 +181,20 @@ func (p *Daemon) Stop() error { p.lock.Lock() defer p.lock.Unlock() - // If we don't have a stopCh then we never even started yet. - if p.stopCh == nil { + // If we're already stopped or never started, then no problem. + if p.stopped || p.process == nil { + // In the case we never even started, calling Stop makes it so + // that we can't ever start in the future, either, so mark this. + p.stopped = true return nil } - // If stopCh is closed, then we're already stopped - select { - case <-p.stopCh: - return nil - default: - } + // Note that we've stopped + p.stopped = true + close(p.stopCh) err := p.process.Signal(os.Interrupt) - // This signals that we've stopped and therefore don't want to restart - close(p.stopCh) - p.stopCh = nil - return err //return p.Command.Process.Kill() } diff --git a/agent/proxy/proxy.go b/agent/proxy/proxy.go index 44228b521..a07bb5681 100644 --- a/agent/proxy/proxy.go +++ b/agent/proxy/proxy.go @@ -21,8 +21,14 @@ type Proxy interface { // proxy registration is rejected. Therefore, this should only fail if // the configuration of the proxy itself is irrecoverable, and should // retry starting for other failures. + // + // Starting an already-started proxy should not return an error. Start() error - // Stop stops the proxy. + // Stop stops the proxy and disallows it from ever being started again. + // + // If the proxy is not started yet, this should not return an error, but + // it should disallow Start from working again. If the proxy is already + // stopped, this should not return an error. 
Stop() error } From f64a002f68a662f584c45f4d1f71e88951335bc3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 27 Apr 2018 11:24:49 -0700 Subject: [PATCH 209/627] agent: start/stop proxies --- agent/agent.go | 71 +++++++++++++++++++++++++++++++++++- agent/agent_endpoint_test.go | 8 ++-- agent/local/proxy.go | 22 ++++++++++- agent/local/state.go | 36 ++++++++++-------- agent/local/state_test.go | 21 +++++++---- agent/proxy/noop.go | 7 ++++ agent/proxy/noop_test.go | 9 +++++ agent/structs/connect.go | 3 ++ 8 files changed, 147 insertions(+), 30 deletions(-) create mode 100644 agent/proxy/noop.go create mode 100644 agent/proxy/noop_test.go diff --git a/agent/agent.go b/agent/agent.go index 9dfe2abea..f70c16379 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -40,6 +40,7 @@ import ( "github.com/hashicorp/memberlist" "github.com/hashicorp/raft" "github.com/hashicorp/serf/serf" + "github.com/kardianos/osext" "github.com/shirou/gopsutil/host" "golang.org/x/net/http2" ) @@ -1268,6 +1269,11 @@ func (a *Agent) ShutdownAgent() error { chk.Stop() } + // Unload all our proxies so that we stop the running processes. + if err := a.unloadProxies(); err != nil { + a.logger.Printf("[WARN] agent: error stopping managed proxies: %s", err) + } + var err error if a.delegate != nil { err = a.delegate.Shutdown() @@ -2032,19 +2038,58 @@ func (a *Agent) AddProxy(proxy *structs.ConnectManagedProxy, persist bool) error // Lookup the target service token in state if there is one. token := a.State.ServiceToken(proxy.TargetServiceID) + // Determine if we need to default the command + if proxy.ExecMode == structs.ProxyExecModeDaemon && len(proxy.Command) == 0 { + // We use the globally configured default command. If it is empty + // then we need to determine the subcommand for this agent. 
+ cmd := a.config.ConnectProxyDefaultDaemonCommand + if len(cmd) == 0 { + var err error + cmd, err = a.defaultProxyCommand() + if err != nil { + return err + } + } + + proxy.CommandDefault = cmd + } + // Add the proxy to local state first since we may need to assign a port which // needs to be coordinate under state lock. AddProxy will generate the // NodeService for the proxy populated with the allocated (or configured) port // and an ID, but it doesn't add it to the agent directly since that could // deadlock and we may need to coordinate adding it and persisting etc. - proxyService, err := a.State.AddProxy(proxy, token) + proxyState, oldProxy, err := a.State.AddProxy(proxy, token) if err != nil { return err } + proxyService := proxyState.Proxy.ProxyService + + // If we replaced an existing proxy, stop that process. + if oldProxy != nil { + if err := oldProxy.ProxyProcess.Stop(); err != nil { + a.logger.Printf( + "[ERR] error stopping managed proxy, may still be running: %s", + err) + } + } + + // Start the proxy process + if err := proxyState.ProxyProcess.Start(); err != nil { + a.State.RemoveProxy(proxyService.ID) + return fmt.Errorf("error starting managed proxy: %s", err) + } // TODO(banks): register proxy health checks. err = a.AddService(proxyService, nil, persist, token) if err != nil { + // Stop the proxy process if it was started + if err := proxyState.ProxyProcess.Stop(); err != nil { + a.logger.Printf( + "[ERR] error stopping managed proxy, may still be running: %s", + err) + } + // Remove the state too a.State.RemoveProxy(proxyService.ID) return err @@ -2061,15 +2106,37 @@ func (a *Agent) RemoveProxy(proxyID string, persist bool) error { return fmt.Errorf("proxyID missing") } - if err := a.State.RemoveProxy(proxyID); err != nil { + // Remove the proxy from the local state + proxyState, err := a.State.RemoveProxy(proxyID) + if err != nil { return err } + // Stop the process. 
The proxy implementation is expected to perform + // retries so if this fails then retries have already been performed and + // the most we can do is just error. + if err := proxyState.ProxyProcess.Stop(); err != nil { + return fmt.Errorf("error stopping managed proxy process: %s", err) + } + // TODO(banks): unpersist proxy return nil } +// defaultProxyCommand returns the default Connect managed proxy command. +func (a *Agent) defaultProxyCommand() ([]string, error) { + // Get the path to the current exectuable. This is cached once by the + // library so this is effectively just a variable read. + execPath, err := osext.Executable() + if err != nil { + return nil, err + } + + // "consul connect proxy" default value for managed daemon proxy + return []string{execPath, "connect", "proxy"}, nil +} + func (a *Agent) cancelCheckMonitors(checkID types.CheckID) { // Stop any monitors delete(a.checkReapAfter, checkID) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 6cff0fa59..26a04dddd 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -70,7 +70,7 @@ func TestAgent_Services(t *testing.T) { }, TargetServiceID: "mysql", } - _, err := a.State.AddProxy(prxy1, "") + _, _, err := a.State.AddProxy(prxy1, "") require.NoError(t, err) req, _ := http.NewRequest("GET", "/v1/agent/services", nil) @@ -1435,7 +1435,7 @@ func TestAgent_RegisterService_ManagedConnectProxy(t *testing.T) { proxy := a.State.Proxy("web-proxy") require.NotNil(proxy) assert.Equal(structs.ProxyExecModeScript, proxy.Proxy.ExecMode) - assert.Equal("proxy.sh", proxy.Proxy.Command) + assert.Equal([]string{"proxy.sh"}, proxy.Proxy.Command) assert.Equal(args.Connect.Proxy.Config, proxy.Proxy.Config) // Ensure the token was configured @@ -2352,7 +2352,7 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) { ProxyServiceID: "test-proxy", TargetServiceID: "test", TargetServiceName: "test", - ContentHash: "84346af2031659c9", + ContentHash: 
"365a50cbb9a748b6", ExecMode: "daemon", Command: nil, Config: map[string]interface{}{ @@ -2372,7 +2372,7 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) { ur, err := copystructure.Copy(expectedResponse) require.NoError(t, err) updatedResponse := ur.(*api.ConnectProxyConfig) - updatedResponse.ContentHash = "e1e3395f0d00cd41" + updatedResponse.ContentHash = "b5bb0e4a0a58ca25" upstreams := updatedResponse.Config["upstreams"].([]interface{}) upstreams = append(upstreams, map[string]interface{}{ diff --git a/agent/local/proxy.go b/agent/local/proxy.go index 7f004a7ab..37484a32f 100644 --- a/agent/local/proxy.go +++ b/agent/local/proxy.go @@ -14,12 +14,32 @@ import ( func (s *State) newProxyProcess(p *structs.ConnectManagedProxy, pToken string) (proxy.Proxy, error) { switch p.ExecMode { case structs.ProxyExecModeDaemon: + command := p.Command + if len(command) == 0 { + command = p.CommandDefault + } + + // This should never happen since validation should happen upstream + // but verify it because the alternative is to panic below. + if len(command) == 0 { + return nil, fmt.Errorf("daemon mode managed proxy requires command") + } + + // Build the command to execute. + var cmd exec.Cmd + cmd.Path = command[0] + cmd.Args = command[1:] + + // Build the daemon structure return &proxy.Daemon{ - Command: exec.Command(p.Command), + Command: &cmd, ProxyToken: pToken, Logger: s.logger, }, nil + case structs.ProxyExecModeScript: + return &proxy.Noop{}, nil + default: return nil, fmt.Errorf("unsupported managed proxy type: %q", p.ExecMode) } diff --git a/agent/local/state.go b/agent/local/state.go index ccb4d77e1..ecd3299fd 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -127,8 +127,8 @@ type ManagedProxy struct { // use service-scoped ACL tokens distributed externally. ProxyToken string - // ManagedProxy is the managed proxy itself that is running. - ManagedProxy proxy.Proxy + // ProxyProcess is the managed proxy itself that is running. 
+ ProxyProcess proxy.Proxy // WatchCh is a close-only chan that is closed when the proxy is removed or // updated. @@ -573,22 +573,26 @@ func (l *State) CriticalCheckStates() map[types.CheckID]*CheckState { // (since that has to do other book keeping). The token passed here is the ACL // token the service used to register itself so must have write on service // record. -func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*structs.NodeService, error) { +// +// AddProxy returns the newly added proxy, any replaced proxy, and an error. +// The second return value (replaced proxy) can be used to determine if +// the process needs to be updated or not. +func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*ManagedProxy, *ManagedProxy, error) { if proxy == nil { - return nil, fmt.Errorf("no proxy") + return nil, nil, fmt.Errorf("no proxy") } // Lookup the local service target := l.Service(proxy.TargetServiceID) if target == nil { - return nil, fmt.Errorf("target service ID %s not registered", + return nil, nil, fmt.Errorf("target service ID %s not registered", proxy.TargetServiceID) } // Get bind info from config cfg, err := proxy.ParseConfig() if err != nil { - return nil, err + return nil, nil, err } // Construct almost all of the NodeService that needs to be registered by the @@ -604,7 +608,7 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*str pToken, err := uuid.GenerateUUID() if err != nil { - return nil, err + return nil, nil, err } // Initialize the managed proxy process. This doesn't start anything, @@ -612,7 +616,7 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*str // caller should call Proxy and use the returned ManagedProxy instance. proxyProcess, err := l.newProxyProcess(proxy, pToken) if err != nil { - return nil, err + return nil, nil, err } // Lock now. 
We can't lock earlier as l.Service would deadlock and shouldn't @@ -646,7 +650,7 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*str } // If no ports left (or auto ports disabled) fail if svc.Port < 1 { - return nil, fmt.Errorf("no port provided for proxy bind_port and none "+ + return nil, nil, fmt.Errorf("no port provided for proxy bind_port and none "+ " left in the allocated range [%d, %d]", l.config.ProxyBindMinPort, l.config.ProxyBindMaxPort) } @@ -654,7 +658,8 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*str proxy.ProxyService = svc // All set, add the proxy and return the service - if old, ok := l.managedProxies[svc.ID]; ok { + old, ok := l.managedProxies[svc.ID] + if ok { // Notify watchers of the existing proxy config that it's changing. Note // this is safe here even before the map is updated since we still hold the // state lock and the watcher can't re-read the new config until we return @@ -664,22 +669,23 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*str l.managedProxies[svc.ID] = &ManagedProxy{ Proxy: proxy, ProxyToken: pToken, - ManagedProxy: proxyProcess, + ProxyProcess: proxyProcess, WatchCh: make(chan struct{}), } // No need to trigger sync as proxy state is local only. - return svc, nil + return l.managedProxies[svc.ID], old, nil } // RemoveProxy is used to remove a proxy entry from the local state. -func (l *State) RemoveProxy(id string) error { +// This returns the proxy that was removed. +func (l *State) RemoveProxy(id string) (*ManagedProxy, error) { l.Lock() defer l.Unlock() p := l.managedProxies[id] if p == nil { - return fmt.Errorf("Proxy %s does not exist", id) + return nil, fmt.Errorf("Proxy %s does not exist", id) } delete(l.managedProxies, id) @@ -687,7 +693,7 @@ func (l *State) RemoveProxy(id string) error { close(p.WatchCh) // No need to trigger sync as proxy state is local only. 
- return nil + return p, nil } // Proxy returns the local proxy state. diff --git a/agent/local/state_test.go b/agent/local/state_test.go index dd887ccb1..f79249a73 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -1684,7 +1684,7 @@ func TestStateProxyManagement(t *testing.T) { p1 := structs.ConnectManagedProxy{ ExecMode: structs.ProxyExecModeDaemon, - Command: "consul connect proxy", + Command: []string{"consul", "connect", "proxy"}, TargetServiceID: "web", } @@ -1710,9 +1710,10 @@ func TestStateProxyManagement(t *testing.T) { require.NoError(err) // Should work now - svc, err := state.AddProxy(&p1, "fake-token") + pstate, err := state.AddProxy(&p1, "fake-token") require.NoError(err) + svc := pstate.Proxy.ProxyService assert.Equal("web-proxy", svc.ID) assert.Equal("web-proxy", svc.Service) assert.Equal(structs.ServiceKindConnectProxy, svc.Kind) @@ -1739,8 +1740,9 @@ func TestStateProxyManagement(t *testing.T) { // Second proxy should claim other port p2 := p1 p2.TargetServiceID = "cache" - svc2, err := state.AddProxy(&p2, "fake-token") + pstate2, err := state.AddProxy(&p2, "fake-token") require.NoError(err) + svc2 := pstate2.Proxy.ProxyService assert.Contains([]int{20000, 20001}, svc2.Port) assert.NotEqual(svc.Port, svc2.Port) @@ -1758,8 +1760,9 @@ func TestStateProxyManagement(t *testing.T) { "bind_port": 1234, "bind_address": "0.0.0.0", } - svc3, err := state.AddProxy(&p3, "fake-token") + pstate3, err := state.AddProxy(&p3, "fake-token") require.NoError(err) + svc3 := pstate3.Proxy.ProxyService require.Equal("0.0.0.0", svc3.Address) require.Equal(1234, svc3.Port) @@ -1771,8 +1774,9 @@ func TestStateProxyManagement(t *testing.T) { require.NotNil(gotP3) var ws memdb.WatchSet ws.Add(gotP3.WatchCh) - svc3, err = state.AddProxy(&p3updated, "fake-token") + pstate3, err = state.AddProxy(&p3updated, "fake-token") require.NoError(err) + svc3 = pstate3.Proxy.ProxyService require.Equal("0.0.0.0", svc3.Address) require.Equal(1234, svc3.Port) 
gotProxy3 := state.Proxy(svc3.ID) @@ -1782,19 +1786,20 @@ func TestStateProxyManagement(t *testing.T) { "watch should have fired so ws.Watch should not timeout") // Remove one of the auto-assigned proxies - err = state.RemoveProxy(svc2.ID) + _, err = state.RemoveProxy(svc2.ID) require.NoError(err) // Should be able to create a new proxy for that service with the port (it // should have been "freed"). p4 := p2 - svc4, err := state.AddProxy(&p4, "fake-token") + pstate4, err := state.AddProxy(&p4, "fake-token") require.NoError(err) + svc4 := pstate4.Proxy.ProxyService assert.Contains([]int{20000, 20001}, svc2.Port) assert.Equal(svc4.Port, svc2.Port, "should get the same port back that we freed") // Remove a proxy that doesn't exist should error - err = state.RemoveProxy("nope") + _, err = state.RemoveProxy("nope") require.Error(err) assert.Equal(&p4, state.Proxy(p4.ProxyService.ID).Proxy, diff --git a/agent/proxy/noop.go b/agent/proxy/noop.go new file mode 100644 index 000000000..9b35a2427 --- /dev/null +++ b/agent/proxy/noop.go @@ -0,0 +1,7 @@ +package proxy + +// Noop implements Proxy and does nothing. +type Noop struct{} + +func (p *Noop) Start() error { return nil } +func (p *Noop) Stop() error { return nil } diff --git a/agent/proxy/noop_test.go b/agent/proxy/noop_test.go new file mode 100644 index 000000000..77513ad29 --- /dev/null +++ b/agent/proxy/noop_test.go @@ -0,0 +1,9 @@ +package proxy + +import ( + "testing" +) + +func TestNoop_impl(t *testing.T) { + var _ Proxy = new(Noop) +} diff --git a/agent/structs/connect.go b/agent/structs/connect.go index 29330a652..b40091adf 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -66,6 +66,9 @@ type ConnectManagedProxy struct { // for ProxyExecModeScript. Command []string + // CommandDefault is the default command to execute if Command is empty. + CommandDefault []string `json:"-" hash:"ignore"` + // Config is the arbitrary configuration data provided with the registration. 
Config map[string]interface{} From fae8dc895117522601202146c943b33da5a7400c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 30 Apr 2018 21:12:55 -0700 Subject: [PATCH 210/627] agent/local: add Notify mechanism for proxy changes --- agent/local/proxy.go | 46 ------------------- agent/local/state.go | 96 +++++++++++++++++++++++++-------------- agent/local/state_test.go | 35 ++++++++++++++ 3 files changed, 98 insertions(+), 79 deletions(-) delete mode 100644 agent/local/proxy.go diff --git a/agent/local/proxy.go b/agent/local/proxy.go deleted file mode 100644 index 37484a32f..000000000 --- a/agent/local/proxy.go +++ /dev/null @@ -1,46 +0,0 @@ -package local - -import ( - "fmt" - "os/exec" - - "github.com/hashicorp/consul/agent/proxy" - "github.com/hashicorp/consul/agent/structs" -) - -// newProxyProcess returns the proxy.Proxy for the given ManagedProxy -// state entry. proxy.Proxy is the actual managed process. The returned value -// is the initialized struct but isn't explicitly started. -func (s *State) newProxyProcess(p *structs.ConnectManagedProxy, pToken string) (proxy.Proxy, error) { - switch p.ExecMode { - case structs.ProxyExecModeDaemon: - command := p.Command - if len(command) == 0 { - command = p.CommandDefault - } - - // This should never happen since validation should happen upstream - // but verify it because the alternative is to panic below. - if len(command) == 0 { - return nil, fmt.Errorf("daemon mode managed proxy requires command") - } - - // Build the command to execute. 
- var cmd exec.Cmd - cmd.Path = command[0] - cmd.Args = command[1:] - - // Build the daemon structure - return &proxy.Daemon{ - Command: &cmd, - ProxyToken: pToken, - Logger: s.logger, - }, nil - - case structs.ProxyExecModeScript: - return &proxy.Noop{}, nil - - default: - return nil, fmt.Errorf("unsupported managed proxy type: %q", p.ExecMode) - } -} diff --git a/agent/local/state.go b/agent/local/state.go index ecd3299fd..03dbbd96c 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -14,7 +14,6 @@ import ( "github.com/hashicorp/go-uuid" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/proxy" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/api" @@ -127,9 +126,6 @@ type ManagedProxy struct { // use service-scoped ACL tokens distributed externally. ProxyToken string - // ProxyProcess is the managed proxy itself that is running. - ProxyProcess proxy.Proxy - // WatchCh is a close-only chan that is closed when the proxy is removed or // updated. WatchCh chan struct{} @@ -187,19 +183,24 @@ type State struct { // registration) do not appear here as the agent doesn't need to manage their // process nor config. The _do_ still exist in services above though as // services with Kind == connect-proxy. - managedProxies map[string]*ManagedProxy + // + // managedProxyHandlers is a map of registered channel listeners that + // are sent a message each time a proxy changes via Add or RemoveProxy. + managedProxies map[string]*ManagedProxy + managedProxyHandlers map[chan<- struct{}]struct{} } // NewState creates a new local state for the agent. 
func NewState(c Config, lg *log.Logger, tokens *token.Store) *State { l := &State{ - config: c, - logger: lg, - services: make(map[string]*ServiceState), - checks: make(map[types.CheckID]*CheckState), - metadata: make(map[string]string), - tokens: tokens, - managedProxies: make(map[string]*ManagedProxy), + config: c, + logger: lg, + services: make(map[string]*ServiceState), + checks: make(map[types.CheckID]*CheckState), + metadata: make(map[string]string), + tokens: tokens, + managedProxies: make(map[string]*ManagedProxy), + managedProxyHandlers: make(map[chan<- struct{}]struct{}), } l.SetDiscardCheckOutput(c.DiscardCheckOutput) return l @@ -577,22 +578,22 @@ func (l *State) CriticalCheckStates() map[types.CheckID]*CheckState { // AddProxy returns the newly added proxy, any replaced proxy, and an error. // The second return value (replaced proxy) can be used to determine if // the process needs to be updated or not. -func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*ManagedProxy, *ManagedProxy, error) { +func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*ManagedProxy, error) { if proxy == nil { - return nil, nil, fmt.Errorf("no proxy") + return nil, fmt.Errorf("no proxy") } // Lookup the local service target := l.Service(proxy.TargetServiceID) if target == nil { - return nil, nil, fmt.Errorf("target service ID %s not registered", + return nil, fmt.Errorf("target service ID %s not registered", proxy.TargetServiceID) } // Get bind info from config cfg, err := proxy.ParseConfig() if err != nil { - return nil, nil, err + return nil, err } // Construct almost all of the NodeService that needs to be registered by the @@ -608,15 +609,7 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*Man pToken, err := uuid.GenerateUUID() if err != nil { - return nil, nil, err - } - - // Initialize the managed proxy process. This doesn't start anything, - // it only sets up the structures we'll use. 
To start the proxy, the - // caller should call Proxy and use the returned ManagedProxy instance. - proxyProcess, err := l.newProxyProcess(proxy, pToken) - if err != nil { - return nil, nil, err + return nil, err } // Lock now. We can't lock earlier as l.Service would deadlock and shouldn't @@ -650,7 +643,7 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*Man } // If no ports left (or auto ports disabled) fail if svc.Port < 1 { - return nil, nil, fmt.Errorf("no port provided for proxy bind_port and none "+ + return nil, fmt.Errorf("no port provided for proxy bind_port and none "+ " left in the allocated range [%d, %d]", l.config.ProxyBindMinPort, l.config.ProxyBindMaxPort) } @@ -658,8 +651,7 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*Man proxy.ProxyService = svc // All set, add the proxy and return the service - old, ok := l.managedProxies[svc.ID] - if ok { + if old, ok := l.managedProxies[svc.ID]; ok { // Notify watchers of the existing proxy config that it's changing. Note // this is safe here even before the map is updated since we still hold the // state lock and the watcher can't re-read the new config until we return @@ -667,14 +659,22 @@ func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*Man close(old.WatchCh) } l.managedProxies[svc.ID] = &ManagedProxy{ - Proxy: proxy, - ProxyToken: pToken, - ProxyProcess: proxyProcess, - WatchCh: make(chan struct{}), + Proxy: proxy, + ProxyToken: pToken, + WatchCh: make(chan struct{}), + } + + // Notify + for ch := range l.managedProxyHandlers { + // Do not block + select { + case ch <- struct{}{}: + default: + } } // No need to trigger sync as proxy state is local only. - return l.managedProxies[svc.ID], old, nil + return l.managedProxies[svc.ID], nil } // RemoveProxy is used to remove a proxy entry from the local state. 
@@ -692,6 +692,15 @@ func (l *State) RemoveProxy(id string) (*ManagedProxy, error) { // Notify watchers of the existing proxy config that it's changed. close(p.WatchCh) + // Notify + for ch := range l.managedProxyHandlers { + // Do not block + select { + case ch <- struct{}{}: + default: + } + } + // No need to trigger sync as proxy state is local only. return p, nil } @@ -715,6 +724,27 @@ func (l *State) Proxies() map[string]*ManagedProxy { return m } +// NotifyProxy will register a channel to receive messages when the +// configuration or set of proxies changes. This will not block on +// channel send so ensure the channel has a large enough buffer. +// +// NOTE(mitchellh): This could be more generalized but for my use case I +// only needed proxy events. In the future if it were to be generalized I +// would add a new Notify method and remove the proxy-specific ones. +func (l *State) NotifyProxy(ch chan<- struct{}) { + l.Lock() + defer l.Unlock() + l.managedProxyHandlers[ch] = struct{}{} +} + +// StopNotifyProxy will deregister a channel receiving proxy notifications. +// Pair this with all calls to NotifyProxy to clean up state. 
+func (l *State) StopNotifyProxy(ch chan<- struct{}) { + l.Lock() + defer l.Unlock() + delete(l.managedProxyHandlers, ch) +} + // Metadata returns the local node metadata fields that the // agent is aware of and are being kept in sync with the server func (l *State) Metadata() map[string]string { diff --git a/agent/local/state_test.go b/agent/local/state_test.go index f79249a73..800c017d6 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -1737,6 +1737,13 @@ func TestStateProxyManagement(t *testing.T) { assert.Equal(svc.Port, svcDup.Port) } + // Let's register a notifier now + notifyCh := make(chan struct{}, 1) + state.NotifyProxy(notifyCh) + defer state.StopNotifyProxy(notifyCh) + assert.Empty(notifyCh) + drainCh(notifyCh) + // Second proxy should claim other port p2 := p1 p2.TargetServiceID = "cache" @@ -1746,6 +1753,10 @@ func TestStateProxyManagement(t *testing.T) { assert.Contains([]int{20000, 20001}, svc2.Port) assert.NotEqual(svc.Port, svc2.Port) + // Should have a notification + assert.NotEmpty(notifyCh) + drainCh(notifyCh) + // Store this for later p2token := state.Proxy(svc2.ID).ProxyToken @@ -1755,6 +1766,9 @@ func TestStateProxyManagement(t *testing.T) { _, err = state.AddProxy(&p3, "fake-token") require.Error(err) + // Should have a notification but we'll do nothing so that the next + // receive should block (we set cap == 1 above) + // But if we set a port explicitly it should be OK p3.Config = map[string]interface{}{ "bind_port": 1234, @@ -1766,6 +1780,10 @@ func TestStateProxyManagement(t *testing.T) { require.Equal("0.0.0.0", svc3.Address) require.Equal(1234, svc3.Port) + // Should have a notification + assert.NotEmpty(notifyCh) + drainCh(notifyCh) + // Update config of an already registered proxy should work p3updated := p3 p3updated.Config["foo"] = "bar" @@ -1785,10 +1803,16 @@ func TestStateProxyManagement(t *testing.T) { assert.False(ws.Watch(time.After(500*time.Millisecond)), "watch should have fired so ws.Watch should 
not timeout") + drainCh(notifyCh) + // Remove one of the auto-assigned proxies _, err = state.RemoveProxy(svc2.ID) require.NoError(err) + // Should have a notification + assert.NotEmpty(notifyCh) + drainCh(notifyCh) + // Should be able to create a new proxy for that service with the port (it // should have been "freed"). p4 := p2 @@ -1829,3 +1853,14 @@ func TestStateProxyManagement(t *testing.T) { } } } + +// drainCh drains a channel by reading messages until it would block. +func drainCh(ch chan struct{}) { + for { + select { + case <-ch: + default: + return + } + } +} From a2167a7fd10ff20490109ef6989c5ac5bdab9919 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Mon, 30 Apr 2018 23:35:23 -0700 Subject: [PATCH 211/627] agent/proxy: manager and basic tests, not great coverage yet coming soon --- agent/agent.go | 62 ++------ agent/local/testing.go | 19 +++ agent/proxy/daemon_test.go | 4 + agent/proxy/manager.go | 300 ++++++++++++++++++++++++++++++++++++ agent/proxy/manager_test.go | 79 ++++++++++ agent/proxy/proxy_test.go | 24 ++- agent/proxy/test.go | 13 ++ agent/structs/connect.go | 5 + 8 files changed, 448 insertions(+), 58 deletions(-) create mode 100644 agent/local/testing.go create mode 100644 agent/proxy/manager.go create mode 100644 agent/proxy/manager_test.go create mode 100644 agent/proxy/test.go diff --git a/agent/agent.go b/agent/agent.go index f70c16379..365a76af0 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2038,58 +2038,38 @@ func (a *Agent) AddProxy(proxy *structs.ConnectManagedProxy, persist bool) error // Lookup the target service token in state if there is one. token := a.State.ServiceToken(proxy.TargetServiceID) - // Determine if we need to default the command - if proxy.ExecMode == structs.ProxyExecModeDaemon && len(proxy.Command) == 0 { - // We use the globally configured default command. If it is empty - // then we need to determine the subcommand for this agent. 
- cmd := a.config.ConnectProxyDefaultDaemonCommand - if len(cmd) == 0 { - var err error - cmd, err = a.defaultProxyCommand() - if err != nil { - return err + /* + // Determine if we need to default the command + if proxy.ExecMode == structs.ProxyExecModeDaemon && len(proxy.Command) == 0 { + // We use the globally configured default command. If it is empty + // then we need to determine the subcommand for this agent. + cmd := a.config.ConnectProxyDefaultDaemonCommand + if len(cmd) == 0 { + var err error + cmd, err = a.defaultProxyCommand() + if err != nil { + return err + } } - } - proxy.CommandDefault = cmd - } + proxy.CommandDefault = cmd + } + */ // Add the proxy to local state first since we may need to assign a port which // needs to be coordinate under state lock. AddProxy will generate the // NodeService for the proxy populated with the allocated (or configured) port // and an ID, but it doesn't add it to the agent directly since that could // deadlock and we may need to coordinate adding it and persisting etc. - proxyState, oldProxy, err := a.State.AddProxy(proxy, token) + proxyState, err := a.State.AddProxy(proxy, token) if err != nil { return err } proxyService := proxyState.Proxy.ProxyService - // If we replaced an existing proxy, stop that process. - if oldProxy != nil { - if err := oldProxy.ProxyProcess.Stop(); err != nil { - a.logger.Printf( - "[ERR] error stopping managed proxy, may still be running: %s", - err) - } - } - - // Start the proxy process - if err := proxyState.ProxyProcess.Start(); err != nil { - a.State.RemoveProxy(proxyService.ID) - return fmt.Errorf("error starting managed proxy: %s", err) - } - // TODO(banks): register proxy health checks. 
err = a.AddService(proxyService, nil, persist, token) if err != nil { - // Stop the proxy process if it was started - if err := proxyState.ProxyProcess.Stop(); err != nil { - a.logger.Printf( - "[ERR] error stopping managed proxy, may still be running: %s", - err) - } - // Remove the state too a.State.RemoveProxy(proxyService.ID) return err @@ -2107,18 +2087,10 @@ func (a *Agent) RemoveProxy(proxyID string, persist bool) error { } // Remove the proxy from the local state - proxyState, err := a.State.RemoveProxy(proxyID) - if err != nil { + if _, err := a.State.RemoveProxy(proxyID); err != nil { return err } - // Stop the process. The proxy implementation is expected to perform - // retries so if this fails then retries have already been performed and - // the most we can do is just error. - if err := proxyState.ProxyProcess.Stop(); err != nil { - return fmt.Errorf("error stopping managed proxy process: %s", err) - } - // TODO(banks): unpersist proxy return nil diff --git a/agent/local/testing.go b/agent/local/testing.go new file mode 100644 index 000000000..6ca9d12ae --- /dev/null +++ b/agent/local/testing.go @@ -0,0 +1,19 @@ +package local + +import ( + "log" + "os" + + "github.com/hashicorp/consul/agent/token" + "github.com/mitchellh/go-testing-interface" +) + +// TestState returns a configured *State for testing. 
+func TestState(t testing.T) *State { + result := NewState(Config{ + ProxyBindMinPort: 20000, + ProxyBindMaxPort: 20500, + }, log.New(os.Stderr, "", log.LstdFlags), &token.Store{}) + result.TriggerSyncChanges = func() {} + return result +} diff --git a/agent/proxy/daemon_test.go b/agent/proxy/daemon_test.go index dbd2099bc..22948bdaf 100644 --- a/agent/proxy/daemon_test.go +++ b/agent/proxy/daemon_test.go @@ -16,6 +16,8 @@ func TestDaemon_impl(t *testing.T) { } func TestDaemonStartStop(t *testing.T) { + t.Parallel() + require := require.New(t) td, closer := testTempDir(t) defer closer() @@ -63,6 +65,8 @@ func TestDaemonStartStop(t *testing.T) { } func TestDaemonRestart(t *testing.T) { + t.Parallel() + require := require.New(t) td, closer := testTempDir(t) defer closer() diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go new file mode 100644 index 000000000..05445d93a --- /dev/null +++ b/agent/proxy/manager.go @@ -0,0 +1,300 @@ +package proxy + +import ( + "fmt" + "log" + "os" + "os/exec" + "sync" + + "github.com/hashicorp/consul/agent/local" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/go-multierror" +) + +// Manager starts, stops, snapshots, and restores managed proxies. +// +// The manager will not start or stop any processes until Start is called. +// Prior to this, any configuration, snapshot loading, etc. can be done. +// Even if a process is no longer running after loading the snapshot, it +// will not be restarted until Start is called. +// +// The Manager works by subscribing to change notifications on a local.State +// structure. Whenever a change is detected, the Manager syncs its internal +// state with the local.State and starts/stops any necessary proxies. The +// manager never holds a lock on local.State (except to read the proxies) +// and state updates may occur while the Manger is syncing. This is okay, +// since a change notification will be queued to trigger another sync. 
+// +// NOTE(mitchellh): Change notifications are not coalesced currently. Under +// conditions where managed proxy configurations are changing in a hot +// loop, it is possible for the manager to constantly attempt to sync. This +// is unlikely, but its also easy to introduce basic coalescing (even over +// millisecond intervals) to prevent total waste compute cycles. +type Manager struct { + // State is the local state that is the source of truth for all + // configured managed proxies. + State *local.State + + // Logger is the logger for information about manager behavior. + // Output for proxies will not go here generally but varies by proxy + // implementation type. + Logger *log.Logger + + // lock is held while reading/writing any internal state of the manager. + // cond is a condition variable on lock that is broadcasted for runState + // changes. + lock *sync.Mutex + cond *sync.Cond + + // runState is the current state of the manager. To read this the + // lock must be held. The condition variable cond can be waited on + // for changes to this value. + runState managerRunState + + proxies map[string]Proxy +} + +// defaultLogger is the defaultLogger for NewManager so there it is never nil +var defaultLogger = log.New(os.Stderr, "", log.LstdFlags) + +// NewManager initializes a Manager. After initialization, the exported +// fields should be configured as desired. To start the Manager, execute +// Run in a goroutine. +func NewManager() *Manager { + var lock sync.Mutex + return &Manager{ + Logger: defaultLogger, + lock: &lock, + cond: sync.NewCond(&lock), + proxies: make(map[string]Proxy), + } +} + +// managerRunState is the state of the Manager. 
+// +// This is a basic state machine with the following transitions: +// +// * idle => running, stopped +// * running => stopping, stopped +// * stopping => stopped +// * stopped => <> +// +type managerRunState uint8 + +const ( + managerStateIdle managerRunState = iota + managerStateRunning + managerStateStopping + managerStateStopped +) + +// Close stops the manager. Managed processes are NOT stopped. +func (m *Manager) Close() error { + m.lock.Lock() + defer m.lock.Unlock() + + for { + switch m.runState { + case managerStateIdle: + // Idle so just set it to stopped and return. We notify + // the condition variable in case others are waiting. + m.runState = managerStateStopped + m.cond.Broadcast() + return nil + + case managerStateRunning: + // Set the state to stopping and broadcast to all waiters, + // since Run is sitting on cond.Wait. + m.runState = managerStateStopping + m.cond.Broadcast() + m.cond.Wait() // Wait on the stopping event + + case managerStateStopping: + // Still stopping, wait... + m.cond.Wait() + + case managerStateStopped: + // Stopped, target state reached + return nil + } + } +} + +// Kill will Close the manager and Kill all proxies that were being managed. +// +// This is safe to call with Close already called since Close is idempotent. +func (m *Manager) Kill() error { + // Close first so that we aren't getting changes in proxies + if err := m.Close(); err != nil { + return err + } + + m.lock.Lock() + defer m.lock.Unlock() + + var err error + for id, proxy := range m.proxies { + if err := proxy.Stop(); err != nil { + err = multierror.Append( + err, fmt.Errorf("failed to stop proxy %q: %s", id, err)) + continue + } + + // Remove it since it is already stopped successfully + delete(m.proxies, id) + } + + return err +} + +// Run syncs with the local state and supervises existing proxies. +// +// This blocks and should be run in a goroutine. If another Run is already +// executing, this will do nothing and return. 
+func (m *Manager) Run() { + m.lock.Lock() + if m.runState != managerStateIdle { + m.lock.Unlock() + return + } + + // Set the state to running + m.runState = managerStateRunning + m.lock.Unlock() + + // Start a goroutine that just waits for a stop request + stopCh := make(chan struct{}) + go func() { + defer close(stopCh) + m.lock.Lock() + defer m.lock.Unlock() + + // We wait for anything not running, just so we're more resilient + // in the face of state machine issues. Basically any state change + // will cause us to quit. + for m.runState != managerStateRunning { + m.cond.Wait() + } + }() + + // When we exit, we set the state to stopped and broadcast to any + // waiting Close functions that they can return. + defer func() { + m.lock.Lock() + m.runState = managerStateStopped + m.cond.Broadcast() + m.lock.Unlock() + }() + + // Register for proxy catalog change notifications + notifyCh := make(chan struct{}, 1) + m.State.NotifyProxy(notifyCh) + defer m.State.StopNotifyProxy(notifyCh) + + for { + // Sync first, before waiting on further notifications so that + // we can start with a known-current state. + m.sync() + + select { + case <-notifyCh: + // Changes exit select so we can reloop and reconfigure proxies + + case <-stopCh: + // Stop immediately, no cleanup + return + } + } +} + +// sync syncs data with the local state store to update the current manager +// state and start/stop necessary proxies. +func (m *Manager) sync() { + m.lock.Lock() + defer m.lock.Unlock() + + // Get the current set of proxies + state := m.State.Proxies() + + // Go through our existing proxies that we're currently managing to + // determine if they're still in the state or not. If they're in the + // state, we need to diff to determine if we're starting a new proxy + // If they're not in the state, then we need to stop the proxy since it + // is now orphaned. + for id, proxy := range m.proxies { + // Get the proxy. + stateProxy, ok := state[id] + if !ok { + // Proxy is deregistered. 
Remove it from our map and stop it + delete(m.proxies, id) + if err := proxy.Stop(); err != nil { + m.Logger.Printf("[ERROR] agent/proxy: failed to stop deregistered proxy for %q: %s", id, err) + } + + continue + } + + // Proxy is in the state. Always delete it so that the remainder + // are NEW proxies that we start after this loop. + delete(state, id) + + // TODO: diff and restart if necessary + println(stateProxy) + } + + // Remaining entries in state are new proxies. Start them! + for id, stateProxy := range state { + proxy, err := m.newProxy(stateProxy) + if err != nil { + m.Logger.Printf("[ERROR] agent/proxy: failed to initialize proxy for %q: %s", id, err) + continue + } + + if err := proxy.Start(); err != nil { + m.Logger.Printf("[ERROR] agent/proxy: failed to start proxy for %q: %s", id, err) + continue + } + + m.proxies[id] = proxy + } +} + +// newProxy creates the proper Proxy implementation for the configured +// local managed proxy. +func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { + // Defensive because the alternative is to panic which is not desired + if mp == nil || mp.Proxy == nil { + return nil, fmt.Errorf("internal error: nil *local.ManagedProxy or Proxy field") + } + + p := mp.Proxy + switch p.ExecMode { + case structs.ProxyExecModeDaemon: + command := p.Command + if len(command) == 0 { + command = p.CommandDefault + } + + // This should never happen since validation should happen upstream + // but verify it because the alternative is to panic below. + if len(command) == 0 { + return nil, fmt.Errorf("daemon mode managed proxy requires command") + } + + // Build the command to execute. 
+ var cmd exec.Cmd + cmd.Path = command[0] + cmd.Args = command[1:] + + // Build the daemon structure + return &Daemon{ + Command: &cmd, + ProxyToken: mp.ProxyToken, + Logger: m.Logger, + }, nil + + default: + return nil, fmt.Errorf("unsupported managed proxy type: %q", p.ExecMode) + } +} diff --git a/agent/proxy/manager_test.go b/agent/proxy/manager_test.go new file mode 100644 index 000000000..13a57d7b7 --- /dev/null +++ b/agent/proxy/manager_test.go @@ -0,0 +1,79 @@ +package proxy + +import ( + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/hashicorp/consul/agent/local" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/testutil/retry" + "github.com/stretchr/testify/require" +) + +func TestManagerClose_noRun(t *testing.T) { + t.Parallel() + + // Really we're testing that it doesn't deadlock here. + m := NewManager() + require.NoError(t, m.Close()) + + // Close again for sanity + require.NoError(t, m.Close()) +} + +// Test that Run performs an initial sync (if local.State is already set) +// rather than waiting for a notification from the local state. 
+func TestManagerRun_initialSync(t *testing.T) { + t.Parallel() + + state := testState(t) + m := NewManager() + m.State = state + defer m.Kill() + + // Add the proxy before we start the manager to verify initial sync + td, closer := testTempDir(t) + defer closer() + path := filepath.Join(td, "file") + testStateProxy(t, state, helperProcess("restart", path)) + + // Start the manager + go m.Run() + + // We should see the path appear shortly + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + r.Fatalf("error waiting for path: %s", err) + }) +} + +func testState(t *testing.T) *local.State { + state := local.TestState(t) + require.NoError(t, state.AddService(&structs.NodeService{ + Service: "web", + }, "web")) + + return state +} + +// testStateProxy registers a proxy with the given local state and the command +// (expected to be from the helperProcess function call). It returns the +// ID for deregistration. +func testStateProxy(t *testing.T, state *local.State, cmd *exec.Cmd) string { + command := []string{cmd.Path} + command = append(command, cmd.Args...) + + p, err := state.AddProxy(&structs.ConnectManagedProxy{ + ExecMode: structs.ProxyExecModeDaemon, + Command: command, + TargetServiceID: "web", + }, "web") + require.NoError(t, err) + + return p.Proxy.ProxyService.ID +} diff --git a/agent/proxy/proxy_test.go b/agent/proxy/proxy_test.go index fa8eef128..1d6df99ef 100644 --- a/agent/proxy/proxy_test.go +++ b/agent/proxy/proxy_test.go @@ -31,16 +31,19 @@ func testTempDir(t *testing.T) (string, func()) { } } +// helperProcessSentinel is a sentinel value that is put as the first +// argument following "--" and is used to determine if TestHelperProcess +// should run. +const helperProcessSentinel = "WANT_HELPER_PROCESS" + // helperProcess returns an *exec.Cmd that can be used to execute the // TestHelperProcess function below. This can be used to test multi-process // interactions. 
func helperProcess(s ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcess", "--"} + cs := []string{"-test.run=TestHelperProcess", "--", helperProcessSentinel} cs = append(cs, s...) - env := []string{"GO_WANT_HELPER_PROCESS=1"} cmd := exec.Command(os.Args[0], cs...) - cmd.Env = append(env, os.Environ()...) cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return cmd @@ -49,12 +52,6 @@ func helperProcess(s ...string) *exec.Cmd { // This is not a real test. This is just a helper process kicked off by tests // using the helperProcess helper function. func TestHelperProcess(t *testing.T) { - if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { - return - } - - defer os.Exit(0) - args := os.Args for len(args) > 0 { if args[0] == "--" { @@ -65,15 +62,16 @@ func TestHelperProcess(t *testing.T) { args = args[1:] } - if len(args) == 0 { - fmt.Fprintf(os.Stderr, "No command\n") - os.Exit(2) + if len(args) == 0 || args[0] != helperProcessSentinel { + return } + defer os.Exit(0) + args = args[1:] // strip sentinel value cmd, args := args[0], args[1:] switch cmd { // While running, this creates a file in the given directory (args[0]) - // and deletes it only whe nit is stopped. + // and deletes it only when it is stopped. case "start-stop": ch := make(chan os.Signal, 1) signal.Notify(ch, os.Interrupt) diff --git a/agent/proxy/test.go b/agent/proxy/test.go new file mode 100644 index 000000000..b6b35bb04 --- /dev/null +++ b/agent/proxy/test.go @@ -0,0 +1,13 @@ +package proxy + +// defaultTestProxy is the test proxy that is instantiated for proxies with +// an execution mode of ProxyExecModeTest. +var defaultTestProxy = testProxy{} + +// testProxy is a Proxy implementation that stores state in-memory and +// is only used for unit testing. It is in a non _test.go file because the +// factory for initializing it is exported (newProxy). 
+type testProxy struct { + Start uint32 + Stop uint32 +} diff --git a/agent/structs/connect.go b/agent/structs/connect.go index b40091adf..02b5ba1fa 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -33,6 +33,11 @@ const ( // ProxyExecModeScript executes a proxy config script on each change to it's // config. ProxyExecModeScript + + // ProxyExecModeTest tracks the start/stop of the proxy in-memory + // and is only used for tests. This shouldn't be set outside of tests, + // but even if it is it has no external effect. + ProxyExecModeTest ) // String implements Stringer From 8ce3deac5d1bee9424dd36fc0b3eeea421f09bb6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 09:57:06 -0700 Subject: [PATCH 212/627] agent/proxy: test removing proxies and stopping them --- agent/proxy/manager.go | 4 +- agent/proxy/manager_test.go | 99 ++++++++++++++++++++++++++++++++----- agent/proxy/proxy_test.go | 13 +++++ 3 files changed, 103 insertions(+), 13 deletions(-) diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index 05445d93a..765e9f022 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -173,7 +173,7 @@ func (m *Manager) Run() { // We wait for anything not running, just so we're more resilient // in the face of state machine issues. Basically any state change // will cause us to quit. - for m.runState != managerStateRunning { + for m.runState == managerStateRunning { m.cond.Wait() } }() @@ -240,7 +240,7 @@ func (m *Manager) sync() { delete(state, id) // TODO: diff and restart if necessary - println(stateProxy) + println("DIFF", id, stateProxy) } // Remaining entries in state are new proxies. Start them! 
diff --git a/agent/proxy/manager_test.go b/agent/proxy/manager_test.go index 13a57d7b7..eba7a674a 100644 --- a/agent/proxy/manager_test.go +++ b/agent/proxy/manager_test.go @@ -5,6 +5,7 @@ import ( "os/exec" "path/filepath" "testing" + "time" "github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/structs" @@ -28,7 +29,7 @@ func TestManagerClose_noRun(t *testing.T) { func TestManagerRun_initialSync(t *testing.T) { t.Parallel() - state := testState(t) + state := local.TestState(t) m := NewManager() m.State = state defer m.Kill() @@ -37,7 +38,7 @@ func TestManagerRun_initialSync(t *testing.T) { td, closer := testTempDir(t) defer closer() path := filepath.Join(td, "file") - testStateProxy(t, state, helperProcess("restart", path)) + testStateProxy(t, state, "web", helperProcess("restart", path)) // Start the manager go m.Run() @@ -52,27 +53,103 @@ func TestManagerRun_initialSync(t *testing.T) { }) } -func testState(t *testing.T) *local.State { - state := local.TestState(t) - require.NoError(t, state.AddService(&structs.NodeService{ - Service: "web", - }, "web")) +func TestManagerRun_syncNew(t *testing.T) { + t.Parallel() - return state + state := local.TestState(t) + m := NewManager() + m.State = state + defer m.Kill() + + // Start the manager + go m.Run() + + // Sleep a bit, this is just an attempt for Run to already be running. + // Its not a big deal if this sleep doesn't happen (slow CI). 
+ time.Sleep(100 * time.Millisecond) + + // Add the first proxy + td, closer := testTempDir(t) + defer closer() + path := filepath.Join(td, "file") + testStateProxy(t, state, "web", helperProcess("restart", path)) + + // We should see the path appear shortly + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + r.Fatalf("error waiting for path: %s", err) + }) + + // Add another proxy + path = path + "2" + testStateProxy(t, state, "db", helperProcess("restart", path)) + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + r.Fatalf("error waiting for path: %s", err) + }) +} + +func TestManagerRun_syncDelete(t *testing.T) { + t.Parallel() + + state := local.TestState(t) + m := NewManager() + m.State = state + defer m.Kill() + + // Start the manager + go m.Run() + + // Add the first proxy + td, closer := testTempDir(t) + defer closer() + path := filepath.Join(td, "file") + id := testStateProxy(t, state, "web", helperProcess("restart", path)) + + // We should see the path appear shortly + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + r.Fatalf("error waiting for path: %s", err) + }) + + // Remove the proxy + _, err := state.RemoveProxy(id) + require.NoError(t, err) + + // File should disappear as process is killed + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + r.Fatalf("path exists") + } + }) } // testStateProxy registers a proxy with the given local state and the command // (expected to be from the helperProcess function call). It returns the // ID for deregistration. -func testStateProxy(t *testing.T, state *local.State, cmd *exec.Cmd) string { +func testStateProxy(t *testing.T, state *local.State, service string, cmd *exec.Cmd) string { command := []string{cmd.Path} command = append(command, cmd.Args...) 
+ require.NoError(t, state.AddService(&structs.NodeService{ + Service: service, + }, "token")) + p, err := state.AddProxy(&structs.ConnectManagedProxy{ ExecMode: structs.ProxyExecModeDaemon, Command: command, - TargetServiceID: "web", - }, "web") + TargetServiceID: service, + }, "token") require.NoError(t, err) return p.Proxy.ProxyService.ID diff --git a/agent/proxy/proxy_test.go b/agent/proxy/proxy_test.go index 1d6df99ef..11994b1bf 100644 --- a/agent/proxy/proxy_test.go +++ b/agent/proxy/proxy_test.go @@ -91,6 +91,10 @@ func TestHelperProcess(t *testing.T) { // exists. When that file is removed, this process exits. This can be // used to test restarting. case "restart": + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + defer signal.Stop(ch) + // Write the file path := args[0] if err := ioutil.WriteFile(path, []byte("hello"), 0644); err != nil { @@ -105,6 +109,15 @@ func TestHelperProcess(t *testing.T) { if _, err := os.Stat(path); os.IsNotExist(err) { break } + + select { + case <-ch: + // We received an interrupt, clean exit + os.Remove(path) + break + + default: + } } default: From 6884654c9da8515044701341d0ab90d09e27f9df Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 11:02:58 -0700 Subject: [PATCH 213/627] agent/proxy: detect config change to stop/start proxies --- agent/proxy/daemon.go | 16 +++++++ agent/proxy/daemon_test.go | 91 +++++++++++++++++++++++++++++++++++++ agent/proxy/manager.go | 37 ++++++++++----- agent/proxy/manager_test.go | 47 +++++++++++++++++++ agent/proxy/noop.go | 5 +- agent/proxy/proxy.go | 8 ++++ 6 files changed, 190 insertions(+), 14 deletions(-) diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index 231f25cb8..c43eb48a1 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -5,6 +5,7 @@ import ( "log" "os" "os/exec" + "reflect" "sync" "time" ) @@ -198,3 +199,18 @@ func (p *Daemon) Stop() error { return err //return p.Command.Process.Kill() } + +// Equal implements Proxy to 
check for equality. +func (p *Daemon) Equal(raw Proxy) bool { + p2, ok := raw.(*Daemon) + if !ok { + return false + } + + // We compare equality on a subset of the command configuration + return p.ProxyToken == p2.ProxyToken && + p.Command.Path == p2.Command.Path && + p.Command.Dir == p2.Command.Dir && + reflect.DeepEqual(p.Command.Args, p2.Command.Args) && + reflect.DeepEqual(p.Command.Env, p2.Command.Env) +} diff --git a/agent/proxy/daemon_test.go b/agent/proxy/daemon_test.go index 22948bdaf..a1638b266 100644 --- a/agent/proxy/daemon_test.go +++ b/agent/proxy/daemon_test.go @@ -3,6 +3,7 @@ package proxy import ( "io/ioutil" "os" + "os/exec" "path/filepath" "testing" @@ -97,3 +98,93 @@ func TestDaemonRestart(t *testing.T) { // File should re-appear because the process is restart waitFile() } + +func TestDaemonEqual(t *testing.T) { + cases := []struct { + Name string + D1, D2 Proxy + Expected bool + }{ + { + "Different type", + &Daemon{ + Command: &exec.Cmd{}, + }, + &Noop{}, + false, + }, + + { + "Nil", + &Daemon{ + Command: &exec.Cmd{}, + }, + nil, + false, + }, + + { + "Equal", + &Daemon{ + Command: &exec.Cmd{}, + }, + &Daemon{ + Command: &exec.Cmd{}, + }, + true, + }, + + { + "Different path", + &Daemon{ + Command: &exec.Cmd{Path: "/foo"}, + }, + &Daemon{ + Command: &exec.Cmd{Path: "/bar"}, + }, + false, + }, + + { + "Different dir", + &Daemon{ + Command: &exec.Cmd{Dir: "/foo"}, + }, + &Daemon{ + Command: &exec.Cmd{Dir: "/bar"}, + }, + false, + }, + + { + "Different args", + &Daemon{ + Command: &exec.Cmd{Args: []string{"foo"}}, + }, + &Daemon{ + Command: &exec.Cmd{Args: []string{"bar"}}, + }, + false, + }, + + { + "Different token", + &Daemon{ + Command: &exec.Cmd{}, + ProxyToken: "one", + }, + &Daemon{ + Command: &exec.Cmd{}, + ProxyToken: "two", + }, + false, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + actual := tc.D1.Equal(tc.D2) + require.Equal(t, tc.Expected, actual) + }) + } +} diff --git a/agent/proxy/manager.go 
b/agent/proxy/manager.go index 765e9f022..d2ab8a106 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -225,22 +225,35 @@ func (m *Manager) sync() { for id, proxy := range m.proxies { // Get the proxy. stateProxy, ok := state[id] - if !ok { - // Proxy is deregistered. Remove it from our map and stop it - delete(m.proxies, id) - if err := proxy.Stop(); err != nil { - m.Logger.Printf("[ERROR] agent/proxy: failed to stop deregistered proxy for %q: %s", id, err) + if ok { + // Remove the proxy from the state so we don't start it new. + delete(state, id) + + // Make the proxy so we can compare. This does not start it. + proxy2, err := m.newProxy(stateProxy) + if err != nil { + m.Logger.Printf("[ERROR] agent/proxy: failed to initialize proxy for %q: %s", id, err) + continue } - continue + // If the proxies are equal, then do nothing + if proxy.Equal(proxy2) { + continue + } + + // Proxies are not equal, so we should stop it. We add it + // back to the state here (unlikely case) so the loop below starts + // the new one. + state[id] = stateProxy + + // Continue out of `if` as if proxy didn't exist so we stop it } - // Proxy is in the state. Always delete it so that the remainder - // are NEW proxies that we start after this loop. - delete(state, id) - - // TODO: diff and restart if necessary - println("DIFF", id, stateProxy) + // Proxy is deregistered. Remove it from our map and stop it + delete(m.proxies, id) + if err := proxy.Stop(); err != nil { + m.Logger.Printf("[ERROR] agent/proxy: failed to stop deregistered proxy for %q: %s", id, err) + } } // Remaining entries in state are new proxies. Start them! 
diff --git a/agent/proxy/manager_test.go b/agent/proxy/manager_test.go index eba7a674a..97086d491 100644 --- a/agent/proxy/manager_test.go +++ b/agent/proxy/manager_test.go @@ -134,6 +134,53 @@ func TestManagerRun_syncDelete(t *testing.T) { }) } +func TestManagerRun_syncUpdate(t *testing.T) { + t.Parallel() + + state := local.TestState(t) + m := NewManager() + m.State = state + defer m.Kill() + + // Start the manager + go m.Run() + + // Add the first proxy + td, closer := testTempDir(t) + defer closer() + path := filepath.Join(td, "file") + testStateProxy(t, state, "web", helperProcess("restart", path)) + + // We should see the path appear shortly + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + r.Fatalf("error waiting for path: %s", err) + }) + + // Update the proxy with a new path + oldPath := path + path = path + "2" + testStateProxy(t, state, "web", helperProcess("restart", path)) + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + r.Fatalf("error waiting for path: %s", err) + }) + + // Old path should be gone + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(oldPath) + if err == nil { + r.Fatalf("old path exists") + } + }) +} + // testStateProxy registers a proxy with the given local state and the command // (expected to be from the helperProcess function call). It returns the // ID for deregistration. diff --git a/agent/proxy/noop.go b/agent/proxy/noop.go index 9b35a2427..9ce013554 100644 --- a/agent/proxy/noop.go +++ b/agent/proxy/noop.go @@ -3,5 +3,6 @@ package proxy // Noop implements Proxy and does nothing. 
type Noop struct{} -func (p *Noop) Start() error { return nil } -func (p *Noop) Stop() error { return nil } +func (p *Noop) Start() error { return nil } +func (p *Noop) Stop() error { return nil } +func (p *Noop) Equal(Proxy) bool { return true } diff --git a/agent/proxy/proxy.go b/agent/proxy/proxy.go index a07bb5681..549a6ee26 100644 --- a/agent/proxy/proxy.go +++ b/agent/proxy/proxy.go @@ -31,4 +31,12 @@ type Proxy interface { // it should disallow Start from working again. If the proxy is already // stopped, this should not return an error. Stop() error + + // Equal returns true if the argument is equal to the proxy being called. + // This is called by the manager to determine if a change in configuration + // results in a proxy that needs to be restarted or not. If Equal returns + // false, then the manager will stop the old proxy and start the new one. + // If Equal returns true, the old proxy will remain running and the new + // one will be ignored. + Equal(Proxy) bool } From 669268f85c670027b991c7262362b1a5e2bae3f1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 11:38:18 -0700 Subject: [PATCH 214/627] agent: start proxy manager --- agent/agent.go | 48 ++++++++++++++++++++++-------------- agent/agent_endpoint_test.go | 2 +- agent/proxy/daemon.go | 16 ++++++++++-- agent/proxy/manager.go | 4 ++- agent/structs/connect.go | 2 ++ 5 files changed, 50 insertions(+), 22 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 365a76af0..79c9eb112 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -27,6 +27,7 @@ import ( "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/local" + "github.com/hashicorp/consul/agent/proxy" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/systemd" "github.com/hashicorp/consul/agent/token" @@ -200,6 +201,9 @@ type Agent struct { // be updated at runtime, so should always be used instead of going to // the 
configuration directly. tokens *token.Store + + // proxyManager is the proxy process manager for managed Connect proxies. + proxyManager *proxy.Manager } func New(c *config.RuntimeConfig) (*Agent, error) { @@ -353,6 +357,14 @@ func (a *Agent) Start() error { return err } + // create the proxy process manager and start it. This is purposely + // done here after the local state above is loaded in so we can have + // a more accurate initial state view. + a.proxyManager = proxy.NewManager() + a.proxyManager.State = a.State + a.proxyManager.Logger = a.logger + go a.proxyManager.Run() + // Start watching for critical services to deregister, based on their // checks. go a.reapServices() @@ -1269,9 +1281,11 @@ func (a *Agent) ShutdownAgent() error { chk.Stop() } - // Unload all our proxies so that we stop the running processes. - if err := a.unloadProxies(); err != nil { - a.logger.Printf("[WARN] agent: error stopping managed proxies: %s", err) + // Stop the proxy manager + // NOTE(mitchellh): we use Kill for now to kill the processes since + // snapshotting isn't implemented. This should change to Close later. + if err := a.proxyManager.Kill(); err != nil { + a.logger.Printf("[WARN] agent: error shutting down proxy manager: %s", err) } var err error @@ -2038,23 +2052,21 @@ func (a *Agent) AddProxy(proxy *structs.ConnectManagedProxy, persist bool) error // Lookup the target service token in state if there is one. token := a.State.ServiceToken(proxy.TargetServiceID) - /* - // Determine if we need to default the command - if proxy.ExecMode == structs.ProxyExecModeDaemon && len(proxy.Command) == 0 { - // We use the globally configured default command. If it is empty - // then we need to determine the subcommand for this agent. 
- cmd := a.config.ConnectProxyDefaultDaemonCommand - if len(cmd) == 0 { - var err error - cmd, err = a.defaultProxyCommand() - if err != nil { - return err - } + // Determine if we need to default the command + if proxy.ExecMode == structs.ProxyExecModeDaemon && len(proxy.Command) == 0 { + // We use the globally configured default command. If it is empty + // then we need to determine the subcommand for this agent. + cmd := a.config.ConnectProxyDefaultDaemonCommand + if len(cmd) == 0 { + var err error + cmd, err = a.defaultProxyCommand() + if err != nil { + return err } - - proxy.CommandDefault = cmd } - */ + + proxy.CommandDefault = cmd + } // Add the proxy to local state first since we may need to assign a port which // needs to be coordinate under state lock. AddProxy will generate the diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 26a04dddd..e6a47cbaa 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -70,7 +70,7 @@ func TestAgent_Services(t *testing.T) { }, TargetServiceID: "mysql", } - _, _, err := a.State.AddProxy(prxy1, "") + _, err := a.State.AddProxy(prxy1, "") require.NoError(t, err) req, _ := http.NewRequest("GET", "/v1/agent/services", nil) diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index c43eb48a1..1d716950b 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -7,6 +7,7 @@ import ( "os/exec" "reflect" "sync" + "syscall" "time" ) @@ -146,9 +147,15 @@ func (p *Daemon) keepAlive(stopCh <-chan struct{}) { } - _, err := process.Wait() + ps, err := process.Wait() process = nil - p.Logger.Printf("[INFO] agent/proxy: daemon exited: %s", err) + if err != nil { + p.Logger.Printf("[INFO] agent/proxy: daemon exited with error: %s", err) + } else if status, ok := ps.Sys().(syscall.WaitStatus); ok { + p.Logger.Printf( + "[INFO] agent/proxy: daemon exited with exit code: %d", + status.ExitStatus()) + } } } @@ -165,7 +172,12 @@ func (p *Daemon) start() (*os.Process, error) { 
copy(cmd.Env, p.Command.Env) cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvProxyToken, p.ProxyToken)) + // TODO(mitchellh): temporary until we introduce the file based logging + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + // Start it + p.Logger.Printf("[DEBUG] agent/proxy: starting proxy: %q %#v", cmd.Path, cmd.Args) err := cmd.Start() return cmd.Process, err } diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index d2ab8a106..dedc6f737 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -192,6 +192,7 @@ func (m *Manager) Run() { m.State.NotifyProxy(notifyCh) defer m.State.StopNotifyProxy(notifyCh) + m.Logger.Println("[DEBUG] agent/proxy: managed Connect proxy manager started") for { // Sync first, before waiting on further notifications so that // we can start with a known-current state. @@ -203,6 +204,7 @@ func (m *Manager) Run() { case <-stopCh: // Stop immediately, no cleanup + m.Logger.Println("[DEBUG] agent/proxy: Stopping managed Connect proxy manager") return } } @@ -298,7 +300,7 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { // Build the command to execute. 
var cmd exec.Cmd cmd.Path = command[0] - cmd.Args = command[1:] + cmd.Args = command // idx 0 is path but preserved since it should be // Build the daemon structure return &Daemon{ diff --git a/agent/structs/connect.go b/agent/structs/connect.go index 02b5ba1fa..aca9764fa 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -49,6 +49,8 @@ func (m ProxyExecMode) String() string { return "daemon" case ProxyExecModeScript: return "script" + case ProxyExecModeTest: + return "test" default: return "unknown" } From 10fe87bd4ac1361dca26c0f2098e63de49ddafb0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 11:47:57 -0700 Subject: [PATCH 215/627] agent/proxy: pull exit status extraction to constrained file --- agent/proxy/daemon.go | 16 ++++++++++------ agent/proxy/exitstatus_other.go | 10 ++++++++++ agent/proxy/exitstatus_syscall.go | 18 ++++++++++++++++++ 3 files changed, 38 insertions(+), 6 deletions(-) create mode 100644 agent/proxy/exitstatus_other.go create mode 100644 agent/proxy/exitstatus_syscall.go diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index 1d716950b..d5fd30256 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -7,7 +7,6 @@ import ( "os/exec" "reflect" "sync" - "syscall" "time" ) @@ -151,10 +150,8 @@ func (p *Daemon) keepAlive(stopCh <-chan struct{}) { process = nil if err != nil { p.Logger.Printf("[INFO] agent/proxy: daemon exited with error: %s", err) - } else if status, ok := ps.Sys().(syscall.WaitStatus); ok { - p.Logger.Printf( - "[INFO] agent/proxy: daemon exited with exit code: %d", - status.ExitStatus()) + } else if status, ok := exitStatus(ps); ok { + p.Logger.Printf("[INFO] agent/proxy: daemon exited with exit code: %d", status) } } } @@ -176,8 +173,15 @@ func (p *Daemon) start() (*os.Process, error) { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr + // Args must always contain a 0 entry which is usually the executed binary. 
+ // To be safe and a bit more robust we default this, but only to prevent + // a panic below. + if len(cmd.Args) == 0 { + cmd.Args = []string{cmd.Path} + } + // Start it - p.Logger.Printf("[DEBUG] agent/proxy: starting proxy: %q %#v", cmd.Path, cmd.Args) + p.Logger.Printf("[DEBUG] agent/proxy: starting proxy: %q %#v", cmd.Path, cmd.Args[1:]) err := cmd.Start() return cmd.Process, err } diff --git a/agent/proxy/exitstatus_other.go b/agent/proxy/exitstatus_other.go new file mode 100644 index 000000000..84dd88867 --- /dev/null +++ b/agent/proxy/exitstatus_other.go @@ -0,0 +1,10 @@ +// +build !darwin,!linux,!windows + +package proxy + +import "os" + +// exitStatus for other platforms where we don't know how to extract it. +func exitStatus(ps *os.ProcessState) (int, bool) { + return 0, false +} diff --git a/agent/proxy/exitstatus_syscall.go b/agent/proxy/exitstatus_syscall.go new file mode 100644 index 000000000..1caeda4bf --- /dev/null +++ b/agent/proxy/exitstatus_syscall.go @@ -0,0 +1,18 @@ +// +build darwin linux windows + +package proxy + +import ( + "os" + "syscall" +) + +// exitStatus for platforms with syscall.WaitStatus which are listed +// at the top of this file in the build constraints. 
+func exitStatus(ps *os.ProcessState) (int, bool) { + if status, ok := ps.Sys().(syscall.WaitStatus); ok { + return status.ExitStatus(), true + } + + return 0, false +} From 4722e3ef763ad1f6a124e378c7fb7b74e65313b5 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 11:51:47 -0700 Subject: [PATCH 216/627] agent: fix crash that could happen if proxy was nil on load --- agent/agent.go | 3 +++ agent/agent_test.go | 24 ++++++++++++++++++++++-- 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 79c9eb112..212672ea9 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2523,6 +2523,9 @@ func (a *Agent) loadProxies(conf *config.RuntimeConfig) error { if err != nil { return fmt.Errorf("failed adding proxy: %s", err) } + if proxy == nil { + continue + } if err := a.AddProxy(proxy, false); err != nil { return fmt.Errorf("failed adding proxy: %s", err) } diff --git a/agent/agent_test.go b/agent/agent_test.go index 022c25f8e..768d1c951 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -15,8 +15,6 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" - "github.com/hashicorp/consul/agent/checks" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" @@ -25,6 +23,7 @@ import ( "github.com/hashicorp/consul/types" uuid "github.com/hashicorp/go-uuid" "github.com/pascaldekloe/goe/verify" + "github.com/stretchr/testify/require" ) func externalIP() (string, error) { @@ -1669,6 +1668,27 @@ func TestAgent_loadProxies(t *testing.T) { } } +func TestAgent_loadProxies_nilProxy(t *testing.T) { + t.Parallel() + a := NewTestAgent(t.Name(), ` + service = { + id = "rabbitmq" + name = "rabbitmq" + port = 5672 + token = "abc123" + connect { + } + } + `) + defer a.Shutdown() + + services := a.State.Services() + require.Contains(t, services, "rabbitmq") + require.Equal(t, "abc123", a.State.ServiceToken("rabbitmq")) + require.NotContains(t, services, "rabbitme-proxy") + 
require.Empty(t, a.State.Proxies()) +} + func TestAgent_unloadProxies(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), ` From 16f529a13c13bee9c296e90720b49f022323e5cc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 12:25:00 -0700 Subject: [PATCH 217/627] agent/proxy: implement force kill of unresponsive proxy process --- agent/proxy/daemon.go | 47 ++++++++++++++++++++++++++++++-------- agent/proxy/daemon_test.go | 43 ++++++++++++++++++++++++++++++++++ agent/proxy/proxy_test.go | 18 +++++++++++++++ 3 files changed, 98 insertions(+), 10 deletions(-) diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index d5fd30256..a930c978b 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -38,11 +38,16 @@ type Daemon struct { // a file. Logger *log.Logger + // For tests, they can set this to change the default duration to wait + // for a graceful quit. + gracefulWait time.Duration + // process is the started process - lock sync.Mutex - stopped bool - stopCh chan struct{} - process *os.Process + lock sync.Mutex + stopped bool + stopCh chan struct{} + exitedCh chan struct{} + process *os.Process } // Start starts the daemon and keeps it running. @@ -64,17 +69,21 @@ func (p *Daemon) Start() error { // Setup our stop channel stopCh := make(chan struct{}) + exitedCh := make(chan struct{}) p.stopCh = stopCh + p.exitedCh = exitedCh // Start the loop. - go p.keepAlive(stopCh) + go p.keepAlive(stopCh, exitedCh) return nil } // keepAlive starts and keeps the configured process alive until it // is stopped via Stop. -func (p *Daemon) keepAlive(stopCh <-chan struct{}) { +func (p *Daemon) keepAlive(stopCh <-chan struct{}, exitedCh chan<- struct{}) { + defer close(exitedCh) + p.lock.Lock() process := p.process p.lock.Unlock() @@ -196,24 +205,42 @@ func (p *Daemon) start() (*os.Process, error) { // then this returns no error. 
func (p *Daemon) Stop() error { p.lock.Lock() - defer p.lock.Unlock() // If we're already stopped or never started, then no problem. if p.stopped || p.process == nil { // In the case we never even started, calling Stop makes it so // that we can't ever start in the future, either, so mark this. p.stopped = true + p.lock.Unlock() return nil } // Note that we've stopped p.stopped = true close(p.stopCh) + process := p.process + p.lock.Unlock() - err := p.process.Signal(os.Interrupt) + gracefulWait := p.gracefulWait + if gracefulWait == 0 { + gracefulWait = 5 * time.Second + } - return err - //return p.Command.Process.Kill() + // First, try a graceful stop + err := process.Signal(os.Interrupt) + if err == nil { + select { + case <-p.exitedCh: + // Success! + return nil + + case <-time.After(gracefulWait): + // Interrupt didn't work + } + } + + // Graceful didn't work, forcibly kill + return process.Kill() } // Equal implements Proxy to check for equality. diff --git a/agent/proxy/daemon_test.go b/agent/proxy/daemon_test.go index a1638b266..32acde636 100644 --- a/agent/proxy/daemon_test.go +++ b/agent/proxy/daemon_test.go @@ -6,6 +6,7 @@ import ( "os/exec" "path/filepath" "testing" + "time" "github.com/hashicorp/consul/testutil/retry" "github.com/hashicorp/go-uuid" @@ -99,6 +100,48 @@ func TestDaemonRestart(t *testing.T) { waitFile() } +func TestDaemonStop_kill(t *testing.T) { + t.Parallel() + + require := require.New(t) + td, closer := testTempDir(t) + defer closer() + + path := filepath.Join(td, "file") + + d := &Daemon{ + Command: helperProcess("stop-kill", path), + ProxyToken: "hello", + Logger: testLogger, + gracefulWait: 200 * time.Millisecond, + } + require.NoError(d.Start()) + + // Wait for the file to exist + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + + r.Fatalf("error: %s", err) + }) + + // Stop the process + require.NoError(d.Stop()) + + // State the file so that we can get the mtime + fi, err := os.Stat(path) + 
require.NoError(err) + mtime := fi.ModTime() + + // The mtime shouldn't change + time.Sleep(100 * time.Millisecond) + fi, err = os.Stat(path) + require.NoError(err) + require.Equal(mtime, fi.ModTime()) +} + func TestDaemonEqual(t *testing.T) { cases := []struct { Name string diff --git a/agent/proxy/proxy_test.go b/agent/proxy/proxy_test.go index 11994b1bf..71cfd4ebc 100644 --- a/agent/proxy/proxy_test.go +++ b/agent/proxy/proxy_test.go @@ -120,6 +120,24 @@ func TestHelperProcess(t *testing.T) { } } + case "stop-kill": + // Setup listeners so it is ignored + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + defer signal.Stop(ch) + + path := args[0] + data := []byte(os.Getenv(EnvProxyToken)) + for { + if err := ioutil.WriteFile(path, data, 0644); err != nil { + t.Fatalf("err: %s", err) + } + time.Sleep(25 * time.Millisecond) + } + + // Run forever + <-make(chan struct{}) + default: fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd) os.Exit(2) From 6a78ecea57465becc809b7b9bf6780ce8f808a96 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 13:38:24 -0700 Subject: [PATCH 218/627] agent/proxy: local state event coalescing --- agent/proxy/manager.go | 83 ++++++++++++++++++++++++++++--------- agent/proxy/manager_test.go | 20 ++++++--- 2 files changed, 79 insertions(+), 24 deletions(-) diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index dedc6f737..4e45d22a8 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -6,12 +6,26 @@ import ( "os" "os/exec" "sync" + "time" "github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-multierror" ) +const ( + // ManagerCoalescePeriod and ManagerQuiescentPeriod relate to how + // notifications in updates from the local state are colaesced to prevent + // lots of churn in the manager. + // + // When the local state updates, the manager will wait for quiescence. + // For each update, the quiscence timer is reset. 
If the coalesce period + // is reached, the manager will update proxies regardless of the frequent + // changes. Then the whole cycle resets. + ManagerCoalescePeriod = 5 * time.Second + ManagerQuiescentPeriod = 500 * time.Millisecond +) + // Manager starts, stops, snapshots, and restores managed proxies. // // The manager will not start or stop any processes until Start is called. @@ -26,11 +40,9 @@ import ( // and state updates may occur while the Manger is syncing. This is okay, // since a change notification will be queued to trigger another sync. // -// NOTE(mitchellh): Change notifications are not coalesced currently. Under -// conditions where managed proxy configurations are changing in a hot -// loop, it is possible for the manager to constantly attempt to sync. This -// is unlikely, but its also easy to introduce basic coalescing (even over -// millisecond intervals) to prevent total waste compute cycles. +// The change notifications from the local state are coalesced (see +// ManagerCoalescePeriod) so that frequent changes within the local state +// do not trigger dozens of proxy resyncs. type Manager struct { // State is the local state that is the source of truth for all // configured managed proxies. @@ -41,6 +53,13 @@ type Manager struct { // implementation type. Logger *log.Logger + // CoalescePeriod and QuiescencePeriod control the timers for coalescing + // updates from the local state. See the defaults at the top of this + // file for more documentation. These will be set to those defaults + // by NewManager. + CoalescePeriod time.Duration + QuiescentPeriod time.Duration + // lock is held while reading/writing any internal state of the manager. // cond is a condition variable on lock that is broadcasted for runState // changes. 
@@ -55,22 +74,24 @@ type Manager struct { proxies map[string]Proxy } -// defaultLogger is the defaultLogger for NewManager so there it is never nil -var defaultLogger = log.New(os.Stderr, "", log.LstdFlags) - // NewManager initializes a Manager. After initialization, the exported // fields should be configured as desired. To start the Manager, execute // Run in a goroutine. func NewManager() *Manager { var lock sync.Mutex return &Manager{ - Logger: defaultLogger, - lock: &lock, - cond: sync.NewCond(&lock), - proxies: make(map[string]Proxy), + Logger: defaultLogger, + CoalescePeriod: ManagerCoalescePeriod, + QuiescentPeriod: ManagerQuiescentPeriod, + lock: &lock, + cond: sync.NewCond(&lock), + proxies: make(map[string]Proxy), } } +// defaultLogger is the defaultLogger for NewManager so there it is never nil +var defaultLogger = log.New(os.Stderr, "", log.LstdFlags) + // managerRunState is the state of the Manager. // // This is a basic state machine with the following transitions: @@ -193,19 +214,43 @@ func (m *Manager) Run() { defer m.State.StopNotifyProxy(notifyCh) m.Logger.Println("[DEBUG] agent/proxy: managed Connect proxy manager started") +SYNC: for { // Sync first, before waiting on further notifications so that // we can start with a known-current state. m.sync() - select { - case <-notifyCh: - // Changes exit select so we can reloop and reconfigure proxies + // Note for these variables we don't use a time.Timer because both + // periods are relatively short anyways so they end up being eligible + // for GC very quickly, so overhead is not a concern. + var quiescent, quantum <-chan time.Time - case <-stopCh: - // Stop immediately, no cleanup - m.Logger.Println("[DEBUG] agent/proxy: Stopping managed Connect proxy manager") - return + // Start a loop waiting for events from the local state store. This + // loops rather than just `select` so we can coalesce many state + // updates over a period of time. 
+ for { + select { + case <-notifyCh: + // If this is our first notification since the last sync, + // reset the quantum timer which is the max time we'll wait. + if quantum == nil { + quantum = time.After(m.CoalescePeriod) + } + + // Always reset the quiescent timer + quiescent = time.After(m.QuiescentPeriod) + + case <-quantum: + continue SYNC + + case <-quiescent: + continue SYNC + + case <-stopCh: + // Stop immediately, no cleanup + m.Logger.Println("[DEBUG] agent/proxy: Stopping managed Connect proxy manager") + return + } } } } diff --git a/agent/proxy/manager_test.go b/agent/proxy/manager_test.go index 97086d491..4ee84f56a 100644 --- a/agent/proxy/manager_test.go +++ b/agent/proxy/manager_test.go @@ -17,7 +17,7 @@ func TestManagerClose_noRun(t *testing.T) { t.Parallel() // Really we're testing that it doesn't deadlock here. - m := NewManager() + m := testManager(t) require.NoError(t, m.Close()) // Close again for sanity @@ -30,7 +30,7 @@ func TestManagerRun_initialSync(t *testing.T) { t.Parallel() state := local.TestState(t) - m := NewManager() + m := testManager(t) m.State = state defer m.Kill() @@ -57,7 +57,7 @@ func TestManagerRun_syncNew(t *testing.T) { t.Parallel() state := local.TestState(t) - m := NewManager() + m := testManager(t) m.State = state defer m.Kill() @@ -99,7 +99,7 @@ func TestManagerRun_syncDelete(t *testing.T) { t.Parallel() state := local.TestState(t) - m := NewManager() + m := testManager(t) m.State = state defer m.Kill() @@ -138,7 +138,7 @@ func TestManagerRun_syncUpdate(t *testing.T) { t.Parallel() state := local.TestState(t) - m := NewManager() + m := testManager(t) m.State = state defer m.Kill() @@ -181,6 +181,16 @@ func TestManagerRun_syncUpdate(t *testing.T) { }) } +func testManager(t *testing.T) *Manager { + m := NewManager() + + // Set these periods low to speed up tests + m.CoalescePeriod = 1 * time.Millisecond + m.QuiescentPeriod = 1 * time.Millisecond + + return m +} + // testStateProxy registers a proxy with the given local 
state and the command // (expected to be from the helperProcess function call). It returns the // ID for deregistration. From bae428326ac45ae8fa3146db51db190a0c5c25ae Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 14:05:43 -0700 Subject: [PATCH 219/627] agent: use os.Executable --- agent/agent.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 212672ea9..fb4d6c4a7 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -41,7 +41,6 @@ import ( "github.com/hashicorp/memberlist" "github.com/hashicorp/raft" "github.com/hashicorp/serf/serf" - "github.com/kardianos/osext" "github.com/shirou/gopsutil/host" "golang.org/x/net/http2" ) @@ -2112,7 +2111,7 @@ func (a *Agent) RemoveProxy(proxyID string, persist bool) error { func (a *Agent) defaultProxyCommand() ([]string, error) { // Get the path to the current exectuable. This is cached once by the // library so this is effectively just a variable read. - execPath, err := osext.Executable() + execPath, err := os.Executable() if err != nil { return nil, err } From 31b09c0674110abde42f7a7ef268e2a6275c7f4c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 14:28:29 -0700 Subject: [PATCH 220/627] agent/local: remove outdated comment --- agent/local/state.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/agent/local/state.go b/agent/local/state.go index 03dbbd96c..a9c3ebade 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -573,11 +573,7 @@ func (l *State) CriticalCheckStates() map[types.CheckID]*CheckState { // assumes the proxy's NodeService is already registered via Agent.AddService // (since that has to do other book keeping). The token passed here is the ACL // token the service used to register itself so must have write on service -// record. -// -// AddProxy returns the newly added proxy, any replaced proxy, and an error. 
-// The second return value (replaced proxy) can be used to determine if -// the process needs to be updated or not. +// record. AddProxy returns the newly added proxy and an error. func (l *State) AddProxy(proxy *structs.ConnectManagedProxy, token string) (*ManagedProxy, error) { if proxy == nil { return nil, fmt.Errorf("no proxy") From 657c09133ae2413cd672e3da0975b9c20eaee929 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 14:31:03 -0700 Subject: [PATCH 221/627] agent/local: clarify the non-risk of a full buffer --- agent/local/state.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/agent/local/state.go b/agent/local/state.go index a9c3ebade..22d654af4 100644 --- a/agent/local/state.go +++ b/agent/local/state.go @@ -722,7 +722,11 @@ func (l *State) Proxies() map[string]*ManagedProxy { // NotifyProxy will register a channel to receive messages when the // configuration or set of proxies changes. This will not block on -// channel send so ensure the channel has a large enough buffer. +// channel send so ensure the channel has a buffer. Note that any buffer +// size is generally fine since actual data is not sent over the channel, +// so a dropped send due to a full buffer does not result in any loss of +// data. The fact that a buffer already contains a notification means that +// the receiver will still be notified that changes occurred. // // NOTE(mitchellh): This could be more generalized but for my use case I // only needed proxy events. 
In the future if it were to be generalized I From ed14e9edf8cac2d0e38d3763add0e1a798703038 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 08:52:36 -0700 Subject: [PATCH 222/627] agent: resolve some conflicts and fix tests --- agent/agent_endpoint.go | 6 +++--- agent/agent_endpoint_test.go | 26 +++++++++++++------------- agent/local/state_test.go | 3 ++- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index fde7ca5e2..ea81ba0b1 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1021,7 +1021,7 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http // done deeper though as it will be needed for actually managing proxy // lifecycle. command := proxy.Proxy.Command - if command == "" { + if len(command) == 0 { if execMode == "daemon" { command = s.agent.config.ConnectProxyDefaultDaemonCommand } @@ -1030,8 +1030,8 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http } } // No global defaults set either... - if command == "" { - command = "consul connect proxy" + if len(command) == 0 { + command = []string{"consul", "connect", "proxy"} } // Set defaults for anything that is still not specified but required. 
diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index e6a47cbaa..98f95e69e 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2354,7 +2354,7 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) { TargetServiceName: "test", ContentHash: "365a50cbb9a748b6", ExecMode: "daemon", - Command: nil, + Command: []string{"consul", "connect", "proxy"}, Config: map[string]interface{}{ "upstreams": []interface{}{ map[string]interface{}{ @@ -2372,7 +2372,7 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) { ur, err := copystructure.Copy(expectedResponse) require.NoError(t, err) updatedResponse := ur.(*api.ConnectProxyConfig) - updatedResponse.ContentHash = "b5bb0e4a0a58ca25" + updatedResponse.ContentHash = "538d0366b7b1dc3e" upstreams := updatedResponse.Config["upstreams"].([]interface{}) upstreams = append(upstreams, map[string]interface{}{ @@ -2538,7 +2538,7 @@ func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { globalConfig string proxy structs.ServiceDefinitionConnectProxy wantMode api.ProxyExecMode - wantCommand string + wantCommand []string wantConfig map[string]interface{} }{ { @@ -2555,7 +2555,7 @@ func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { `, proxy: structs.ServiceDefinitionConnectProxy{}, wantMode: api.ProxyExecModeDaemon, - wantCommand: "consul connect proxy", + wantCommand: []string{"consul", "connect", "proxy"}, wantConfig: map[string]interface{}{ "bind_address": "0.0.0.0", "bind_port": 10000, // "randomly" chosen from our range of 1 @@ -2572,13 +2572,13 @@ func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { bind_min_port = 10000 bind_max_port = 10000 exec_mode = "script" - script_command = "script.sh" + script_command = ["script.sh"] } } `, proxy: structs.ServiceDefinitionConnectProxy{}, wantMode: api.ProxyExecModeScript, - wantCommand: "script.sh", + wantCommand: []string{"script.sh"}, wantConfig: map[string]interface{}{ "bind_address": "0.0.0.0", 
"bind_port": 10000, // "randomly" chosen from our range of 1 @@ -2595,13 +2595,13 @@ func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { bind_min_port = 10000 bind_max_port = 10000 exec_mode = "daemon" - daemon_command = "daemon.sh" + daemon_command = ["daemon.sh"] } } `, proxy: structs.ServiceDefinitionConnectProxy{}, wantMode: api.ProxyExecModeDaemon, - wantCommand: "daemon.sh", + wantCommand: []string{"daemon.sh"}, wantConfig: map[string]interface{}{ "bind_address": "0.0.0.0", "bind_port": 10000, // "randomly" chosen from our range of 1 @@ -2629,7 +2629,7 @@ func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { }, }, wantMode: api.ProxyExecModeDaemon, - wantCommand: "consul connect proxy", + wantCommand: []string{"consul", "connect", "proxy"}, wantConfig: map[string]interface{}{ "bind_address": "0.0.0.0", "bind_port": 10000, // "randomly" chosen from our range of 1 @@ -2648,8 +2648,8 @@ func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { bind_min_port = 10000 bind_max_port = 10000 exec_mode = "daemon" - daemon_command = "daemon.sh" - script_command = "script.sh" + daemon_command = ["daemon.sh"] + script_command = ["script.sh"] config = { connect_timeout_ms = 1000 } @@ -2658,7 +2658,7 @@ func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { `, proxy: structs.ServiceDefinitionConnectProxy{ ExecMode: "script", - Command: "foo.sh", + Command: []string{"foo.sh"}, Config: map[string]interface{}{ "connect_timeout_ms": 2000, "bind_address": "127.0.0.1", @@ -2667,7 +2667,7 @@ func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { }, }, wantMode: api.ProxyExecModeScript, - wantCommand: "foo.sh", + wantCommand: []string{"foo.sh"}, wantConfig: map[string]interface{}{ "bind_address": "127.0.0.1", "bind_port": float64(1024), diff --git a/agent/local/state_test.go b/agent/local/state_test.go index 800c017d6..3f416fbb3 100644 --- a/agent/local/state_test.go +++ b/agent/local/state_test.go @@ -1725,8 +1725,9 @@ func 
TestStateProxyManagement(t *testing.T) { { // Re-registering same proxy again should not pick a random port but re-use // the assigned one. - svcDup, err := state.AddProxy(&p1, "fake-token") + pstateDup, err := state.AddProxy(&p1, "fake-token") require.NoError(err) + svcDup := pstateDup.Proxy.ProxyService assert.Equal("web-proxy", svcDup.ID) assert.Equal("web-proxy", svcDup.Service) From 917db9770da409e1e6c708c2dfe51bdba1c7c31d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 09:24:20 -0700 Subject: [PATCH 223/627] Remove temporary hacks from Makefile --- GNUmakefile | 3 --- 1 file changed, 3 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 660a82725..40366e317 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -40,9 +40,6 @@ bin: tools dev: changelogfmt vendorfmt dev-build dev-build: - @echo "--> TEMPORARY HACK: installing hashstructure to make CI pass until we vendor it upstream" - go get github.com/mitchellh/hashstructure - go get github.com/stretchr/testify/mock @echo "--> Building consul" mkdir -p pkg/$(GOOS)_$(GOARCH)/ bin/ go install -ldflags '$(GOLDFLAGS)' -tags '$(GOTAGS)' From 52665f7d2366de632894c5b56e4e1ba825fadd91 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 10:44:10 -0700 Subject: [PATCH 224/627] agent: clean up defaulting of proxy configuration This cleans up and unifies how proxy settings defaults are applied. 
--- agent/agent.go | 86 ++++++++++++++++++++--------- agent/agent_endpoint.go | 31 +---------- agent/agent_endpoint_test.go | 15 +++-- agent/agent_test.go | 3 + agent/proxy/manager.go | 3 - agent/structs/connect.go | 19 ++++++- agent/structs/service_definition.go | 18 +----- 7 files changed, 93 insertions(+), 82 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index fb4d6c4a7..6f77547e6 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2051,20 +2051,11 @@ func (a *Agent) AddProxy(proxy *structs.ConnectManagedProxy, persist bool) error // Lookup the target service token in state if there is one. token := a.State.ServiceToken(proxy.TargetServiceID) - // Determine if we need to default the command - if proxy.ExecMode == structs.ProxyExecModeDaemon && len(proxy.Command) == 0 { - // We use the globally configured default command. If it is empty - // then we need to determine the subcommand for this agent. - cmd := a.config.ConnectProxyDefaultDaemonCommand - if len(cmd) == 0 { - var err error - cmd, err = a.defaultProxyCommand() - if err != nil { - return err - } - } - - proxy.CommandDefault = cmd + // Copy the basic proxy structure so it isn't modified w/ defaults + proxyCopy := *proxy + proxy = &proxyCopy + if err := a.applyProxyDefaults(proxy); err != nil { + return err } // Add the proxy to local state first since we may need to assign a port which @@ -2090,6 +2081,47 @@ func (a *Agent) AddProxy(proxy *structs.ConnectManagedProxy, persist bool) error return nil } +// applyProxyDefaults modifies the given proxy by applying any configured +// defaults, such as the default execution mode, command, etc. 
+func (a *Agent) applyProxyDefaults(proxy *structs.ConnectManagedProxy) error { + // Set the default exec mode + if proxy.ExecMode == structs.ProxyExecModeUnspecified { + mode, err := structs.NewProxyExecMode(a.config.ConnectProxyDefaultExecMode) + if err != nil { + return err + } + + proxy.ExecMode = mode + } + if proxy.ExecMode == structs.ProxyExecModeUnspecified { + proxy.ExecMode = structs.ProxyExecModeDaemon + } + + // Set the default command to the globally configured default + if len(proxy.Command) == 0 { + switch proxy.ExecMode { + case structs.ProxyExecModeDaemon: + proxy.Command = a.config.ConnectProxyDefaultDaemonCommand + + case structs.ProxyExecModeScript: + proxy.Command = a.config.ConnectProxyDefaultScriptCommand + } + } + + // If there is no globally configured default we need to get the + // default command so we can do "consul connect proxy" + if len(proxy.Command) == 0 { + command, err := defaultProxyCommand() + if err != nil { + return err + } + + proxy.Command = command + } + + return nil +} + // RemoveProxy stops and removes a local proxy instance. func (a *Agent) RemoveProxy(proxyID string, persist bool) error { // Validate proxyID @@ -2107,19 +2139,6 @@ func (a *Agent) RemoveProxy(proxyID string, persist bool) error { return nil } -// defaultProxyCommand returns the default Connect managed proxy command. -func (a *Agent) defaultProxyCommand() ([]string, error) { - // Get the path to the current exectuable. This is cached once by the - // library so this is effectively just a variable read. 
- execPath, err := os.Executable() - if err != nil { - return nil, err - } - - // "consul connect proxy" default value for managed daemon proxy - return []string{execPath, "connect", "proxy"}, nil -} - func (a *Agent) cancelCheckMonitors(checkID types.CheckID) { // Stop any monitors delete(a.checkReapAfter, checkID) @@ -2751,3 +2770,16 @@ func (a *Agent) registerCache() { RefreshTimeout: 10 * time.Minute, }) } + +// defaultProxyCommand returns the default Connect managed proxy command. +func defaultProxyCommand() ([]string, error) { + // Get the path to the current exectuable. This is cached once by the + // library so this is effectively just a variable read. + execPath, err := os.Executable() + if err != nil { + return nil, err + } + + // "consul connect proxy" default value for managed daemon proxy + return []string{execPath, "connect", "proxy"}, nil +} diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index ea81ba0b1..8f080ea7a 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1007,33 +1007,6 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http } } - execMode := "daemon" - // If there is a global default mode use that instead - if s.agent.config.ConnectProxyDefaultExecMode != "" { - execMode = s.agent.config.ConnectProxyDefaultExecMode - } - // If it's actually set though, use the one set - if proxy.Proxy.ExecMode != structs.ProxyExecModeUnspecified { - execMode = proxy.Proxy.ExecMode.String() - } - - // TODO(banks): default the binary to current binary. Probably needs to be - // done deeper though as it will be needed for actually managing proxy - // lifecycle. - command := proxy.Proxy.Command - if len(command) == 0 { - if execMode == "daemon" { - command = s.agent.config.ConnectProxyDefaultDaemonCommand - } - if execMode == "script" { - command = s.agent.config.ConnectProxyDefaultScriptCommand - } - } - // No global defaults set either... 
- if len(command) == 0 { - command = []string{"consul", "connect", "proxy"} - } - // Set defaults for anything that is still not specified but required. // Note that these are not included in the content hash. Since we expect // them to be static in general but some like the default target service @@ -1061,8 +1034,8 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http TargetServiceID: target.ID, TargetServiceName: target.Service, ContentHash: contentHash, - ExecMode: api.ProxyExecMode(execMode), - Command: command, + ExecMode: api.ProxyExecMode(proxy.Proxy.ExecMode.String()), + Command: proxy.Proxy.Command, Config: config, } return contentHash, reply, nil diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 98f95e69e..fa01eab89 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2334,6 +2334,7 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) { }, Connect: &structs.ServiceDefinitionConnect{ Proxy: &structs.ServiceDefinitionConnectProxy{ + Command: []string{"tubes.sh"}, Config: map[string]interface{}{ "bind_port": 1234, "connect_timeout_ms": 500, @@ -2352,9 +2353,9 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) { ProxyServiceID: "test-proxy", TargetServiceID: "test", TargetServiceName: "test", - ContentHash: "365a50cbb9a748b6", + ContentHash: "4662e51e78609569", ExecMode: "daemon", - Command: []string{"consul", "connect", "proxy"}, + Command: []string{"tubes.sh"}, Config: map[string]interface{}{ "upstreams": []interface{}{ map[string]interface{}{ @@ -2372,7 +2373,7 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) { ur, err := copystructure.Copy(expectedResponse) require.NoError(t, err) updatedResponse := ur.(*api.ConnectProxyConfig) - updatedResponse.ContentHash = "538d0366b7b1dc3e" + updatedResponse.ContentHash = "23b5b6b3767601e1" upstreams := updatedResponse.Config["upstreams"].([]interface{}) upstreams = append(upstreams, map[string]interface{}{ 
@@ -2519,6 +2520,10 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) { func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { t.Parallel() + // Get the default command to compare below + defaultCommand, err := defaultProxyCommand() + require.NoError(t, err) + // Define a local service with a managed proxy. It's registered in the test // loop to make sure agent state is predictable whatever order tests execute // since some alter this service config. @@ -2555,7 +2560,7 @@ func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { `, proxy: structs.ServiceDefinitionConnectProxy{}, wantMode: api.ProxyExecModeDaemon, - wantCommand: []string{"consul", "connect", "proxy"}, + wantCommand: defaultCommand, wantConfig: map[string]interface{}{ "bind_address": "0.0.0.0", "bind_port": 10000, // "randomly" chosen from our range of 1 @@ -2629,7 +2634,7 @@ func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { }, }, wantMode: api.ProxyExecModeDaemon, - wantCommand: []string{"consul", "connect", "proxy"}, + wantCommand: defaultCommand, wantConfig: map[string]interface{}{ "bind_address": "0.0.0.0", "bind_port": 10000, // "randomly" chosen from our range of 1 diff --git a/agent/agent_test.go b/agent/agent_test.go index 768d1c951..d0fcbff5e 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -2389,6 +2389,7 @@ func TestAgent_AddProxy(t *testing.T) { // Test the ID was created as we expect. got := a.State.Proxy("web-proxy") + tt.proxy.ProxyService = got.Proxy.ProxyService require.Equal(tt.proxy, got.Proxy) }) } @@ -2412,12 +2413,14 @@ func TestAgent_RemoveProxy(t *testing.T) { // Add a proxy for web pReg := &structs.ConnectManagedProxy{ TargetServiceID: "web", + ExecMode: structs.ProxyExecModeDaemon, Command: []string{"foo"}, } require.NoError(a.AddProxy(pReg, false)) // Test the ID was created as we expect. 
gotProxy := a.State.Proxy("web-proxy") + gotProxy.Proxy.ProxyService = nil require.Equal(pReg, gotProxy.Proxy) err := a.RemoveProxy("web-proxy", false) diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index 4e45d22a8..4b9971d4c 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -332,9 +332,6 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { switch p.ExecMode { case structs.ProxyExecModeDaemon: command := p.Command - if len(command) == 0 { - command = p.CommandDefault - } // This should never happen since validation should happen upstream // but verify it because the alternative is to panic below. diff --git a/agent/structs/connect.go b/agent/structs/connect.go index aca9764fa..e08d01646 100644 --- a/agent/structs/connect.go +++ b/agent/structs/connect.go @@ -1,6 +1,8 @@ package structs import ( + "fmt" + "github.com/mitchellh/mapstructure" ) @@ -40,6 +42,20 @@ const ( ProxyExecModeTest ) +// NewProxyExecMode returns the proper ProxyExecMode for the given string value. +func NewProxyExecMode(raw string) (ProxyExecMode, error) { + switch raw { + case "": + return ProxyExecModeUnspecified, nil + case "daemon": + return ProxyExecModeDaemon, nil + case "script": + return ProxyExecModeScript, nil + default: + return 0, fmt.Errorf("invalid exec mode: %s", raw) + } +} + // String implements Stringer func (m ProxyExecMode) String() string { switch m { @@ -73,9 +89,6 @@ type ConnectManagedProxy struct { // for ProxyExecModeScript. Command []string - // CommandDefault is the default command to execute if Command is empty. - CommandDefault []string `json:"-" hash:"ignore"` - // Config is the arbitrary configuration data provided with the registration. 
Config map[string]interface{} diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index 7163b5549..be69a7b57 100644 --- a/agent/structs/service_definition.go +++ b/agent/structs/service_definition.go @@ -1,9 +1,5 @@ package structs -import ( - "fmt" -) - // ServiceDefinition is used to JSON decode the Service definitions. For // documentation on specific fields see NodeService which is better documented. type ServiceDefinition struct { @@ -55,17 +51,9 @@ func (s *ServiceDefinition) ConnectManagedProxy() (*ConnectManagedProxy, error) // which we shouldn't hard code ourselves here... ns := s.NodeService() - execMode := ProxyExecModeUnspecified - switch s.Connect.Proxy.ExecMode { - case "": - // Use default - break - case "daemon": - execMode = ProxyExecModeDaemon - case "script": - execMode = ProxyExecModeScript - default: - return nil, fmt.Errorf("invalid exec mode: %s", s.Connect.Proxy.ExecMode) + execMode, err := NewProxyExecMode(s.Connect.Proxy.ExecMode) + if err != nil { + return nil, err } p := &ConnectManagedProxy{ From 515c47be7d82cdbaecf08ae98d016a7d5c815568 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 12:51:47 -0700 Subject: [PATCH 225/627] agent: add additional tests for defaulting in AddProxy --- agent/agent_test.go | 61 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 56 insertions(+), 5 deletions(-) diff --git a/agent/agent_test.go b/agent/agent_test.go index d0fcbff5e..6219b70da 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -2334,6 +2334,14 @@ func TestAgent_AddProxy(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), ` node_name = "node1" + + connect { + proxy_defaults { + exec_mode = "script" + daemon_command = ["foo", "bar"] + script_command = ["bar", "foo"] + } + } `) defer a.Shutdown() @@ -2345,9 +2353,9 @@ func TestAgent_AddProxy(t *testing.T) { require.NoError(t, a.AddService(reg, nil, false, "")) tests := []struct { - desc string - proxy 
*structs.ConnectManagedProxy - wantErr bool + desc string + proxy, wantProxy *structs.ConnectManagedProxy + wantErr bool }{ { desc: "basic proxy adding, unregistered service", @@ -2374,6 +2382,45 @@ func TestAgent_AddProxy(t *testing.T) { }, wantErr: false, }, + { + desc: "default global exec mode", + proxy: &structs.ConnectManagedProxy{ + Command: []string{"consul", "connect", "proxy"}, + TargetServiceID: "web", + }, + wantProxy: &structs.ConnectManagedProxy{ + ExecMode: structs.ProxyExecModeScript, + Command: []string{"consul", "connect", "proxy"}, + TargetServiceID: "web", + }, + wantErr: false, + }, + { + desc: "default daemon command", + proxy: &structs.ConnectManagedProxy{ + ExecMode: structs.ProxyExecModeDaemon, + TargetServiceID: "web", + }, + wantProxy: &structs.ConnectManagedProxy{ + ExecMode: structs.ProxyExecModeDaemon, + Command: []string{"foo", "bar"}, + TargetServiceID: "web", + }, + wantErr: false, + }, + { + desc: "default script command", + proxy: &structs.ConnectManagedProxy{ + ExecMode: structs.ProxyExecModeScript, + TargetServiceID: "web", + }, + wantProxy: &structs.ConnectManagedProxy{ + ExecMode: structs.ProxyExecModeScript, + Command: []string{"bar", "foo"}, + TargetServiceID: "web", + }, + wantErr: false, + }, } for _, tt := range tests { @@ -2389,8 +2436,12 @@ func TestAgent_AddProxy(t *testing.T) { // Test the ID was created as we expect. 
got := a.State.Proxy("web-proxy") - tt.proxy.ProxyService = got.Proxy.ProxyService - require.Equal(tt.proxy, got.Proxy) + wantProxy := tt.wantProxy + if wantProxy == nil { + wantProxy = tt.proxy + } + wantProxy.ProxyService = got.Proxy.ProxyService + require.Equal(wantProxy, got.Proxy) }) } } From 49bc7181a4b94997e97a0a0821184d95604a1083 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 20:11:58 -0700 Subject: [PATCH 226/627] agent/proxy: send logs to the correct location for daemon proxies --- agent/agent.go | 1 + agent/proxy/daemon.go | 4 --- agent/proxy/manager.go | 56 ++++++++++++++++++++++++++++++ agent/proxy/manager_test.go | 69 +++++++++++++++++++++++++++++++++---- agent/proxy/proxy_test.go | 5 +++ 5 files changed, 124 insertions(+), 11 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 6f77547e6..128a40a4a 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -362,6 +362,7 @@ func (a *Agent) Start() error { a.proxyManager = proxy.NewManager() a.proxyManager.State = a.State a.proxyManager.Logger = a.logger + a.proxyManager.LogDir = filepath.Join(a.config.DataDir, "proxy", "logs") go a.proxyManager.Run() // Start watching for critical services to deregister, based on their diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index a930c978b..d6b68ad3a 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -178,10 +178,6 @@ func (p *Daemon) start() (*os.Process, error) { copy(cmd.Env, p.Command.Env) cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvProxyToken, p.ProxyToken)) - // TODO(mitchellh): temporary until we introduce the file based logging - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - // Args must always contain a 0 entry which is usually the executed binary. // To be safe and a bit more robust we default this, but only to prevent // a panic below. 
diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index 4b9971d4c..01c0b142c 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -5,6 +5,7 @@ import ( "log" "os" "os/exec" + "path/filepath" "sync" "time" @@ -53,6 +54,12 @@ type Manager struct { // implementation type. Logger *log.Logger + // LogDir is the path to the directory where logs will be written + // for daemon mode proxies. This directory will be created if it does + // not exist. If this is empty then logs will be dumped into the + // working directory. + LogDir string + // CoalescePeriod and QuiescencePeriod control the timers for coalescing // updates from the local state. See the defaults at the top of this // file for more documentation. These will be set to those defaults @@ -328,6 +335,13 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { return nil, fmt.Errorf("internal error: nil *local.ManagedProxy or Proxy field") } + // Attempt to create the log directory now that we have a proxy + if m.LogDir != "" { + if err := os.MkdirAll(m.LogDir, 0700); err != nil { + m.Logger.Printf("[ERROR] agent/proxy: failed to create log directory: %s", err) + } + } + p := mp.Proxy switch p.ExecMode { case structs.ProxyExecModeDaemon: @@ -343,6 +357,9 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { var cmd exec.Cmd cmd.Path = command[0] cmd.Args = command // idx 0 is path but preserved since it should be + if err := m.configureLogDir(p.ProxyService.ID, &cmd); err != nil { + return nil, fmt.Errorf("error configuring proxy logs: %s", err) + } // Build the daemon structure return &Daemon{ @@ -355,3 +372,42 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { return nil, fmt.Errorf("unsupported managed proxy type: %q", p.ExecMode) } } + +// configureLogDir sets up the file descriptors to stdout/stderr so that +// they log to the proper file path for the given service ID. 
+func (m *Manager) configureLogDir(id string, cmd *exec.Cmd) error { + // Create the log directory + if m.LogDir != "" { + if err := os.MkdirAll(m.LogDir, 0700); err != nil { + return err + } + } + + // Configure the stdout, stderr paths + stdoutPath := logPath(m.LogDir, id, "stdout") + stderrPath := logPath(m.LogDir, id, "stderr") + + // Open the files. We want to append to each. We expect these files + // to be rotated by some external process. + stdoutF, err := os.OpenFile(stdoutPath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return fmt.Errorf("error creating stdout file: %s", err) + } + stderrF, err := os.OpenFile(stderrPath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + // Don't forget to close stdoutF which successfully opened + stdoutF.Close() + + return fmt.Errorf("error creating stderr file: %s", err) + } + + cmd.Stdout = stdoutF + cmd.Stderr = stderrF + return nil +} + +// logPath is a helper to return the path to the log file for the given +// directory, service ID, and stream type (stdout or stderr). +func logPath(dir, id, stream string) string { + return filepath.Join(dir, fmt.Sprintf("%s-%s.log", id, stream)) +} diff --git a/agent/proxy/manager_test.go b/agent/proxy/manager_test.go index 4ee84f56a..0b5640241 100644 --- a/agent/proxy/manager_test.go +++ b/agent/proxy/manager_test.go @@ -1,6 +1,7 @@ package proxy import ( + "io/ioutil" "os" "os/exec" "path/filepath" @@ -17,7 +18,8 @@ func TestManagerClose_noRun(t *testing.T) { t.Parallel() // Really we're testing that it doesn't deadlock here. 
- m := testManager(t) + m, closer := testManager(t) + defer closer() require.NoError(t, m.Close()) // Close again for sanity @@ -30,7 +32,8 @@ func TestManagerRun_initialSync(t *testing.T) { t.Parallel() state := local.TestState(t) - m := testManager(t) + m, closer := testManager(t) + defer closer() m.State = state defer m.Kill() @@ -57,7 +60,8 @@ func TestManagerRun_syncNew(t *testing.T) { t.Parallel() state := local.TestState(t) - m := testManager(t) + m, closer := testManager(t) + defer closer() m.State = state defer m.Kill() @@ -99,7 +103,8 @@ func TestManagerRun_syncDelete(t *testing.T) { t.Parallel() state := local.TestState(t) - m := testManager(t) + m, closer := testManager(t) + defer closer() m.State = state defer m.Kill() @@ -138,7 +143,8 @@ func TestManagerRun_syncUpdate(t *testing.T) { t.Parallel() state := local.TestState(t) - m := testManager(t) + m, closer := testManager(t) + defer closer() m.State = state defer m.Kill() @@ -181,14 +187,63 @@ func TestManagerRun_syncUpdate(t *testing.T) { }) } -func testManager(t *testing.T) *Manager { +func TestManagerRun_daemonLogs(t *testing.T) { + t.Parallel() + + require := require.New(t) + state := local.TestState(t) + m, closer := testManager(t) + defer closer() + m.State = state + defer m.Kill() + + // Configure a log dir so that we can read the logs + td, closer := testTempDir(t) + defer closer() + m.LogDir = filepath.Join(td, "logs") + + // Create the service and calculate the log paths + id := testStateProxy(t, state, "web", helperProcess("output")) + stdoutPath := logPath(m.LogDir, id, "stdout") + stderrPath := logPath(m.LogDir, id, "stderr") + + // Start the manager + go m.Run() + + // We should see the path appear shortly + retry.Run(t, func(r *retry.R) { + if _, err := os.Stat(stdoutPath); err != nil { + r.Fatalf("error waiting for stdout path: %s", err) + } + + if _, err := os.Stat(stderrPath); err != nil { + r.Fatalf("error waiting for stderr path: %s", err) + } + }) + + expectedOut := "hello 
stdout\n" + actual, err := ioutil.ReadFile(stdoutPath) + require.NoError(err) + require.Equal([]byte(expectedOut), actual) + + expectedErr := "hello stderr\n" + actual, err = ioutil.ReadFile(stderrPath) + require.NoError(err) + require.Equal([]byte(expectedErr), actual) +} + +func testManager(t *testing.T) (*Manager, func()) { m := NewManager() // Set these periods low to speed up tests m.CoalescePeriod = 1 * time.Millisecond m.QuiescentPeriod = 1 * time.Millisecond - return m + // Setup a temporary directory for logs + td, closer := testTempDir(t) + m.LogDir = td + + return m, func() { closer() } } // testStateProxy registers a proxy with the given local state and the command diff --git a/agent/proxy/proxy_test.go b/agent/proxy/proxy_test.go index 71cfd4ebc..d79ea5bc8 100644 --- a/agent/proxy/proxy_test.go +++ b/agent/proxy/proxy_test.go @@ -138,6 +138,11 @@ func TestHelperProcess(t *testing.T) { // Run forever <-make(chan struct{}) + case "output": + fmt.Fprintf(os.Stdout, "hello stdout\n") + fmt.Fprintf(os.Stderr, "hello stderr\n") + <-make(chan struct{}) + default: fmt.Fprintf(os.Stderr, "Unknown command: %q\n", cmd) os.Exit(2) From d019d33bc61ce4f6e749f5b3ebc08f9da7143ea0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 20:13:40 -0700 Subject: [PATCH 227/627] agent/proxy: don't create the directory in newProxy --- agent/proxy/manager.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index 01c0b142c..bc2a8e728 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -335,13 +335,6 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { return nil, fmt.Errorf("internal error: nil *local.ManagedProxy or Proxy field") } - // Attempt to create the log directory now that we have a proxy - if m.LogDir != "" { - if err := os.MkdirAll(m.LogDir, 0700); err != nil { - m.Logger.Printf("[ERROR] agent/proxy: failed to create log directory: %s", err) - } - } - p := 
mp.Proxy switch p.ExecMode { case structs.ProxyExecModeDaemon: From e2133fd39140a622dfabf0bb2045a077350b0175 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 20:16:29 -0700 Subject: [PATCH 228/627] agent/proxy: make the logs test a bit more robust by waiting for file --- agent/proxy/manager_test.go | 9 +++------ agent/proxy/proxy_test.go | 12 ++++++++++++ 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/agent/proxy/manager_test.go b/agent/proxy/manager_test.go index 0b5640241..3bf200805 100644 --- a/agent/proxy/manager_test.go +++ b/agent/proxy/manager_test.go @@ -203,7 +203,8 @@ func TestManagerRun_daemonLogs(t *testing.T) { m.LogDir = filepath.Join(td, "logs") // Create the service and calculate the log paths - id := testStateProxy(t, state, "web", helperProcess("output")) + path := filepath.Join(td, "notify") + id := testStateProxy(t, state, "web", helperProcess("output", path)) stdoutPath := logPath(m.LogDir, id, "stdout") stderrPath := logPath(m.LogDir, id, "stderr") @@ -212,13 +213,9 @@ func TestManagerRun_daemonLogs(t *testing.T) { // We should see the path appear shortly retry.Run(t, func(r *retry.R) { - if _, err := os.Stat(stdoutPath); err != nil { + if _, err := os.Stat(path); err != nil { r.Fatalf("error waiting for stdout path: %s", err) } - - if _, err := os.Stat(stderrPath); err != nil { - r.Fatalf("error waiting for stderr path: %s", err) - } }) expectedOut := "hello stdout\n" diff --git a/agent/proxy/proxy_test.go b/agent/proxy/proxy_test.go index d79ea5bc8..d0812fc07 100644 --- a/agent/proxy/proxy_test.go +++ b/agent/proxy/proxy_test.go @@ -141,6 +141,18 @@ func TestHelperProcess(t *testing.T) { case "output": fmt.Fprintf(os.Stdout, "hello stdout\n") fmt.Fprintf(os.Stderr, "hello stderr\n") + + // Sync to be sure it is written out of buffers + os.Stdout.Sync() + os.Stderr.Sync() + + // Output a file to signal we've written to stdout/err + path := args[0] + if err := ioutil.WriteFile(path, []byte("hello"), 
0644); err != nil { + fmt.Fprintf(os.Stderr, "Error: %s\n", err) + os.Exit(1) + } + <-make(chan struct{}) default: From 09093a1a1aaa15893b71767b689989ee3613457f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 2 May 2018 20:34:23 -0700 Subject: [PATCH 229/627] agent/proxy: change LogDir to DataDir to reuse for other things --- agent/proxy/manager.go | 28 +++++++++++++++++++--------- agent/proxy/manager_test.go | 12 +++++------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index bc2a8e728..7ed278037 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -54,11 +54,19 @@ type Manager struct { // implementation type. Logger *log.Logger - // LogDir is the path to the directory where logs will be written - // for daemon mode proxies. This directory will be created if it does - // not exist. If this is empty then logs will be dumped into the - // working directory. - LogDir string + // DataDir is the path to the directory where data for proxies is + // written, including snapshots for any state changes in the manager. + // Within the data dir, files will be written in the following locatins: + // + // * logs/ - log files named -std{out|err}.log + // * pids/ - pid files for daemons named .pid + // * state.ext - the state of the manager + // + DataDir string + + // SnapshotDir is the path to the directory where snapshots will + // be written + SnapshotDir string // CoalescePeriod and QuiescencePeriod control the timers for coalescing // updates from the local state. See the defaults at the top of this @@ -370,15 +378,17 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { // they log to the proper file path for the given service ID. 
func (m *Manager) configureLogDir(id string, cmd *exec.Cmd) error { // Create the log directory - if m.LogDir != "" { - if err := os.MkdirAll(m.LogDir, 0700); err != nil { + logDir := "" + if m.DataDir != "" { + logDir = filepath.Join(m.DataDir, "logs") + if err := os.MkdirAll(logDir, 0700); err != nil { return err } } // Configure the stdout, stderr paths - stdoutPath := logPath(m.LogDir, id, "stdout") - stderrPath := logPath(m.LogDir, id, "stderr") + stdoutPath := logPath(logDir, id, "stdout") + stderrPath := logPath(logDir, id, "stderr") // Open the files. We want to append to each. We expect these files // to be rotated by some external process. diff --git a/agent/proxy/manager_test.go b/agent/proxy/manager_test.go index 3bf200805..d14b71afa 100644 --- a/agent/proxy/manager_test.go +++ b/agent/proxy/manager_test.go @@ -198,15 +198,13 @@ func TestManagerRun_daemonLogs(t *testing.T) { defer m.Kill() // Configure a log dir so that we can read the logs - td, closer := testTempDir(t) - defer closer() - m.LogDir = filepath.Join(td, "logs") + logDir := filepath.Join(m.DataDir, "logs") // Create the service and calculate the log paths - path := filepath.Join(td, "notify") + path := filepath.Join(m.DataDir, "notify") id := testStateProxy(t, state, "web", helperProcess("output", path)) - stdoutPath := logPath(m.LogDir, id, "stdout") - stderrPath := logPath(m.LogDir, id, "stderr") + stdoutPath := logPath(logDir, id, "stdout") + stderrPath := logPath(logDir, id, "stderr") // Start the manager go m.Run() @@ -238,7 +236,7 @@ func testManager(t *testing.T) (*Manager, func()) { // Setup a temporary directory for logs td, closer := testTempDir(t) - m.LogDir = td + m.DataDir = td return m, func() { closer() } } From 5e0f0ba1785f1a67ccea7d010785180a266fda13 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 13:56:42 -0700 Subject: [PATCH 230/627] agent/proxy: write pid file whenever the daemon process changes --- agent/agent.go | 44 ++------------------ 
agent/proxy/daemon.go | 26 +++++++++++- agent/proxy/daemon_test.go | 85 ++++++++++++++++++++++++++++++++++++++ lib/file/atomic.go | 46 +++++++++++++++++++++ 4 files changed, 159 insertions(+), 42 deletions(-) create mode 100644 lib/file/atomic.go diff --git a/agent/agent.go b/agent/agent.go index 128a40a4a..2c18c08d3 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -34,6 +34,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/ipaddr" "github.com/hashicorp/consul/lib" + "github.com/hashicorp/consul/lib/file" "github.com/hashicorp/consul/logger" "github.com/hashicorp/consul/types" "github.com/hashicorp/consul/watch" @@ -362,7 +363,7 @@ func (a *Agent) Start() error { a.proxyManager = proxy.NewManager() a.proxyManager.State = a.State a.proxyManager.Logger = a.logger - a.proxyManager.LogDir = filepath.Join(a.config.DataDir, "proxy", "logs") + a.proxyManager.DataDir = filepath.Join(a.config.DataDir, "proxy") go a.proxyManager.Run() // Start watching for critical services to deregister, based on their @@ -1557,7 +1558,7 @@ func (a *Agent) persistService(service *structs.NodeService) error { return err } - return writeFileAtomic(svcPath, encoded) + return file.WriteAtomic(svcPath, encoded) } // purgeService removes a persisted service definition file from the data dir @@ -1585,7 +1586,7 @@ func (a *Agent) persistCheck(check *structs.HealthCheck, chkType *structs.CheckT return err } - return writeFileAtomic(checkPath, encoded) + return file.WriteAtomic(checkPath, encoded) } // purgeCheck removes a persisted check definition file from the data dir @@ -1597,43 +1598,6 @@ func (a *Agent) purgeCheck(checkID types.CheckID) error { return nil } -// writeFileAtomic writes the given contents to a temporary file in the same -// directory, does an fsync and then renames the file to its real path -func writeFileAtomic(path string, contents []byte) error { - uuid, err := uuid.GenerateUUID() - if err != nil { - return err - } - tempPath := 
fmt.Sprintf("%s-%s.tmp", path, uuid) - - if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { - return err - } - fh, err := os.OpenFile(tempPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) - if err != nil { - return err - } - if _, err := fh.Write(contents); err != nil { - fh.Close() - os.Remove(tempPath) - return err - } - if err := fh.Sync(); err != nil { - fh.Close() - os.Remove(tempPath) - return err - } - if err := fh.Close(); err != nil { - os.Remove(tempPath) - return err - } - if err := os.Rename(tempPath, path); err != nil { - os.Remove(tempPath) - return err - } - return nil -} - // AddService is used to add a service entry. // This entry is persistent and the agent will make a best effort to // ensure it is registered diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index d6b68ad3a..e3b376c05 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -6,8 +6,11 @@ import ( "os" "os/exec" "reflect" + "strconv" "sync" "time" + + "github.com/hashicorp/consul/lib/file" ) // Constants related to restart timers with the daemon mode proxies. At some @@ -38,6 +41,12 @@ type Daemon struct { // a file. Logger *log.Logger + // PidPath is the path where a pid file will be created storing the + // pid of the active process. If this is empty then a pid-file won't + // be created. Under erroneous conditions, the pid file may not be + // created but the error will be logged to the Logger. + PidPath string + // For tests, they can set this to change the default duration to wait // for a graceful quit. gracefulWait time.Duration @@ -187,8 +196,21 @@ func (p *Daemon) start() (*os.Process, error) { // Start it p.Logger.Printf("[DEBUG] agent/proxy: starting proxy: %q %#v", cmd.Path, cmd.Args[1:]) - err := cmd.Start() - return cmd.Process, err + if err := cmd.Start(); err != nil { + return nil, err + } + + // Write the pid file. This might error and that's okay. 
+ if p.PidPath != "" { + pid := strconv.FormatInt(int64(cmd.Process.Pid), 10) + if err := file.WriteAtomic(p.PidPath, []byte(pid)); err != nil { + p.Logger.Printf( + "[DEBUG] agent/proxy: error writing pid file %q: %s", + p.PidPath, err) + } + } + + return cmd.Process, nil } // Stop stops the daemon. diff --git a/agent/proxy/daemon_test.go b/agent/proxy/daemon_test.go index 32acde636..652364c5e 100644 --- a/agent/proxy/daemon_test.go +++ b/agent/proxy/daemon_test.go @@ -142,6 +142,91 @@ func TestDaemonStop_kill(t *testing.T) { require.Equal(mtime, fi.ModTime()) } +func TestDaemonStart_pidFile(t *testing.T) { + t.Parallel() + + require := require.New(t) + td, closer := testTempDir(t) + defer closer() + + path := filepath.Join(td, "file") + pidPath := filepath.Join(td, "pid") + uuid, err := uuid.GenerateUUID() + require.NoError(err) + + d := &Daemon{ + Command: helperProcess("start-once", path), + ProxyToken: uuid, + Logger: testLogger, + PidPath: pidPath, + } + require.NoError(d.Start()) + defer d.Stop() + + // Wait for the file to exist + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(pidPath) + if err == nil { + return + } + + r.Fatalf("error: %s", err) + }) + + // Check the pid file + pidRaw, err := ioutil.ReadFile(pidPath) + require.NoError(err) + require.NotEmpty(pidRaw) +} + +// Verify the pid file changes on restart +func TestDaemonRestart_pidFile(t *testing.T) { + t.Parallel() + + require := require.New(t) + td, closer := testTempDir(t) + defer closer() + path := filepath.Join(td, "file") + pidPath := filepath.Join(td, "pid") + + d := &Daemon{ + Command: helperProcess("restart", path), + Logger: testLogger, + PidPath: pidPath, + } + require.NoError(d.Start()) + defer d.Stop() + + // Wait for the file to exist. We save the func so we can reuse the test. 
+ waitFile := func() { + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + r.Fatalf("error waiting for path: %s", err) + }) + } + waitFile() + + // Check the pid file + pidRaw, err := ioutil.ReadFile(pidPath) + require.NoError(err) + require.NotEmpty(pidRaw) + + // Delete the file + require.NoError(os.Remove(path)) + + // File should re-appear because the process is restart + waitFile() + + // Check the pid file and it should not equal + pidRaw2, err := ioutil.ReadFile(pidPath) + require.NoError(err) + require.NotEmpty(pidRaw2) + require.NotEqual(pidRaw, pidRaw2) +} + func TestDaemonEqual(t *testing.T) { cases := []struct { Name string diff --git a/lib/file/atomic.go b/lib/file/atomic.go new file mode 100644 index 000000000..e1d6e6693 --- /dev/null +++ b/lib/file/atomic.go @@ -0,0 +1,46 @@ +package file + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/hashicorp/go-uuid" +) + +// WriteAtomic writes the given contents to a temporary file in the same +// directory, does an fsync and then renames the file to its real path +func WriteAtomic(path string, contents []byte) error { + uuid, err := uuid.GenerateUUID() + if err != nil { + return err + } + tempPath := fmt.Sprintf("%s-%s.tmp", path, uuid) + + if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil { + return err + } + fh, err := os.OpenFile(tempPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600) + if err != nil { + return err + } + if _, err := fh.Write(contents); err != nil { + fh.Close() + os.Remove(tempPath) + return err + } + if err := fh.Sync(); err != nil { + fh.Close() + os.Remove(tempPath) + return err + } + if err := fh.Close(); err != nil { + os.Remove(tempPath) + return err + } + if err := os.Rename(tempPath, path); err != nil { + os.Remove(tempPath) + return err + } + return nil +} From 9675ed626d9fdbc308f977d132b39faf8add5e69 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 14:09:30 -0700 Subject: [PATCH 231/627] 
agent/proxy: manager configures the daemon pid path to write pids --- agent/proxy/manager.go | 17 ++++++++++++++++- agent/proxy/manager_test.go | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index 7ed278037..f5c2d996e 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -354,11 +354,14 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { return nil, fmt.Errorf("daemon mode managed proxy requires command") } + // We reuse the service ID a few times + id := p.ProxyService.ID + // Build the command to execute. var cmd exec.Cmd cmd.Path = command[0] cmd.Args = command // idx 0 is path but preserved since it should be - if err := m.configureLogDir(p.ProxyService.ID, &cmd); err != nil { + if err := m.configureLogDir(id, &cmd); err != nil { return nil, fmt.Errorf("error configuring proxy logs: %s", err) } @@ -367,6 +370,7 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { Command: &cmd, ProxyToken: mp.ProxyToken, Logger: m.Logger, + PidPath: pidPath(filepath.Join(m.DataDir, "pids"), id), }, nil default: @@ -414,3 +418,14 @@ func (m *Manager) configureLogDir(id string, cmd *exec.Cmd) error { func logPath(dir, id, stream string) string { return filepath.Join(dir, fmt.Sprintf("%s-%s.log", id, stream)) } + +// pidPath is a helper to return the path to the pid file for the given +// directory and service ID. 
+func pidPath(dir, id string) string { + // If no directory is given we do not write a pid + if dir == "" { + return "" + } + + return filepath.Join(dir, fmt.Sprintf("%s.pid", id)) +} diff --git a/agent/proxy/manager_test.go b/agent/proxy/manager_test.go index d14b71afa..d9a817af3 100644 --- a/agent/proxy/manager_test.go +++ b/agent/proxy/manager_test.go @@ -227,6 +227,40 @@ func TestManagerRun_daemonLogs(t *testing.T) { require.Equal([]byte(expectedErr), actual) } +func TestManagerRun_daemonPid(t *testing.T) { + t.Parallel() + + require := require.New(t) + state := local.TestState(t) + m, closer := testManager(t) + defer closer() + m.State = state + defer m.Kill() + + // Configure a log dir so that we can read the logs + pidDir := filepath.Join(m.DataDir, "pids") + + // Create the service and calculate the log paths + path := filepath.Join(m.DataDir, "notify") + id := testStateProxy(t, state, "web", helperProcess("output", path)) + pidPath := pidPath(pidDir, id) + + // Start the manager + go m.Run() + + // We should see the path appear shortly + retry.Run(t, func(r *retry.R) { + if _, err := os.Stat(path); err != nil { + r.Fatalf("error waiting for stdout path: %s", err) + } + }) + + // Verify the pid file is not empty + pidRaw, err := ioutil.ReadFile(pidPath) + require.NoError(err) + require.NotEmpty(pidRaw) +} + func testManager(t *testing.T) (*Manager, func()) { m := NewManager() From a3a0bc7b13358a0b842d7d62bdd8148a1f6c1e13 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 15:46:00 -0700 Subject: [PATCH 232/627] agent/proxy: implement snapshotting for daemons --- agent/proxy/daemon.go | 99 ++++++++++++++++++++++++++++++++++++++ agent/proxy/daemon_test.go | 92 +++++++++++++++++++++++++++++++++++ agent/proxy/manager.go | 2 +- agent/proxy/noop.go | 8 +-- agent/proxy/proxy.go | 16 ++++++ agent/proxy/snapshot.go | 31 ++++++++++++ 6 files changed, 244 insertions(+), 4 deletions(-) create mode 100644 agent/proxy/snapshot.go diff --git 
a/agent/proxy/daemon.go b/agent/proxy/daemon.go index e3b376c05..e1ec2e1b0 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -11,6 +11,7 @@ import ( "time" "github.com/hashicorp/consul/lib/file" + "github.com/mitchellh/mapstructure" ) // Constants related to restart timers with the daemon mode proxies. At some @@ -261,6 +262,26 @@ func (p *Daemon) Stop() error { return process.Kill() } +// stopKeepAlive is like Stop but keeps the process running. This is +// used only for tests. +func (p *Daemon) stopKeepAlive() error { + p.lock.Lock() + + // If we're already stopped or never started, then no problem. + if p.stopped || p.process == nil { + p.stopped = true + p.lock.Unlock() + return nil + } + + // Note that we've stopped + p.stopped = true + close(p.stopCh) + p.lock.Unlock() + + return nil +} + // Equal implements Proxy to check for equality. func (p *Daemon) Equal(raw Proxy) bool { p2, ok := raw.(*Daemon) @@ -275,3 +296,81 @@ func (p *Daemon) Equal(raw Proxy) bool { reflect.DeepEqual(p.Command.Args, p2.Command.Args) && reflect.DeepEqual(p.Command.Env, p2.Command.Env) } + +// MarshalSnapshot implements Proxy +func (p *Daemon) MarshalSnapshot() map[string]interface{} { + p.lock.Lock() + defer p.lock.Unlock() + + // If we're stopped or have no process, then nothing to snapshot. 
+ if p.stopped || p.process == nil { + return nil + } + + return map[string]interface{}{ + "Pid": p.process.Pid, + "CommandPath": p.Command.Path, + "CommandArgs": p.Command.Args, + "CommandDir": p.Command.Dir, + "CommandEnv": p.Command.Env, + "ProxyToken": p.ProxyToken, + } +} + +// UnmarshalSnapshot implements Proxy +func (p *Daemon) UnmarshalSnapshot(m map[string]interface{}) error { + var s daemonSnapshot + if err := mapstructure.Decode(m, &s); err != nil { + return err + } + + p.lock.Lock() + defer p.lock.Unlock() + + // Set the basic fields + p.ProxyToken = s.ProxyToken + p.Command = &exec.Cmd{ + Path: s.CommandPath, + Args: s.CommandArgs, + Dir: s.CommandDir, + Env: s.CommandEnv, + } + + // For the pid, we want to find the process. + proc, err := os.FindProcess(s.Pid) + if err != nil { + return err + } + + // TODO(mitchellh): we should check if proc refers to a process that + // is currently alive. If not, we should return here and not manage the + // process. + + // "Start it" + stopCh := make(chan struct{}) + exitedCh := make(chan struct{}) + p.stopCh = stopCh + p.exitedCh = exitedCh + p.process = proc + go p.keepAlive(stopCh, exitedCh) + + return nil +} + +// daemonSnapshot is the structure of the marshalled data for snapshotting. +type daemonSnapshot struct { + // Pid of the process. This is the only value actually required to + // regain mangement control. The remainder values are for Equal. + Pid int + + // Command information + CommandPath string + CommandArgs []string + CommandDir string + CommandEnv []string + + // NOTE(mitchellh): longer term there are discussions/plans to only + // store the hash of the token but for now we need the full token in + // case the process dies and has to be restarted. 
+ ProxyToken string +} diff --git a/agent/proxy/daemon_test.go b/agent/proxy/daemon_test.go index 652364c5e..6e74cdf88 100644 --- a/agent/proxy/daemon_test.go +++ b/agent/proxy/daemon_test.go @@ -316,3 +316,95 @@ func TestDaemonEqual(t *testing.T) { }) } } + +func TestDaemonMarshalSnapshot(t *testing.T) { + cases := []struct { + Name string + Proxy Proxy + Expected map[string]interface{} + }{ + { + "stopped daemon", + &Daemon{ + Command: &exec.Cmd{Path: "/foo"}, + }, + nil, + }, + + { + "basic", + &Daemon{ + Command: &exec.Cmd{Path: "/foo"}, + process: &os.Process{Pid: 42}, + }, + map[string]interface{}{ + "Pid": 42, + "CommandPath": "/foo", + "CommandArgs": []string(nil), + "CommandDir": "", + "CommandEnv": []string(nil), + "ProxyToken": "", + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + actual := tc.Proxy.MarshalSnapshot() + require.Equal(t, tc.Expected, actual) + }) + } +} + +func TestDaemonUnmarshalSnapshot(t *testing.T) { + t.Parallel() + + require := require.New(t) + td, closer := testTempDir(t) + defer closer() + + path := filepath.Join(td, "file") + uuid, err := uuid.GenerateUUID() + require.NoError(err) + + d := &Daemon{ + Command: helperProcess("start-stop", path), + ProxyToken: uuid, + Logger: testLogger, + } + require.NoError(d.Start()) + + // Wait for the file to exist + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + + r.Fatalf("error: %s", err) + }) + + // Snapshot + snap := d.MarshalSnapshot() + + // Stop the original daemon but keep it alive + require.NoError(d.stopKeepAlive()) + + // Restore the second daemon + d2 := &Daemon{Logger: testLogger} + require.NoError(d2.UnmarshalSnapshot(snap)) + + // Stop the process + require.NoError(d2.Stop()) + + // File should no longer exist. 
+ retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if os.IsNotExist(err) { + return + } + + // err might be nil here but that's okay + r.Fatalf("should not exist: %s", err) + }) +} diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index f5c2d996e..c9c63311f 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -60,7 +60,7 @@ type Manager struct { // // * logs/ - log files named -std{out|err}.log // * pids/ - pid files for daemons named .pid - // * state.ext - the state of the manager + // * snapshot.json - the state of the manager // DataDir string diff --git a/agent/proxy/noop.go b/agent/proxy/noop.go index 9ce013554..a96425d84 100644 --- a/agent/proxy/noop.go +++ b/agent/proxy/noop.go @@ -3,6 +3,8 @@ package proxy // Noop implements Proxy and does nothing. type Noop struct{} -func (p *Noop) Start() error { return nil } -func (p *Noop) Stop() error { return nil } -func (p *Noop) Equal(Proxy) bool { return true } +func (p *Noop) Start() error { return nil } +func (p *Noop) Stop() error { return nil } +func (p *Noop) Equal(Proxy) bool { return true } +func (p *Noop) MarshalSnapshot() map[string]interface{} { return nil } +func (p *Noop) UnmarshalSnapshot(map[string]interface{}) error { return nil } diff --git a/agent/proxy/proxy.go b/agent/proxy/proxy.go index 549a6ee26..e1bad92c0 100644 --- a/agent/proxy/proxy.go +++ b/agent/proxy/proxy.go @@ -39,4 +39,20 @@ type Proxy interface { // If Equal returns true, the old proxy will remain running and the new // one will be ignored. Equal(Proxy) bool + + // MarshalSnapshot returns the state that will be stored in a snapshot + // so that Consul can recover the proxy process after a restart. The + // result should only contain primitive values and containers (lists/maps). + // + // UnmarshalSnapshot is called to restore the receiving Proxy from its + // marshalled state. If UnmarshalSnapshot returns an error, the snapshot + // is ignored and the marshalled snapshot will be lost. 
The manager will + // log. + // + // This should save/restore enough state to be able to regain management + // of a proxy process as well as to perform the Equal method above. The + // Equal method will be called when a local state sync happens to determine + // if the recovered process should be restarted or not. + MarshalSnapshot() map[string]interface{} + UnmarshalSnapshot(map[string]interface{}) error } diff --git a/agent/proxy/snapshot.go b/agent/proxy/snapshot.go new file mode 100644 index 000000000..b119bfddf --- /dev/null +++ b/agent/proxy/snapshot.go @@ -0,0 +1,31 @@ +package proxy + +import ( + "github.com/hashicorp/consul/agent/structs" +) + +// snapshot is the structure of the snapshot file. This is unexported because +// we don't want this being a public API. +// +// The snapshot doesn't contain any configuration for the manager. We only +// want to restore the proxies that we're managing, and we use the config +// set at runtime to sync and reconcile what proxies we should start, +// restart, stop, or have already running. +type snapshot struct { + // Version is the version of the snapshot format and can be used + // to safely update the format in the future if necessary. + Version int + + // Proxies are the set of proxies that the manager has. + Proxies []snapshotProxy +} + +// snapshotProxy represents a single proxy. +type snapshotProxy struct { + // Mode corresponds to the type of proxy running. + Mode structs.ProxyExecMode + + // Config is an opaque mapping of primitive values that the proxy + // implementation uses to restore state. 
+ Config map[string]interface{} +} From 64fc9e021824ba540a6e92f78a43f8f76d0bc3fc Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 15:55:49 -0700 Subject: [PATCH 233/627] agent/proxy: check if process is alive --- agent/proxy/daemon.go | 8 ++++--- agent/proxy/daemon_test.go | 41 ++++++++++++++++++++++++++++++++++ agent/proxy/process_unix.go | 23 +++++++++++++++++++ agent/proxy/process_windows.go | 19 ++++++++++++++++ 4 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 agent/proxy/process_unix.go create mode 100644 agent/proxy/process_windows.go diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index e1ec2e1b0..15950c196 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -342,9 +342,11 @@ func (p *Daemon) UnmarshalSnapshot(m map[string]interface{}) error { return err } - // TODO(mitchellh): we should check if proc refers to a process that - // is currently alive. If not, we should return here and not manage the - // process. + // FindProcess on many systems returns no error even if the process + // is now dead. We perform an extra check that the process is alive. 
+ if err := processAlive(proc); err != nil { + return err + } // "Start it" stopCh := make(chan struct{}) diff --git a/agent/proxy/daemon_test.go b/agent/proxy/daemon_test.go index 6e74cdf88..a96e90699 100644 --- a/agent/proxy/daemon_test.go +++ b/agent/proxy/daemon_test.go @@ -372,6 +372,7 @@ func TestDaemonUnmarshalSnapshot(t *testing.T) { ProxyToken: uuid, Logger: testLogger, } + defer d.Stop() require.NoError(d.Start()) // Wait for the file to exist @@ -408,3 +409,43 @@ func TestDaemonUnmarshalSnapshot(t *testing.T) { r.Fatalf("should not exist: %s", err) }) } + +func TestDaemonUnmarshalSnapshot_notRunning(t *testing.T) { + t.Parallel() + + require := require.New(t) + td, closer := testTempDir(t) + defer closer() + + path := filepath.Join(td, "file") + uuid, err := uuid.GenerateUUID() + require.NoError(err) + + d := &Daemon{ + Command: helperProcess("start-stop", path), + ProxyToken: uuid, + Logger: testLogger, + } + defer d.Stop() + require.NoError(d.Start()) + + // Wait for the file to exist + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + + r.Fatalf("error: %s", err) + }) + + // Snapshot + snap := d.MarshalSnapshot() + + // Stop the original daemon + require.NoError(d.Stop()) + + // Restore the second daemon + d2 := &Daemon{Logger: testLogger} + require.Error(d2.UnmarshalSnapshot(snap)) +} diff --git a/agent/proxy/process_unix.go b/agent/proxy/process_unix.go new file mode 100644 index 000000000..6db64c59c --- /dev/null +++ b/agent/proxy/process_unix.go @@ -0,0 +1,23 @@ +// +build !windows + +package proxy + +import ( + "os" + "syscall" +) + +// processAlive for non-Windows. Note that this very likely doesn't +// work for all non-Windows platforms Go supports and we should expand +// support as we experience it. +func processAlive(p *os.Process) error { + // On Unix-like systems, we can verify a process is alive by sending + // a 0 signal. 
This will do nothing to the process but will still + // return errors if the process is gone. + err := p.Signal(syscall.Signal(0)) + if err == nil || err == syscall.EPERM { + return nil + } + + return err +} diff --git a/agent/proxy/process_windows.go b/agent/proxy/process_windows.go new file mode 100644 index 000000000..0268958e9 --- /dev/null +++ b/agent/proxy/process_windows.go @@ -0,0 +1,19 @@ +// +build windows + +package proxy + +import ( + "fmt" + "os" +) + +func processAlive(p *os.Process) error { + // On Windows, os.FindProcess will error if the process is not alive, + // so we don't have to do any further checking. The nature of it being + // non-nil means it seems to be healthy. + if p == nil { + return fmt.Errof("process no longer alive") + } + + return nil +} From eb31827fac680cf60187e24f624bea3995ad9efd Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 17:44:54 -0700 Subject: [PATCH 234/627] agent/proxy: implement periodic snapshotting in the manager --- agent/proxy/manager.go | 83 +++++++++++++++++----- agent/proxy/manager_test.go | 89 ++++++++++++++++++++++++ agent/proxy/proxy.go | 21 ++++++ agent/proxy/snapshot.go | 134 +++++++++++++++++++++++++++++++++++- 4 files changed, 310 insertions(+), 17 deletions(-) diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index c9c63311f..7a8cd5d4c 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -25,6 +25,11 @@ const ( // changes. Then the whole cycle resets. ManagerCoalescePeriod = 5 * time.Second ManagerQuiescentPeriod = 500 * time.Millisecond + + // ManagerSnapshotPeriod is the interval that snapshots are taken. + // The last snapshot state is preserved and if it matches a file isn't + // written, so its safe for this to be reasonably frequent. + ManagerSnapshotPeriod = 1 * time.Second ) // Manager starts, stops, snapshots, and restores managed proxies. 
@@ -64,9 +69,14 @@ type Manager struct { // DataDir string - // SnapshotDir is the path to the directory where snapshots will - // be written - SnapshotDir string + // SnapshotPeriod is the duration between snapshots. This can be set + // relatively low to ensure accuracy, because if the new snapshot matches + // the last snapshot taken, no file will be written. Therefore, setting + // this low causes only slight CPU/memory usage but doesn't result in + // disk IO. If this isn't set, ManagerSnapshotPeriod will be the default. + // + // This only has an effect if snapshots are enabled (DataDir is set). + SnapshotPeriod time.Duration // CoalescePeriod and QuiescencePeriod control the timers for coalescing // updates from the local state. See the defaults at the top of this @@ -86,7 +96,8 @@ type Manager struct { // for changes to this value. runState managerRunState - proxies map[string]Proxy + proxies map[string]Proxy + lastSnapshot *snapshot } // NewManager initializes a Manager. After initialization, the exported @@ -96,6 +107,7 @@ func NewManager() *Manager { var lock sync.Mutex return &Manager{ Logger: defaultLogger, + SnapshotPeriod: ManagerSnapshotPeriod, CoalescePeriod: ManagerCoalescePeriod, QuiescentPeriod: ManagerQuiescentPeriod, lock: &lock, @@ -228,6 +240,12 @@ func (m *Manager) Run() { m.State.NotifyProxy(notifyCh) defer m.State.StopNotifyProxy(notifyCh) + // Start the timer for snapshots. We don't use a ticker because disk + // IO can be slow and we don't want overlapping notifications. So we only + // reset the timer once the snapshot is complete rather than continuously. 
+ snapshotTimer := time.NewTimer(m.SnapshotPeriod) + defer snapshotTimer.Stop() + m.Logger.Println("[DEBUG] agent/proxy: managed Connect proxy manager started") SYNC: for { @@ -261,6 +279,17 @@ SYNC: case <-quiescent: continue SYNC + case <-snapshotTimer.C: + // Perform a snapshot + if path := m.SnapshotPath(); path != "" { + if err := m.snapshot(path, true); err != nil { + m.Logger.Printf("[WARN] agent/proxy: failed to snapshot state: %s", err) + } + } + + // Reset + snapshotTimer.Reset(m.SnapshotPeriod) + case <-stopCh: // Stop immediately, no cleanup m.Logger.Println("[DEBUG] agent/proxy: Stopping managed Connect proxy manager") @@ -342,10 +371,22 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { if mp == nil || mp.Proxy == nil { return nil, fmt.Errorf("internal error: nil *local.ManagedProxy or Proxy field") } - p := mp.Proxy - switch p.ExecMode { - case structs.ProxyExecModeDaemon: + + // We reuse the service ID a few times + id := p.ProxyService.ID + + // Create the Proxy. We could just as easily switch on p.ExecMode + // but I wanted there to be only location where ExecMode => Proxy so + // it lowers the chance that is wrong. + proxy, err := m.newProxyFromMode(p.ExecMode, id) + if err != nil { + return nil, err + } + + // Depending on the proxy type we configure the rest from our ManagedProxy + switch proxy := proxy.(type) { + case *Daemon: command := p.Command // This should never happen since validation should happen upstream @@ -354,9 +395,6 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { return nil, fmt.Errorf("daemon mode managed proxy requires command") } - // We reuse the service ID a few times - id := p.ProxyService.ID - // Build the command to execute. 
var cmd exec.Cmd cmd.Path = command[0] @@ -366,18 +404,31 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { } // Build the daemon structure - return &Daemon{ - Command: &cmd, - ProxyToken: mp.ProxyToken, - Logger: m.Logger, - PidPath: pidPath(filepath.Join(m.DataDir, "pids"), id), - }, nil + proxy.Command = &cmd + proxy.ProxyToken = mp.ProxyToken + return proxy, nil default: return nil, fmt.Errorf("unsupported managed proxy type: %q", p.ExecMode) } } +// newProxyFromMode just initializes the proxy structure from only the mode +// and the service ID. This is a shared method between newProxy and Restore +// so that we only have one location where we turn ExecMode into a Proxy. +func (m *Manager) newProxyFromMode(mode structs.ProxyExecMode, id string) (Proxy, error) { + switch mode { + case structs.ProxyExecModeDaemon: + return &Daemon{ + Logger: m.Logger, + PidPath: pidPath(filepath.Join(m.DataDir, "pids"), id), + }, nil + + default: + return nil, fmt.Errorf("unsupported managed proxy type: %q", mode) + } +} + // configureLogDir sets up the file descriptors to stdout/stderr so that // they log to the proper file path for the given service ID. func (m *Manager) configureLogDir(id string, cmd *exec.Cmd) error { diff --git a/agent/proxy/manager_test.go b/agent/proxy/manager_test.go index d9a817af3..28922cbfa 100644 --- a/agent/proxy/manager_test.go +++ b/agent/proxy/manager_test.go @@ -261,9 +261,98 @@ func TestManagerRun_daemonPid(t *testing.T) { require.NotEmpty(pidRaw) } +// Test the Snapshot/Restore works. 
+func TestManagerRun_snapshotRestore(t *testing.T) { + t.Parallel() + + require := require.New(t) + state := local.TestState(t) + m, closer := testManager(t) + defer closer() + m.State = state + defer m.Kill() + + // Add the proxy + td, closer := testTempDir(t) + defer closer() + path := filepath.Join(td, "file") + testStateProxy(t, state, "web", helperProcess("start-stop", path)) + + // Set a low snapshot period so we get a snapshot + m.SnapshotPeriod = 10 * time.Millisecond + + // Start the manager + go m.Run() + + // We should see the path appear shortly + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err == nil { + return + } + r.Fatalf("error waiting for path: %s", err) + }) + + // Wait for the snapshot + snapPath := m.SnapshotPath() + retry.Run(t, func(r *retry.R) { + raw, err := ioutil.ReadFile(snapPath) + if err != nil { + r.Fatalf("error waiting for path: %s", err) + } + if len(raw) < 30 { + r.Fatalf("snapshot too small") + } + }) + + // Stop the sync + require.NoError(m.Close()) + + // File should still exist + _, err := os.Stat(path) + require.NoError(err) + + // Restore a manager from a snapshot + m2, closer := testManager(t) + m2.State = state + defer closer() + defer m2.Kill() + require.NoError(m2.Restore(snapPath)) + + // Start + go m2.Run() + + // Add a second proxy so that we can determine when we're up + // and running. 
+ path2 := filepath.Join(td, "file") + testStateProxy(t, state, "db", helperProcess("start-stop", path2)) + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path2) + if err == nil { + return + } + r.Fatalf("error waiting for path: %s", err) + }) + + // Kill m2, which should kill our main process + require.NoError(m2.Kill()) + + // File should no longer exist + retry.Run(t, func(r *retry.R) { + _, err := os.Stat(path) + if err != nil { + return + } + r.Fatalf("file still exists") + }) +} + func testManager(t *testing.T) (*Manager, func()) { m := NewManager() + // Setup a default state + m.State = local.TestState(t) + // Set these periods low to speed up tests m.CoalescePeriod = 1 * time.Millisecond m.QuiescentPeriod = 1 * time.Millisecond diff --git a/agent/proxy/proxy.go b/agent/proxy/proxy.go index e1bad92c0..1bb88da8e 100644 --- a/agent/proxy/proxy.go +++ b/agent/proxy/proxy.go @@ -7,6 +7,10 @@ // for that is available in the "connect/proxy" package. package proxy +import ( + "github.com/hashicorp/consul/agent/structs" +) + // EnvProxyToken is the name of the environment variable that is passed // to managed proxies containing the proxy token. const EnvProxyToken = "CONNECT_PROXY_TOKEN" @@ -16,6 +20,9 @@ const EnvProxyToken = "CONNECT_PROXY_TOKEN" // Calls to all the functions on this interface must be concurrency safe. // Please read the documentation carefully on top of each function for expected // behavior. +// +// Whenever a new proxy type is implemented, please also update proxyExecMode +// and newProxyFromMode and newProxy to support the new proxy. type Proxy interface { // Start starts the proxy. If an error is returned then the managed // proxy registration is rejected. Therefore, this should only fail if @@ -56,3 +63,17 @@ type Proxy interface { MarshalSnapshot() map[string]interface{} UnmarshalSnapshot(map[string]interface{}) error } + +// proxyExecMode returns the ProxyExecMode for a Proxy instance. 
+func proxyExecMode(p Proxy) structs.ProxyExecMode { + switch p.(type) { + case *Daemon: + return structs.ProxyExecModeDaemon + + case *Noop: + return structs.ProxyExecModeTest + + default: + return structs.ProxyExecModeUnspecified + } +} diff --git a/agent/proxy/snapshot.go b/agent/proxy/snapshot.go index b119bfddf..973a2f083 100644 --- a/agent/proxy/snapshot.go +++ b/agent/proxy/snapshot.go @@ -1,7 +1,15 @@ package proxy import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "github.com/hashicorp/consul/agent/structs" + "github.com/hashicorp/consul/lib/file" ) // snapshot is the structure of the snapshot file. This is unexported because @@ -17,7 +25,7 @@ type snapshot struct { Version int // Proxies are the set of proxies that the manager has. - Proxies []snapshotProxy + Proxies map[string]snapshotProxy } // snapshotProxy represents a single proxy. @@ -29,3 +37,127 @@ type snapshotProxy struct { // implementation uses to restore state. Config map[string]interface{} } + +// snapshotVersion is the current version to encode within the snapshot. +const snapshotVersion = 1 + +// SnapshotPath returns the default snapshot path for this manager. This +// will return empty if DataDir is not set. This file may not exist yet. +func (m *Manager) SnapshotPath() string { + if m.DataDir == "" { + return "" + } + + return filepath.Join(m.DataDir, "snapshot.json") +} + +// Snapshot will persist a snapshot of the proxy manager state that +// can be restored with Restore. +// +// If DataDir is non-empty, then the Manager will automatically snapshot +// whenever the set of managed proxies changes. This method generally doesn't +// need to be called manually. +func (m *Manager) Snapshot(path string) error { + m.lock.Lock() + defer m.lock.Unlock() + return m.snapshot(path, false) +} + +// snapshot is the internal function analogous to Snapshot but expects +// a lock to already be held. 
+// +// checkDup when set will store the snapshot on lastSnapshot and use +// reflect.DeepEqual to verify that it's not writing an identical snapshot. +func (m *Manager) snapshot(path string, checkDup bool) error { + // Build the snapshot + s := snapshot{ + Version: snapshotVersion, + Proxies: make(map[string]snapshotProxy, len(m.proxies)), + } + for id, p := range m.proxies { + // Get the snapshot configuration. If the configuration is nil or + // empty then we don't persist this proxy. + config := p.MarshalSnapshot() + if len(config) == 0 { + continue + } + + s.Proxies[id] = snapshotProxy{ + Mode: proxyExecMode(p), + Config: config, + } + } + + // Dup detection, if the snapshot is identical to the last, do nothing + if checkDup && reflect.DeepEqual(m.lastSnapshot, &s) { + return nil + } + + // Encode as JSON + encoded, err := json.Marshal(&s) + if err != nil { + return err + } + + // Write the file + err = file.WriteAtomic(path, encoded) + if err == nil && checkDup { + m.lastSnapshot = &s + } + return err +} + +// Restore restores the manager state from a snapshot at path. If path +// doesn't exist, this does nothing and no error is returned. +// +// This restores proxy state but does not restore any Manager configuration +// such as DataDir, Logger, etc. All of those should be set _before_ Restore +// is called. +// +// Restore must be called before Run. Restore will immediately start +// supervising the restored processes but will not sync with the local +// state store until Run is called. +// +// If an error is returned the manager state is left untouched. +func (m *Manager) Restore(path string) error { + buf, err := ioutil.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + var s snapshot + if err := json.Unmarshal(buf, &s); err != nil { + return err + } + + // Verify the version matches so we can be more confident that we're + // decoding a structure that we expect. 
+ if s.Version != snapshotVersion { + return fmt.Errorf("unknown snapshot version, expecting %d", snapshotVersion) + } + + // Build the proxies from the snapshot + proxies := make(map[string]Proxy, len(s.Proxies)) + for id, sp := range s.Proxies { + p, err := m.newProxyFromMode(sp.Mode, id) + if err != nil { + return err + } + + if err := p.UnmarshalSnapshot(sp.Config); err != nil { + return err + } + + proxies[id] = p + } + + // Overwrite the proxies. The documentation notes that this will happen. + m.lock.Lock() + defer m.lock.Unlock() + m.proxies = proxies + return nil +} From e3be9f7a029ca9f402fa07a84fdc8650dcd911d3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 17:47:38 -0700 Subject: [PATCH 235/627] agent/proxy: improve comments on snapshotting --- agent/proxy/manager.go | 8 +++++++- agent/proxy/snapshot.go | 5 ++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index 7a8cd5d4c..5bb871c65 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -96,8 +96,14 @@ type Manager struct { // for changes to this value. runState managerRunState - proxies map[string]Proxy + // lastSnapshot stores a pointer to the last snapshot that successfully + // wrote to disk. This is used for dup detection to prevent rewriting + // the same snapshot multiple times. snapshots should never be that + // large so keeping it in-memory should be cheap even for thousands of + // proxies (unlikely scenario). lastSnapshot *snapshot + + proxies map[string]Proxy } // NewManager initializes a Manager. 
After initialization, the exported diff --git a/agent/proxy/snapshot.go b/agent/proxy/snapshot.go index 973a2f083..8f81a182a 100644 --- a/agent/proxy/snapshot.go +++ b/agent/proxy/snapshot.go @@ -101,7 +101,10 @@ func (m *Manager) snapshot(path string, checkDup bool) error { // Write the file err = file.WriteAtomic(path, encoded) - if err == nil && checkDup { + + // If we are checking for dups and we had a successful write, store + // it so we don't rewrite the same value. + if checkDup && err == nil { m.lastSnapshot = &s } return err From 4301f7f1f53ec412b5235c4796aba3e2edf30327 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 17:51:49 -0700 Subject: [PATCH 236/627] agent: only set the proxy manager data dir if its set --- agent/agent.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/agent/agent.go b/agent/agent.go index 2c18c08d3..6f7531ed9 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -363,7 +363,12 @@ func (a *Agent) Start() error { a.proxyManager = proxy.NewManager() a.proxyManager.State = a.State a.proxyManager.Logger = a.logger - a.proxyManager.DataDir = filepath.Join(a.config.DataDir, "proxy") + if a.config.DataDir != "" { + // DataDir is required for all non-dev mode agents, but we want + // to allow setting the data dir for demos and so on for the agent, + // so do the check above instead. 
+ a.proxyManager.DataDir = filepath.Join(a.config.DataDir, "proxy") + } go a.proxyManager.Run() // Start watching for critical services to deregister, based on their From 1d24df38276204b732d92c47b1c2ac5da56f63da Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 18:25:11 -0700 Subject: [PATCH 237/627] agent/proxy: check if process is alive in addition to Wait --- agent/proxy/daemon.go | 27 ++++++++++++++++++++------- agent/proxy/process_unix.go | 17 ++++++++++++----- agent/proxy/process_windows.go | 9 ++------- agent/proxy/snapshot.go | 7 ++++++- 4 files changed, 40 insertions(+), 20 deletions(-) diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index 15950c196..e9bf318a2 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -165,7 +165,25 @@ func (p *Daemon) keepAlive(stopCh <-chan struct{}, exitedCh chan<- struct{}) { } + // Wait for the process to exit. Note that if we restored this proxy + // then Wait will always fail because we likely aren't the parent + // process. Therefore, we do an extra sanity check after to use other + // syscalls to verify the process is truly dead. ps, err := process.Wait() + if _, err := findProcess(process.Pid); err == nil { + select { + case <-time.After(1 * time.Second): + // We want a busy loop, but not too busy. 1 second between + // detecting a process death seems reasonable. + + case <-stopCh: + // If we receive a stop request we want to exit immediately. + return + } + + continue + } + process = nil if err != nil { p.Logger.Printf("[INFO] agent/proxy: daemon exited with error: %s", err) @@ -336,15 +354,10 @@ func (p *Daemon) UnmarshalSnapshot(m map[string]interface{}) error { Env: s.CommandEnv, } - // For the pid, we want to find the process. - proc, err := os.FindProcess(s.Pid) - if err != nil { - return err - } - // FindProcess on many systems returns no error even if the process // is now dead. We perform an extra check that the process is alive. 
- if err := processAlive(proc); err != nil { + proc, err := findProcess(s.Pid) + if err != nil { return err } diff --git a/agent/proxy/process_unix.go b/agent/proxy/process_unix.go index 6db64c59c..deb45d080 100644 --- a/agent/proxy/process_unix.go +++ b/agent/proxy/process_unix.go @@ -3,21 +3,28 @@ package proxy import ( + "fmt" "os" "syscall" ) -// processAlive for non-Windows. Note that this very likely doesn't +// findProcess for non-Windows. Note that this very likely doesn't // work for all non-Windows platforms Go supports and we should expand // support as we experience it. -func processAlive(p *os.Process) error { +func findProcess(pid int) (*os.Process, error) { + // FindProcess never fails on unix-like systems. + p, err := os.FindProcess(pid) + if err != nil { + return nil, err + } + // On Unix-like systems, we can verify a process is alive by sending // a 0 signal. This will do nothing to the process but will still // return errors if the process is gone. - err := p.Signal(syscall.Signal(0)) + err = p.Signal(syscall.Signal(0)) if err == nil || err == syscall.EPERM { - return nil + return p, nil } - return err + return nil, fmt.Errorf("process is dead") } diff --git a/agent/proxy/process_windows.go b/agent/proxy/process_windows.go index 0268958e9..0a00d81ee 100644 --- a/agent/proxy/process_windows.go +++ b/agent/proxy/process_windows.go @@ -3,17 +3,12 @@ package proxy import ( - "fmt" "os" ) -func processAlive(p *os.Process) error { +func findProcess(pid int) (*os.Process, error) { // On Windows, os.FindProcess will error if the process is not alive, // so we don't have to do any further checking. The nature of it being // non-nil means it seems to be healthy. 
- if p == nil { - return fmt.Errof("process no longer alive") - } - - return nil + return os.FindProcess(pid) } diff --git a/agent/proxy/snapshot.go b/agent/proxy/snapshot.go index 8f81a182a..dbe03fd83 100644 --- a/agent/proxy/snapshot.go +++ b/agent/proxy/snapshot.go @@ -151,8 +151,13 @@ func (m *Manager) Restore(path string) error { return err } + // Unmarshal the proxy. If there is an error we just continue on and + // ignore it. Errors restoring proxies should be exceptionally rare + // and only under scenarios where the proxy isn't running anymore or + // we won't have permission to access it. We log and continue. if err := p.UnmarshalSnapshot(sp.Config); err != nil { - return err + m.Logger.Printf("[WARN] agent/proxy: error restoring proxy %q: %s", id, err) + continue } proxies[id] = p From 147b066c676e14525fdf467acbe211939653ac6a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 18:25:32 -0700 Subject: [PATCH 238/627] agent: restore proxy snapshot but still Kill proxies --- agent/agent.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/agent/agent.go b/agent/agent.go index 6f7531ed9..2d431be92 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -368,6 +368,11 @@ func (a *Agent) Start() error { // to allow setting the data dir for demos and so on for the agent, // so do the check above instead. a.proxyManager.DataDir = filepath.Join(a.config.DataDir, "proxy") + + // Restore from our snapshot (if it exists) + if err := a.proxyManager.Restore(a.proxyManager.SnapshotPath()); err != nil { + a.logger.Printf("[WARN] agent: error restoring proxy state: %s", err) + } } go a.proxyManager.Run() @@ -1289,7 +1294,7 @@ func (a *Agent) ShutdownAgent() error { // Stop the proxy manager // NOTE(mitchellh): we use Kill for now to kill the processes since - // snapshotting isn't implemented. This should change to Close later. + // there isn't a clean way to cleanup the managed proxies. 
This is coming if err := a.proxyManager.Kill(); err != nil { a.logger.Printf("[WARN] agent: error shutting down proxy manager: %s", err) } From 1cb9046ad5d05842f3d33c6d43e0d30ddeaa0d6c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 21:58:07 -0700 Subject: [PATCH 239/627] lib/file: add tests for WriteAtomic --- lib/file/atomic_test.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 lib/file/atomic_test.go diff --git a/lib/file/atomic_test.go b/lib/file/atomic_test.go new file mode 100644 index 000000000..5be41f245 --- /dev/null +++ b/lib/file/atomic_test.go @@ -0,0 +1,32 @@ +package file + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +// This doesn't really test the "atomic" part of this function. It really +// tests that it just writes the file properly. I would love to test this +// better but I'm not sure how. -mitchellh +func TestWriteAtomic(t *testing.T) { + require := require.New(t) + td, err := ioutil.TempDir("", "lib-file") + require.NoError(err) + defer os.RemoveAll(td) + + // Create a subdir that doesn't exist to test that it is created + path := filepath.Join(td, "subdir", "file") + + // Write + expected := []byte("hello") + require.NoError(WriteAtomic(path, expected)) + + // Read and verify + actual, err := ioutil.ReadFile(path) + require.NoError(err) + require.Equal(expected, actual) +} From 7bb13246a8be175c227258a440c8a659e0fe8cef Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 3 May 2018 22:13:18 -0700 Subject: [PATCH 240/627] agent: clarify why we Kill still --- agent/agent.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/agent/agent.go b/agent/agent.go index 2d431be92..e5c35c94e 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1294,7 +1294,8 @@ func (a *Agent) ShutdownAgent() error { // Stop the proxy manager // NOTE(mitchellh): we use Kill for now to kill the processes since - // 
there isn't a clean way to cleanup the managed proxies. This is coming + // the local state isn't snapshotting meaning the proxy tokens are + // regenerated each time forcing the processes to restart anyways. if err := a.proxyManager.Kill(); err != nil { a.logger.Printf("[WARN] agent: error shutting down proxy manager: %s", err) } From a89238a9d360639dedf11d5ae0d9752b24307aec Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 4 May 2018 08:09:44 -0700 Subject: [PATCH 241/627] agent/proxy: address PR feedback --- agent/proxy/daemon.go | 2 +- agent/proxy/process_unix.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index e9bf318a2..7a15b12ab 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -284,6 +284,7 @@ func (p *Daemon) Stop() error { // used only for tests. func (p *Daemon) stopKeepAlive() error { p.lock.Lock() + defer p.lock.Unlock() // If we're already stopped or never started, then no problem. if p.stopped || p.process == nil { @@ -295,7 +296,6 @@ func (p *Daemon) stopKeepAlive() error { // Note that we've stopped p.stopped = true close(p.stopCh) - p.lock.Unlock() return nil } diff --git a/agent/proxy/process_unix.go b/agent/proxy/process_unix.go index deb45d080..9bca07c2b 100644 --- a/agent/proxy/process_unix.go +++ b/agent/proxy/process_unix.go @@ -22,9 +22,9 @@ func findProcess(pid int) (*os.Process, error) { // a 0 signal. This will do nothing to the process but will still // return errors if the process is gone. 
err = p.Signal(syscall.Signal(0)) - if err == nil || err == syscall.EPERM { + if err == nil { return p, nil } - return nil, fmt.Errorf("process is dead") + return nil, fmt.Errorf("process %d is dead or running as another user", pid) } From 1dfb4762f59e7a574f99620d6d5030d45a080d6f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 4 May 2018 09:14:40 -0700 Subject: [PATCH 242/627] agent: increase timer for blocking cache endpoints --- agent/agent.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index e5c35c94e..9c74b3760 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2723,7 +2723,7 @@ func (a *Agent) registerCache() { }, &cache.RegisterOptions{ // Maintain a blocking query, retry dropped connections quickly Refresh: true, - RefreshTimer: 0, + RefreshTimer: 3 * time.Second, RefreshTimeout: 10 * time.Minute, }) @@ -2733,7 +2733,7 @@ func (a *Agent) registerCache() { }, &cache.RegisterOptions{ // Maintain a blocking query, retry dropped connections quickly Refresh: true, - RefreshTimer: 0, + RefreshTimer: 3 * time.Second, RefreshTimeout: 10 * time.Minute, }) @@ -2742,7 +2742,7 @@ func (a *Agent) registerCache() { }, &cache.RegisterOptions{ // Maintain a blocking query, retry dropped connections quickly Refresh: true, - RefreshTimer: 0, + RefreshTimer: 3 * time.Second, RefreshTimeout: 10 * time.Minute, }) } From 257d31e319d79445697b8fb3525b1bd42fa553be Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 4 May 2018 09:44:59 -0700 Subject: [PATCH 243/627] agent/proxy: delete pid file on Stop --- agent/proxy/daemon.go | 14 ++++++++++++++ agent/proxy/daemon_test.go | 7 +++++++ 2 files changed, 21 insertions(+) diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index 7a15b12ab..c0ae7fdee 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -263,6 +263,20 @@ func (p *Daemon) Stop() error { gracefulWait = 5 * time.Second } + // Defer removing the pid file. 
Even under error conditions we + // delete the pid file since Stop means that the manager is no + // longer managing this proxy and therefore nothing else will ever + // clean it up. + if p.PidPath != "" { + defer func() { + if err := os.Remove(p.PidPath); err != nil && !os.IsNotExist(err) { + p.Logger.Printf( + "[DEBUG] agent/proxy: error removing pid file %q: %s", + p.PidPath, err) + } + }() + } + // First, try a graceful stop err := process.Signal(os.Interrupt) if err == nil { diff --git a/agent/proxy/daemon_test.go b/agent/proxy/daemon_test.go index a96e90699..17e821a44 100644 --- a/agent/proxy/daemon_test.go +++ b/agent/proxy/daemon_test.go @@ -177,6 +177,13 @@ func TestDaemonStart_pidFile(t *testing.T) { pidRaw, err := ioutil.ReadFile(pidPath) require.NoError(err) require.NotEmpty(pidRaw) + + // Stop + require.NoError(d.Stop()) + + // Pid file should be gone + _, err = os.Stat(pidPath) + require.True(os.IsNotExist(err)) } // Verify the pid file changes on restart From 954d286d73d7c03b7f66f574a5714a0dfd4511cc Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Fri, 4 May 2018 22:31:06 +0100 Subject: [PATCH 244/627] Make CSR work with jank domain --- agent/cache-types/connect_ca_leaf.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/cache-types/connect_ca_leaf.go b/agent/cache-types/connect_ca_leaf.go index 2c1cd156a..1058ec26a 100644 --- a/agent/cache-types/connect_ca_leaf.go +++ b/agent/cache-types/connect_ca_leaf.go @@ -102,7 +102,7 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache // needs a correct host ID, and we probably don't want to use TestCSR // and want a non-test-specific way to create a CSR. 
csr, pk := connect.TestCSR(&testing.RuntimeT{}, &connect.SpiffeIDService{ - Host: "1234.consul", + Host: "11111111-2222-3333-4444-555555555555.consul", Namespace: "default", Datacenter: reqReal.Datacenter, Service: reqReal.Service, From 498c63a6f117865f7f2be32e95eb440ea684391d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 4 May 2018 13:33:05 -0700 Subject: [PATCH 245/627] agent/config: default connect enabled in dev mode This enables `consul agent -dev` to begin using Connect features with the built-in CA. I think this is expected behavior since you can imagine that new users would want to try. There is no real downside since we're just using the built-in CA. --- agent/config/default.go | 4 ++++ agent/config/runtime_test.go | 1 + 2 files changed, 5 insertions(+) diff --git a/agent/config/default.go b/agent/config/default.go index b266a83b9..9d96f5a5c 100644 --- a/agent/config/default.go +++ b/agent/config/default.go @@ -108,6 +108,10 @@ func DevSource() Source { ui = true log_level = "DEBUG" server = true + + connect = { + enabled = true + } performance = { raft_multiplier = 1 } diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 3268c6c70..82787482f 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -263,6 +263,7 @@ func TestConfigFlagsAndEdgecases(t *testing.T) { rt.AdvertiseAddrLAN = ipAddr("127.0.0.1") rt.AdvertiseAddrWAN = ipAddr("127.0.0.1") rt.BindAddr = ipAddr("127.0.0.1") + rt.ConnectEnabled = true rt.DevMode = true rt.DisableAnonymousSignature = true rt.DisableKeyringFile = true From 662f38c62521616a78e973441a574580f913f491 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 4 May 2018 14:10:03 -0700 Subject: [PATCH 246/627] agent/structs: validate service definitions, port required for proxy --- agent/config/builder.go | 7 +++ agent/structs/service_definition.go | 31 ++++++++++++ agent/structs/service_definition_test.go | 54 +++++++++++++++++++++ 
agent/structs/testing_catalog.go | 8 +++ agent/structs/testing_service_definition.go | 7 +++ 5 files changed, 107 insertions(+) diff --git a/agent/config/builder.go b/agent/config/builder.go index cd851aaf3..88f65ec76 100644 --- a/agent/config/builder.go +++ b/agent/config/builder.go @@ -912,6 +912,13 @@ func (b *Builder) Validate(rt RuntimeConfig) error { return b.err } + // Check for errors in the service definitions + for _, s := range rt.Services { + if err := s.Validate(); err != nil { + return fmt.Errorf("service %q: %s", s.Name, err) + } + } + // ---------------------------------------------------------------- // warnings // diff --git a/agent/structs/service_definition.go b/agent/structs/service_definition.go index be69a7b57..506015649 100644 --- a/agent/structs/service_definition.go +++ b/agent/structs/service_definition.go @@ -1,5 +1,11 @@ package structs +import ( + "fmt" + + "github.com/hashicorp/go-multierror" +) + // ServiceDefinition is used to JSON decode the Service definitions. For // documentation on specific fields see NodeService which is better documented. type ServiceDefinition struct { @@ -68,6 +74,31 @@ func (s *ServiceDefinition) ConnectManagedProxy() (*ConnectManagedProxy, error) return p, nil } +// Validate validates the service definition. This also calls the underlying +// Validate method on the NodeService. +// +// NOTE(mitchellh): This currently only validates fields related to Connect +// and is incomplete with regards to other fields. 
+func (s *ServiceDefinition) Validate() error { + var result error + + if s.Kind == ServiceKindTypical { + if s.Connect != nil && s.Connect.Proxy != nil { + if s.Port == 0 { + result = multierror.Append(result, fmt.Errorf( + "Services with a Connect managed proxy must have a port set")) + } + } + } + + // Validate the NodeService which covers a lot + if err := s.NodeService().Validate(); err != nil { + result = multierror.Append(result, err) + } + + return result +} + func (s *ServiceDefinition) CheckTypes() (checks CheckTypes, err error) { if !s.Check.Empty() { err := s.Check.Validate() diff --git a/agent/structs/service_definition_test.go b/agent/structs/service_definition_test.go index d3bab4a08..c73cff217 100644 --- a/agent/structs/service_definition_test.go +++ b/agent/structs/service_definition_test.go @@ -2,10 +2,12 @@ package structs import ( "fmt" + "strings" "testing" "time" "github.com/pascaldekloe/goe/verify" + "github.com/stretchr/testify/require" ) func TestAgentStructs_CheckTypes(t *testing.T) { @@ -54,3 +56,55 @@ func TestAgentStructs_CheckTypes(t *testing.T) { } } } + +func TestServiceDefinitionValidate(t *testing.T) { + cases := []struct { + Name string + Modify func(*ServiceDefinition) + Err string + }{ + { + "valid", + func(x *ServiceDefinition) {}, + "", + }, + + { + "managed proxy with a port set", + func(x *ServiceDefinition) { + x.Port = 8080 + x.Connect = &ServiceDefinitionConnect{ + Proxy: &ServiceDefinitionConnectProxy{}, + } + }, + "", + }, + + { + "managed proxy with no port set", + func(x *ServiceDefinition) { + x.Connect = &ServiceDefinitionConnect{ + Proxy: &ServiceDefinitionConnectProxy{}, + } + }, + "must have a port", + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + require := require.New(t) + service := TestServiceDefinition(t) + tc.Modify(service) + + err := service.Validate() + t.Logf("error: %s", err) + require.Equal(err != nil, tc.Err != "") + if err == nil { + return + } + + 
require.Contains(strings.ToLower(err.Error()), strings.ToLower(tc.Err)) + }) + } +} diff --git a/agent/structs/testing_catalog.go b/agent/structs/testing_catalog.go index 1394b7081..a274ced77 100644 --- a/agent/structs/testing_catalog.go +++ b/agent/structs/testing_catalog.go @@ -29,6 +29,14 @@ func TestRegisterRequestProxy(t testing.T) *RegisterRequest { } } +// TestNodeService returns a *NodeService representing a valid regular service. +func TestNodeService(t testing.T) *NodeService { + return &NodeService{ + Kind: ServiceKindTypical, + Service: "web", + } +} + // TestNodeServiceProxy returns a *NodeService representing a valid // Connect proxy. func TestNodeServiceProxy(t testing.T) *NodeService { diff --git a/agent/structs/testing_service_definition.go b/agent/structs/testing_service_definition.go index b14e1e2ff..370458371 100644 --- a/agent/structs/testing_service_definition.go +++ b/agent/structs/testing_service_definition.go @@ -4,6 +4,13 @@ import ( "github.com/mitchellh/go-testing-interface" ) +// TestServiceDefinition returns a ServiceDefinition for a typical service. +func TestServiceDefinition(t testing.T) *ServiceDefinition { + return &ServiceDefinition{ + Name: "db", + } +} + // TestServiceDefinitionProxy returns a ServiceDefinition for a proxy. 
func TestServiceDefinitionProxy(t testing.T) *ServiceDefinition { return &ServiceDefinition{ From 80b6d0a6cf806e66a8b71f5e786b78cdc6f40e72 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 4 May 2018 16:13:40 -0700 Subject: [PATCH 247/627] Add missing vendor dep github.com/stretchr/objx --- vendor/github.com/stretchr/objx/Gopkg.lock | 30 + vendor/github.com/stretchr/objx/Gopkg.toml | 8 + vendor/github.com/stretchr/objx/LICENSE | 22 + vendor/github.com/stretchr/objx/README.md | 80 + vendor/github.com/stretchr/objx/Taskfile.yml | 31 + vendor/github.com/stretchr/objx/accessors.go | 119 + .../github.com/stretchr/objx/conversions.go | 109 + vendor/github.com/stretchr/objx/doc.go | 66 + vendor/github.com/stretchr/objx/map.go | 228 ++ vendor/github.com/stretchr/objx/mutations.go | 77 + vendor/github.com/stretchr/objx/security.go | 12 + vendor/github.com/stretchr/objx/tests.go | 17 + .../stretchr/objx/type_specific_codegen.go | 2516 +++++++++++++++++ vendor/github.com/stretchr/objx/value.go | 53 + vendor/vendor.json | 1 + 15 files changed, 3369 insertions(+) create mode 100644 vendor/github.com/stretchr/objx/Gopkg.lock create mode 100644 vendor/github.com/stretchr/objx/Gopkg.toml create mode 100644 vendor/github.com/stretchr/objx/LICENSE create mode 100644 vendor/github.com/stretchr/objx/README.md create mode 100644 vendor/github.com/stretchr/objx/Taskfile.yml create mode 100644 vendor/github.com/stretchr/objx/accessors.go create mode 100644 vendor/github.com/stretchr/objx/conversions.go create mode 100644 vendor/github.com/stretchr/objx/doc.go create mode 100644 vendor/github.com/stretchr/objx/map.go create mode 100644 vendor/github.com/stretchr/objx/mutations.go create mode 100644 vendor/github.com/stretchr/objx/security.go create mode 100644 vendor/github.com/stretchr/objx/tests.go create mode 100644 vendor/github.com/stretchr/objx/type_specific_codegen.go create mode 100644 vendor/github.com/stretchr/objx/value.go diff --git 
a/vendor/github.com/stretchr/objx/Gopkg.lock b/vendor/github.com/stretchr/objx/Gopkg.lock new file mode 100644 index 000000000..3e4e06df8 --- /dev/null +++ b/vendor/github.com/stretchr/objx/Gopkg.lock @@ -0,0 +1,30 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require" + ] + revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" + version = "v1.2.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "2d160a7dea4ffd13c6c31dab40373822f9d78c73beba016d662bef8f7a998876" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/stretchr/objx/Gopkg.toml b/vendor/github.com/stretchr/objx/Gopkg.toml new file mode 100644 index 000000000..d70f1570b --- /dev/null +++ b/vendor/github.com/stretchr/objx/Gopkg.toml @@ -0,0 +1,8 @@ +[prune] + unused-packages = true + non-go = true + go-tests = true + +[[constraint]] + name = "github.com/stretchr/testify" + version = "~1.2.0" diff --git a/vendor/github.com/stretchr/objx/LICENSE b/vendor/github.com/stretchr/objx/LICENSE new file mode 100644 index 000000000..44d4d9d5a --- /dev/null +++ b/vendor/github.com/stretchr/objx/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Stretchr, Inc. 
+Copyright (c) 2017-2018 objx contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md new file mode 100644 index 000000000..8fc8fa277 --- /dev/null +++ b/vendor/github.com/stretchr/objx/README.md @@ -0,0 +1,80 @@ +# Objx +[![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx) +[![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx) +[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) +[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) +[![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) +[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) + +Objx - Go package for dealing with maps, slices, JSON and other data. + +Get started: + +- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) +- Check out the API Documentation http://godoc.org/github.com/stretchr/objx + +## Overview +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. + +### Pattern +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. 
+ +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. + +### Reading data +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +### Ranging +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } + +## Installation +To install Objx, use go get: + + go get github.com/stretchr/objx + +### Staying up to date +To update Objx to the latest version, run: + + go get -u github.com/stretchr/objx + +### Supported go versions +We support the lastest four major Go versions, which are 1.6, 1.7, 1.8 and 1.9 at the moment. + +## Contributing +Please feel free to submit issues, fork the repository and send pull requests! 
diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml new file mode 100644 index 000000000..7d0199450 --- /dev/null +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -0,0 +1,31 @@ +default: + deps: [test] + +dl-deps: + desc: Downloads cli dependencies + cmds: + - go get -u github.com/golang/lint/golint + +update-deps: + desc: Updates dependencies + cmds: + - dep ensure + - dep ensure -update + +lint: + desc: Runs golint + cmds: + - go fmt $(go list ./... | grep -v /vendor/) + - go vet $(go list ./... | grep -v /vendor/) + - golint $(ls *.go | grep -v "doc.go") + silent: true + +test: + desc: Runs go tests + cmds: + - go test -race . + +test-coverage: + desc: Runs go tests and calucates test coverage + cmds: + - go test -coverprofile=c.out . diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go new file mode 100644 index 000000000..676316281 --- /dev/null +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -0,0 +1,119 @@ +package objx + +import ( + "regexp" + "strconv" + "strings" +) + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // arrayAccesRegexString is the regex used to extract the array number + // from the access path + arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` +) + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. +// +// Get can only operate directly on map[string]interface{} and []interface. 
+// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. +// +// Set can only operate directly on map[string]interface{} and []interface +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true) + return m +} + +// getIndex returns the index, which is hold in s by two braches. +// It also returns s withour the index part, e.g. name[1] will return (1, name). +// If no index is found, -1 is returned +func getIndex(s string) (int, string) { + arrayMatches := arrayAccesRegex.FindStringSubmatch(s) + if len(arrayMatches) > 0 { + // Get the key into the map + selector := arrayMatches[1] + // Get the index into the array at the key + // We know this cannt fail because arrayMatches[2] is an int for sure + index, _ := strconv.Atoi(arrayMatches[2]) + return index, selector + } + return -1, s +} + +// access accesses the object using the selector and performs the +// appropriate action. 
+func access(current interface{}, selector string, value interface{}, isSet bool) interface{} { + selSegs := strings.SplitN(selector, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 + + if strings.Contains(thisSel, "[") { + index, thisSel = getIndex(thisSel) + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil + } + + _, ok := curMSI[thisSel].(map[string]interface{}) + if (curMSI[thisSel] == nil || !ok) && index == -1 && isSet { + curMSI[thisSel] = map[string]interface{}{} + } + + current = curMSI[thisSel] + default: + current = nil + } + // do we need to access the item of an array? + if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] + } else { + current = nil + } + } + } + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet) + } + return current +} diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go new file mode 100644 index 000000000..ca1c2dec6 --- /dev/null +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -0,0 +1,109 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// SignatureSeparator is the character that is used to +// separate the Base64 string from the security signature. 
+const SignatureSeparator = "_" + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + result, err := json.Marshal(m) + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + return string(result), err +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + _, _ = encoder.Write([]byte(jsonData)) + _ = encoder.Close() + + return buf.String(), nil +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. 
+func (m Map) SignedBase64(key string) (string, error) { + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + return base64 + SignatureSeparator + sig, nil +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + vals := make(url.Values) + for k, v := range m { + //TODO: can this be done without sprintf? + vals.Set(k, fmt.Sprintf("%v", v)) + } + return vals +} + +// URLQuery gets an encoded URL query representing the given +// Obj. This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go new file mode 100644 index 000000000..6d6af1a83 --- /dev/null +++ b/vendor/github.com/stretchr/objx/doc.go @@ -0,0 +1,66 @@ +/* +Objx - Go package for dealing with maps, slices, JSON and other data. + +Overview + +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +a powerful `Get` method (among others) that allows you to easily and quickly get +access to data within the map, without having to worry too much about type assertions, +missing data, default values etc. + +Pattern + +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. 
+Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +the rest will be optimistic and try to figure things out without panicking. + +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, +or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, +manipulating and selecting that data. You can find out more by exploring the index below. + +Reading data + +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +Ranging + +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. +For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... 
+ } +*/ +package objx diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go new file mode 100644 index 000000000..95149c06a --- /dev/null +++ b/vendor/github.com/stretchr/objx/map.go @@ -0,0 +1,228 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. + MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. +// If the data argument is not a map[string]interface, New attempts to call the +// MSI() method on the MSIConvertable interface to create one. +func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// +// Returns nil if any key argument is non-string or if there are an odd number of arguments. 
+// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates an Map equivalent to +// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} +func MSI(keyAndValuePairs ...interface{}) Map { + newMap := Map{} + keyAndValuePairsLen := len(keyAndValuePairs) + if keyAndValuePairsLen%2 != 0 { + return nil + } + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + return nil + } + newMap[keyString] = value + } + return newMap +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + return o +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Returns an error if the JSON is invalid. 
+func FromJSON(jsonString string) (Map, error) { + var m Map + err := json.Unmarshal([]byte(jsonString), &m) + if err != nil { + return Nil, err + } + m.tryConvertFloat64() + return m, nil +} + +func (m Map) tryConvertFloat64() { + for k, v := range m { + switch v.(type) { + case float64: + f := v.(float64) + if float64(int(f)) == f { + m[k] = int(f) + } + case map[string]interface{}: + t := New(v) + t.tryConvertFloat64() + m[k] = t + case []interface{}: + m[k] = tryConvertFloat64InSlice(v.([]interface{})) + } + } +} + +func tryConvertFloat64InSlice(s []interface{}) []interface{} { + for k, v := range s { + switch v.(type) { + case float64: + f := v.(float64) + if float64(int(f)) == f { + s[k] = int(f) + } + case map[string]interface{}: + t := New(v) + t.tryConvertFloat64() + s[k] = t + case []interface{}: + s[k] = tryConvertFloat64InSlice(v.([]interface{})) + } + } + return s +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + result, err := FromBase64(base64String) + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string. 
+// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match") + } + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + result, err := FromSignedBase64(base64String, key) + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + vals, err := url.ParseQuery(query) + if err != nil { + return nil, err + } + m := Map{} + for k, vals := range vals { + m[k] = vals[0] + } + return m, nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + o, err := FromURLQuery(query) + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + return o +} diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go new file mode 100644 index 000000000..c3400a3f7 --- /dev/null +++ b/vendor/github.com/stretchr/objx/mutations.go @@ -0,0 +1,77 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. 
+func (m Map) Exclude(exclude []string) Map { + excluded := make(Map) + for k, v := range m { + if !contains(exclude, k) { + excluded[k] = v + } + } + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := Map{} + for k, v := range m { + copied[k] = v + } + return copied +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// MergeHere blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + for k, v := range merge { + m[k] = v + } + return m +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := Map{} + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return newMap +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. 
+// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + if newKey, ok := mapping[key]; ok { + return newKey, value + } + return key, value + }) +} + +// Checks if a string slice contains a string +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go new file mode 100644 index 000000000..692be8e2a --- /dev/null +++ b/vendor/github.com/stretchr/objx/security.go @@ -0,0 +1,12 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security key +func HashWithKey(data, key string) string { + d := sha1.Sum([]byte(data + ":" + key)) + return hex.EncodeToString(d[:]) +} diff --git a/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/objx/tests.go new file mode 100644 index 000000000..d9e0b479a --- /dev/null +++ b/vendor/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. 
+func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 000000000..de4240955 --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2516 @@ +package objx + +/* + Inter (interface{} and []interface{}) +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. +func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. +// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. +func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + var selected []interface{} + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]interface{}. +func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + groups := make(map[string][]interface{}) + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + MSI (map[string]interface{} and []map[string]interface{}) +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. 
+func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + var selected []map[string]interface{} + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + groups := make(map[string][]map[string]interface{}) + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. 
+func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([]Map); ok { + return s + } + s, ok := v.data.([]interface{}) + if !ok { + if len(optionalDefault) == 1 { + return optionalDefault[0] + } else { + return nil + } + } + + result := make([]Map, len(s)) + for i := range s { + switch s[i].(type) { + case Map: + result[i] = s[i].(Map) + default: + return nil + } + } + return result +} + +// MustObjxMapSlice gets the value as a [](Map). 
+// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. +func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + var selected [](Map) + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + groups := make(map[string][](Map)) + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. 
+func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Bool (bool and []bool) +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. +func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. 
+func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. +func (v *Value) EachBool(callback func(int, bool) bool) *Value { + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + var selected []bool + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + groups := make(map[string][]bool) + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. The data in the returned result will be a +// []bool containing the replaced items. +func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. 
The data returned will be a +// []interface{}. +func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Str (string and []string) +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. +func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachStr(callback func(int, string) bool) *Value { + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + var selected []string + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. +func (v *Value) GroupStr(grouper func(int, string) string) *Value { + groups := make(map[string][]string) + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. +func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int (int and []int) +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt uses the specified decider function to select items +// from the []int. 
The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + var selected []int + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. +func (v *Value) GroupInt(grouper func(int, int) string) *Value { + groups := make(map[string][]int) + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. +func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + var selected []int8 + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + groups := make(map[string][]int8) + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. +func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. 
+func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + var selected []int16 + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. 
+func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + groups := make(map[string][]int16) + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. +func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. 
+func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + var selected []int32 + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. 
+func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + groups := make(map[string][]int32) + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. +func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. 
+func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. +func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + var selected []int64 + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. 
+func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + groups := make(map[string][]int64) + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. +func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint (uint and []uint) +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. 
+func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. +func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint(callback func(int, uint) bool) *Value { + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint uses the specified decider function to select items +// from the []uint. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + var selected []uint + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. 
+func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + groups := make(map[string][]uint) + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. +func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. 
+func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. +func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + var selected []uint8 + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. 
+func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + groups := make(map[string][]uint8) + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. +func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. 
+func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. +func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + var selected []uint16 + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. 
+func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + groups := make(map[string][]uint16) + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. +func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. 
+func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. +func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + var selected []uint32 + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. 
+func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + groups := make(map[string][]uint32) + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. 
+func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + var selected []uint64 + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint64. 
+func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + groups := make(map[string][]uint64) + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. +func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. 
+func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. +func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + var selected []uintptr + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. 
+func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + groups := make(map[string][]uintptr) + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. +func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. 
+func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + var selected []float32 + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. 
+func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + groups := make(map[string][]float32) + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. 
+func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. +func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + var selected []float64 + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. 
+func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + groups := make(map[string][]float64) + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. +func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. 
+func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. +func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + var selected []complex64 + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. 
+func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + groups := make(map[string][]complex64) + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. 
+func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. +func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. +func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + var selected []complex128 + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + groups := make(map[string][]complex128) + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go new file mode 100644 index 000000000..e4b4a1433 --- /dev/null +++ b/vendor/github.com/stretchr/objx/value.go @@ -0,0 +1,53 @@ +package objx + +import ( + "fmt" + "strconv" +) + +// Value provides methods for extracting interface{} data in various +// types. 
+type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} + +// String returns the value always as a string +func (v *Value) String() string { + switch { + case v.IsStr(): + return v.Str() + case v.IsBool(): + return strconv.FormatBool(v.Bool()) + case v.IsFloat32(): + return strconv.FormatFloat(float64(v.Float32()), 'f', -1, 32) + case v.IsFloat64(): + return strconv.FormatFloat(v.Float64(), 'f', -1, 64) + case v.IsInt(): + return strconv.FormatInt(int64(v.Int()), 10) + case v.IsInt8(): + return strconv.FormatInt(int64(v.Int8()), 10) + case v.IsInt16(): + return strconv.FormatInt(int64(v.Int16()), 10) + case v.IsInt32(): + return strconv.FormatInt(int64(v.Int32()), 10) + case v.IsInt64(): + return strconv.FormatInt(v.Int64(), 10) + case v.IsUint(): + return strconv.FormatUint(uint64(v.Uint()), 10) + case v.IsUint8(): + return strconv.FormatUint(uint64(v.Uint8()), 10) + case v.IsUint16(): + return strconv.FormatUint(uint64(v.Uint16()), 10) + case v.IsUint32(): + return strconv.FormatUint(uint64(v.Uint32()), 10) + case v.IsUint64(): + return strconv.FormatUint(v.Uint64(), 10) + } + return fmt.Sprintf("%#v", v.Data()) +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 897d91b55..d1cf4206c 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -115,6 +115,7 @@ {"path":"github.com/shirou/gopsutil/net","checksumSHA1":"OSvOZs5uK5iolCOeS46nB2InVy8=","revision":"32b6636de04b303274daac3ca2b10d3b0e4afc35","revisionTime":"2017-02-04T05:36:48Z"}, {"path":"github.com/shirou/gopsutil/process","checksumSHA1":"JX0bRK/BdKVfbm4XOxMducVdY58=","revision":"32b6636de04b303274daac3ca2b10d3b0e4afc35","revisionTime":"2017-02-04T05:36:48Z"}, {"path":"github.com/shirou/w32","checksumSHA1":"Nve7SpDmjsv6+rhkXAkfg/UQx94=","revision":"bb4de0191aa41b5507caa14b0650cdbddcd9280b","revisionTime":"2016-09-30T03:27:40Z"}, + 
{"path":"github.com/stretchr/objx","checksumSHA1":"n+vQ7Bmp+ODWGmCp8cI5MFsaZVA=","revision":"a5cfa15c000af5f09784e5355969ba7eb66ef0de","revisionTime":"2018-04-26T10:50:06Z"}, {"path":"github.com/stretchr/testify/assert","checksumSHA1":"6LwXZI7kXm1C0h4Ui0Y52p9uQhk=","revision":"c679ae2cc0cb27ec3293fea7e254e47386f05d69","revisionTime":"2018-03-14T08:05:35Z"}, {"path":"github.com/stretchr/testify/mock","checksumSHA1":"Qloi2PTvZv+D9FDHXM/banCoaFY=","revision":"c679ae2cc0cb27ec3293fea7e254e47386f05d69","revisionTime":"2018-03-14T08:05:35Z"}, {"path":"github.com/stretchr/testify/require","checksumSHA1":"KqYmXUcuGwsvBL6XVsQnXsFb3LI=","revision":"c679ae2cc0cb27ec3293fea7e254e47386f05d69","revisionTime":"2018-03-14T08:05:35Z"}, From f69c8b85efb01261edebee01e033befd14ff37d9 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 5 May 2018 11:10:24 -0700 Subject: [PATCH 248/627] agent/config: add managed proxy upstreams config to skip agent/config will turn [{}] into {} (single element maps into a single map) to work around HCL issues. These are resolved in HCL2 which I'm sure Consul will switch to eventually. This breaks the connect proxy configuration in service definition FILES since we call this patch function. For now, let's just special-case skip this. In the future we maybe Consul will adopt HCL2 and fix it, or we can do something else if we want. This works and is tested. 
--- agent/config/config.go | 1 + agent/config/runtime_test.go | 95 +++++++++++++++++++++++++++++++++++- 2 files changed, 95 insertions(+), 1 deletion(-) diff --git a/agent/config/config.go b/agent/config/config.go index 6dc652aed..161bbb9bb 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -84,6 +84,7 @@ func Parse(data string, format string) (c Config, err error) { "services", "services.checks", "watches", + "service.connect.proxy.config.upstreams", }) // There is a difference of representation of some fields depending on diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 82787482f..2e72b4639 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -31,6 +31,7 @@ type configTest struct { pre, post func() json, jsontail []string hcl, hcltail []string + skipformat bool privatev4 func() ([]*net.IPAddr, error) publicv6 func() ([]*net.IPAddr, error) patch func(rt *RuntimeConfig) @@ -2069,6 +2070,92 @@ func TestConfigFlagsAndEdgecases(t *testing.T) { rt.DataDir = dataDir }, }, + { + desc: "HCL service managed proxy 'upstreams'", + args: []string{ + `-data-dir=` + dataDir, + }, + hcl: []string{ + `service { + name = "web" + port = 8080 + connect { + proxy { + config { + upstreams { + local_bind_port = 1234 + } + } + } + } + }`, + }, + skipformat: true, // skipping JSON cause we get slightly diff types (okay) + patch: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.Services = []*structs.ServiceDefinition{ + &structs.ServiceDefinition{ + Name: "web", + Port: 8080, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{ + Config: map[string]interface{}{ + "upstreams": []map[string]interface{}{ + map[string]interface{}{ + "local_bind_port": 1234, + }, + }, + }, + }, + }, + }, + } + }, + }, + { + desc: "JSON service managed proxy 'upstreams'", + args: []string{ + `-data-dir=` + dataDir, + }, + json: []string{ + `{ + "service": { + "name": "web", + "port": 8080, + 
"connect": { + "proxy": { + "config": { + "upstreams": [{ + "local_bind_port": 1234 + }] + } + } + } + } + }`, + }, + skipformat: true, // skipping HCL cause we get slightly diff types (okay) + patch: func(rt *RuntimeConfig) { + rt.DataDir = dataDir + rt.Services = []*structs.ServiceDefinition{ + &structs.ServiceDefinition{ + Name: "web", + Port: 8080, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{ + Config: map[string]interface{}{ + "upstreams": []interface{}{ + map[string]interface{}{ + "local_bind_port": float64(1234), + }, + }, + }, + }, + }, + }, + } + }, + }, } testConfig(t, tests, dataDir) @@ -2090,7 +2177,7 @@ func testConfig(t *testing.T, tests []configTest, dataDir string) { // json and hcl sources need to be in sync // to make sure we're generating the same config - if len(tt.json) != len(tt.hcl) { + if len(tt.json) != len(tt.hcl) && !tt.skipformat { t.Fatal(tt.desc, ": JSON and HCL test case out of sync") } @@ -2100,6 +2187,12 @@ func testConfig(t *testing.T, tests []configTest, dataDir string) { srcs, tails = tt.hcl, tt.hcltail } + // If we're skipping a format and the current format is empty, + // then skip it! + if tt.skipformat && len(srcs) == 0 { + continue + } + // build the description var desc []string if !flagsOnly { From 3a7aaa63bca3588cbcc1c3f3e8c26a23dc6e0118 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 6 May 2018 08:41:25 -0700 Subject: [PATCH 249/627] agent/proxy: pass proxy ID as an env var --- agent/proxy/daemon.go | 11 ++++++++++- agent/proxy/daemon_test.go | 4 +++- agent/proxy/manager.go | 1 + agent/proxy/proxy.go | 17 ++++++++++++++--- agent/proxy/proxy_test.go | 5 ++++- 5 files changed, 32 insertions(+), 6 deletions(-) diff --git a/agent/proxy/daemon.go b/agent/proxy/daemon.go index c0ae7fdee..013fbdc28 100644 --- a/agent/proxy/daemon.go +++ b/agent/proxy/daemon.go @@ -33,6 +33,10 @@ type Daemon struct { // be a Cmd that isn't yet started. 
Command *exec.Cmd + // ProxyId is the ID of the proxy service. This is required for API + // requests (along with the token) and is passed via env var. + ProxyId string + // ProxyToken is the special local-only ACL token that allows a proxy // to communicate to the Connect-specific endpoints. ProxyToken string @@ -204,7 +208,9 @@ func (p *Daemon) start() (*os.Process, error) { // reference. We allocate an exactly sized slice. cmd.Env = make([]string, len(p.Command.Env), len(p.Command.Env)+1) copy(cmd.Env, p.Command.Env) - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvProxyToken, p.ProxyToken)) + cmd.Env = append(cmd.Env, + fmt.Sprintf("%s=%s", EnvProxyId, p.ProxyId), + fmt.Sprintf("%s=%s", EnvProxyToken, p.ProxyToken)) // Args must always contain a 0 entry which is usually the executed binary. // To be safe and a bit more robust we default this, but only to prevent @@ -387,6 +393,9 @@ func (p *Daemon) UnmarshalSnapshot(m map[string]interface{}) error { } // daemonSnapshot is the structure of the marshalled data for snapshotting. +// +// Note we don't have to store the ProxyId because this is stored directly +// within the manager snapshot and is restored automatically. type daemonSnapshot struct { // Pid of the process. This is the only value actually required to // regain mangement control. The remainder values are for Equal. diff --git a/agent/proxy/daemon_test.go b/agent/proxy/daemon_test.go index 17e821a44..f08716276 100644 --- a/agent/proxy/daemon_test.go +++ b/agent/proxy/daemon_test.go @@ -30,10 +30,12 @@ func TestDaemonStartStop(t *testing.T) { d := &Daemon{ Command: helperProcess("start-stop", path), + ProxyId: "tubes", ProxyToken: uuid, Logger: testLogger, } require.NoError(d.Start()) + defer d.Stop() // Wait for the file to exist retry.Run(t, func(r *retry.R) { @@ -49,7 +51,7 @@ func TestDaemonStartStop(t *testing.T) { // that we properly passed the token as an env var. 
data, err := ioutil.ReadFile(path) require.NoError(err) - require.Equal(uuid, string(data)) + require.Equal("tubes:"+uuid, string(data)) // Stop the process require.NoError(d.Stop()) diff --git a/agent/proxy/manager.go b/agent/proxy/manager.go index 5bb871c65..09eb1f601 100644 --- a/agent/proxy/manager.go +++ b/agent/proxy/manager.go @@ -411,6 +411,7 @@ func (m *Manager) newProxy(mp *local.ManagedProxy) (Proxy, error) { // Build the daemon structure proxy.Command = &cmd + proxy.ProxyId = id proxy.ProxyToken = mp.ProxyToken return proxy, nil diff --git a/agent/proxy/proxy.go b/agent/proxy/proxy.go index 1bb88da8e..90ae158f4 100644 --- a/agent/proxy/proxy.go +++ b/agent/proxy/proxy.go @@ -11,9 +11,16 @@ import ( "github.com/hashicorp/consul/agent/structs" ) -// EnvProxyToken is the name of the environment variable that is passed -// to managed proxies containing the proxy token. -const EnvProxyToken = "CONNECT_PROXY_TOKEN" +const ( + // EnvProxyId is the name of the environment variable that is set for + // managed proxies containing the proxy service ID. This is required along + // with the token to make API requests related to the proxy. + EnvProxyId = "CONNECT_PROXY_ID" + + // EnvProxyToken is the name of the environment variable that is passed + // to managed proxies containing the proxy token. + EnvProxyToken = "CONNECT_PROXY_TOKEN" +) // Proxy is the interface implemented by all types of managed proxies. // @@ -51,6 +58,10 @@ type Proxy interface { // so that Consul can recover the proxy process after a restart. The // result should only contain primitive values and containers (lists/maps). // + // MarshalSnapshot does NOT need to store the following fields, since they + // are part of the manager snapshot and will be automatically restored + // for any proxies: proxy ID. + // // UnmarshalSnapshot is called to restore the receiving Proxy from its // marshalled state. 
If UnmarshalSnapshot returns an error, the snapshot // is ignored and the marshalled snapshot will be lost. The manager will diff --git a/agent/proxy/proxy_test.go b/agent/proxy/proxy_test.go index d0812fc07..b46b5d677 100644 --- a/agent/proxy/proxy_test.go +++ b/agent/proxy/proxy_test.go @@ -78,7 +78,10 @@ func TestHelperProcess(t *testing.T) { defer signal.Stop(ch) path := args[0] - data := []byte(os.Getenv(EnvProxyToken)) + var data []byte + data = append(data, []byte(os.Getenv(EnvProxyId))...) + data = append(data, ':') + data = append(data, []byte(os.Getenv(EnvProxyToken))...) if err := ioutil.WriteFile(path, data, 0644); err != nil { t.Fatalf("err: %s", err) From 9435d8088c9473153310cbf30d63ebade782055f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 6 May 2018 08:54:36 -0700 Subject: [PATCH 250/627] command/connect/proxy: set proxy ID from env var if set --- command/connect/proxy/proxy.go | 6 ++++++ connect/proxy/proxy.go | 1 - 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index 362e70459..a4558d21f 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -7,7 +7,9 @@ import ( "log" "net/http" _ "net/http/pprof" // Expose pprof if configured + "os" + proxyAgent "github.com/hashicorp/consul/agent/proxy" "github.com/hashicorp/consul/command/flags" proxyImpl "github.com/hashicorp/consul/connect/proxy" @@ -74,6 +76,10 @@ func (c *cmd) Run(args []string) int { return 1 } + // Load the proxy ID and token from env vars if they're set + if c.proxyID == "" { + c.proxyID = os.Getenv(proxyAgent.EnvProxyId) + } // Setup the log outputs logConfig := &logger.Config{ LogLevel: c.logLevel, diff --git a/connect/proxy/proxy.go b/connect/proxy/proxy.go index 717d45ae6..e3db982fe 100644 --- a/connect/proxy/proxy.go +++ b/connect/proxy/proxy.go @@ -68,7 +68,6 @@ func New(client *api.Client, proxyID string, logger *log.Logger) (*Proxy, error) // Serve the proxy 
instance until a fatal error occurs or proxy is closed. func (p *Proxy) Serve() error { - var cfg *Config // Watch for config changes (initial setup happens on first "change") From 4100c9567ff4ab7f7bc4a11b039569b03b6543da Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 6 May 2018 20:27:31 -0700 Subject: [PATCH 251/627] command/connect/proxy: set ACL token based on proxy token flag --- command/connect/proxy/proxy.go | 4 ++++ command/flags/http.go | 4 ++++ command/flags/http_test.go | 15 +++++++++++++++ 3 files changed, 23 insertions(+) create mode 100644 command/flags/http_test.go diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index a4558d21f..b797177c5 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -80,6 +80,10 @@ func (c *cmd) Run(args []string) int { if c.proxyID == "" { c.proxyID = os.Getenv(proxyAgent.EnvProxyId) } + if c.http.Token() == "" { + c.http.SetToken(os.Getenv(proxyAgent.EnvProxyToken)) + } + // Setup the log outputs logConfig := &logger.Config{ LogLevel: c.logLevel, diff --git a/command/flags/http.go b/command/flags/http.go index 591567a4f..7d02f6ab3 100644 --- a/command/flags/http.go +++ b/command/flags/http.go @@ -84,6 +84,10 @@ func (f *HTTPFlags) Token() string { return f.token.String() } +func (f *HTTPFlags) SetToken(v string) error { + return f.token.Set(v) +} + func (f *HTTPFlags) APIClient() (*api.Client, error) { c := api.DefaultConfig() diff --git a/command/flags/http_test.go b/command/flags/http_test.go new file mode 100644 index 000000000..867ce2a35 --- /dev/null +++ b/command/flags/http_test.go @@ -0,0 +1,15 @@ +package flags + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestHTTPFlagsSetToken(t *testing.T) { + var f HTTPFlags + require := require.New(t) + require.Empty(f.Token()) + require.NoError(f.SetToken("foo")) + require.Equal("foo", f.Token()) +} From 8f7b5f93cdf7b83b2579a4a74ccdc84c8604b595 Mon Sep 17 00:00:00 2001 From: 
Mitchell Hashimoto Date: Sun, 6 May 2018 21:02:44 -0700 Subject: [PATCH 252/627] agent: verify proxy token for ProxyConfig endpoint + tests --- agent/agent.go | 33 ++++++ agent/agent_endpoint.go | 9 ++ agent/agent_endpoint_test.go | 211 +++++++++++++++++++++++++++++++++++ 3 files changed, 253 insertions(+) diff --git a/agent/agent.go b/agent/agent.go index 9c74b3760..ff840d162 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2115,6 +2115,39 @@ func (a *Agent) RemoveProxy(proxyID string, persist bool) error { return nil } +// verifyProxyToken takes a proxy service ID and a token and verifies +// that the token is allowed to access proxy-related information (leaf +// cert, config, etc.). +// +// The given token may be a local-only proxy token or it may be an ACL +// token. We will attempt to verify the local proxy token first. +func (a *Agent) verifyProxyToken(proxyId, token string) error { + proxy := a.State.Proxy(proxyId) + if proxy == nil { + return fmt.Errorf("unknown proxy service ID: %q", proxyId) + } + + // Easy case is if the token just matches our local proxy token. + // If this happens we can return without any requests. + if token == proxy.ProxyToken { + return nil + } + + // Doesn't match, we have to do a full token resolution. The required + // permission for any proxy-related endpont is service:write, since + // to register a proxy you require that permission and sensitive data + // is usually present in the configuration. 
+ rule, err := a.resolveToken(token) + if err != nil { + return err + } + if rule != nil && !rule.ServiceWrite(proxy.Proxy.TargetServiceID, nil) { + return acl.ErrPermissionDenied + } + + return nil +} + func (a *Agent) cancelCheckMonitors(checkID types.CheckID) { // Stop any monitors delete(a.checkReapAfter, checkID) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 8f080ea7a..de1c0d48e 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -965,6 +965,10 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http return nil, nil } + // Parse the token + var token string + s.parseToken(req, &token) + // Parse hash specially since it's only this endpoint that uses it currently. // Eventually this should happen in parseWait and end up in QueryOptions but I // didn't want to make very general changes right away. @@ -980,6 +984,11 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http return "", nil, nil } + // Validate the ACL token + if err := s.agent.verifyProxyToken(id, token); err != nil { + return "", nil, err + } + // Lookup the target service as a convenience target := s.agent.State.Service(proxy.Proxy.TargetServiceID) if target == nil { diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index fa01eab89..66ebc59ef 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2517,6 +2517,217 @@ func TestAgentConnectProxyConfig_Blocking(t *testing.T) { } } +func TestAgentConnectProxyConfig_aclDefaultDeny(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()+` + connect { + enabled = true + } + `) + defer a.Shutdown() + + // Register a service with a managed proxy + { + reg := &structs.ServiceDefinition{ + ID: "test-id", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: 
&structs.ServiceDefinitionConnectProxy{}, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + req, _ := http.NewRequest("GET", "/v1/agent/connect/proxy/test-id-proxy", nil) + resp := httptest.NewRecorder() + _, err := a.srv.AgentConnectProxyConfig(resp, req) + require.True(acl.IsErrPermissionDenied(err)) + +} + +func TestAgentConnectProxyConfig_aclProxyToken(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()+` + connect { + enabled = true + } + `) + defer a.Shutdown() + + // Register a service with a managed proxy + { + reg := &structs.ServiceDefinition{ + ID: "test-id", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{}, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + // Get the proxy token from the agent directly, since there is no API + // to expose this. 
+ proxy := a.State.Proxy("test-id-proxy") + require.NotNil(proxy) + token := proxy.ProxyToken + require.NotEmpty(token) + + req, _ := http.NewRequest( + "GET", "/v1/agent/connect/proxy/test-id-proxy?token="+token, nil) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentConnectProxyConfig(resp, req) + require.NoError(err) + proxyCfg := obj.(*api.ConnectProxyConfig) + require.Equal("test-id-proxy", proxyCfg.ProxyServiceID) + require.Equal("test-id", proxyCfg.TargetServiceID) + require.Equal("test", proxyCfg.TargetServiceName) +} + +func TestAgentConnectProxyConfig_aclServiceWrite(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()+` + connect { + enabled = true + } + `) + defer a.Shutdown() + + // Register a service with a managed proxy + { + reg := &structs.ServiceDefinition{ + ID: "test-id", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{}, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + // Create an ACL with service:write for our service + var token string + { + args := map[string]interface{}{ + "Name": "User Token", + "Type": "client", + "Rules": `service "test" { policy = "write" }`, + } + req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args)) + resp := httptest.NewRecorder() + obj, err := a.srv.ACLCreate(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + aclResp := obj.(aclCreateResponse) + token = aclResp.ID + } + + req, _ := http.NewRequest( + "GET", "/v1/agent/connect/proxy/test-id-proxy?token="+token, nil) + resp := httptest.NewRecorder() + obj, err := 
a.srv.AgentConnectProxyConfig(resp, req) + require.NoError(err) + proxyCfg := obj.(*api.ConnectProxyConfig) + require.Equal("test-id-proxy", proxyCfg.ProxyServiceID) + require.Equal("test-id", proxyCfg.TargetServiceID) + require.Equal("test", proxyCfg.TargetServiceName) +} + +func TestAgentConnectProxyConfig_aclServiceReadDeny(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()+` + connect { + enabled = true + } + `) + defer a.Shutdown() + + // Register a service with a managed proxy + { + reg := &structs.ServiceDefinition{ + ID: "test-id", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{}, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + // Create an ACL with service:read for our service + var token string + { + args := map[string]interface{}{ + "Name": "User Token", + "Type": "client", + "Rules": `service "test" { policy = "read" }`, + } + req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args)) + resp := httptest.NewRecorder() + obj, err := a.srv.ACLCreate(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + aclResp := obj.(aclCreateResponse) + token = aclResp.ID + } + + req, _ := http.NewRequest( + "GET", "/v1/agent/connect/proxy/test-id-proxy?token="+token, nil) + resp := httptest.NewRecorder() + _, err := a.srv.AgentConnectProxyConfig(resp, req) + require.True(acl.IsErrPermissionDenied(err)) +} + func TestAgentConnectProxyConfig_ConfigHandling(t *testing.T) { t.Parallel() From b4f990bc6ce116047165cfa5c50b6d174e06392e Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 6 May 2018 
21:46:22 -0700 Subject: [PATCH 253/627] agent: verify local proxy tokens for CA leaf + tests --- agent/acl.go | 13 ++ agent/agent.go | 49 +++++-- agent/agent_endpoint.go | 12 +- agent/agent_endpoint_test.go | 277 ++++++++++++++++++++++++++++++++++- 4 files changed, 333 insertions(+), 18 deletions(-) diff --git a/agent/acl.go b/agent/acl.go index 0bf4180eb..e266feafa 100644 --- a/agent/acl.go +++ b/agent/acl.go @@ -8,6 +8,7 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/config" + "github.com/hashicorp/consul/agent/local" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/types" "github.com/hashicorp/golang-lru" @@ -239,6 +240,18 @@ func (a *Agent) resolveToken(id string) (acl.ACL, error) { return a.acls.lookupACL(a, id) } +// resolveProxyToken attempts to resolve an ACL ID to a local proxy token. +// If a local proxy isn't found with that token, nil is returned. +func (a *Agent) resolveProxyToken(id string) *local.ManagedProxy { + for _, p := range a.State.Proxies() { + if p.ProxyToken == id { + return p + } + } + + return nil +} + // vetServiceRegister makes sure the service registration action is allowed by // the given token. func (a *Agent) vetServiceRegister(token string, service *structs.NodeService) error { diff --git a/agent/agent.go b/agent/agent.go index ff840d162..21ada77cd 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2115,24 +2115,53 @@ func (a *Agent) RemoveProxy(proxyID string, persist bool) error { return nil } -// verifyProxyToken takes a proxy service ID and a token and verifies -// that the token is allowed to access proxy-related information (leaf +// verifyProxyToken takes a token and attempts to verify it against the +// targetService name. If targetProxy is specified, then the local proxy +// token must exactly match the given proxy ID. // cert, config, etc.). // // The given token may be a local-only proxy token or it may be an ACL // token. 
We will attempt to verify the local proxy token first. -func (a *Agent) verifyProxyToken(proxyId, token string) error { - proxy := a.State.Proxy(proxyId) - if proxy == nil { - return fmt.Errorf("unknown proxy service ID: %q", proxyId) +func (a *Agent) verifyProxyToken(token, targetService, targetProxy string) error { + // If we specify a target proxy, we look up that proxy directly. Otherwise, + // we resolve with any proxy we can find. + var proxy *local.ManagedProxy + if targetProxy != "" { + proxy = a.State.Proxy(targetProxy) + if proxy == nil { + return fmt.Errorf("unknown proxy service ID: %q", targetProxy) + } + + // If the token DOESN'T match, then we reset the proxy which will + // cause the logic below to fall back to normal ACLs. Otherwise, + // we keep the proxy set because we also have to verify that the + // target service matches on the proxy. + if token != proxy.ProxyToken { + proxy = nil + } + } else { + proxy = a.resolveProxyToken(token) } - // Easy case is if the token just matches our local proxy token. - // If this happens we can return without any requests. - if token == proxy.ProxyToken { + // The existence of a token isn't enough, we also need to verify + // that the service name of the matching proxy matches our target + // service. + if proxy != nil { + if proxy.Proxy.TargetServiceID != targetService { + return acl.ErrPermissionDenied + } + return nil } + // Retrieve the service specified. This should always exist because + // we only call this function for proxies and leaf certs and both can + // only be called for local services. + service := a.State.Service(targetService) + if service == nil { + return fmt.Errorf("unknown service ID: %s", targetService) + } + // Doesn't match, we have to do a full token resolution. 
The required // permission for any proxy-related endpont is service:write, since // to register a proxy you require that permission and sensitive data @@ -2141,7 +2170,7 @@ func (a *Agent) verifyProxyToken(proxyId, token string) error { if err != nil { return err } - if rule != nil && !rule.ServiceWrite(proxy.Proxy.TargetServiceID, nil) { + if rule != nil && !rule.ServiceWrite(service.Service, nil) { return acl.ErrPermissionDenied } diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index de1c0d48e..32b326867 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -925,15 +925,12 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. } args.MinQueryIndex = qOpts.MinQueryIndex - // Validate token - // TODO(banks): support correct proxy token checking too - rule, err := s.agent.resolveToken(qOpts.Token) + // Verify the proxy token. This will check both the local proxy token + // as well as the ACL if the token isn't local. + err := s.agent.verifyProxyToken(qOpts.Token, id, "") if err != nil { return nil, err } - if rule != nil && !rule.ServiceWrite(service.Service, nil) { - return nil, acl.ErrPermissionDenied - } raw, err := s.agent.cache.Get(cachetype.ConnectCALeafName, &args) if err != nil { @@ -985,7 +982,8 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http } // Validate the ACL token - if err := s.agent.verifyProxyToken(id, token); err != nil { + err := s.agent.verifyProxyToken(token, proxy.Proxy.TargetServiceID, id) + if err != nil { return "", nil, err } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 66ebc59ef..d4b55a50f 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2195,6 +2195,282 @@ func TestAgentConnectCARoots_list(t *testing.T) { } } +func TestAgentConnectCALeafCert_aclDefaultDeny(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()+` + connect { + 
enabled = true + } + `) + defer a.Shutdown() + + // Register a service with a managed proxy + { + reg := &structs.ServiceDefinition{ + ID: "test-id", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{}, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test-id", nil) + resp := httptest.NewRecorder() + _, err := a.srv.AgentConnectCALeafCert(resp, req) + require.Error(err) + require.True(acl.IsErrPermissionDenied(err)) +} + +func TestAgentConnectCALeafCert_aclProxyToken(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()+` + connect { + enabled = true + } + `) + defer a.Shutdown() + + // Register a service with a managed proxy + { + reg := &structs.ServiceDefinition{ + ID: "test-id", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{}, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + // Get the proxy token from the agent directly, since there is no API. 
+ proxy := a.State.Proxy("test-id-proxy") + require.NotNil(proxy) + token := proxy.ProxyToken + require.NotEmpty(token) + + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test-id?token="+token, nil) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentConnectCALeafCert(resp, req) + require.NoError(err) + + // Get the issued cert + _, ok := obj.(*structs.IssuedCert) + require.True(ok) +} + +func TestAgentConnectCALeafCert_aclProxyTokenOther(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()+` + connect { + enabled = true + } + `) + defer a.Shutdown() + + // Register a service with a managed proxy + { + reg := &structs.ServiceDefinition{ + ID: "test-id", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{}, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + // Register another service + { + reg := &structs.ServiceDefinition{ + ID: "wrong-id", + Name: "wrong", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{}, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + // Get the proxy token from the agent directly, since there is no API. 
+ proxy := a.State.Proxy("wrong-id-proxy") + require.NotNil(proxy) + token := proxy.ProxyToken + require.NotEmpty(token) + + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test-id?token="+token, nil) + resp := httptest.NewRecorder() + _, err := a.srv.AgentConnectCALeafCert(resp, req) + require.Error(err) + require.True(acl.IsErrPermissionDenied(err)) +} + +func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()+` + connect { + enabled = true + } + `) + defer a.Shutdown() + + // Register a service with a managed proxy + { + reg := &structs.ServiceDefinition{ + ID: "test-id", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{}, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + // Create an ACL with service:write for our service + var token string + { + args := map[string]interface{}{ + "Name": "User Token", + "Type": "client", + "Rules": `service "test" { policy = "write" }`, + } + req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args)) + resp := httptest.NewRecorder() + obj, err := a.srv.ACLCreate(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + aclResp := obj.(aclCreateResponse) + token = aclResp.ID + } + + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test-id?token="+token, nil) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentConnectCALeafCert(resp, req) + require.NoError(err) + + // Get the issued cert + _, ok := obj.(*structs.IssuedCert) + require.True(ok) +} + +func 
TestAgentConnectCALeafCert_aclServiceReadDeny(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), TestACLConfig()+` + connect { + enabled = true + } + `) + defer a.Shutdown() + + // Register a service with a managed proxy + { + reg := &structs.ServiceDefinition{ + ID: "test-id", + Name: "test", + Address: "127.0.0.1", + Port: 8000, + Check: structs.CheckType{ + TTL: 15 * time.Second, + }, + Connect: &structs.ServiceDefinitionConnect{ + Proxy: &structs.ServiceDefinitionConnectProxy{}, + }, + } + + req, _ := http.NewRequest("PUT", "/v1/agent/service/register?token=root", jsonReader(reg)) + resp := httptest.NewRecorder() + _, err := a.srv.AgentRegisterService(resp, req) + require.NoError(err) + require.Equal(200, resp.Code, "body: %s", resp.Body.String()) + } + + // Create an ACL with service:read for our service + var token string + { + args := map[string]interface{}{ + "Name": "User Token", + "Type": "client", + "Rules": `service "test" { policy = "read" }`, + } + req, _ := http.NewRequest("PUT", "/v1/acl/create?token=root", jsonReader(args)) + resp := httptest.NewRecorder() + obj, err := a.srv.ACLCreate(resp, req) + if err != nil { + t.Fatalf("err: %v", err) + } + aclResp := obj.(aclCreateResponse) + token = aclResp.ID + } + + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test-id?token="+token, nil) + resp := httptest.NewRecorder() + _, err := a.srv.AgentConnectCALeafCert(resp, req) + require.Error(err) + require.True(acl.IsErrPermissionDenied(err)) +} + func TestAgentConnectCALeafCert_good(t *testing.T) { t.Parallel() @@ -2554,7 +2830,6 @@ func TestAgentConnectProxyConfig_aclDefaultDeny(t *testing.T) { resp := httptest.NewRecorder() _, err := a.srv.AgentConnectProxyConfig(resp, req) require.True(acl.IsErrPermissionDenied(err)) - } func TestAgentConnectProxyConfig_aclProxyToken(t *testing.T) { From c57405b323399095eb4fb3d6d73e5a82dfdd1d5d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 8 May 2018 
21:27:23 -0700 Subject: [PATCH 254/627] agent/consul: retry reading provider a few times --- agent/consul/leader.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 2f01b8833..d71c3ead3 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -488,9 +488,23 @@ func (s *Server) createCAProvider(conf *structs.CAConfiguration) (connect.CAProv } func (s *Server) getCAProvider() connect.CAProvider { + retries := 0 + +RETRY_PROVIDER: s.caProviderLock.RLock() - defer s.caProviderLock.RUnlock() - return s.caProvider + result := s.caProvider + s.caProviderLock.RUnlock() + + // In cases where an agent is started with managed proxies, we may ask + // for the provider before establishLeadership completes. If we're the + // leader, then wait and get the provider again + if result == nil && s.IsLeader() && retries < 10 { + retries++ + time.Sleep(50 * time.Millisecond) + goto RETRY_PROVIDER + } + + return result } func (s *Server) setCAProvider(newProvider connect.CAProvider) { From 749f81373f69e9651582d496a9278564caed7f1f Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 8 May 2018 21:30:18 -0700 Subject: [PATCH 255/627] agent/consul: check nil on getCAProvider result --- agent/consul/connect_ca_endpoint.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 136cbcb49..619418bab 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -157,6 +157,9 @@ func (s *ConnectCA) ConfigurationSet( // Have the old provider cross-sign the new intermediate oldProvider := s.srv.getCAProvider() + if oldProvider == nil { + return fmt.Errorf("internal error: CA provider is nil") + } xcCert, err := oldProvider.CrossSignCA(intermediateCA) if err != nil { return err @@ -283,6 +286,9 @@ func (s *ConnectCA) Sign( } provider := s.srv.getCAProvider() + if provider == nil 
{ + return fmt.Errorf("internal error: CA provider is nil") + } // todo(kyhavlov): more validation on the CSR before signing pem, err := provider.Sign(csr) From 54a1662da8172b4d0b4b043445bfe2692f5f34ee Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 8 May 2018 21:32:47 -0700 Subject: [PATCH 256/627] agent/consul: change provider wait from goto to a loop --- agent/consul/leader.go | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/agent/consul/leader.go b/agent/consul/leader.go index d71c3ead3..4d61ed0c4 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -489,19 +489,22 @@ func (s *Server) createCAProvider(conf *structs.CAConfiguration) (connect.CAProv func (s *Server) getCAProvider() connect.CAProvider { retries := 0 + var result connect.CAProvider + for result == nil { + s.caProviderLock.RLock() + result = s.caProvider + s.caProviderLock.RUnlock() -RETRY_PROVIDER: - s.caProviderLock.RLock() - result := s.caProvider - s.caProviderLock.RUnlock() + // In cases where an agent is started with managed proxies, we may ask + // for the provider before establishLeadership completes. If we're the + // leader, then wait and get the provider again + if result == nil && s.IsLeader() && retries < 10 { + retries++ + time.Sleep(50 * time.Millisecond) + continue + } - // In cases where an agent is started with managed proxies, we may ask - // for the provider before establishLeadership completes. 
If we're the - // leader, then wait and get the provider again - if result == nil && s.IsLeader() && retries < 10 { - retries++ - time.Sleep(50 * time.Millisecond) - goto RETRY_PROVIDER + break } return result From c42510e1ecb19e2e18307882e8ca64e253ed9c8b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 9 May 2018 11:04:52 -0700 Subject: [PATCH 257/627] agent/cache: implement refresh backoff --- agent/agent.go | 6 ++--- agent/cache/cache.go | 53 ++++++++++++++++++++++++++++----------- agent/cache/cache_test.go | 42 +++++++++++++++++++++++++++++++ agent/cache/entry.go | 7 +++--- 4 files changed, 88 insertions(+), 20 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 21ada77cd..77045c69e 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2785,7 +2785,7 @@ func (a *Agent) registerCache() { }, &cache.RegisterOptions{ // Maintain a blocking query, retry dropped connections quickly Refresh: true, - RefreshTimer: 3 * time.Second, + RefreshTimer: 0 * time.Second, RefreshTimeout: 10 * time.Minute, }) @@ -2795,7 +2795,7 @@ func (a *Agent) registerCache() { }, &cache.RegisterOptions{ // Maintain a blocking query, retry dropped connections quickly Refresh: true, - RefreshTimer: 3 * time.Second, + RefreshTimer: 0 * time.Second, RefreshTimeout: 10 * time.Minute, }) @@ -2804,7 +2804,7 @@ func (a *Agent) registerCache() { }, &cache.RegisterOptions{ // Maintain a blocking query, retry dropped connections quickly Refresh: true, - RefreshTimer: 3 * time.Second, + RefreshTimer: 0 * time.Second, RefreshTimeout: 10 * time.Minute, }) } diff --git a/agent/cache/cache.go b/agent/cache/cache.go index cdcaffc58..754d635df 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -26,6 +26,13 @@ import ( //go:generate mockery -all -inpkg +// Constants related to refresh backoff. We probably don't ever need to +// make these configurable knobs since they primarily exist to lower load. 
+const ( + CacheRefreshBackoffMin = 3 // 3 attempts before backing off + CacheRefreshMaxWait = 1 * time.Minute // maximum backoff wait time +) + // Cache is a agent-local cache of Consul data. Create a Cache using the // New function. A zero-value Cache is not ready for usage and will result // in a panic. @@ -330,14 +337,6 @@ func (c *Cache) fetch(t, key string, r Request, allowNew bool) (<-chan struct{}, Timeout: tEntry.Opts.RefreshTimeout, }, r) - if err == nil { - metrics.IncrCounter([]string{"consul", "cache", "fetch_success"}, 1) - metrics.IncrCounter([]string{"consul", "cache", t, "fetch_success"}, 1) - } else { - metrics.IncrCounter([]string{"consul", "cache", "fetch_error"}, 1) - metrics.IncrCounter([]string{"consul", "cache", t, "fetch_error"}, 1) - } - // Copy the existing entry to start. newEntry := entry newEntry.Fetching = false @@ -351,10 +350,26 @@ func (c *Cache) fetch(t, key string, r Request, allowNew bool) (<-chan struct{}, newEntry.Valid = true } - // If we have an error and the prior entry wasn't valid, then we - // set the error at least. - if err != nil && !newEntry.Valid { - newEntry.Error = err + // Error handling + if err == nil { + metrics.IncrCounter([]string{"consul", "cache", "fetch_success"}, 1) + metrics.IncrCounter([]string{"consul", "cache", t, "fetch_success"}, 1) + + // Reset the attepts counter so we don't have any backoff + newEntry.ErrAttempts = 0 + } else { + metrics.IncrCounter([]string{"consul", "cache", "fetch_error"}, 1) + metrics.IncrCounter([]string{"consul", "cache", t, "fetch_error"}, 1) + + // Always increment the attempts to control backoff + newEntry.ErrAttempts++ + + // If the entry wasn't valid, we set an error. If it was valid, + // we don't set an error so that the prior value can continue + // being used. This will be evicted if the TTL comes up. + if !newEntry.Valid { + newEntry.Error = err + } } // Create a new waiter that will be used for the next fetch. 
@@ -384,7 +399,7 @@ func (c *Cache) fetch(t, key string, r Request, allowNew bool) (<-chan struct{}, // If refresh is enabled, run the refresh in due time. The refresh // below might block, but saves us from spawning another goroutine. if tEntry.Opts.Refresh { - c.refresh(tEntry.Opts, t, key, r) + c.refresh(tEntry.Opts, newEntry.ErrAttempts, t, key, r) } }() @@ -417,12 +432,22 @@ func (c *Cache) fetchDirect(t string, r Request) (interface{}, error) { // refresh triggers a fetch for a specific Request according to the // registration options. -func (c *Cache) refresh(opts *RegisterOptions, t string, key string, r Request) { +func (c *Cache) refresh(opts *RegisterOptions, attempt uint8, t string, key string, r Request) { // Sanity-check, we should not schedule anything that has refresh disabled if !opts.Refresh { return } + // If we're over the attempt minimum, start an exponential backoff. + if attempt > CacheRefreshBackoffMin { + waitTime := (1 << (attempt - CacheRefreshBackoffMin)) * time.Second + if waitTime > CacheRefreshMaxWait { + waitTime = CacheRefreshMaxWait + } + + time.Sleep(waitTime) + } + // If we have a timer, wait for it if opts.RefreshTimer > 0 { time.Sleep(opts.RefreshTimer) diff --git a/agent/cache/cache_test.go b/agent/cache/cache_test.go index cf179b2ab..07490be13 100644 --- a/agent/cache/cache_test.go +++ b/agent/cache/cache_test.go @@ -4,6 +4,7 @@ import ( "fmt" "sort" "sync" + "sync/atomic" "testing" "time" @@ -336,6 +337,47 @@ func TestCacheGet_periodicRefresh(t *testing.T) { TestCacheGetChResult(t, resultCh, 12) } +// Test that a refresh performs a backoff. 
+func TestCacheGet_periodicRefreshErrorBackoff(t *testing.T) { + t.Parallel() + + typ := TestType(t) + defer typ.AssertExpectations(t) + c := TestCache(t) + c.RegisterType("t", typ, &RegisterOptions{ + Refresh: true, + RefreshTimer: 0, + RefreshTimeout: 5 * time.Minute, + }) + + // Configure the type + var retries uint32 + fetchErr := fmt.Errorf("test fetch error") + typ.Static(FetchResult{Value: 1, Index: 4}, nil).Once() + typ.Static(FetchResult{Value: nil, Index: 5}, fetchErr).Run(func(args mock.Arguments) { + atomic.AddUint32(&retries, 1) + }) + + // Fetch + resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"})) + TestCacheGetChResult(t, resultCh, 1) + + // Sleep a bit. The refresh will quietly fail in the background. What we + // want to verify is that it doesn't retry too much. "Too much" is hard + // to measure since its CPU dependent if this test is failing. But due + // to the short sleep below, we can calculate about what we'd expect if + // backoff IS working. + time.Sleep(500 * time.Millisecond) + + // Fetch should work, we should get a 1 still. Errors are ignored. + resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"})) + TestCacheGetChResult(t, resultCh, 1) + + // Check the number + actual := atomic.LoadUint32(&retries) + require.True(t, actual < 10, fmt.Sprintf("actual: %d", actual)) +} + // Test that the backend fetch sets the proper timeout. 
func TestCacheGet_fetchTimeout(t *testing.T) { t.Parallel() diff --git a/agent/cache/entry.go b/agent/cache/entry.go index 50c575ff7..6bab621a3 100644 --- a/agent/cache/entry.go +++ b/agent/cache/entry.go @@ -16,9 +16,10 @@ type cacheEntry struct { Index uint64 // Metadata that is used for internal accounting - Valid bool // True if the Value is set - Fetching bool // True if a fetch is already active - Waiter chan struct{} // Closed when this entry is invalidated + Valid bool // True if the Value is set + Fetching bool // True if a fetch is already active + Waiter chan struct{} // Closed when this entry is invalidated + ErrAttempts uint8 // Number of fetch errors since last success (Error may be nil) // Expiry contains information about the expiration of this // entry. This is a pointer as its shared as a value in the From 6cf2e1ef1ae0fc5702b1e493b0d88690a3d1383c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 9 May 2018 11:10:17 -0700 Subject: [PATCH 258/627] agent/cache: string through attempt rather than storing on the entry --- agent/cache/cache.go | 16 ++++++++-------- agent/cache/entry.go | 7 +++---- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 754d635df..d0d5665b1 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -257,7 +257,7 @@ RETRY_GET: // At this point, we know we either don't have a value at all or the // value we have is too old. We need to wait for new data. - waiterCh, err := c.fetch(t, key, r, true) + waiterCh, err := c.fetch(t, key, r, true, 0) if err != nil { return nil, err } @@ -287,7 +287,7 @@ func (c *Cache) entryKey(r *RequestInfo) string { // If allowNew is true then the fetch should create the cache entry // if it doesn't exist. If this is false, then fetch will do nothing // if the entry doesn't exist. This latter case is to support refreshing. 
-func (c *Cache) fetch(t, key string, r Request, allowNew bool) (<-chan struct{}, error) { +func (c *Cache) fetch(t, key string, r Request, allowNew bool, attempt uint8) (<-chan struct{}, error) { // Get the type that we're fetching c.typesLock.RLock() tEntry, ok := c.types[t] @@ -355,14 +355,14 @@ func (c *Cache) fetch(t, key string, r Request, allowNew bool) (<-chan struct{}, metrics.IncrCounter([]string{"consul", "cache", "fetch_success"}, 1) metrics.IncrCounter([]string{"consul", "cache", t, "fetch_success"}, 1) - // Reset the attepts counter so we don't have any backoff - newEntry.ErrAttempts = 0 + // Reset the attempts counter so we don't have any backoff + attempt = 0 } else { metrics.IncrCounter([]string{"consul", "cache", "fetch_error"}, 1) metrics.IncrCounter([]string{"consul", "cache", t, "fetch_error"}, 1) - // Always increment the attempts to control backoff - newEntry.ErrAttempts++ + // Increment attempt counter + attempt++ // If the entry wasn't valid, we set an error. If it was valid, // we don't set an error so that the prior value can continue @@ -399,7 +399,7 @@ func (c *Cache) fetch(t, key string, r Request, allowNew bool) (<-chan struct{}, // If refresh is enabled, run the refresh in due time. The refresh // below might block, but saves us from spawning another goroutine. if tEntry.Opts.Refresh { - c.refresh(tEntry.Opts, newEntry.ErrAttempts, t, key, r) + c.refresh(tEntry.Opts, attempt, t, key, r) } }() @@ -456,7 +456,7 @@ func (c *Cache) refresh(opts *RegisterOptions, attempt uint8, t string, key stri // Trigger. The "allowNew" field is false because in the time we were // waiting to refresh we may have expired and got evicted. If that // happened, we don't want to create a new entry. 
- c.fetch(t, key, r, false) + c.fetch(t, key, r, false, attempt) } // runExpiryLoop is a blocking function that watches the expiration diff --git a/agent/cache/entry.go b/agent/cache/entry.go index 6bab621a3..50c575ff7 100644 --- a/agent/cache/entry.go +++ b/agent/cache/entry.go @@ -16,10 +16,9 @@ type cacheEntry struct { Index uint64 // Metadata that is used for internal accounting - Valid bool // True if the Value is set - Fetching bool // True if a fetch is already active - Waiter chan struct{} // Closed when this entry is invalidated - ErrAttempts uint8 // Number of fetch errors since last success (Error may be nil) + Valid bool // True if the Value is set + Fetching bool // True if a fetch is already active + Waiter chan struct{} // Closed when this entry is invalidated // Expiry contains information about the expiration of this // entry. This is a pointer as its shared as a value in the From 4bb745a2d458e7d100b0318b7ea4cb9b11a9be63 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 9 May 2018 11:54:15 -0700 Subject: [PATCH 259/627] agent/cache: change uint8 to uint --- agent/cache/cache.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/agent/cache/cache.go b/agent/cache/cache.go index d0d5665b1..1b4653cb4 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -287,7 +287,7 @@ func (c *Cache) entryKey(r *RequestInfo) string { // If allowNew is true then the fetch should create the cache entry // if it doesn't exist. If this is false, then fetch will do nothing // if the entry doesn't exist. This latter case is to support refreshing. 
-func (c *Cache) fetch(t, key string, r Request, allowNew bool, attempt uint8) (<-chan struct{}, error) { +func (c *Cache) fetch(t, key string, r Request, allowNew bool, attempt uint) (<-chan struct{}, error) { // Get the type that we're fetching c.typesLock.RLock() tEntry, ok := c.types[t] @@ -432,7 +432,7 @@ func (c *Cache) fetchDirect(t string, r Request) (interface{}, error) { // refresh triggers a fetch for a specific Request according to the // registration options. -func (c *Cache) refresh(opts *RegisterOptions, attempt uint8, t string, key string, r Request) { +func (c *Cache) refresh(opts *RegisterOptions, attempt uint, t string, key string, r Request) { // Sanity-check, we should not schedule anything that has refresh disabled if !opts.Refresh { return From c90b353eea57fc28a506f0f29cf30d623bbf44b6 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Thu, 3 May 2018 12:50:45 -0700 Subject: [PATCH 260/627] Move connect CA provider to separate package --- agent/connect/{ => ca}/ca_provider.go | 4 +- .../ca/ca_provider_consul.go} | 63 ++++---- .../ca/ca_provider_consul_test.go} | 145 +++++++++++------- agent/connect/{ca.go => parsing.go} | 0 agent/consul/connect_ca_endpoint_test.go | 15 +- agent/consul/consul_ca_delegate.go | 28 ++++ agent/consul/leader.go | 7 +- agent/consul/server.go | 4 +- 8 files changed, 158 insertions(+), 108 deletions(-) rename agent/connect/{ => ca}/ca_provider.go (94%) rename agent/{consul/connect_ca_provider.go => connect/ca/ca_provider_consul.go} (90%) rename agent/{consul/connect_ca_provider_test.go => connect/ca/ca_provider_consul_test.go} (57%) rename agent/connect/{ca.go => parsing.go} (100%) create mode 100644 agent/consul/consul_ca_delegate.go diff --git a/agent/connect/ca_provider.go b/agent/connect/ca/ca_provider.go similarity index 94% rename from agent/connect/ca_provider.go rename to agent/connect/ca/ca_provider.go index bec028851..d557d289c 100644 --- a/agent/connect/ca_provider.go +++ b/agent/connect/ca/ca_provider.go 
@@ -4,10 +4,10 @@ import ( "crypto/x509" ) -// CAProvider is the interface for Consul to interact with +// Provider is the interface for Consul to interact with // an external CA that provides leaf certificate signing for // given SpiffeIDServices. -type CAProvider interface { +type Provider interface { // Active root returns the currently active root CA for this // provider. This should be a parent of the certificate returned by // ActiveIntermediate() diff --git a/agent/consul/connect_ca_provider.go b/agent/connect/ca/ca_provider_consul.go similarity index 90% rename from agent/consul/connect_ca_provider.go rename to agent/connect/ca/ca_provider_consul.go index 0d7d851b0..7d925a40f 100644 --- a/agent/consul/connect_ca_provider.go +++ b/agent/connect/ca/ca_provider_consul.go @@ -1,4 +1,4 @@ -package consul +package connect import ( "bytes" @@ -15,34 +15,39 @@ import ( "time" "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" "github.com/mitchellh/mapstructure" ) type ConsulCAProvider struct { - config *structs.ConsulCAProviderConfig - - id string - srv *Server + config *structs.ConsulCAProviderConfig + id string + delegate ConsulCAStateDelegate sync.RWMutex } +type ConsulCAStateDelegate interface { + State() *state.Store + ApplyCARequest(*structs.CARequest) error +} + // NewConsulCAProvider returns a new instance of the Consul CA provider, // bootstrapping its state in the state store necessary -func NewConsulCAProvider(rawConfig map[string]interface{}, srv *Server) (*ConsulCAProvider, error) { +func NewConsulCAProvider(rawConfig map[string]interface{}, delegate ConsulCAStateDelegate) (*ConsulCAProvider, error) { conf, err := ParseConsulCAConfig(rawConfig) if err != nil { return nil, err } provider := &ConsulCAProvider{ - config: conf, - srv: srv, - id: fmt.Sprintf("%s,%s", conf.PrivateKey, conf.RootCert), + config: conf, + delegate: delegate, + id: fmt.Sprintf("%s,%s", 
conf.PrivateKey, conf.RootCert), } // Check if this configuration of the provider has already been // initialized in the state store. - state := srv.fsm.State() + state := delegate.State() _, providerState, err := state.CAProviderState(provider.id) if err != nil { return nil, err @@ -64,13 +69,9 @@ func NewConsulCAProvider(rawConfig map[string]interface{}, srv *Server) (*Consul Op: structs.CAOpSetProviderState, ProviderState: &newState, } - resp, err := srv.raftApply(structs.ConnectCARequestType, args) - if err != nil { + if err := delegate.ApplyCARequest(args); err != nil { return nil, err } - if respErr, ok := resp.(error); ok { - return nil, respErr - } } idx, _, err := state.CAProviderState(provider.id) @@ -80,7 +81,7 @@ func NewConsulCAProvider(rawConfig map[string]interface{}, srv *Server) (*Consul // Generate a private key if needed if conf.PrivateKey == "" { - pk, err := generatePrivateKey() + pk, err := GeneratePrivateKey() if err != nil { return nil, err } @@ -105,13 +106,9 @@ func NewConsulCAProvider(rawConfig map[string]interface{}, srv *Server) (*Consul Op: structs.CAOpSetProviderState, ProviderState: &newState, } - resp, err := srv.raftApply(structs.ConnectCARequestType, args) - if err != nil { + if err := delegate.ApplyCARequest(args); err != nil { return nil, err } - if respErr, ok := resp.(error); ok { - return nil, respErr - } return provider, nil } @@ -131,7 +128,7 @@ func ParseConsulCAConfig(raw map[string]interface{}) (*structs.ConsulCAProviderC // Return the active root CA and generate a new one if needed func (c *ConsulCAProvider) ActiveRoot() (string, error) { - state := c.srv.fsm.State() + state := c.delegate.State() _, providerState, err := state.CAProviderState(c.id) if err != nil { return "", err @@ -165,13 +162,9 @@ func (c *ConsulCAProvider) Cleanup() error { Op: structs.CAOpDeleteProviderState, ProviderState: &structs.CAConsulProviderState{ID: c.id}, } - resp, err := c.srv.raftApply(structs.ConnectCARequestType, args) - if err != nil 
{ + if err := c.delegate.ApplyCARequest(args); err != nil { return err } - if respErr, ok := resp.(error); ok { - return respErr - } return nil } @@ -185,7 +178,7 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { defer c.Unlock() // Get the provider state - state := c.srv.fsm.State() + state := c.delegate.State() _, providerState, err := state.CAProviderState(c.id) if err != nil { return "", err @@ -274,7 +267,7 @@ func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { defer c.Unlock() // Get the provider state - state := c.srv.fsm.State() + state := c.delegate.State() _, providerState, err := state.CAProviderState(c.id) if err != nil { return "", err @@ -333,19 +326,15 @@ func (c *ConsulCAProvider) incrementSerialIndex(providerState *structs.CAConsulP Op: structs.CAOpSetProviderState, ProviderState: &newState, } - resp, err := c.srv.raftApply(structs.ConnectCARequestType, args) - if err != nil { + if err := c.delegate.ApplyCARequest(args); err != nil { return err } - if respErr, ok := resp.(error); ok { - return respErr - } return nil } -// generatePrivateKey returns a new private key -func generatePrivateKey() (string, error) { +// GeneratePrivateKey returns a new private key +func GeneratePrivateKey() (string, error) { var pk *ecdsa.PrivateKey pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) @@ -369,7 +358,7 @@ func generatePrivateKey() (string, error) { // generateCA makes a new root CA using the current private key func (c *ConsulCAProvider) generateCA(privateKey string, sn uint64) (string, error) { - state := c.srv.fsm.State() + state := c.delegate.State() _, config, err := state.CAConfig() if err != nil { return "", err diff --git a/agent/consul/connect_ca_provider_test.go b/agent/connect/ca/ca_provider_consul_test.go similarity index 57% rename from agent/consul/connect_ca_provider_test.go rename to agent/connect/ca/ca_provider_consul_test.go index ead41309f..fd1c8ec29 100644 --- 
a/agent/consul/connect_ca_provider_test.go +++ b/agent/connect/ca/ca_provider_consul_test.go @@ -1,28 +1,81 @@ -package consul +package connect import ( - "os" + "fmt" "testing" "time" "github.com/hashicorp/consul/agent/connect" - "github.com/hashicorp/consul/testrpc" + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/structs" "github.com/stretchr/testify/assert" ) +type consulCAMockDelegate struct { + state *state.Store +} + +func (c *consulCAMockDelegate) State() *state.Store { + return c.state +} + +func (c *consulCAMockDelegate) ApplyCARequest(req *structs.CARequest) error { + idx, _, err := c.state.CAConfig() + if err != nil { + return err + } + + switch req.Op { + case structs.CAOpSetProviderState: + _, err := c.state.CASetProviderState(idx+1, req.ProviderState) + if err != nil { + return err + } + + return nil + case structs.CAOpDeleteProviderState: + if err := c.state.CADeleteProviderState(req.ProviderState.ID); err != nil { + return err + } + + return nil + default: + return fmt.Errorf("Invalid CA operation '%s'", req.Op) + } +} + +func newMockDelegate(t *testing.T, conf *structs.CAConfiguration) *consulCAMockDelegate { + s, err := state.NewStateStore(nil) + if err != nil { + t.Fatalf("err: %s", err) + } + if s == nil { + t.Fatalf("missing state store") + } + if err := s.CASetConfig(0, conf); err != nil { + t.Fatalf("err: %s", err) + } + + return &consulCAMockDelegate{s} +} + +func testConsulCAConfig() *structs.CAConfiguration { + return &structs.CAConfiguration{ + ClusterID: "asdf", + Provider: "consul", + Config: map[string]interface{}{}, + } +} + func TestCAProvider_Bootstrap(t *testing.T) { t.Parallel() assert := assert.New(t) - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - codec := rpcClient(t, s1) - defer codec.Close() + conf := testConsulCAConfig() + delegate := newMockDelegate(t, conf) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - provider := s1.getCAProvider() + provider, err := 
NewConsulCAProvider(conf.Config, delegate) + assert.NoError(err) root, err := provider.ActiveRoot() assert.NoError(err) @@ -30,14 +83,12 @@ func TestCAProvider_Bootstrap(t *testing.T) { // Intermediate should be the same cert. inter, err := provider.ActiveIntermediate() assert.NoError(err) + assert.Equal(root, inter) - // Make sure we initialize without errors and that the - // root cert gets set to the active cert. - state := s1.fsm.State() - _, activeRoot, err := state.CARootActive(nil) + // Should be a valid cert + parsed, err := connect.ParseCert(root) assert.NoError(err) - assert.Equal(root, activeRoot.RootCert) - assert.Equal(inter, activeRoot.RootCert) + assert.Equal(parsed.URIs[0].String(), fmt.Sprintf("spiffe://%s.consul", conf.ClusterID)) } func TestCAProvider_Bootstrap_WithCert(t *testing.T) { @@ -46,49 +97,35 @@ func TestCAProvider_Bootstrap_WithCert(t *testing.T) { // Make sure setting a custom private key/root cert works. assert := assert.New(t) rootCA := connect.TestCA(t, nil) - dir1, s1 := testServerWithConfig(t, func(c *Config) { - c.CAConfig.Config["PrivateKey"] = rootCA.SigningKey - c.CAConfig.Config["RootCert"] = rootCA.RootCert - }) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - codec := rpcClient(t, s1) - defer codec.Close() + conf := testConsulCAConfig() + conf.Config = map[string]interface{}{ + "PrivateKey": rootCA.SigningKey, + "RootCert": rootCA.RootCert, + } + delegate := newMockDelegate(t, conf) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - provider := s1.getCAProvider() + provider, err := NewConsulCAProvider(conf.Config, delegate) + assert.NoError(err) root, err := provider.ActiveRoot() assert.NoError(err) - - // Make sure we initialize without errors and that the - // root cert we provided gets set to the active cert. 
- state := s1.fsm.State() - _, activeRoot, err := state.CARootActive(nil) - assert.NoError(err) - assert.Equal(root, activeRoot.RootCert) - assert.Equal(rootCA.RootCert, activeRoot.RootCert) + assert.Equal(root, rootCA.RootCert) } func TestCAProvider_SignLeaf(t *testing.T) { t.Parallel() assert := assert.New(t) - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - codec := rpcClient(t, s1) - defer codec.Close() + conf := testConsulCAConfig() + delegate := newMockDelegate(t, conf) - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - provider := s1.getCAProvider() + provider, err := NewConsulCAProvider(conf.Config, delegate) + assert.NoError(err) spiffeService := &connect.SpiffeIDService{ - Host: s1.config.NodeName, + Host: "node1", Namespace: "default", - Datacenter: s1.config.Datacenter, + Datacenter: "dc1", Service: "foo", } @@ -141,18 +178,12 @@ func TestCAProvider_CrossSignCA(t *testing.T) { t.Parallel() assert := assert.New(t) + conf := testConsulCAConfig() + delegate := newMockDelegate(t, conf) + provider, err := NewConsulCAProvider(conf.Config, delegate) + assert.NoError(err) - // Make sure setting a custom private key/root cert works. - dir1, s1 := testServer(t) - defer os.RemoveAll(dir1) - defer s1.Shutdown() - codec := rpcClient(t, s1) - defer codec.Close() - - testrpc.WaitForLeader(t, s1.RPC, "dc1") - - provider := s1.getCAProvider() - + // Make a new CA cert to get cross-signed. 
rootCA := connect.TestCA(t, nil) rootPEM, err := provider.ActiveRoot() assert.NoError(err) diff --git a/agent/connect/ca.go b/agent/connect/parsing.go similarity index 100% rename from agent/connect/ca.go rename to agent/connect/parsing.go diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index 321bcfcb4..49baddfb9 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/consul/agent/connect" + connect_ca "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/net-rpc-msgpackrpc" @@ -82,9 +83,9 @@ func TestConnectCAConfig_GetSet(t *testing.T) { var reply structs.CAConfiguration assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)) - actual, err := ParseConsulCAConfig(reply.Config) + actual, err := connect_ca.ParseConsulCAConfig(reply.Config) assert.NoError(err) - expected, err := ParseConsulCAConfig(s1.config.CAConfig.Config) + expected, err := connect_ca.ParseConsulCAConfig(s1.config.CAConfig.Config) assert.NoError(err) assert.Equal(reply.Provider, s1.config.CAConfig.Provider) assert.Equal(actual, expected) @@ -117,9 +118,9 @@ func TestConnectCAConfig_GetSet(t *testing.T) { var reply structs.CAConfiguration assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)) - actual, err := ParseConsulCAConfig(reply.Config) + actual, err := connect_ca.ParseConsulCAConfig(reply.Config) assert.NoError(err) - expected, err := ParseConsulCAConfig(newConfig.Config) + expected, err := connect_ca.ParseConsulCAConfig(newConfig.Config) assert.NoError(err) assert.Equal(reply.Provider, newConfig.Provider) assert.Equal(actual, expected) @@ -149,7 +150,7 @@ func TestConnectCAConfig_TriggerRotation(t *testing.T) { // Update the provider config to use a new private key, which should 
// cause a rotation. - newKey, err := generatePrivateKey() + newKey, err := connect_ca.GeneratePrivateKey() assert.NoError(err) newConfig := &structs.CAConfiguration{ Provider: "consul", @@ -219,9 +220,9 @@ func TestConnectCAConfig_TriggerRotation(t *testing.T) { var reply structs.CAConfiguration assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)) - actual, err := ParseConsulCAConfig(reply.Config) + actual, err := connect_ca.ParseConsulCAConfig(reply.Config) assert.NoError(err) - expected, err := ParseConsulCAConfig(newConfig.Config) + expected, err := connect_ca.ParseConsulCAConfig(newConfig.Config) assert.NoError(err) assert.Equal(reply.Provider, newConfig.Provider) assert.Equal(actual, expected) diff --git a/agent/consul/consul_ca_delegate.go b/agent/consul/consul_ca_delegate.go new file mode 100644 index 000000000..5f32a9396 --- /dev/null +++ b/agent/consul/consul_ca_delegate.go @@ -0,0 +1,28 @@ +package consul + +import ( + "github.com/hashicorp/consul/agent/consul/state" + "github.com/hashicorp/consul/agent/structs" +) + +// consulCADelegate provides callbacks for the Consul CA provider +// to use the state store for its operations. 
+type consulCADelegate struct { + srv *Server +} + +func (c *consulCADelegate) State() *state.Store { + return c.srv.fsm.State() +} + +func (c *consulCADelegate) ApplyCARequest(req *structs.CARequest) error { + resp, err := c.srv.raftApply(structs.ConnectCARequestType, req) + if err != nil { + return err + } + if respErr, ok := resp.(error); ok { + return respErr + } + + return nil +} diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 4d61ed0c4..b07a3622d 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -10,6 +10,7 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" + connect_ca "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/structs" @@ -478,10 +479,10 @@ func (s *Server) initializeCA() error { } // createProvider returns a connect CA provider from the given config. 
-func (s *Server) createCAProvider(conf *structs.CAConfiguration) (connect.CAProvider, error) { +func (s *Server) createCAProvider(conf *structs.CAConfiguration) (connect_ca.Provider, error) { switch conf.Provider { case structs.ConsulCAProvider: - return NewConsulCAProvider(conf.Config, s) + return connect_ca.NewConsulCAProvider(conf.Config, &consulCADelegate{s}) default: return nil, fmt.Errorf("unknown CA provider %q", conf.Provider) } @@ -510,7 +511,7 @@ func (s *Server) getCAProvider() connect.CAProvider { return result } -func (s *Server) setCAProvider(newProvider connect.CAProvider) { +func (s *Server) setCAProvider(newProvider connect_ca.Provider) { s.caProviderLock.Lock() defer s.caProviderLock.Unlock() s.caProvider = newProvider diff --git a/agent/consul/server.go b/agent/consul/server.go index e15d5f71c..871115c35 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -18,7 +18,7 @@ import ( "time" "github.com/hashicorp/consul/acl" - "github.com/hashicorp/consul/agent/connect" + connect_ca "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/agent/consul/fsm" "github.com/hashicorp/consul/agent/consul/state" @@ -99,7 +99,7 @@ type Server struct { // caProvider is the current CA provider in use for Connect. This is // only non-nil when we are the leader. 
- caProvider connect.CAProvider + caProvider connect_ca.Provider caProviderLock sync.RWMutex // Consul configuration From 5998623c445ca8633453ab035be4090c5176526c Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 4 May 2018 15:28:11 -0700 Subject: [PATCH 261/627] Add test for ca config http endpoint --- agent/connect/ca/ca_provider_consul.go | 4 +- agent/connect_ca_endpoint_test.go | 62 +++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 3 deletions(-) diff --git a/agent/connect/ca/ca_provider_consul.go b/agent/connect/ca/ca_provider_consul.go index 7d925a40f..2b119c0a3 100644 --- a/agent/connect/ca/ca_provider_consul.go +++ b/agent/connect/ca/ca_provider_consul.go @@ -114,7 +114,7 @@ func NewConsulCAProvider(rawConfig map[string]interface{}, delegate ConsulCAStat } func ParseConsulCAConfig(raw map[string]interface{}) (*structs.ConsulCAProviderConfig, error) { - var config *structs.ConsulCAProviderConfig + var config structs.ConsulCAProviderConfig if err := mapstructure.WeakDecode(raw, &config); err != nil { return nil, fmt.Errorf("error decoding config: %s", err) } @@ -123,7 +123,7 @@ func ParseConsulCAConfig(raw map[string]interface{}) (*structs.ConsulCAProviderC return nil, fmt.Errorf("must provide a private key when providing a root cert") } - return config, nil + return &config, nil } // Return the active root CA and generate a new one if needed diff --git a/agent/connect_ca_endpoint_test.go b/agent/connect_ca_endpoint_test.go index a9b355e0d..04abcfa9a 100644 --- a/agent/connect_ca_endpoint_test.go +++ b/agent/connect_ca_endpoint_test.go @@ -1,11 +1,14 @@ package agent import ( + "bytes" "net/http" "net/http/httptest" "testing" + "time" "github.com/hashicorp/consul/agent/connect" + connect_ca "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/structs" "github.com/stretchr/testify/assert" ) @@ -42,7 +45,7 @@ func TestConnectCARoots_list(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/connect/ca/roots", nil) 
resp := httptest.NewRecorder() obj, err := a.srv.ConnectCARoots(resp, req) - assert.Nil(err) + assert.NoError(err) value := obj.(structs.IndexedCARoots) assert.Equal(value.ActiveRootID, ca2.ID) @@ -54,3 +57,60 @@ func TestConnectCARoots_list(t *testing.T) { assert.Equal("", r.SigningKey) } } + +func TestConnectCAConfig(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + expected := &structs.ConsulCAProviderConfig{ + RotationPeriod: 90 * 24 * time.Hour, + } + + // Get the initial config. + { + req, _ := http.NewRequest("GET", "/v1/connect/ca/configuration", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.ConnectCAConfiguration(resp, req) + assert.NoError(err) + + value := obj.(structs.CAConfiguration) + parsed, err := connect_ca.ParseConsulCAConfig(value.Config) + assert.NoError(err) + assert.Equal("consul", value.Provider) + assert.Equal(expected, parsed) + } + + // Set the config. + { + body := bytes.NewBuffer([]byte(` + { + "Provider": "consul", + "Config": { + "RotationPeriod": 3600000000000 + } + }`)) + req, _ := http.NewRequest("PUT", "/v1/connect/ca/configuration", body) + resp := httptest.NewRecorder() + _, err := a.srv.ConnectCAConfiguration(resp, req) + assert.NoError(err) + } + + // The config should be updated now. 
+ { + expected.RotationPeriod = time.Hour + req, _ := http.NewRequest("GET", "/v1/connect/ca/configuration", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.ConnectCAConfiguration(resp, req) + assert.NoError(err) + + value := obj.(structs.CAConfiguration) + //t.Fatalf("%#v", value) + parsed, err := connect_ca.ParseConsulCAConfig(value.Config) + assert.NoError(err) + assert.Equal("consul", value.Provider) + assert.Equal(expected, parsed) + } +} From baf4db1c72c64a3dfdd3dee98cd7f88564134474 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 4 May 2018 16:01:38 -0700 Subject: [PATCH 262/627] Use provider state table for a global serial index --- agent/connect/ca/ca_provider_consul.go | 19 +++++++------- agent/consul/fsm/commands_oss_test.go | 7 +++--- agent/consul/state/connect_ca_test.go | 35 +++++++++++--------------- agent/structs/connect_ca.go | 7 +++--- 4 files changed, 30 insertions(+), 38 deletions(-) diff --git a/agent/connect/ca/ca_provider_consul.go b/agent/connect/ca/ca_provider_consul.go index 2b119c0a3..922472eb7 100644 --- a/agent/connect/ca/ca_provider_consul.go +++ b/agent/connect/ca/ca_provider_consul.go @@ -179,7 +179,7 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { // Get the provider state state := c.delegate.State() - _, providerState, err := state.CAProviderState(c.id) + idx, providerState, err := state.CAProviderState(c.id) if err != nil { return "", err } @@ -215,7 +215,7 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { // Cert template for generation sn := &big.Int{} - sn.SetUint64(providerState.SerialIndex + 1) + sn.SetUint64(idx + 1) template := x509.Certificate{ SerialNumber: sn, Subject: pkix.Name{CommonName: serviceId.Service}, @@ -252,7 +252,7 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { return "", fmt.Errorf("error encoding private key: %s", err) } - err = c.incrementSerialIndex(providerState) + err = 
c.incrementProviderIndex(providerState) if err != nil { return "", err } @@ -268,7 +268,7 @@ func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { // Get the provider state state := c.delegate.State() - _, providerState, err := state.CAProviderState(c.id) + idx, providerState, err := state.CAProviderState(c.id) if err != nil { return "", err } @@ -290,7 +290,7 @@ func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { // Create the cross-signing template from the existing root CA serialNum := &big.Int{} - serialNum.SetUint64(providerState.SerialIndex + 1) + serialNum.SetUint64(idx + 1) template := *cert template.SerialNumber = serialNum template.SignatureAlgorithm = rootCA.SignatureAlgorithm @@ -309,7 +309,7 @@ func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { return "", fmt.Errorf("error encoding private key: %s", err) } - err = c.incrementSerialIndex(providerState) + err = c.incrementProviderIndex(providerState) if err != nil { return "", err } @@ -317,11 +317,10 @@ func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { return buf.String(), nil } -// incrementSerialIndex increments the cert serial number index in the provider -// state. -func (c *ConsulCAProvider) incrementSerialIndex(providerState *structs.CAConsulProviderState) error { +// incrementProviderIndex does a write to increment the provider state store table index +// used for serial numbers when generating certificates. 
+func (c *ConsulCAProvider) incrementProviderIndex(providerState *structs.CAConsulProviderState) error { newState := *providerState - newState.SerialIndex++ args := &structs.CARequest{ Op: structs.CAOpSetProviderState, ProviderState: &newState, diff --git a/agent/consul/fsm/commands_oss_test.go b/agent/consul/fsm/commands_oss_test.go index 280bf5b38..85b20b442 100644 --- a/agent/consul/fsm/commands_oss_test.go +++ b/agent/consul/fsm/commands_oss_test.go @@ -1328,10 +1328,9 @@ func TestFSM_CABuiltinProvider(t *testing.T) { // Provider state. expected := &structs.CAConsulProviderState{ - ID: "foo", - PrivateKey: "a", - RootCert: "b", - SerialIndex: 2, + ID: "foo", + PrivateKey: "a", + RootCert: "b", RaftIndex: structs.RaftIndex{ CreateIndex: 1, ModifyIndex: 1, diff --git a/agent/consul/state/connect_ca_test.go b/agent/consul/state/connect_ca_test.go index 4639c7f5a..de914ee16 100644 --- a/agent/consul/state/connect_ca_test.go +++ b/agent/consul/state/connect_ca_test.go @@ -356,10 +356,9 @@ func TestStore_CABuiltinProvider(t *testing.T) { { expected := &structs.CAConsulProviderState{ - ID: "foo", - PrivateKey: "a", - RootCert: "b", - SerialIndex: 1, + ID: "foo", + PrivateKey: "a", + RootCert: "b", } ok, err := s.CASetProviderState(0, expected) @@ -374,10 +373,9 @@ func TestStore_CABuiltinProvider(t *testing.T) { { expected := &structs.CAConsulProviderState{ - ID: "bar", - PrivateKey: "c", - RootCert: "d", - SerialIndex: 2, + ID: "bar", + PrivateKey: "c", + RootCert: "d", } ok, err := s.CASetProviderState(1, expected) @@ -398,16 +396,14 @@ func TestStore_CABuiltinProvider_Snapshot_Restore(t *testing.T) { // Create multiple state entries. 
before := []*structs.CAConsulProviderState{ { - ID: "bar", - PrivateKey: "y", - RootCert: "z", - SerialIndex: 2, + ID: "bar", + PrivateKey: "y", + RootCert: "z", }, { - ID: "foo", - PrivateKey: "a", - RootCert: "b", - SerialIndex: 1, + ID: "foo", + PrivateKey: "a", + RootCert: "b", }, } @@ -423,10 +419,9 @@ func TestStore_CABuiltinProvider_Snapshot_Restore(t *testing.T) { // Modify the state store. after := &structs.CAConsulProviderState{ - ID: "foo", - PrivateKey: "c", - RootCert: "d", - SerialIndex: 1, + ID: "foo", + PrivateKey: "c", + RootCert: "d", } ok, err := s.CASetProviderState(100, after) assert.NoError(err) diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 0570057b6..ca60a677f 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -168,10 +168,9 @@ type ConsulCAProviderConfig struct { // CAConsulProviderState is used to track the built-in Consul CA provider's state. type CAConsulProviderState struct { - ID string - PrivateKey string - RootCert string - SerialIndex uint64 + ID string + PrivateKey string + RootCert string RaftIndex } From 1660f9ebab278296759571cc847d08598c4f4f93 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 4 May 2018 16:01:54 -0700 Subject: [PATCH 263/627] Add more metadata to structs.CARoot --- agent/consul/connect_ca_endpoint.go | 14 ++++++------ agent/consul/leader.go | 33 ++++++++++++++++++++++------- agent/structs/connect_ca.go | 11 ++++++++++ 3 files changed, 42 insertions(+), 16 deletions(-) diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 619418bab..8acaa4ed0 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -98,15 +98,9 @@ func (s *ConnectCA) ConfigurationSet( return err } - id, err := connect.CalculateCertFingerprint(newRootPEM) + newActiveRoot, err := parseCARoot(newRootPEM, args.Config.Provider) if err != nil { - return fmt.Errorf("error parsing root fingerprint: %v", err) - } - 
newActiveRoot := &structs.CARoot{ - ID: id, - Name: fmt.Sprintf("%s CA Root Cert", config.Provider), - RootCert: newRootPEM, - Active: true, + return err } // Compare the new provider's root CA ID to the current one. If they @@ -240,6 +234,10 @@ func (s *ConnectCA) Roots( reply.Roots[i] = &structs.CARoot{ ID: r.ID, Name: r.Name, + SerialNumber: r.SerialNumber, + SigningKeyID: r.SigningKeyID, + NotBefore: r.NotBefore, + NotAfter: r.NotAfter, RootCert: r.RootCert, IntermediateCerts: r.IntermediateCerts, RaftIndex: r.RaftIndex, diff --git a/agent/consul/leader.go b/agent/consul/leader.go index b07a3622d..a06871888 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -4,6 +4,7 @@ import ( "fmt" "net" "strconv" + "strings" "sync" "time" @@ -427,15 +428,9 @@ func (s *Server) initializeCA() error { return fmt.Errorf("error getting root cert: %v", err) } - id, err := connect.CalculateCertFingerprint(rootPEM) + rootCA, err := parseCARoot(rootPEM, conf.Provider) if err != nil { - return fmt.Errorf("error parsing root fingerprint: %v", err) - } - rootCA := &structs.CARoot{ - ID: id, - Name: fmt.Sprintf("%s CA Root Cert", conf.Provider), - RootCert: rootPEM, - Active: true, + return err } // Check if the CA root is already initialized and exit if it is. @@ -478,6 +473,28 @@ func (s *Server) initializeCA() error { return nil } +// parseCARoot returns a filled-in structs.CARoot from a raw PEM value. 
+func parseCARoot(pemValue, provider string) (*structs.CARoot, error) { + id, err := connect.CalculateCertFingerprint(pemValue) + if err != nil { + return nil, fmt.Errorf("error parsing root fingerprint: %v", err) + } + rootCert, err := connect.ParseCert(pemValue) + if err != nil { + return nil, fmt.Errorf("error parsing root cert: %v", err) + } + return &structs.CARoot{ + ID: id, + Name: fmt.Sprintf("%s CA Root Cert", strings.Title(provider)), + SerialNumber: rootCert.SerialNumber.Uint64(), + SigningKeyID: connect.HexString(rootCert.AuthorityKeyId), + NotBefore: rootCert.NotBefore, + NotAfter: rootCert.NotAfter, + RootCert: pemValue, + Active: true, + }, nil +} + // createProvider returns a connect CA provider from the given config. func (s *Server) createCAProvider(conf *structs.CAConfiguration) (connect_ca.Provider, error) { switch conf.Provider { diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index ca60a677f..3a4ca8131 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -28,6 +28,17 @@ type CARoot struct { // opaque to Consul and is not used for anything internally. Name string + // SerialNumber is the x509 serial number of the certificate. + SerialNumber uint64 + + // SigningKeyID is the ID of the public key that corresponds to the + // private key used to sign the certificate. + SigningKeyID string + + // Time validity bounds. + NotBefore time.Time + NotAfter time.Time + // RootCert is the PEM-encoded public certificate. 
RootCert string From d1265bc38b7a99bad1221baa175997583209959c Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Wed, 9 May 2018 15:12:31 -0700 Subject: [PATCH 264/627] Rename some of the CA structs/files --- .../ca/{ca_provider.go => provider.go} | 0 ..._provider_consul.go => provider_consul.go} | 55 +++++-------------- ...consul_test.go => provider_consul_test.go} | 10 ++-- agent/connect/generate.go | 34 ++++++++++++ agent/connect_ca_endpoint_test.go | 7 +-- agent/consul/connect_ca_endpoint_test.go | 16 +++--- agent/consul/leader.go | 12 ++-- agent/consul/server.go | 4 +- 8 files changed, 73 insertions(+), 65 deletions(-) rename agent/connect/ca/{ca_provider.go => provider.go} (100%) rename agent/connect/ca/{ca_provider_consul.go => provider_consul.go} (85%) rename agent/connect/ca/{ca_provider_consul_test.go => provider_consul_test.go} (94%) create mode 100644 agent/connect/generate.go diff --git a/agent/connect/ca/ca_provider.go b/agent/connect/ca/provider.go similarity index 100% rename from agent/connect/ca/ca_provider.go rename to agent/connect/ca/provider.go diff --git a/agent/connect/ca/ca_provider_consul.go b/agent/connect/ca/provider_consul.go similarity index 85% rename from agent/connect/ca/ca_provider_consul.go rename to agent/connect/ca/provider_consul.go index 922472eb7..8fa1fb3d3 100644 --- a/agent/connect/ca/ca_provider_consul.go +++ b/agent/connect/ca/provider_consul.go @@ -2,8 +2,6 @@ package connect import ( "bytes" - "crypto/ecdsa" - "crypto/elliptic" "crypto/rand" "crypto/x509" "crypto/x509/pkix" @@ -20,26 +18,26 @@ import ( "github.com/mitchellh/mapstructure" ) -type ConsulCAProvider struct { +type ConsulProvider struct { config *structs.ConsulCAProviderConfig id string - delegate ConsulCAStateDelegate + delegate ConsulProviderStateDelegate sync.RWMutex } -type ConsulCAStateDelegate interface { +type ConsulProviderStateDelegate interface { State() *state.Store ApplyCARequest(*structs.CARequest) error } -// NewConsulCAProvider returns a new 
instance of the Consul CA provider, +// NewConsulProvider returns a new instance of the Consul CA provider, // bootstrapping its state in the state store necessary -func NewConsulCAProvider(rawConfig map[string]interface{}, delegate ConsulCAStateDelegate) (*ConsulCAProvider, error) { +func NewConsulProvider(rawConfig map[string]interface{}, delegate ConsulProviderStateDelegate) (*ConsulProvider, error) { conf, err := ParseConsulCAConfig(rawConfig) if err != nil { return nil, err } - provider := &ConsulCAProvider{ + provider := &ConsulProvider{ config: conf, delegate: delegate, id: fmt.Sprintf("%s,%s", conf.PrivateKey, conf.RootCert), @@ -81,7 +79,7 @@ func NewConsulCAProvider(rawConfig map[string]interface{}, delegate ConsulCAStat // Generate a private key if needed if conf.PrivateKey == "" { - pk, err := GeneratePrivateKey() + pk, err := connect.GeneratePrivateKey() if err != nil { return nil, err } @@ -127,7 +125,7 @@ func ParseConsulCAConfig(raw map[string]interface{}) (*structs.ConsulCAProviderC } // Return the active root CA and generate a new one if needed -func (c *ConsulCAProvider) ActiveRoot() (string, error) { +func (c *ConsulProvider) ActiveRoot() (string, error) { state := c.delegate.State() _, providerState, err := state.CAProviderState(c.id) if err != nil { @@ -139,13 +137,13 @@ func (c *ConsulCAProvider) ActiveRoot() (string, error) { // We aren't maintaining separate root/intermediate CAs for the builtin // provider, so just return the root. -func (c *ConsulCAProvider) ActiveIntermediate() (string, error) { +func (c *ConsulProvider) ActiveIntermediate() (string, error) { return c.ActiveRoot() } // We aren't maintaining separate root/intermediate CAs for the builtin // provider, so just generate a CSR for the active root. 
-func (c *ConsulCAProvider) GenerateIntermediate() (string, error) { +func (c *ConsulProvider) GenerateIntermediate() (string, error) { ca, err := c.ActiveIntermediate() if err != nil { return "", err @@ -157,7 +155,7 @@ func (c *ConsulCAProvider) GenerateIntermediate() (string, error) { } // Remove the state store entry for this provider instance. -func (c *ConsulCAProvider) Cleanup() error { +func (c *ConsulProvider) Cleanup() error { args := &structs.CARequest{ Op: structs.CAOpDeleteProviderState, ProviderState: &structs.CAConsulProviderState{ID: c.id}, @@ -171,7 +169,7 @@ func (c *ConsulCAProvider) Cleanup() error { // Sign returns a new certificate valid for the given SpiffeIDService // using the current CA. -func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { +func (c *ConsulProvider) Sign(csr *x509.CertificateRequest) (string, error) { // Lock during the signing so we don't use the same index twice // for different cert serial numbers. c.Lock() @@ -262,7 +260,7 @@ func (c *ConsulCAProvider) Sign(csr *x509.CertificateRequest) (string, error) { } // CrossSignCA returns the given intermediate CA cert signed by the current active root. -func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { +func (c *ConsulProvider) CrossSignCA(cert *x509.Certificate) (string, error) { c.Lock() defer c.Unlock() @@ -319,7 +317,7 @@ func (c *ConsulCAProvider) CrossSignCA(cert *x509.Certificate) (string, error) { // incrementProviderIndex does a write to increment the provider state store table index // used for serial numbers when generating certificates. 
-func (c *ConsulCAProvider) incrementProviderIndex(providerState *structs.CAConsulProviderState) error { +func (c *ConsulProvider) incrementProviderIndex(providerState *structs.CAConsulProviderState) error { newState := *providerState args := &structs.CARequest{ Op: structs.CAOpSetProviderState, @@ -332,31 +330,8 @@ func (c *ConsulCAProvider) incrementProviderIndex(providerState *structs.CAConsu return nil } -// GeneratePrivateKey returns a new private key -func GeneratePrivateKey() (string, error) { - var pk *ecdsa.PrivateKey - - pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return "", fmt.Errorf("error generating private key: %s", err) - } - - bs, err := x509.MarshalECPrivateKey(pk) - if err != nil { - return "", fmt.Errorf("error generating private key: %s", err) - } - - var buf bytes.Buffer - err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) - if err != nil { - return "", fmt.Errorf("error encoding private key: %s", err) - } - - return buf.String(), nil -} - // generateCA makes a new root CA using the current private key -func (c *ConsulCAProvider) generateCA(privateKey string, sn uint64) (string, error) { +func (c *ConsulProvider) generateCA(privateKey string, sn uint64) (string, error) { state := c.delegate.State() _, config, err := state.CAConfig() if err != nil { diff --git a/agent/connect/ca/ca_provider_consul_test.go b/agent/connect/ca/provider_consul_test.go similarity index 94% rename from agent/connect/ca/ca_provider_consul_test.go rename to agent/connect/ca/provider_consul_test.go index fd1c8ec29..9f8cc04b4 100644 --- a/agent/connect/ca/ca_provider_consul_test.go +++ b/agent/connect/ca/provider_consul_test.go @@ -74,7 +74,7 @@ func TestCAProvider_Bootstrap(t *testing.T) { conf := testConsulCAConfig() delegate := newMockDelegate(t, conf) - provider, err := NewConsulCAProvider(conf.Config, delegate) + provider, err := NewConsulProvider(conf.Config, delegate) assert.NoError(err) root, err := 
provider.ActiveRoot() @@ -104,7 +104,7 @@ func TestCAProvider_Bootstrap_WithCert(t *testing.T) { } delegate := newMockDelegate(t, conf) - provider, err := NewConsulCAProvider(conf.Config, delegate) + provider, err := NewConsulProvider(conf.Config, delegate) assert.NoError(err) root, err := provider.ActiveRoot() @@ -119,7 +119,7 @@ func TestCAProvider_SignLeaf(t *testing.T) { conf := testConsulCAConfig() delegate := newMockDelegate(t, conf) - provider, err := NewConsulCAProvider(conf.Config, delegate) + provider, err := NewConsulProvider(conf.Config, delegate) assert.NoError(err) spiffeService := &connect.SpiffeIDService{ @@ -143,7 +143,7 @@ func TestCAProvider_SignLeaf(t *testing.T) { assert.NoError(err) assert.Equal(parsed.URIs[0], spiffeService.URI()) assert.Equal(parsed.Subject.CommonName, "foo") - assert.Equal(parsed.SerialNumber.Uint64(), uint64(1)) + assert.Equal(uint64(2), parsed.SerialNumber.Uint64()) // Ensure the cert is valid now and expires within the correct limit. assert.True(parsed.NotAfter.Sub(time.Now()) < 3*24*time.Hour) @@ -180,7 +180,7 @@ func TestCAProvider_CrossSignCA(t *testing.T) { assert := assert.New(t) conf := testConsulCAConfig() delegate := newMockDelegate(t, conf) - provider, err := NewConsulCAProvider(conf.Config, delegate) + provider, err := NewConsulProvider(conf.Config, delegate) assert.NoError(err) // Make a new CA cert to get cross-signed. 
diff --git a/agent/connect/generate.go b/agent/connect/generate.go new file mode 100644 index 000000000..1226323f0 --- /dev/null +++ b/agent/connect/generate.go @@ -0,0 +1,34 @@ +package connect + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "fmt" +) + +// GeneratePrivateKey returns a new private key +func GeneratePrivateKey() (string, error) { + var pk *ecdsa.PrivateKey + + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return "", fmt.Errorf("error generating private key: %s", err) + } + + bs, err := x509.MarshalECPrivateKey(pk) + if err != nil { + return "", fmt.Errorf("error generating private key: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) + if err != nil { + return "", fmt.Errorf("error encoding private key: %s", err) + } + + return buf.String(), nil +} diff --git a/agent/connect_ca_endpoint_test.go b/agent/connect_ca_endpoint_test.go index 04abcfa9a..afaa5f049 100644 --- a/agent/connect_ca_endpoint_test.go +++ b/agent/connect_ca_endpoint_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/hashicorp/consul/agent/connect" - connect_ca "github.com/hashicorp/consul/agent/connect/ca" + ca "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/structs" "github.com/stretchr/testify/assert" ) @@ -77,7 +77,7 @@ func TestConnectCAConfig(t *testing.T) { assert.NoError(err) value := obj.(structs.CAConfiguration) - parsed, err := connect_ca.ParseConsulCAConfig(value.Config) + parsed, err := ca.ParseConsulCAConfig(value.Config) assert.NoError(err) assert.Equal("consul", value.Provider) assert.Equal(expected, parsed) @@ -107,8 +107,7 @@ func TestConnectCAConfig(t *testing.T) { assert.NoError(err) value := obj.(structs.CAConfiguration) - //t.Fatalf("%#v", value) - parsed, err := connect_ca.ParseConsulCAConfig(value.Config) + parsed, err := ca.ParseConsulCAConfig(value.Config) 
assert.NoError(err) assert.Equal("consul", value.Provider) assert.Equal(expected, parsed) diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index 49baddfb9..4609934ad 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -7,7 +7,7 @@ import ( "time" "github.com/hashicorp/consul/agent/connect" - connect_ca "github.com/hashicorp/consul/agent/connect/ca" + ca "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/net-rpc-msgpackrpc" @@ -83,9 +83,9 @@ func TestConnectCAConfig_GetSet(t *testing.T) { var reply structs.CAConfiguration assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)) - actual, err := connect_ca.ParseConsulCAConfig(reply.Config) + actual, err := ca.ParseConsulCAConfig(reply.Config) assert.NoError(err) - expected, err := connect_ca.ParseConsulCAConfig(s1.config.CAConfig.Config) + expected, err := ca.ParseConsulCAConfig(s1.config.CAConfig.Config) assert.NoError(err) assert.Equal(reply.Provider, s1.config.CAConfig.Provider) assert.Equal(actual, expected) @@ -118,9 +118,9 @@ func TestConnectCAConfig_GetSet(t *testing.T) { var reply structs.CAConfiguration assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)) - actual, err := connect_ca.ParseConsulCAConfig(reply.Config) + actual, err := ca.ParseConsulCAConfig(reply.Config) assert.NoError(err) - expected, err := connect_ca.ParseConsulCAConfig(newConfig.Config) + expected, err := ca.ParseConsulCAConfig(newConfig.Config) assert.NoError(err) assert.Equal(reply.Provider, newConfig.Provider) assert.Equal(actual, expected) @@ -150,7 +150,7 @@ func TestConnectCAConfig_TriggerRotation(t *testing.T) { // Update the provider config to use a new private key, which should // cause a rotation. 
- newKey, err := connect_ca.GeneratePrivateKey() + newKey, err := connect.GeneratePrivateKey() assert.NoError(err) newConfig := &structs.CAConfiguration{ Provider: "consul", @@ -220,9 +220,9 @@ func TestConnectCAConfig_TriggerRotation(t *testing.T) { var reply structs.CAConfiguration assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.ConfigurationGet", args, &reply)) - actual, err := connect_ca.ParseConsulCAConfig(reply.Config) + actual, err := ca.ParseConsulCAConfig(reply.Config) assert.NoError(err) - expected, err := connect_ca.ParseConsulCAConfig(newConfig.Config) + expected, err := ca.ParseConsulCAConfig(newConfig.Config) assert.NoError(err) assert.Equal(reply.Provider, newConfig.Provider) assert.Equal(actual, expected) diff --git a/agent/consul/leader.go b/agent/consul/leader.go index a06871888..579ff4b1d 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -11,7 +11,7 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/consul/acl" "github.com/hashicorp/consul/agent/connect" - connect_ca "github.com/hashicorp/consul/agent/connect/ca" + ca "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/agent/metadata" "github.com/hashicorp/consul/agent/structs" @@ -496,18 +496,18 @@ func parseCARoot(pemValue, provider string) (*structs.CARoot, error) { } // createProvider returns a connect CA provider from the given config. 
-func (s *Server) createCAProvider(conf *structs.CAConfiguration) (connect_ca.Provider, error) { +func (s *Server) createCAProvider(conf *structs.CAConfiguration) (ca.Provider, error) { switch conf.Provider { case structs.ConsulCAProvider: - return connect_ca.NewConsulCAProvider(conf.Config, &consulCADelegate{s}) + return ca.NewConsulProvider(conf.Config, &consulCADelegate{s}) default: return nil, fmt.Errorf("unknown CA provider %q", conf.Provider) } } -func (s *Server) getCAProvider() connect.CAProvider { +func (s *Server) getCAProvider() ca.Provider { retries := 0 - var result connect.CAProvider + var result ca.Provider for result == nil { s.caProviderLock.RLock() result = s.caProvider @@ -528,7 +528,7 @@ func (s *Server) getCAProvider() connect.CAProvider { return result } -func (s *Server) setCAProvider(newProvider connect_ca.Provider) { +func (s *Server) setCAProvider(newProvider ca.Provider) { s.caProviderLock.Lock() defer s.caProviderLock.Unlock() s.caProvider = newProvider diff --git a/agent/consul/server.go b/agent/consul/server.go index 871115c35..7b589d753 100644 --- a/agent/consul/server.go +++ b/agent/consul/server.go @@ -18,7 +18,7 @@ import ( "time" "github.com/hashicorp/consul/acl" - connect_ca "github.com/hashicorp/consul/agent/connect/ca" + ca "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/consul/autopilot" "github.com/hashicorp/consul/agent/consul/fsm" "github.com/hashicorp/consul/agent/consul/state" @@ -99,7 +99,7 @@ type Server struct { // caProvider is the current CA provider in use for Connect. This is // only non-nil when we are the leader. 
- caProvider connect_ca.Provider + caProvider ca.Provider caProviderLock sync.RWMutex // Consul configuration From c808833a78275a3dd92784efd772e9e929df02bb Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Tue, 8 May 2018 14:23:44 +0100 Subject: [PATCH 265/627] Return TrustDomain from CARoots RPC --- agent/connect/ca/provider_consul.go | 2 +- agent/connect/uri_signing.go | 12 ++++++++++++ agent/connect/uri_signing_test.go | 11 +++++++++++ agent/consul/connect_ca_endpoint.go | 23 +++++++++++++++++++++++ agent/consul/connect_ca_endpoint_test.go | 13 ++++++++++--- agent/structs/connect_ca.go | 15 +++++++++++++++ 6 files changed, 72 insertions(+), 4 deletions(-) diff --git a/agent/connect/ca/provider_consul.go b/agent/connect/ca/provider_consul.go index 8fa1fb3d3..d88a58bfc 100644 --- a/agent/connect/ca/provider_consul.go +++ b/agent/connect/ca/provider_consul.go @@ -346,7 +346,7 @@ func (c *ConsulProvider) generateCA(privateKey string, sn uint64) (string, error name := fmt.Sprintf("Consul CA %d", sn) // The URI (SPIFFE compatible) for the cert - id := &connect.SpiffeIDSigning{ClusterID: config.ClusterID, Domain: "consul"} + id := connect.SpiffeIDSigningForCluster(config) keyId, err := connect.KeyId(privKey.Public()) if err != nil { return "", err diff --git a/agent/connect/uri_signing.go b/agent/connect/uri_signing.go index 213f744d1..b43971ed7 100644 --- a/agent/connect/uri_signing.go +++ b/agent/connect/uri_signing.go @@ -27,3 +27,15 @@ func (id *SpiffeIDSigning) Authorize(ixn *structs.Intention) (bool, bool) { // Never authorize as a client. return false, true } + +// SpiffeIDSigningForCluster returns the SPIFFE signing identifier (trust +// domain) representation of the given CA config. +// +// NOTE(banks): we intentionally fix the tld `.consul` for now rather than tie +// this to the `domain` config used for DNS because changing DNS domain can't +// break all certificate validation. 
That does mean that DNS prefix might not +// match the identity URIs and so the trust domain might not actually resolve +// which we would like but don't actually need. +func SpiffeIDSigningForCluster(config *structs.CAConfiguration) *SpiffeIDSigning { + return &SpiffeIDSigning{ClusterID: config.ClusterID, Domain: "consul"} +} diff --git a/agent/connect/uri_signing_test.go b/agent/connect/uri_signing_test.go index a9be3c5e2..98babbc2d 100644 --- a/agent/connect/uri_signing_test.go +++ b/agent/connect/uri_signing_test.go @@ -3,6 +3,8 @@ package connect import ( "testing" + "github.com/hashicorp/consul/agent/structs" + "github.com/stretchr/testify/assert" ) @@ -13,3 +15,12 @@ func TestSpiffeIDSigningAuthorize(t *testing.T) { assert.False(t, auth) assert.True(t, ok) } + +func TestSpiffeIDSigningForCluster(t *testing.T) { + // For now it should just append .consul to the ID. + config := &structs.CAConfiguration{ + ClusterID: testClusterID, + } + id := SpiffeIDSigningForCluster(config) + assert.Equal(t, id.URI().String(), "spiffe://"+testClusterID+".consul") +} diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index 8acaa4ed0..f70c0d3a3 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -211,6 +211,29 @@ func (s *ConnectCA) Roots( return err } + // Load the ClusterID to generate TrustDomain. We do this outside the loop + // since by definition this value should be immutable once set for lifetime of + // the cluster so we don't need to look it up more than once. We also don't + // have to worry about non-atomicity between the config fetch transaction and + // the CARoots transaction below since this field must remain immutable. Do + // not re-use this state/config for other logic that might care about changes + // of config during the blocking query below. 
+ { + state := s.srv.fsm.State() + _, config, err := state.CAConfig() + if err != nil { + return err + } + // Build TrustDomain based on the ClusterID stored. + spiffeID := connect.SpiffeIDSigningForCluster(config) + uri := spiffeID.URI() + if uri == nil { + // Impossible(tm) but let's not panic + return errors.New("no trust domain found") + } + reply.TrustDomain = uri.Host + } + return s.srv.blockingQuery( &args.QueryOptions, &reply.QueryMeta, func(ws memdb.WatchSet, state *state.Store) error { diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index 4609934ad..655b1d7f4 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -2,10 +2,13 @@ package consul import ( "crypto/x509" + "fmt" "os" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/hashicorp/consul/agent/connect" ca "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/structs" @@ -27,6 +30,7 @@ func TestConnectCARoots(t *testing.T) { t.Parallel() assert := assert.New(t) + require := require.New(t) dir1, s1 := testServer(t) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -41,17 +45,19 @@ func TestConnectCARoots(t *testing.T) { ca2 := connect.TestCA(t, nil) ca2.Active = false idx, _, err := state.CARoots(nil) - assert.NoError(err) + require.NoError(err) ok, err := state.CARootSetCAS(idx, idx, []*structs.CARoot{ca1, ca2}) assert.True(ok) - assert.NoError(err) + require.NoError(err) + _, caCfg, err := state.CAConfig() + require.NoError(err) // Request args := &structs.DCSpecificRequest{ Datacenter: "dc1", } var reply structs.IndexedCARoots - assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply)) + require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Roots", args, &reply)) // Verify assert.Equal(ca1.ID, reply.ActiveRootID) @@ -61,6 +67,7 @@ func TestConnectCARoots(t *testing.T) { assert.Equal("", r.SigningCert) assert.Equal("", 
r.SigningKey) } + assert.Equal(fmt.Sprintf("%s.consul", caCfg.ClusterID), reply.TrustDomain) } func TestConnectCAConfig_GetSet(t *testing.T) { diff --git a/agent/structs/connect_ca.go b/agent/structs/connect_ca.go index 3a4ca8131..fa273a3e4 100644 --- a/agent/structs/connect_ca.go +++ b/agent/structs/connect_ca.go @@ -11,6 +11,21 @@ type IndexedCARoots struct { // the process of being rotated out. ActiveRootID string + // TrustDomain is the identification root for this Consul cluster. All + // certificates signed by the cluster's CA must have their identifying URI in + // this domain. + // + // This does not include the protocol (currently spiffe://) since we may + // implement other protocols in future with equivalent semantics. It should be + // compared against the "authority" section of a URI (i.e. host:port). + // + // NOTE(banks): Later we may support explicitly trusting external domains + // which may be encoded into the CARoot struct or a separate list but this + // domain identifier should be immutable and cluster-wide so deserves to be at + // the root of this response rather than duplicated through all CARoots that + // are not externally trusted entities. + TrustDomain string + // Roots is a list of root CA certs to trust. Roots []*CARoot From 5a1408f18608252f25ef2b17bd6d96a5253d3e1f Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 9 May 2018 14:25:48 +0100 Subject: [PATCH 266/627] Add CSR signing verification of service ACL, trust domain and datacenter. 
--- agent/connect/uri_signing.go | 35 ++++++- agent/connect/uri_signing_test.go | 97 ++++++++++++++++++++ agent/consul/connect_ca_endpoint.go | 52 ++++++++--- agent/consul/connect_ca_endpoint_test.go | 112 ++++++++++++++++++++++- 4 files changed, 277 insertions(+), 19 deletions(-) diff --git a/agent/connect/uri_signing.go b/agent/connect/uri_signing.go index b43971ed7..843f95596 100644 --- a/agent/connect/uri_signing.go +++ b/agent/connect/uri_signing.go @@ -3,6 +3,7 @@ package connect import ( "fmt" "net/url" + "strings" "github.com/hashicorp/consul/agent/structs" ) @@ -18,16 +19,48 @@ type SpiffeIDSigning struct { func (id *SpiffeIDSigning) URI() *url.URL { var result url.URL result.Scheme = "spiffe" - result.Host = fmt.Sprintf("%s.%s", id.ClusterID, id.Domain) + result.Host = id.Host() return &result } +// Host is the canonical representation as a DNS-compatible hostname. +func (id *SpiffeIDSigning) Host() string { + return strings.ToLower(fmt.Sprintf("%s.%s", id.ClusterID, id.Domain)) +} + // CertURI impl. func (id *SpiffeIDSigning) Authorize(ixn *structs.Intention) (bool, bool) { // Never authorize as a client. return false, true } +// CanSign takes any CertURI and returns whether or not this signing entity is +// allowed to sign CSRs for that entity (i.e. represents the trust domain for +// that entity). +// +// I choose to make this a fixed centralised method here for now rather than a +// method on CertURI interface since we don't intend this to be extensible +// outside and it's easier to reason about the security properties when they are +// all in one place with "whitelist" semantics. +func (id *SpiffeIDSigning) CanSign(cu CertURI) bool { + switch other := cu.(type) { + case *SpiffeIDSigning: + // We can only sign other CA certificates for the same trust domain. 
Note + // that we could open this up later for example to support external + // federation of roots and cross-signing external roots that have different + // URI structure but it's simpler to start off restrictive. + return id == other + case *SpiffeIDService: + // The host component of the service must be an exact match for now under + // ascii case folding (since hostnames are case-insensitive). Later we might + // worry about Unicode domains if we start allowing customisation beyond the + // built-in cluster ids. + return strings.ToLower(other.Host) == id.Host() + default: + return false + } +} + // SpiffeIDSigningForCluster returns the SPIFFE signing identifier (trust // domain) representation of the given CA config. // diff --git a/agent/connect/uri_signing_test.go b/agent/connect/uri_signing_test.go index 98babbc2d..2d8975858 100644 --- a/agent/connect/uri_signing_test.go +++ b/agent/connect/uri_signing_test.go @@ -1,6 +1,8 @@ package connect import ( + "net/url" + "strings" "testing" "github.com/hashicorp/consul/agent/structs" @@ -24,3 +26,98 @@ func TestSpiffeIDSigningForCluster(t *testing.T) { id := SpiffeIDSigningForCluster(config) assert.Equal(t, id.URI().String(), "spiffe://"+testClusterID+".consul") } + +// fakeCertURI is a CertURI implementation that our implementation doesn't know +// about +type fakeCertURI string + +func (f fakeCertURI) Authorize(*structs.Intention) (auth bool, match bool) { + return false, false +} + +func (f fakeCertURI) URI() *url.URL { + u, _ := url.Parse(string(f)) + return u +} +func TestSpiffeIDSigning_CanSign(t *testing.T) { + + testSigning := &SpiffeIDSigning{ + ClusterID: testClusterID, + Domain: "consul", + } + + tests := []struct { + name string + id *SpiffeIDSigning + input CertURI + want bool + }{ + { + name: "same signing ID", + id: testSigning, + input: testSigning, + want: true, + }, + { + name: "other signing ID", + id: testSigning, + input: &SpiffeIDSigning{ + ClusterID: "fakedomain", + Domain: "consul", + }, + 
want: false, + }, + { + name: "different TLD signing ID", + id: testSigning, + input: &SpiffeIDSigning{ + ClusterID: testClusterID, + Domain: "evil", + }, + want: false, + }, + { + name: "nil", + id: testSigning, + input: nil, + want: false, + }, + { + name: "unrecognised CertURI implementation", + id: testSigning, + input: fakeCertURI("spiffe://foo.bar/baz"), + want: false, + }, + { + name: "service - good", + id: testSigning, + input: &SpiffeIDService{testClusterID + ".consul", "default", "dc1", "web"}, + want: true, + }, + { + name: "service - good midex case", + id: testSigning, + input: &SpiffeIDService{strings.ToUpper(testClusterID) + ".CONsuL", "defAUlt", "dc1", "WEB"}, + want: true, + }, + { + name: "service - different cluster", + id: testSigning, + input: &SpiffeIDService{"55555555-4444-3333-2222-111111111111.consul", "default", "dc1", "web"}, + want: false, + }, + { + name: "service - different TLD", + id: testSigning, + input: &SpiffeIDService{testClusterID + ".fake", "default", "dc1", "web"}, + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.id.CanSign(tt.input) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index f70c0d3a3..a72a4998b 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -225,13 +225,8 @@ func (s *ConnectCA) Roots( return err } // Build TrustDomain based on the ClusterID stored. 
- spiffeID := connect.SpiffeIDSigningForCluster(config) - uri := spiffeID.URI() - if uri == nil { - // Impossible(tm) but let's not panic - return errors.New("no trust domain found") - } - reply.TrustDomain = uri.Host + signingID := connect.SpiffeIDSigningForCluster(config) + reply.TrustDomain = signingID.Host() } return s.srv.blockingQuery( @@ -297,11 +292,11 @@ func (s *ConnectCA) Sign( } // Parse the SPIFFE ID - spiffeId, err := connect.ParseCertURI(csr.URIs[0]) + spiffeID, err := connect.ParseCertURI(csr.URIs[0]) if err != nil { return err } - serviceId, ok := spiffeId.(*connect.SpiffeIDService) + serviceID, ok := spiffeID.(*connect.SpiffeIDService) if !ok { return fmt.Errorf("SPIFFE ID in CSR must be a service ID") } @@ -311,7 +306,35 @@ func (s *ConnectCA) Sign( return fmt.Errorf("internal error: CA provider is nil") } - // todo(kyhavlov): more validation on the CSR before signing + // Verify that the CSR entity is in the cluster's trust domain + state := s.srv.fsm.State() + _, config, err := state.CAConfig() + if err != nil { + return err + } + signingID := connect.SpiffeIDSigningForCluster(config) + if !signingID.CanSign(serviceID) { + return fmt.Errorf("SPIFFE ID in CSR from a different trust domain: %s, "+ + "we are %s", serviceID.Host, signingID.Host()) + } + + // Verify that the ACL token provided has permission to act as this service + rule, err := s.srv.resolveToken(args.Token) + if err != nil { + return err + } + if rule != nil && !rule.ServiceWrite(serviceID.Service, nil) { + return acl.ErrPermissionDenied + } + + // Verify that the DC in the service URI matches us. We might relax this + // requirement later but being restrictive for now is safer. + if serviceID.Datacenter != s.srv.config.Datacenter { + return fmt.Errorf("SPIFFE ID in CSR from a different datacenter: %s, "+ + "we are %s", serviceID.Datacenter, s.srv.config.Datacenter) + } + + // All seems to be in order, actually sign it. 
pem, err := provider.Sign(csr) if err != nil { return err @@ -322,9 +345,10 @@ func (s *ConnectCA) Sign( // the built-in provider being supported and the implementation detail that we // have to write a SerialIndex update to the provider config table for every // cert issued so in all cases this index will be higher than any previous - // sign response. This has to happen after the provider.Sign call to observe - // the index update. - modIdx, _, err := s.srv.fsm.State().CAConfig() + // sign response. This has to be reloaded after the provider.Sign call to + // observe the index update. + state = s.srv.fsm.State() + modIdx, _, err := state.CAConfig() if err != nil { return err } @@ -338,7 +362,7 @@ func (s *ConnectCA) Sign( *reply = structs.IssuedCert{ SerialNumber: connect.HexString(cert.SerialNumber.Bytes()), CertPEM: pem, - Service: serviceId.Service, + Service: serviceID.Service, ServiceURI: cert.URIs[0].String(), ValidAfter: cert.NotBefore, ValidBefore: cert.NotAfter, diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index 655b1d7f4..f20935877 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -241,6 +241,7 @@ func TestConnectCASign(t *testing.T) { t.Parallel() assert := assert.New(t) + require := require.New(t) dir1, s1 := testServer(t) defer os.RemoveAll(dir1) defer s1.Shutdown() @@ -251,30 +252,133 @@ func TestConnectCASign(t *testing.T) { // Generate a CSR and request signing spiffeId := connect.TestSpiffeIDService(t, "web") + spiffeId.Host = testGetClusterTrustDomain(t, s1) csr, _ := connect.TestCSR(t, spiffeId) args := &structs.CASignRequest{ Datacenter: "dc1", CSR: csr, } var reply structs.IssuedCert - assert.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply)) + require.NoError(msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply)) // Get the current CA state := s1.fsm.State() _, ca, err := state.CARootActive(nil) - 
assert.NoError(err) + require.NoError(err) // Verify that the cert is signed by the CA roots := x509.NewCertPool() assert.True(roots.AppendCertsFromPEM([]byte(ca.RootCert))) leaf, err := connect.ParseCert(reply.CertPEM) - assert.NoError(err) + require.NoError(err) _, err = leaf.Verify(x509.VerifyOptions{ Roots: roots, }) - assert.NoError(err) + require.NoError(err) // Verify other fields assert.Equal("web", reply.Service) assert.Equal(spiffeId.URI().String(), reply.ServiceURI) } + +func testGetClusterTrustDomain(t *testing.T, s *Server) string { + t.Helper() + state := s.fsm.State() + _, config, err := state.CAConfig() + require.NoError(t, err) + // Build TrustDomain based on the ClusterID stored. + signingID := connect.SpiffeIDSigningForCluster(config) + return signingID.Host() +} + +func TestConnectCASignValidation(t *testing.T) { + t.Parallel() + + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create an ACL token with service:write for web* + var webToken string + { + arg := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: ` + service "web" { + policy = "write" + }`, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + require.NoError(t, msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &webToken)) + } + + trustDomain := testGetClusterTrustDomain(t, s1) + + tests := []struct { + name string + id connect.CertURI + wantErr string + }{ + { + name: "different cluster", + id: &connect.SpiffeIDService{ + "55555555-4444-3333-2222-111111111111.consul", + "default", "dc1", "web"}, + wantErr: "different trust domain", + }, + { + name: "same cluster should validate", + id: &connect.SpiffeIDService{ + trustDomain, + 
"default", "dc1", "web"}, + wantErr: "", + }, + { + name: "same cluster, CSR for a different DC should NOT validate", + id: &connect.SpiffeIDService{ + trustDomain, + "default", "dc2", "web"}, + wantErr: "different datacenter", + }, + { + name: "same cluster and DC, different service should not have perms", + id: &connect.SpiffeIDService{ + trustDomain, + "default", "dc1", "db"}, + wantErr: "Permission denied", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + csr, _ := connect.TestCSR(t, tt.id) + args := &structs.CASignRequest{ + Datacenter: "dc1", + CSR: csr, + WriteRequest: structs.WriteRequest{Token: webToken}, + } + var reply structs.IssuedCert + err := msgpackrpc.CallWithCodec(codec, "ConnectCA.Sign", args, &reply) + if tt.wantErr == "" { + require.NoError(t, err) + // No other validation that is handled in different tests + } else { + require.Error(t, err) + require.Contains(t, err.Error(), tt.wantErr) + } + }) + } +} From 30d90b3be4dcdb0155b7a9185966ffd339bc138f Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 9 May 2018 17:15:29 +0100 Subject: [PATCH 267/627] Generate CSR using real trust-domain --- agent/agent_endpoint_test.go | 26 ++++++---- agent/cache-types/connect_ca_leaf.go | 46 +++++++++++++----- agent/cache-types/connect_ca_leaf_test.go | 5 +- agent/connect/ca/provider_consul.go | 4 +- agent/connect/csr.go | 59 +++++++++++++++++++++++ agent/connect/testing_ca.go | 5 +- agent/connect/uri_signing.go | 3 +- agent/consul/connect_ca_endpoint.go | 14 ++++-- agent/consul/connect_ca_endpoint_test.go | 2 +- agent/testagent.go | 3 +- 10 files changed, 136 insertions(+), 31 deletions(-) create mode 100644 agent/connect/csr.go diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index d4b55a50f..4749148e1 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2106,13 +2106,14 @@ func TestAgentConnectCARoots_empty(t *testing.T) { t.Parallel() assert := assert.New(t) + require := 
require.New(t) a := NewTestAgent(t.Name(), "connect { enabled = false }") defer a.Shutdown() req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) resp := httptest.NewRecorder() obj, err := a.srv.AgentConnectCARoots(resp, req) - assert.Nil(err) + require.NoError(err) value := obj.(structs.IndexedCARoots) assert.Equal(value.ActiveRootID, "") @@ -2122,6 +2123,7 @@ func TestAgentConnectCARoots_empty(t *testing.T) { func TestAgentConnectCARoots_list(t *testing.T) { t.Parallel() + assert := assert.New(t) require := require.New(t) a := NewTestAgent(t.Name(), "") defer a.Shutdown() @@ -2137,30 +2139,34 @@ func TestAgentConnectCARoots_list(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) resp := httptest.NewRecorder() obj, err := a.srv.AgentConnectCARoots(resp, req) - require.Nil(err) + require.NoError(err) value := obj.(structs.IndexedCARoots) - require.Equal(value.ActiveRootID, ca2.ID) - require.Len(value.Roots, 2) + assert.Equal(value.ActiveRootID, ca2.ID) + // Would like to assert that it's the same as the TestAgent domain but the + // only way to access that state via this package is by RPC to the server + // implementation running in TestAgent which is more or less a tautology. 
+ assert.NotEmpty(value.TrustDomain) + assert.Len(value.Roots, 2) // We should never have the secret information for _, r := range value.Roots { - require.Equal("", r.SigningCert) - require.Equal("", r.SigningKey) + assert.Equal("", r.SigningCert) + assert.Equal("", r.SigningKey) } // That should've been a cache miss, so no hit change - require.Equal(cacheHits, a.cache.Hits()) + assert.Equal(cacheHits, a.cache.Hits()) // Test caching { // List it again obj2, err := a.srv.AgentConnectCARoots(httptest.NewRecorder(), req) - require.Nil(err) - require.Equal(obj, obj2) + require.NoError(err) + assert.Equal(obj, obj2) // Should cache hit this time and not make request - require.Equal(cacheHits+1, a.cache.Hits()) + assert.Equal(cacheHits+1, a.cache.Hits()) cacheHits++ } diff --git a/agent/cache-types/connect_ca_leaf.go b/agent/cache-types/connect_ca_leaf.go index 1058ec26a..4070298df 100644 --- a/agent/cache-types/connect_ca_leaf.go +++ b/agent/cache-types/connect_ca_leaf.go @@ -1,6 +1,7 @@ package cachetype import ( + "errors" "fmt" "sync" "sync/atomic" @@ -9,9 +10,7 @@ import ( "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" - // NOTE(mitcehllh): This is temporary while certs are stubbed out. - "github.com/mitchellh/go-testing-interface" ) // Recommended name for registration. @@ -97,16 +96,41 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache // by the above channel). } - // Create a CSR. - // TODO(mitchellh): This is obviously not production ready! The host - // needs a correct host ID, and we probably don't want to use TestCSR - // and want a non-test-specific way to create a CSR. - csr, pk := connect.TestCSR(&testing.RuntimeT{}, &connect.SpiffeIDService{ - Host: "11111111-2222-3333-4444-555555555555.consul", - Namespace: "default", + // Need to lookup RootCAs response to discover trust domain. 
First just lookup + // with no blocking info - this should be a cache hit most of the time. + rawRoots, err := c.Cache.Get(ConnectCARootName, &structs.DCSpecificRequest{ Datacenter: reqReal.Datacenter, - Service: reqReal.Service, }) + if err != nil { + return result, err + } + roots, ok := rawRoots.(*structs.IndexedCARoots) + if !ok { + return result, errors.New("invalid RootCA response type") + } + if roots.TrustDomain == "" { + return result, errors.New("cluster has no CA bootstrapped") + } + + // Build the service ID + serviceID := &connect.SpiffeIDService{ + Host: roots.TrustDomain, + Datacenter: reqReal.Datacenter, + Namespace: "default", + Service: reqReal.Service, + } + + // Create a new private key + pk, pkPEM, err := connect.GeneratePrivateKey() + if err != nil { + return result, err + } + + // Create a CSR. + csr, err := connect.CreateCSR(serviceID, pk) + if err != nil { + return result, err + } // Request signing var reply structs.IssuedCert @@ -117,7 +141,7 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache if err := c.RPC.RPC("ConnectCA.Sign", &args, &reply); err != nil { return result, err } - reply.PrivateKeyPEM = pk + reply.PrivateKeyPEM = pkPEM // Lock the issued certs map so we can insert it. We only insert if // we didn't happen to get a newer one. 
This should never happen since diff --git a/agent/cache-types/connect_ca_leaf_test.go b/agent/cache-types/connect_ca_leaf_test.go index 0612aed21..d55caf408 100644 --- a/agent/cache-types/connect_ca_leaf_test.go +++ b/agent/cache-types/connect_ca_leaf_test.go @@ -25,10 +25,11 @@ func TestConnectCALeaf_changingRoots(t *testing.T) { defer close(rootsCh) rootsCh <- structs.IndexedCARoots{ ActiveRootID: "1", + TrustDomain: "fake-trust-domain.consul", QueryMeta: structs.QueryMeta{Index: 1}, } - // Instrument ConnectCA.Sign to + // Instrument ConnectCA.Sign to return signed cert var resp *structs.IssuedCert var idx uint64 rpc.On("RPC", "ConnectCA.Sign", mock.Anything, mock.Anything).Return(nil). @@ -67,6 +68,7 @@ func TestConnectCALeaf_changingRoots(t *testing.T) { // Let's send in new roots, which should trigger the sign req rootsCh <- structs.IndexedCARoots{ ActiveRootID: "2", + TrustDomain: "fake-trust-domain.consul", QueryMeta: structs.QueryMeta{Index: 2}, } select { @@ -101,6 +103,7 @@ func TestConnectCALeaf_expiringLeaf(t *testing.T) { defer close(rootsCh) rootsCh <- structs.IndexedCARoots{ ActiveRootID: "1", + TrustDomain: "fake-trust-domain.consul", QueryMeta: structs.QueryMeta{Index: 1}, } diff --git a/agent/connect/ca/provider_consul.go b/agent/connect/ca/provider_consul.go index d88a58bfc..20641a16c 100644 --- a/agent/connect/ca/provider_consul.go +++ b/agent/connect/ca/provider_consul.go @@ -79,7 +79,7 @@ func NewConsulProvider(rawConfig map[string]interface{}, delegate ConsulProvider // Generate a private key if needed if conf.PrivateKey == "" { - pk, err := connect.GeneratePrivateKey() + _, pk, err := connect.GeneratePrivateKey() if err != nil { return nil, err } @@ -247,7 +247,7 @@ func (c *ConsulProvider) Sign(csr *x509.CertificateRequest) (string, error) { } err = pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: bs}) if err != nil { - return "", fmt.Errorf("error encoding private key: %s", err) + return "", fmt.Errorf("error encoding certificate: 
%s", err) } err = c.incrementProviderIndex(providerState) diff --git a/agent/connect/csr.go b/agent/connect/csr.go new file mode 100644 index 000000000..4b975d06c --- /dev/null +++ b/agent/connect/csr.go @@ -0,0 +1,59 @@ +package connect + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/pem" + "fmt" + "net/url" +) + +// CreateCSR returns a CSR to sign the given service along with the PEM-encoded +// private key for this certificate. +func CreateCSR(uri CertURI, privateKey crypto.Signer) (string, error) { + template := &x509.CertificateRequest{ + URIs: []*url.URL{uri.URI()}, + SignatureAlgorithm: x509.ECDSAWithSHA256, + } + + // Create the CSR itself + var csrBuf bytes.Buffer + bs, err := x509.CreateCertificateRequest(rand.Reader, template, privateKey) + if err != nil { + return "", err + } + + err = pem.Encode(&csrBuf, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: bs}) + if err != nil { + return "", err + } + + return csrBuf.String(), nil +} + +// GeneratePrivateKey generates a new Private key +func GeneratePrivateKey() (crypto.Signer, string, error) { + var pk *ecdsa.PrivateKey + + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, "", fmt.Errorf("error generating private key: %s", err) + } + + bs, err := x509.MarshalECPrivateKey(pk) + if err != nil { + return nil, "", fmt.Errorf("error generating private key: %s", err) + } + + var buf bytes.Buffer + err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) + if err != nil { + return nil, "", fmt.Errorf("error encoding private key: %s", err) + } + + return pk, buf.String(), nil +} diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index 552c57535..ba2d29203 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -161,7 +161,10 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) (string, string } // Generate fresh private key - pkSigner, pkPEM 
:= testPrivateKey(t) + pkSigner, pkPEM, err := GeneratePrivateKey() + if err != nil { + t.Fatalf("failed to generate private key: %s", err) + } // Cert template for generation template := x509.Certificate{ diff --git a/agent/connect/uri_signing.go b/agent/connect/uri_signing.go index 843f95596..d934360eb 100644 --- a/agent/connect/uri_signing.go +++ b/agent/connect/uri_signing.go @@ -62,7 +62,8 @@ func (id *SpiffeIDSigning) CanSign(cu CertURI) bool { } // SpiffeIDSigningForCluster returns the SPIFFE signing identifier (trust -// domain) representation of the given CA config. +// domain) representation of the given CA config. If config is nil this function +// will panic. // // NOTE(banks): we intentionally fix the tld `.consul` for now rather than tie // this to the `domain` config used for DNS because changing DNS domain can't diff --git a/agent/consul/connect_ca_endpoint.go b/agent/consul/connect_ca_endpoint.go index a72a4998b..1e24bac7b 100644 --- a/agent/consul/connect_ca_endpoint.go +++ b/agent/consul/connect_ca_endpoint.go @@ -224,9 +224,17 @@ func (s *ConnectCA) Roots( if err != nil { return err } - // Build TrustDomain based on the ClusterID stored. - signingID := connect.SpiffeIDSigningForCluster(config) - reply.TrustDomain = signingID.Host() + // Check CA is actually bootstrapped... + if config != nil { + // Build TrustDomain based on the ClusterID stored. + signingID := connect.SpiffeIDSigningForCluster(config) + if signingID == nil { + // If CA is bootstrapped at all then this should never happen but be + // defensive. 
+ return errors.New("no cluster trust domain setup") + } + reply.TrustDomain = signingID.Host() + } } return s.srv.blockingQuery( diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index f20935877..ac64ceb30 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -157,7 +157,7 @@ func TestConnectCAConfig_TriggerRotation(t *testing.T) { // Update the provider config to use a new private key, which should // cause a rotation. - newKey, err := connect.GeneratePrivateKey() + _, newKey, err := connect.GeneratePrivateKey() assert.NoError(err) newConfig := &structs.CAConfiguration{ Provider: "consul", diff --git a/agent/testagent.go b/agent/testagent.go index c2e4ddf01..724b0c80e 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -16,6 +16,8 @@ import ( "time" metrics "github.com/armon/go-metrics" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/consul/agent/config" "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" @@ -23,7 +25,6 @@ import ( "github.com/hashicorp/consul/lib/freeport" "github.com/hashicorp/consul/logger" "github.com/hashicorp/consul/testutil/retry" - uuid "github.com/hashicorp/go-uuid" ) func init() { From 5abf47472d0422c2eed57185e76730299e177f87 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 9 May 2018 20:30:43 +0100 Subject: [PATCH 268/627] Verify trust domain on /authorize calls --- agent/agent_endpoint.go | 28 ++++++++- agent/agent_endpoint_test.go | 106 +++++++++++++++++++++++++++----- agent/cache/cache.go | 2 +- agent/connect/testing_spiffe.go | 8 ++- 4 files changed, 123 insertions(+), 21 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 32b326867..c9afa55db 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1159,8 +1159,30 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R return nil, acl.ErrPermissionDenied 
} - // TODO(mitchellh): we need to verify more things here, such as the - // trust domain, blacklist lookup of the serial, etc. + // Validate the trust domain matches ours. Later we will support explicit + // external federation but not built yet. + rootArgs := &structs.DCSpecificRequest{Datacenter: s.agent.config.Datacenter} + raw, err := s.agent.cache.Get(cachetype.ConnectCARootName, rootArgs) + if err != nil { + return nil, err + } + + roots, ok := raw.(*structs.IndexedCARoots) + if !ok { + return nil, fmt.Errorf("internal error: roots response type not correct") + } + if roots.TrustDomain == "" { + return nil, fmt.Errorf("connect CA not bootstrapped yet") + } + if roots.TrustDomain != strings.ToLower(uriService.Host) { + return &connectAuthorizeResp{ + Authorized: false, + Reason: fmt.Sprintf("Identity from an external trust domain: %s", + uriService.Host), + }, nil + } + + // TODO(banks): Implement revocation list checking here. // Get the intentions for this target service. args := &structs.IntentionQueryRequest{ @@ -1177,7 +1199,7 @@ func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.R } args.Token = token - raw, err := s.agent.cache.Get(cachetype.IntentionMatchName, args) + raw, err = s.agent.cache.Get(cachetype.IntentionMatchName, args) if err != nil { return nil, err } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 4749148e1..c1fd16eb9 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -3286,6 +3286,17 @@ func TestAgentConnectAuthorize_idNotService(t *testing.T) { assert.Contains(obj.Reason, "must be a valid") } +func testFetchTrustDomain(t *testing.T, a *TestAgent) string { + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentConnectCARoots(resp, req) + require.NoError(t, err) + + value := obj.(structs.IndexedCARoots) + require.NotEmpty(t, value.TrustDomain) + return value.TrustDomain +} + // 
Test when there is an intention allowing the connection func TestAgentConnectAuthorize_allow(t *testing.T) { t.Parallel() @@ -3296,6 +3307,8 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { target := "db" + trustDomain := testFetchTrustDomain(t, a) + // Create some intentions var ixnId string { @@ -3317,8 +3330,9 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { cacheHits := a.cache.Hits() args := &structs.ConnectAuthorizeRequest{ - Target: target, - ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), + Target: target, + ClientCertURI: connect.TestSpiffeIDServiceWithHost(t, "web", trustDomain). + URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -3330,8 +3344,13 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { require.True(obj.Authorized) require.Contains(obj.Reason, "Matched") - // That should've been a cache miss, so not hit change - require.Equal(cacheHits, a.cache.Hits()) + // That should've been a cache miss, so no hit change, however since + // testFetchTrustDomain already called Roots and caused it to be in cache, the + // authorize call above will also call it and see a cache hit for the Roots + // RPC. In other words, there are 2 cached calls in /authorize and we always + // expect one of them to be a hit. So asserting only 1 happened is as close as + // we can get to verifying that the intention match RPC was a hit. + require.Equal(cacheHits+1, a.cache.Hits()) // Make the request again { @@ -3346,9 +3365,10 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { require.Contains(obj.Reason, "Matched") } - // That should've been a cache hit - require.Equal(cacheHits+1, a.cache.Hits()) - cacheHits++ + // That should've been a cache hit. We add the one hit from Roots from first + // call as well as the 2 from this call (Roots + Intentions). 
+ require.Equal(cacheHits+1+2, a.cache.Hits()) + cacheHits = a.cache.Hits() // Change the intention { @@ -3384,9 +3404,9 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { } // That should've been a cache hit, too, since it updated in the - // background. - require.Equal(cacheHits+1, a.cache.Hits()) - cacheHits++ + // background. (again 2 hits for Roots + Intentions) + require.Equal(cacheHits+2, a.cache.Hits()) + cacheHits += 2 } // Test when there is an intention denying the connection @@ -3399,6 +3419,8 @@ func TestAgentConnectAuthorize_deny(t *testing.T) { target := "db" + trustDomain := testFetchTrustDomain(t, a) + // Create some intentions { req := structs.IntentionRequest{ @@ -3417,8 +3439,9 @@ func TestAgentConnectAuthorize_deny(t *testing.T) { } args := &structs.ConnectAuthorizeRequest{ - Target: target, - ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), + Target: target, + ClientCertURI: connect.TestSpiffeIDServiceWithHost(t, "web", trustDomain). + URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -3431,6 +3454,53 @@ func TestAgentConnectAuthorize_deny(t *testing.T) { assert.Contains(obj.Reason, "Matched") } +// Test when there is an intention allowing service but for a different trust +// domain. 
+func TestAgentConnectAuthorize_denyTrustDomain(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + target := "db" + + // Create some intentions + { + req := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + req.Intention.SourceNS = structs.IntentionDefaultNamespace + req.Intention.SourceName = "web" + req.Intention.DestinationNS = structs.IntentionDefaultNamespace + req.Intention.DestinationName = target + req.Intention.Action = structs.IntentionActionAllow + + var reply string + assert.Nil(a.RPC("Intention.Apply", &req, &reply)) + } + + { + args := &structs.ConnectAuthorizeRequest{ + Target: target, + // Rely on the test trust domain this will choose to not match the random + // one picked on agent startup. + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), + } + req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) + resp := httptest.NewRecorder() + respRaw, err := a.srv.AgentConnectAuthorize(resp, req) + assert.Nil(err) + assert.Equal(200, resp.Code) + + obj := respRaw.(*connectAuthorizeResp) + assert.False(obj.Authorized) + assert.Contains(obj.Reason, "Identity from an external trust domain") + } +} + func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { t.Parallel() @@ -3440,6 +3510,8 @@ func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { target := "db" + trustDomain := testFetchTrustDomain(t, a) + // Create some intentions { // Deny wildcard to DB @@ -3477,8 +3549,9 @@ func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { // Web should be allowed { args := &structs.ConnectAuthorizeRequest{ - Target: target, - ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), + Target: target, + ClientCertURI: connect.TestSpiffeIDServiceWithHost(t, "web", trustDomain). 
+ URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -3494,8 +3567,9 @@ func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { // API should be denied { args := &structs.ConnectAuthorizeRequest{ - Target: target, - ClientCertURI: connect.TestSpiffeIDService(t, "api").URI().String(), + Target: target, + ClientCertURI: connect.TestSpiffeIDServiceWithHost(t, "api", trustDomain). + URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() diff --git a/agent/cache/cache.go b/agent/cache/cache.go index 1b4653cb4..e2eee03d8 100644 --- a/agent/cache/cache.go +++ b/agent/cache/cache.go @@ -72,7 +72,7 @@ type Cache struct { // of "//" in order to properly partition // requests to different datacenters and ACL tokens. This format has some // big drawbacks: we can't evict by datacenter, ACL token, etc. For an - // initial implementaiton this works and the tests are agnostic to the + // initial implementation this works and the tests are agnostic to the // internal storage format so changing this should be possible safely. entriesLock sync.RWMutex entries map[string]cacheEntry diff --git a/agent/connect/testing_spiffe.go b/agent/connect/testing_spiffe.go index d6a70cb81..42db76495 100644 --- a/agent/connect/testing_spiffe.go +++ b/agent/connect/testing_spiffe.go @@ -6,8 +6,14 @@ import ( // TestSpiffeIDService returns a SPIFFE ID representing a service. func TestSpiffeIDService(t testing.T, service string) *SpiffeIDService { + return TestSpiffeIDServiceWithHost(t, service, testClusterID+".consul") +} + +// TestSpiffeIDServiceWithHost returns a SPIFFE ID representing a service with +// the specified trust domain. 
+func TestSpiffeIDServiceWithHost(t testing.T, service, host string) *SpiffeIDService { return &SpiffeIDService{ - Host: testClusterID + ".consul", + Host: host, Namespace: "default", Datacenter: "dc1", Service: service, From bdd30b191b397861273a971a3a1abd64024b38bb Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Wed, 9 May 2018 20:34:14 +0100 Subject: [PATCH 269/627] Comment cleanup --- agent/cache-types/connect_ca_leaf.go | 1 - 1 file changed, 1 deletion(-) diff --git a/agent/cache-types/connect_ca_leaf.go b/agent/cache-types/connect_ca_leaf.go index 4070298df..ef354c9ce 100644 --- a/agent/cache-types/connect_ca_leaf.go +++ b/agent/cache-types/connect_ca_leaf.go @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/consul/agent/cache" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" - // NOTE(mitcehllh): This is temporary while certs are stubbed out. ) // Recommended name for registration. From 834ed1d25f086a9beb40d392a4b5e18f5ab42b74 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 10 May 2018 17:04:33 +0100 Subject: [PATCH 270/627] Fixed many tests after rebase. Some still failing and seem unrelated to any connect changes. 
--- agent/agent.go | 53 ++++++++++++++++----- agent/agent_endpoint.go | 5 +- agent/agent_endpoint_test.go | 60 +++++++----------------- agent/agent_test.go | 40 ++++++++++++++++ agent/cache-types/connect_ca_leaf.go | 7 ++- agent/connect/testing_ca.go | 12 ++--- agent/connect/testing_spiffe.go | 2 +- agent/connect/uri_signing_test.go | 14 +++--- agent/consul/connect_ca_endpoint_test.go | 40 +++++++--------- agent/consul/leader.go | 12 +++-- agent/consul/server_test.go | 6 +++ agent/testagent.go | 4 ++ api/agent_test.go | 6 +-- api/connect_ca.go | 1 + api/connect_ca_test.go | 41 +++++++++++++--- connect/tls_test.go | 6 +-- testutil/server.go | 7 +++ 17 files changed, 199 insertions(+), 117 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 77045c69e..eb9e203dc 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -932,6 +932,25 @@ func (a *Agent) consulConfig() (*consul.Config, error) { if a.config.ConnectEnabled { base.ConnectEnabled = true + // Allow config to specify cluster_id provided it's a valid UUID. This is + // meant only for tests where a deterministic ID makes fixtures much simpler + // to work with but since it's only read on initial cluster bootstrap it's not + // that much of a liability in production. The worst a user could do is + // configure logically separate clusters with same ID by mistake but we can + // avoid documenting this is even an option. 
+ if clusterID, ok := a.config.ConnectCAConfig["cluster_id"]; ok { + if cIDStr, ok := clusterID.(string); ok { + if _, err := uuid.ParseUUID(cIDStr); err == nil { + // Valid UUID configured, use that + base.CAConfig.ClusterID = cIDStr + } + } + if base.CAConfig.ClusterID == "" { + a.logger.Println("[WARN] connect CA config cluster_id specified but ", + "is not a valid UUID, ignoring") + } + } + if a.config.ConnectCAProvider != "" { base.CAConfig.Provider = a.config.ConnectCAProvider @@ -2116,20 +2135,25 @@ func (a *Agent) RemoveProxy(proxyID string, persist bool) error { } // verifyProxyToken takes a token and attempts to verify it against the -// targetService name. If targetProxy is specified, then the local proxy -// token must exactly match the given proxy ID. -// cert, config, etc.). +// targetService name. If targetProxy is specified, then the local proxy token +// must exactly match the given proxy ID. cert, config, etc.). // -// The given token may be a local-only proxy token or it may be an ACL -// token. We will attempt to verify the local proxy token first. -func (a *Agent) verifyProxyToken(token, targetService, targetProxy string) error { +// The given token may be a local-only proxy token or it may be an ACL token. We +// will attempt to verify the local proxy token first. +// +// The effective ACL token is returned along with any error. In the case the +// token matches a proxy token, then the ACL token used to register that proxy's +// target service is returned for use in any RPC calls the proxy needs to make +// on behalf of that service. If the token was an ACL token already then it is +// always returned. Provided error is nil, a valid ACL token is always returned. +func (a *Agent) verifyProxyToken(token, targetService, targetProxy string) (string, error) { // If we specify a target proxy, we look up that proxy directly. Otherwise, // we resolve with any proxy we can find. 
var proxy *local.ManagedProxy if targetProxy != "" { proxy = a.State.Proxy(targetProxy) if proxy == nil { - return fmt.Errorf("unknown proxy service ID: %q", targetProxy) + return "", fmt.Errorf("unknown proxy service ID: %q", targetProxy) } // If the token DOESN'T match, then we reset the proxy which will @@ -2148,10 +2172,13 @@ func (a *Agent) verifyProxyToken(token, targetService, targetProxy string) error // service. if proxy != nil { if proxy.Proxy.TargetServiceID != targetService { - return acl.ErrPermissionDenied + return "", acl.ErrPermissionDenied } - return nil + // Resolve the actual ACL token used to register the proxy/service and + // return that for use in RPC calls. + aclToken := a.State.ServiceToken(targetService) + return aclToken, nil } // Retrieve the service specified. This should always exist because @@ -2159,7 +2186,7 @@ func (a *Agent) verifyProxyToken(token, targetService, targetProxy string) error // only be called for local services. service := a.State.Service(targetService) if service == nil { - return fmt.Errorf("unknown service ID: %s", targetService) + return "", fmt.Errorf("unknown service ID: %s", targetService) } // Doesn't match, we have to do a full token resolution. The required @@ -2168,13 +2195,13 @@ func (a *Agent) verifyProxyToken(token, targetService, targetProxy string) error // is usually present in the configuration. rule, err := a.resolveToken(token) if err != nil { - return err + return "", err } if rule != nil && !rule.ServiceWrite(service.Service, nil) { - return acl.ErrPermissionDenied + return "", acl.ErrPermissionDenied } - return nil + return token, nil } func (a *Agent) cancelCheckMonitors(checkID types.CheckID) { diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index c9afa55db..6a5126fa2 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -927,10 +927,11 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. // Verify the proxy token. 
This will check both the local proxy token // as well as the ACL if the token isn't local. - err := s.agent.verifyProxyToken(qOpts.Token, id, "") + effectiveToken, err := s.agent.verifyProxyToken(qOpts.Token, id, "") if err != nil { return nil, err } + args.Token = effectiveToken raw, err := s.agent.cache.Get(cachetype.ConnectCALeafName, &args) if err != nil { @@ -982,7 +983,7 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http } // Validate the ACL token - err := s.agent.verifyProxyToken(token, proxy.Proxy.TargetServiceID, id) + _, err := s.agent.verifyProxyToken(token, proxy.Proxy.TargetServiceID, id) if err != nil { return "", nil, err } diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index c1fd16eb9..ac2d28d00 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -3286,17 +3286,6 @@ func TestAgentConnectAuthorize_idNotService(t *testing.T) { assert.Contains(obj.Reason, "must be a valid") } -func testFetchTrustDomain(t *testing.T, a *TestAgent) string { - req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/roots", nil) - resp := httptest.NewRecorder() - obj, err := a.srv.AgentConnectCARoots(resp, req) - require.NoError(t, err) - - value := obj.(structs.IndexedCARoots) - require.NotEmpty(t, value.TrustDomain) - return value.TrustDomain -} - // Test when there is an intention allowing the connection func TestAgentConnectAuthorize_allow(t *testing.T) { t.Parallel() @@ -3307,8 +3296,6 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { target := "db" - trustDomain := testFetchTrustDomain(t, a) - // Create some intentions var ixnId string { @@ -3330,9 +3317,8 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { cacheHits := a.cache.Hits() args := &structs.ConnectAuthorizeRequest{ - Target: target, - ClientCertURI: connect.TestSpiffeIDServiceWithHost(t, "web", trustDomain). 
- URI().String(), + Target: target, + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -3344,13 +3330,9 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { require.True(obj.Authorized) require.Contains(obj.Reason, "Matched") - // That should've been a cache miss, so no hit change, however since - // testFetchTrustDomain already called Roots and caused it to be in cache, the - // authorize call above will also call it and see a cache hit for the Roots - // RPC. In other words, there are 2 cached calls in /authorize and we always - // expect one of them to be a hit. So asserting only 1 happened is as close as - // we can get to verifying that the intention match RPC was a hit. - require.Equal(cacheHits+1, a.cache.Hits()) + // That should've been a cache miss (for both Intentions and Roots, so no hit + // change). + require.Equal(cacheHits, a.cache.Hits()) // Make the request again { @@ -3365,10 +3347,9 @@ func TestAgentConnectAuthorize_allow(t *testing.T) { require.Contains(obj.Reason, "Matched") } - // That should've been a cache hit. We add the one hit from Roots from first - // call as well as the 2 from this call (Roots + Intentions). - require.Equal(cacheHits+1+2, a.cache.Hits()) - cacheHits = a.cache.Hits() + // That should've been a cache hit. We add 2 (Roots + Intentions). + require.Equal(cacheHits+2, a.cache.Hits()) + cacheHits += 2 // Change the intention { @@ -3419,8 +3400,6 @@ func TestAgentConnectAuthorize_deny(t *testing.T) { target := "db" - trustDomain := testFetchTrustDomain(t, a) - // Create some intentions { req := structs.IntentionRequest{ @@ -3439,9 +3418,8 @@ func TestAgentConnectAuthorize_deny(t *testing.T) { } args := &structs.ConnectAuthorizeRequest{ - Target: target, - ClientCertURI: connect.TestSpiffeIDServiceWithHost(t, "web", trustDomain). 
- URI().String(), + Target: target, + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -3484,10 +3462,8 @@ func TestAgentConnectAuthorize_denyTrustDomain(t *testing.T) { { args := &structs.ConnectAuthorizeRequest{ - Target: target, - // Rely on the test trust domain this will choose to not match the random - // one picked on agent startup. - ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), + Target: target, + ClientCertURI: "spiffe://fake-domain.consul/ns/default/dc/dc1/svc/web", } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -3510,8 +3486,6 @@ func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { target := "db" - trustDomain := testFetchTrustDomain(t, a) - // Create some intentions { // Deny wildcard to DB @@ -3549,9 +3523,8 @@ func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { // Web should be allowed { args := &structs.ConnectAuthorizeRequest{ - Target: target, - ClientCertURI: connect.TestSpiffeIDServiceWithHost(t, "web", trustDomain). - URI().String(), + Target: target, + ClientCertURI: connect.TestSpiffeIDService(t, "web").URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() @@ -3567,9 +3540,8 @@ func TestAgentConnectAuthorize_denyWildcard(t *testing.T) { // API should be denied { args := &structs.ConnectAuthorizeRequest{ - Target: target, - ClientCertURI: connect.TestSpiffeIDServiceWithHost(t, "api", trustDomain). 
- URI().String(), + Target: target, + ClientCertURI: connect.TestSpiffeIDService(t, "api").URI().String(), } req, _ := http.NewRequest("POST", "/v1/agent/connect/authorize", jsonReader(args)) resp := httptest.NewRecorder() diff --git a/agent/agent_test.go b/agent/agent_test.go index 6219b70da..911ed63a0 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -15,7 +15,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/hashicorp/consul/agent/checks" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/testutil" @@ -52,6 +55,43 @@ func TestAgent_MultiStartStop(t *testing.T) { } } +func TestAgent_ConnectClusterIDConfig(t *testing.T) { + tests := []struct { + name string + hcl string + wantClusterID string + }{ + { + name: "default TestAgent has fixed cluster id", + hcl: "", + wantClusterID: connect.TestClusterID, + }, + { + name: "no cluster ID specified remains null", + hcl: "connect { enabled = true }", + wantClusterID: "", + }, + { + name: "non-UUID cluster_id is ignored", + hcl: `connect { + enabled = true + ca_config { + cluster_id = "fake-id" + } + }`, + wantClusterID: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + a := NewTestAgent("test", tt.hcl) + cfg := a.consulConfig() + assert.Equal(t, tt.wantClusterID, cfg.CAConfig.ClusterID) + }) + } +} + func TestAgent_StartStop(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") diff --git a/agent/cache-types/connect_ca_leaf.go b/agent/cache-types/connect_ca_leaf.go index ef354c9ce..2316acab1 100644 --- a/agent/cache-types/connect_ca_leaf.go +++ b/agent/cache-types/connect_ca_leaf.go @@ -134,8 +134,9 @@ func (c *ConnectCALeaf) Fetch(opts cache.FetchOptions, req cache.Request) (cache // Request signing var reply structs.IssuedCert args := structs.CASignRequest{ - Datacenter: reqReal.Datacenter, - CSR: csr, + WriteRequest: 
structs.WriteRequest{Token: reqReal.Token}, + Datacenter: reqReal.Datacenter, + CSR: csr, } if err := c.RPC.RPC("ConnectCA.Sign", &args, &reply); err != nil { return result, err @@ -217,6 +218,7 @@ func (c *ConnectCALeaf) waitNewRootCA(datacenter string, ch chan<- error, // since this is only used for cache-related requests and not forwarded // directly to any Consul servers. type ConnectCALeafRequest struct { + Token string Datacenter string Service string // Service name, not ID MinQueryIndex uint64 @@ -224,6 +226,7 @@ type ConnectCALeafRequest struct { func (r *ConnectCALeafRequest) CacheInfo() cache.RequestInfo { return cache.RequestInfo{ + Token: r.Token, Key: r.Service, Datacenter: r.Datacenter, MinIndex: r.MinQueryIndex, diff --git a/agent/connect/testing_ca.go b/agent/connect/testing_ca.go index ba2d29203..cc015af81 100644 --- a/agent/connect/testing_ca.go +++ b/agent/connect/testing_ca.go @@ -20,12 +20,8 @@ import ( "github.com/mitchellh/go-testing-interface" ) -// testClusterID is the Consul cluster ID for testing. -// -// NOTE(mitchellh): This might have to change some other constant for -// real testing once we integrate the Cluster ID into the core. For now it -// is unchecked. -const testClusterID = "11111111-2222-3333-4444-555555555555" +// TestClusterID is the Consul cluster ID for testing. +const TestClusterID = "11111111-2222-3333-4444-555555555555" // testCACounter is just an atomically incremented counter for creating // unique names for the CA certs. 
@@ -53,7 +49,7 @@ func TestCA(t testing.T, xc *structs.CARoot) *structs.CARoot { } // The URI (SPIFFE compatible) for the cert - id := &SpiffeIDSigning{ClusterID: testClusterID, Domain: "consul"} + id := &SpiffeIDSigning{ClusterID: TestClusterID, Domain: "consul"} // Create the CA cert template := x509.Certificate{ @@ -148,7 +144,7 @@ func TestLeaf(t testing.T, service string, root *structs.CARoot) (string, string // Build the SPIFFE ID spiffeId := &SpiffeIDService{ - Host: fmt.Sprintf("%s.consul", testClusterID), + Host: fmt.Sprintf("%s.consul", TestClusterID), Namespace: "default", Datacenter: "dc1", Service: service, diff --git a/agent/connect/testing_spiffe.go b/agent/connect/testing_spiffe.go index 42db76495..c7fa6f753 100644 --- a/agent/connect/testing_spiffe.go +++ b/agent/connect/testing_spiffe.go @@ -6,7 +6,7 @@ import ( // TestSpiffeIDService returns a SPIFFE ID representing a service. func TestSpiffeIDService(t testing.T, service string) *SpiffeIDService { - return TestSpiffeIDServiceWithHost(t, service, testClusterID+".consul") + return TestSpiffeIDServiceWithHost(t, service, TestClusterID+".consul") } // TestSpiffeIDServiceWithHost returns a SPIFFE ID representing a service with diff --git a/agent/connect/uri_signing_test.go b/agent/connect/uri_signing_test.go index 2d8975858..6d04a5fab 100644 --- a/agent/connect/uri_signing_test.go +++ b/agent/connect/uri_signing_test.go @@ -21,10 +21,10 @@ func TestSpiffeIDSigningAuthorize(t *testing.T) { func TestSpiffeIDSigningForCluster(t *testing.T) { // For now it should just append .consul to the ID. 
config := &structs.CAConfiguration{ - ClusterID: testClusterID, + ClusterID: TestClusterID, } id := SpiffeIDSigningForCluster(config) - assert.Equal(t, id.URI().String(), "spiffe://"+testClusterID+".consul") + assert.Equal(t, id.URI().String(), "spiffe://"+TestClusterID+".consul") } // fakeCertURI is a CertURI implementation that our implementation doesn't know @@ -42,7 +42,7 @@ func (f fakeCertURI) URI() *url.URL { func TestSpiffeIDSigning_CanSign(t *testing.T) { testSigning := &SpiffeIDSigning{ - ClusterID: testClusterID, + ClusterID: TestClusterID, Domain: "consul", } @@ -71,7 +71,7 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) { name: "different TLD signing ID", id: testSigning, input: &SpiffeIDSigning{ - ClusterID: testClusterID, + ClusterID: TestClusterID, Domain: "evil", }, want: false, @@ -91,13 +91,13 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) { { name: "service - good", id: testSigning, - input: &SpiffeIDService{testClusterID + ".consul", "default", "dc1", "web"}, + input: &SpiffeIDService{TestClusterID + ".consul", "default", "dc1", "web"}, want: true, }, { name: "service - good midex case", id: testSigning, - input: &SpiffeIDService{strings.ToUpper(testClusterID) + ".CONsuL", "defAUlt", "dc1", "WEB"}, + input: &SpiffeIDService{strings.ToUpper(TestClusterID) + ".CONsuL", "defAUlt", "dc1", "WEB"}, want: true, }, { @@ -109,7 +109,7 @@ func TestSpiffeIDSigning_CanSign(t *testing.T) { { name: "service - different TLD", id: testSigning, - input: &SpiffeIDService{testClusterID + ".fake", "default", "dc1", "web"}, + input: &SpiffeIDService{TestClusterID + ".fake", "default", "dc1", "web"}, want: false, }, } diff --git a/agent/consul/connect_ca_endpoint_test.go b/agent/consul/connect_ca_endpoint_test.go index ac64ceb30..eb7176b67 100644 --- a/agent/consul/connect_ca_endpoint_test.go +++ b/agent/consul/connect_ca_endpoint_test.go @@ -252,7 +252,6 @@ func TestConnectCASign(t *testing.T) { // Generate a CSR and request signing spiffeId := 
connect.TestSpiffeIDService(t, "web") - spiffeId.Host = testGetClusterTrustDomain(t, s1) csr, _ := connect.TestCSR(t, spiffeId) args := &structs.CASignRequest{ Datacenter: "dc1", @@ -281,16 +280,6 @@ func TestConnectCASign(t *testing.T) { assert.Equal(spiffeId.URI().String(), reply.ServiceURI) } -func testGetClusterTrustDomain(t *testing.T, s *Server) string { - t.Helper() - state := s.fsm.State() - _, config, err := state.CAConfig() - require.NoError(t, err) - // Build TrustDomain based on the ClusterID stored. - signingID := connect.SpiffeIDSigningForCluster(config) - return signingID.Host() -} - func TestConnectCASignValidation(t *testing.T) { t.Parallel() @@ -325,7 +314,7 @@ func TestConnectCASignValidation(t *testing.T) { require.NoError(t, msgpackrpc.CallWithCodec(codec, "ACL.Apply", &arg, &webToken)) } - trustDomain := testGetClusterTrustDomain(t, s1) + testWebID := connect.TestSpiffeIDService(t, "web") tests := []struct { name string @@ -335,29 +324,36 @@ func TestConnectCASignValidation(t *testing.T) { { name: "different cluster", id: &connect.SpiffeIDService{ - "55555555-4444-3333-2222-111111111111.consul", - "default", "dc1", "web"}, + Host: "55555555-4444-3333-2222-111111111111.consul", + Namespace: testWebID.Namespace, + Datacenter: testWebID.Datacenter, + Service: testWebID.Service, + }, wantErr: "different trust domain", }, { - name: "same cluster should validate", - id: &connect.SpiffeIDService{ - trustDomain, - "default", "dc1", "web"}, + name: "same cluster should validate", + id: testWebID, wantErr: "", }, { name: "same cluster, CSR for a different DC should NOT validate", id: &connect.SpiffeIDService{ - trustDomain, - "default", "dc2", "web"}, + Host: testWebID.Host, + Namespace: testWebID.Namespace, + Datacenter: "dc2", + Service: testWebID.Service, + }, wantErr: "different datacenter", }, { name: "same cluster and DC, different service should not have perms", id: &connect.SpiffeIDService{ - trustDomain, - "default", "dc1", "db"}, + Host: 
testWebID.Host, + Namespace: testWebID.Namespace, + Datacenter: testWebID.Datacenter, + Service: "db", + }, wantErr: "Permission denied", }, } diff --git a/agent/consul/leader.go b/agent/consul/leader.go index 579ff4b1d..f47dde83f 100644 --- a/agent/consul/leader.go +++ b/agent/consul/leader.go @@ -383,13 +383,15 @@ func (s *Server) initializeCAConfig() (*structs.CAConfiguration, error) { return config, nil } - id, err := uuid.GenerateUUID() - if err != nil { - return nil, err + config = s.config.CAConfig + if config.ClusterID == "" { + id, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + config.ClusterID = id } - config = s.config.CAConfig - config.ClusterID = id req := structs.CARequest{ Op: structs.CAOpSetConfig, Config: config, diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 84ec6743a..0359c847f 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -10,7 +10,9 @@ import ( "testing" "time" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/metadata" + "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/agent/token" "github.com/hashicorp/consul/lib/freeport" "github.com/hashicorp/consul/testrpc" @@ -92,6 +94,10 @@ func testServerConfig(t *testing.T) (string, *Config) { config.RPCHoldTimeout = 5 * time.Second config.ConnectEnabled = true + config.CAConfig = &structs.CAConfiguration{ + ClusterID: connect.TestClusterID, + Provider: structs.ConsulCAProvider, + } return dir, config } diff --git a/agent/testagent.go b/agent/testagent.go index 724b0c80e..26c81a81d 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -19,6 +19,7 @@ import ( uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/consul/agent/config" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/consul/api" @@ -337,6 +338,9 @@ func TestConfig(sources ...config.Source) 
*config.RuntimeConfig { node_name = "Node ` + nodeID + `" connect { enabled = true + ca_config { + cluster_id = "` + connect.TestClusterID + `" + } } performance { raft_multiplier = 1 diff --git a/api/agent_test.go b/api/agent_test.go index 1f816c23a..ed73672de 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -203,7 +203,7 @@ func TestAPI_AgentServices_ManagedConnectProxy(t *testing.T) { Connect: &AgentServiceConnect{ Proxy: &AgentServiceConnectProxy{ ExecMode: ProxyExecModeScript, - Command: "foo.rb", + Command: []string{"foo.rb"}, Config: map[string]interface{}{ "foo": "bar", }, @@ -1123,7 +1123,7 @@ func TestAPI_AgentConnectAuthorize(t *testing.T) { Target: "foo", ClientCertSerial: "fake", // Importing connect.TestSpiffeIDService creates an import cycle - ClientCertURI: "spiffe://123.consul/ns/default/dc/ny1/svc/web", + ClientCertURI: "spiffe://11111111-2222-3333-4444-555555555555.consul/ns/default/dc/ny1/svc/web", } auth, err := agent.ConnectAuthorize(params) require.Nil(err) @@ -1169,7 +1169,7 @@ func TestAPI_AgentConnectProxyConfig(t *testing.T) { TargetServiceName: "foo", ContentHash: "e662ea8600d84cf0", ExecMode: "daemon", - Command: "consul connect proxy", + Command: []string{"consul connect proxy"}, Config: map[string]interface{}{ "bind_address": "127.0.0.1", "bind_port": float64(20000), diff --git a/api/connect_ca.go b/api/connect_ca.go index 00951c75d..ed0ac5e8f 100644 --- a/api/connect_ca.go +++ b/api/connect_ca.go @@ -7,6 +7,7 @@ import ( // CARootList is the structure for the results of listing roots. 
type CARootList struct { ActiveRootID string + TrustDomain string Roots []*CARoot } diff --git a/api/connect_ca_test.go b/api/connect_ca_test.go index 3ad7cb078..36fb12b56 100644 --- a/api/connect_ca_test.go +++ b/api/connect_ca_test.go @@ -3,24 +3,51 @@ package api import ( "testing" + "github.com/hashicorp/consul/testutil" + "github.com/hashicorp/consul/testutil/retry" "github.com/stretchr/testify/require" ) -// NOTE(mitchellh): we don't have a way to test CA roots yet since there -// is no API public way to configure the root certs. This wll be resolved -// in the future and we can write tests then. This is tested in agent and -// agent/consul which do have internal access to manually create roots. - func TestAPI_ConnectCARoots_empty(t *testing.T) { t.Parallel() require := require.New(t) - c, s := makeClient(t) + c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) { + // Don't bootstrap CA + c.Connect = nil + }) defer s.Stop() connect := c.Connect() list, meta, err := connect.CARoots(nil) - require.Nil(err) + require.NoError(err) require.Equal(uint64(0), meta.LastIndex) require.Len(list.Roots, 0) + require.Empty(list.TrustDomain) +} + +func TestAPI_ConnectCARoots_list(t *testing.T) { + t.Parallel() + + c, s := makeClient(t) + defer s.Stop() + + // This fails occasionally if server doesn't have time to bootstrap CA so + // retry + retry.Run(t, func(r *retry.R) { + connect := c.Connect() + list, meta, err := connect.CARoots(nil) + r.Check(err) + if meta.LastIndex <= 0 { + r.Fatalf("expected roots raft index to be > 0") + } + if v := len(list.Roots); v != 1 { + r.Fatalf("expected 1 root, got %d", v) + } + // connect.TestClusterID causes import cycle so hard code it + if list.TrustDomain != "11111111-2222-3333-4444-555555555555.consul" { + r.Fatalf("expected fixed trust domain got '%s'", list.TrustDomain) + } + }) + } diff --git a/connect/tls_test.go b/connect/tls_test.go index a9fd6fe8c..5df491866 100644 --- a/connect/tls_test.go +++ 
b/connect/tls_test.go @@ -147,7 +147,7 @@ func TestServerSideVerifier(t *testing.T) { cfg := api.DefaultConfig() cfg.Address = agent.HTTPAddr() client, err := api.NewClient(cfg) - require.Nil(t, err) + require.NoError(t, err) // Setup intentions to validate against. We actually default to allow so first // setup a blanket deny rule for db, then only allow web. @@ -162,7 +162,7 @@ func TestServerSideVerifier(t *testing.T) { Meta: map[string]string{}, } id, _, err := connect.IntentionCreate(ixn, nil) - require.Nil(t, err) + require.NoError(t, err) require.NotEmpty(t, id) ixn = &api.Intention{ @@ -175,7 +175,7 @@ func TestServerSideVerifier(t *testing.T) { Meta: map[string]string{}, } id, _, err = connect.IntentionCreate(ixn, nil) - require.Nil(t, err) + require.NoError(t, err) require.NotEmpty(t, id) tests := []struct { diff --git a/testutil/server.go b/testutil/server.go index f188079d7..e80b0e7fd 100644 --- a/testutil/server.go +++ b/testutil/server.go @@ -135,6 +135,13 @@ func defaultServerConfig() *TestServerConfig { Server: ports[5], }, ReadyTimeout: 10 * time.Second, + Connect: map[string]interface{}{ + "enabled": true, + "ca_config": map[string]interface{}{ + // const TestClusterID causes import cycle so hard code it here. + "cluster_id": "11111111-2222-3333-4444-555555555555", + }, + }, } } From cac32ba071891cafa81172e475d6e75c59becfc1 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 10 May 2018 17:14:16 +0100 Subject: [PATCH 271/627] More test cleanup --- api/agent.go | 6 ------ api/agent_test.go | 26 ++++++++------------------ 2 files changed, 8 insertions(+), 24 deletions(-) diff --git a/api/agent.go b/api/agent.go index 16241c6f9..7830f80f2 100644 --- a/api/agent.go +++ b/api/agent.go @@ -565,9 +565,6 @@ func (a *Agent) ForceLeave(node string) error { // ConnectAuthorize is used to authorize an incoming connection // to a natively integrated Connect service. 
-// -// TODO(mitchellh): we need to test this better once we have a way to -// configure CAs from the API package (when the CA work is done). func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) { r := a.c.newRequest("POST", "/v1/agent/connect/authorize") r.obj = auth @@ -585,9 +582,6 @@ func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, e } // ConnectCARoots returns the list of roots. -// -// TODO(mitchellh): we need to test this better once we have a way to -// configure CAs from the API package (when the CA work is done). func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots") r.setQueryOptions(q) diff --git a/api/agent_test.go b/api/agent_test.go index ed73672de..ad236ba3a 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -1044,7 +1044,9 @@ func TestAPI_AgentConnectCARoots_empty(t *testing.T) { t.Parallel() require := require.New(t) - c, s := makeClient(t) + c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) { + c.Connect = nil // disable connect to prevent CA being bootstrapped + }) defer s.Stop() agent := c.Agent() @@ -1058,12 +1060,7 @@ func TestAPI_AgentConnectCARoots_list(t *testing.T) { t.Parallel() require := require.New(t) - c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) { - // Force auto port range to 1 port so we have deterministic response. - c.Connect = map[string]interface{}{ - "enabled": true, - } - }) + c, s := makeClient(t) defer s.Stop() agent := c.Agent() @@ -1077,12 +1074,7 @@ func TestAPI_AgentConnectCALeaf(t *testing.T) { t.Parallel() require := require.New(t) - c, s := makeClientWithConfig(t, nil, func(c *testutil.TestServerConfig) { - // Force auto port range to 1 port so we have deterministic response.
- c.Connect = map[string]interface{}{ - "enabled": true, - } - }) + c, s := makeClient(t) defer s.Stop() agent := c.Agent() @@ -1109,9 +1101,6 @@ func TestAPI_AgentConnectCALeaf(t *testing.T) { require.True(leaf.ValidBefore.After(time.Now())) } -// TODO(banks): once we have CA stuff setup properly we can probably make this -// much more complete. This is just a sanity check that the agent code basically -// works. func TestAPI_AgentConnectAuthorize(t *testing.T) { t.Parallel() require := require.New(t) @@ -1151,6 +1140,7 @@ func TestAPI_AgentConnectProxyConfig(t *testing.T) { Port: 8000, Connect: &AgentServiceConnect{ Proxy: &AgentServiceConnectProxy{ + Command: []string{"consul connect proxy"}, Config: map[string]interface{}{ "foo": "bar", }, @@ -1167,7 +1157,7 @@ func TestAPI_AgentConnectProxyConfig(t *testing.T) { ProxyServiceID: "foo-proxy", TargetServiceID: "foo", TargetServiceName: "foo", - ContentHash: "e662ea8600d84cf0", + ContentHash: "93baee1d838888ae", ExecMode: "daemon", Command: []string{"consul connect proxy"}, Config: map[string]interface{}{ @@ -1178,5 +1168,5 @@ func TestAPI_AgentConnectProxyConfig(t *testing.T) { }, } require.Equal(t, expectConfig, config) - require.Equal(t, "e662ea8600d84cf0", qm.LastContentHash) + require.Equal(t, expectConfig.ContentHash, qm.LastContentHash) } From dbcf286d4c54068059d859b598f418a8e9fe9504 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Thu, 10 May 2018 17:27:42 +0100 Subject: [PATCH 272/627] Ooops remove the CA stuff from actual server defaults and make it test server only --- agent/consul/config.go | 9 --------- agent/consul/server_test.go | 5 +++++ 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/agent/consul/config.go b/agent/consul/config.go index 94c8bc06a..6f9410c4b 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -435,15 +435,6 @@ func DefaultConfig() *Config { ServerHealthInterval: 2 * time.Second, AutopilotInterval: 10 * time.Second, - - CAConfig: 
&structs.CAConfiguration{ - Provider: "consul", - Config: map[string]interface{}{ - "PrivateKey": "", - "RootCert": "", - "RotationPeriod": 90 * 24 * time.Hour, - }, - }, } // Increase our reap interval to 3 days instead of 24h. diff --git a/agent/consul/server_test.go b/agent/consul/server_test.go index 0359c847f..43dcd13ff 100644 --- a/agent/consul/server_test.go +++ b/agent/consul/server_test.go @@ -97,6 +97,11 @@ func testServerConfig(t *testing.T) (string, *Config) { config.CAConfig = &structs.CAConfiguration{ ClusterID: connect.TestClusterID, Provider: structs.ConsulCAProvider, + Config: map[string]interface{}{ + "PrivateKey": "", + "RootCert": "", + "RotationPeriod": 90 * 24 * time.Hour, + }, } return dir, config From bd5eb8b749484e643b52ece95e89c779ed87391c Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Sat, 12 May 2018 09:48:16 +0100 Subject: [PATCH 273/627] Add default CA config back - I didn't add it and causes nil panics --- agent/consul/config.go | 9 +++++++++ agent/proxy/proxy_test.go | 3 --- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/agent/consul/config.go b/agent/consul/config.go index 6f9410c4b..461a7dcf7 100644 --- a/agent/consul/config.go +++ b/agent/consul/config.go @@ -433,6 +433,15 @@ func DefaultConfig() *Config { ServerStabilizationTime: 10 * time.Second, }, + CAConfig: &structs.CAConfiguration{ + Provider: "consul", + Config: map[string]interface{}{ + "PrivateKey": "", + "RootCert": "", + "RotationPeriod": 90 * 24 * time.Hour, + }, + }, + ServerHealthInterval: 2 * time.Second, AutopilotInterval: 10 * time.Second, } diff --git a/agent/proxy/proxy_test.go b/agent/proxy/proxy_test.go index b46b5d677..9b123787c 100644 --- a/agent/proxy/proxy_test.go +++ b/agent/proxy/proxy_test.go @@ -138,9 +138,6 @@ func TestHelperProcess(t *testing.T) { time.Sleep(25 * time.Millisecond) } - // Run forever - <-make(chan struct{}) - case "output": fmt.Fprintf(os.Stdout, "hello stdout\n") fmt.Fprintf(os.Stderr, "hello stderr\n") From 
73f2a49ef18b0955479692133b71d88d1761268c Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Sat, 12 May 2018 11:27:44 +0100 Subject: [PATCH 274/627] Fix broken api test for service Meta (logical conflict from OSS). Add test that would make this much easier to catch in future. --- agent/agent_endpoint.go | 1 + agent/agent_endpoint_test.go | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 6a5126fa2..0342d1fd4 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -175,6 +175,7 @@ func (s *HTTPServer) AgentServices(resp http.ResponseWriter, req *http.Request) ID: s.ID, Service: s.Service, Tags: s.Tags, + Meta: s.Meta, Port: s.Port, Address: s.Address, EnableTagOverride: s.EnableTagOverride, diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index ac2d28d00..9c10a61ff 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -56,7 +56,10 @@ func TestAgent_Services(t *testing.T) { ID: "mysql", Service: "mysql", Tags: []string{"master"}, - Port: 5000, + Meta: map[string]string{ + "foo": "bar", + }, + Port: 5000, } require.NoError(t, a.State.AddService(srv1, "")) @@ -81,6 +84,7 @@ func TestAgent_Services(t *testing.T) { val := obj.(map[string]*api.AgentService) assert.Lenf(t, val, 1, "bad services: %v", obj) assert.Equal(t, 5000, val["mysql"].Port) + assert.Equal(t, srv1.Meta, val["mysql"].Meta) assert.NotNil(t, val["mysql"].Connect) assert.NotNil(t, val["mysql"].Connect.Proxy) assert.Equal(t, prxy1.ExecMode.String(), string(val["mysql"].Connect.Proxy.ExecMode)) From 919fd3e148d2a8b53eccb6f61101bca4a5c4f666 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Sat, 12 May 2018 11:58:14 +0100 Subject: [PATCH 275/627] Fix logical conflicts with CA refactor --- agent/connect/csr.go | 26 -------------------------- agent/connect/generate.go | 13 +++++++------ 2 files changed, 7 insertions(+), 32 deletions(-) diff --git a/agent/connect/csr.go 
b/agent/connect/csr.go index 4b975d06c..16a46af3f 100644 --- a/agent/connect/csr.go +++ b/agent/connect/csr.go @@ -3,12 +3,9 @@ package connect import ( "bytes" "crypto" - "crypto/ecdsa" - "crypto/elliptic" "crypto/rand" "crypto/x509" "encoding/pem" - "fmt" "net/url" ) @@ -34,26 +31,3 @@ func CreateCSR(uri CertURI, privateKey crypto.Signer) (string, error) { return csrBuf.String(), nil } - -// GeneratePrivateKey generates a new Private key -func GeneratePrivateKey() (crypto.Signer, string, error) { - var pk *ecdsa.PrivateKey - - pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return nil, "", fmt.Errorf("error generating private key: %s", err) - } - - bs, err := x509.MarshalECPrivateKey(pk) - if err != nil { - return nil, "", fmt.Errorf("error generating private key: %s", err) - } - - var buf bytes.Buffer - err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) - if err != nil { - return nil, "", fmt.Errorf("error encoding private key: %s", err) - } - - return pk, buf.String(), nil -} diff --git a/agent/connect/generate.go b/agent/connect/generate.go index 1226323f0..47ea5f43e 100644 --- a/agent/connect/generate.go +++ b/agent/connect/generate.go @@ -2,6 +2,7 @@ package connect import ( "bytes" + "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" @@ -10,25 +11,25 @@ import ( "fmt" ) -// GeneratePrivateKey returns a new private key -func GeneratePrivateKey() (string, error) { +// GeneratePrivateKey generates a new Private key +func GeneratePrivateKey() (crypto.Signer, string, error) { var pk *ecdsa.PrivateKey pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { - return "", fmt.Errorf("error generating private key: %s", err) + return nil, "", fmt.Errorf("error generating private key: %s", err) } bs, err := x509.MarshalECPrivateKey(pk) if err != nil { - return "", fmt.Errorf("error generating private key: %s", err) + return nil, "", fmt.Errorf("error generating private key: %s", err) } var buf 
bytes.Buffer err = pem.Encode(&buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: bs}) if err != nil { - return "", fmt.Errorf("error encoding private key: %s", err) + return nil, "", fmt.Errorf("error encoding private key: %s", err) } - return buf.String(), nil + return pk, buf.String(), nil } From 69b668c95140c6d4e19d2835f85f0628f36340e0 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Sat, 12 May 2018 20:16:39 +0100 Subject: [PATCH 276/627] Make connect client resolver resolve trust domain properly --- connect/resolver.go | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/connect/resolver.go b/connect/resolver.go index 98d8c88d3..b7e89bd62 100644 --- a/connect/resolver.go +++ b/connect/resolver.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "math/rand" + "sync" "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/api" @@ -73,11 +74,22 @@ type ConsulResolver struct { // Datacenter to resolve in, empty indicates agent's local DC. Datacenter string + + // trustDomain stores the cluster's trust domain it's populated once on first + // Resolve call and blocks all resolutions. + trustDomain string + trustDomainMu sync.Mutex } // Resolve performs service discovery against the local Consul agent and returns // the address and expected identity of a suitable service instance. 
func (cr *ConsulResolver) Resolve(ctx context.Context) (string, connect.CertURI, error) { + // Fetch trust domain if we've not done that yet + err := cr.ensureTrustDomain() + if err != nil { + return "", nil, err + } + switch cr.Type { case ConsulResolverTypeService: return cr.resolveService(ctx) @@ -91,6 +103,27 @@ func (cr *ConsulResolver) Resolve(ctx context.Context) (string, connect.CertURI, } } +func (cr *ConsulResolver) ensureTrustDomain() error { + cr.trustDomainMu.Lock() + defer cr.trustDomainMu.Unlock() + + if cr.trustDomain != "" { + return nil + } + + roots, _, err := cr.Client.Agent().ConnectCARoots(nil) + if err != nil { + return fmt.Errorf("failed fetching cluster trust domain: %s", err) + } + + if roots.TrustDomain == "" { + return fmt.Errorf("cluster trust domain empty, connect not bootstrapped") + } + + cr.trustDomain = roots.TrustDomain + return nil +} + func (cr *ConsulResolver) resolveService(ctx context.Context) (string, connect.CertURI, error) { health := cr.Client.Health() @@ -116,13 +149,8 @@ func (cr *ConsulResolver) resolveService(ctx context.Context) (string, connect.C port := svcs[idx].Service.Port // Generate the expected CertURI - - // TODO(banks): when we've figured out the CA story around generating and - // propagating these trust domains we need to actually fetch the trust domain - // somehow. We also need to implement namespaces. Use of test function here is - // temporary pending the work on trust domains. 
certURI := &connect.SpiffeIDService{ - Host: "11111111-2222-3333-4444-555555555555.consul", + Host: cr.trustDomain, Namespace: "default", Datacenter: svcs[idx].Node.Datacenter, Service: svcs[idx].Service.ProxyDestination, From 957aaf69abf5277ae7c3f35888e20f617f1ccae1 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Sat, 12 May 2018 23:37:44 +0100 Subject: [PATCH 277/627] Make Service logger log to right place again --- connect/proxy/proxy.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/connect/proxy/proxy.go b/connect/proxy/proxy.go index e3db982fe..64e098825 100644 --- a/connect/proxy/proxy.go +++ b/connect/proxy/proxy.go @@ -80,7 +80,8 @@ func (p *Proxy) Serve() error { // Initial setup // Setup Service instance now we know target ID etc - service, err := connect.NewService(newCfg.ProxiedServiceID, p.client) + service, err := connect.NewServiceWithLogger(newCfg.ProxiedServiceID, + p.client, p.logger) if err != nil { return err } From bd5e569dc7f703ebe4a45e61753033dbc1a572b6 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Tue, 22 May 2018 15:11:13 +0100 Subject: [PATCH 278/627] Make invalid clusterID be fatal --- agent/agent.go | 17 +++++++++++------ agent/agent_test.go | 37 ++++++++++++++++++++++++++----------- agent/testagent.go | 11 +++++++++++ 3 files changed, 48 insertions(+), 17 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index eb9e203dc..622a105e8 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -946,8 +946,12 @@ func (a *Agent) consulConfig() (*consul.Config, error) { } } if base.CAConfig.ClusterID == "" { - a.logger.Println("[WARN] connect CA config cluster_id specified but ", - "is not a valid UUID, ignoring") + // If they tried to specify an ID but typoed it don't ignore as they will + // then bootstrap with a new ID and have to throw away the whole cluster + // and start again.
+ a.logger.Println("[ERR] connect CA config cluster_id specified but " + + "is not a valid UUID, aborting startup") + return nil, fmt.Errorf("cluster_id was supplied but was not a valid UUID") } } @@ -1315,8 +1319,10 @@ func (a *Agent) ShutdownAgent() error { // NOTE(mitchellh): we use Kill for now to kill the processes since // the local state isn't snapshotting meaning the proxy tokens are // regenerated each time forcing the processes to restart anyways. - if err := a.proxyManager.Kill(); err != nil { - a.logger.Printf("[WARN] agent: error shutting down proxy manager: %s", err) + if a.proxyManager != nil { + if err := a.proxyManager.Kill(); err != nil { + a.logger.Printf("[WARN] agent: error shutting down proxy manager: %s", err) + } } var err error @@ -2177,8 +2183,7 @@ func (a *Agent) verifyProxyToken(token, targetService, targetProxy string) (stri // Resolve the actual ACL token used to register the proxy/service and // return that for use in RPC calls. - aclToken := a.State.ServiceToken(targetService) - return aclToken, nil + return a.State.ServiceToken(targetService), nil } // Retrieve the service specified. 
This should always exist because diff --git a/agent/agent_test.go b/agent/agent_test.go index 911ed63a0..993bf3b25 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -60,6 +60,7 @@ func TestAgent_ConnectClusterIDConfig(t *testing.T) { name string hcl string wantClusterID string + wantPanic bool }{ { name: "default TestAgent has fixed cluster id", @@ -72,22 +73,36 @@ func TestAgent_ConnectClusterIDConfig(t *testing.T) { wantClusterID: "", }, { - name: "non-UUID cluster_id is ignored", - hcl: `connect { - enabled = true - ca_config { - cluster_id = "fake-id" - } - }`, + name: "non-UUID cluster_id is fatal", + hcl: `connect { + enabled = true + ca_config { + cluster_id = "fake-id" + } + }`, wantClusterID: "", + wantPanic: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - a := NewTestAgent("test", tt.hcl) - cfg := a.consulConfig() - assert.Equal(t, tt.wantClusterID, cfg.CAConfig.ClusterID) + // Indirection to support panic recovery cleanly + testFn := func() { + a := &TestAgent{Name: "test", HCL: tt.hcl} + a.ExpectConfigError = tt.wantPanic + a.Start() + defer a.Shutdown() + + cfg := a.consulConfig() + assert.Equal(t, tt.wantClusterID, cfg.CAConfig.ClusterID) + } + + if tt.wantPanic { + require.Panics(t, testFn) + } else { + testFn() + } }) } } @@ -95,7 +110,7 @@ func TestAgent_ConnectClusterIDConfig(t *testing.T) { func TestAgent_StartStop(t *testing.T) { t.Parallel() a := NewTestAgent(t.Name(), "") - // defer a.Shutdown() + defer a.Shutdown() if err := a.Leave(); err != nil { t.Fatalf("err: %v", err) diff --git a/agent/testagent.go b/agent/testagent.go index 26c81a81d..007d01322 100644 --- a/agent/testagent.go +++ b/agent/testagent.go @@ -45,6 +45,12 @@ type TestAgent struct { HCL string + // ExpectConfigError can be set to prevent the agent retrying Start on errors + // and eventually blowing up with runtime.Goexit. 
This enables tests to assert + // that some specific bit of config actually does prevent startup entirely in + // a reasonable way without reproducing a lot of the boilerplate here. + ExpectConfigError bool + // Config is the agent configuration. If Config is nil then // TestConfig() is used. If Config.DataDir is set then it is // the callers responsibility to clean up the data directory. @@ -159,6 +165,11 @@ func (a *TestAgent) Start() *TestAgent { } else if i == 0 { fmt.Println(id, a.Name, "Error starting agent:", err) runtime.Goexit() + } else if a.ExpectConfigError { + // Panic the error since this can be caught if needed. Pretty gross way to + // detect errors but enough for now and this is a tiny edge case that I'd + // otherwise not have a way to test at all... + panic(err) } else { agent.ShutdownAgent() agent.ShutdownEndpoints() From 526cfc34bd56e4b2bd67c955f1d72c7775daa861 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 10 May 2018 22:35:47 -0700 Subject: [PATCH 279/627] agent/consul: implement Intention.Test endpoint --- agent/consul/intention_endpoint.go | 90 +++++++++ agent/consul/intention_endpoint_test.go | 254 ++++++++++++++++++++++++ agent/structs/intention.go | 28 +++ 3 files changed, 372 insertions(+) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 2458a8ee9..7662ea852 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -7,6 +7,7 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/consul/acl" + "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" "github.com/hashicorp/go-memdb" @@ -252,3 +253,92 @@ func (s *Intention) Match( }, ) } + +// Test tests a source/destination and returns whether it would be allowed +// or denied based on the current ACL configuration. 
+func (s *Intention) Test( + args *structs.IntentionQueryRequest, + reply *structs.IntentionQueryTestResponse) error { + // Get the test args, and defensively guard against nil + query := args.Test + if query == nil { + return errors.New("Test must be specified on args") + } + + // Build the URI + var uri connect.CertURI + switch query.SourceType { + case structs.IntentionSourceConsul: + uri = &connect.SpiffeIDService{ + Namespace: query.SourceNS, + Service: query.SourceName, + } + + default: + return fmt.Errorf("unsupported SourceType: %q", query.SourceType) + } + + // Get the ACL token for the request for the checks below. + rule, err := s.srv.resolveToken(args.Token) + if err != nil { + return err + } + + // Perform the ACL check + if prefix, ok := query.GetACLPrefix(); ok { + if rule != nil && !rule.ServiceRead(prefix) { + s.srv.logger.Printf("[WARN] consul.intention: test on intention '%s' denied due to ACLs", prefix) + return acl.ErrPermissionDenied + } + } + + // Get the matches for this destination + state := s.srv.fsm.State() + _, matches, err := state.IntentionMatch(nil, &structs.IntentionQueryMatch{ + Type: structs.IntentionMatchDestination, + Entries: []structs.IntentionMatchEntry{ + structs.IntentionMatchEntry{ + Namespace: query.DestinationNS, + Name: query.DestinationName, + }, + }, + }) + if err != nil { + return err + } + if len(matches) != 1 { + // This should never happen since the documented behavior of the + // Match call is that it'll always return exactly the number of results + // as entries passed in. But we guard against misbehavior. + return errors.New("internal error loading matches") + } + + // Test the authorization for each match + for _, ixn := range matches[0] { + if auth, ok := uri.Authorize(ixn); ok { + reply.Allowed = auth + return nil + } + } + + // No match, we need to determine the default behavior. We do this by + // specifying the anonymous token, which will get that behavior.
+ // The default behavior if ACLs are disabled is to allow connections + // to mimic the behavior of Consul itself: everything is allowed if + // ACLs are disabled. + // + // NOTE(mitchellh): This is the same behavior as the agent authorize + // endpoint. If this behavior is incorrect, we should also change it there + // which is much more important. + rule, err = s.srv.resolveToken("") + if err != nil { + return err + } + + reply.Allowed = true + if rule != nil { + reply.Allowed = rule.IntentionDefaultAllow() + } + + return nil +} diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index dfac4fc45..b1f51a714 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/consul/testrpc" "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Test basic creation @@ -1007,3 +1008,256 @@ service "bar" { assert.Equal(expected, actual) } } + +// Test the Test method defaults to allow with no ACL set. +func TestIntentionTest_defaultNoACL(t *testing.T) { + t.Parallel() + + require := require.New(t) + dir1, s1 := testServer(t) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Test + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Test: &structs.IntentionQueryTest{ + SourceNS: "foo", + SourceName: "bar", + DestinationNS: "foo", + DestinationName: "qux", + SourceType: structs.IntentionSourceConsul, + }, + } + var resp structs.IntentionQueryTestResponse + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp)) + require.True(resp.Allowed) +} + +// Test the Test method defaults to deny with whitelist ACLs. 
+func TestIntentionTest_defaultACLDeny(t *testing.T) { + t.Parallel() + + require := require.New(t) + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Test + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Test: &structs.IntentionQueryTest{ + SourceNS: "foo", + SourceName: "bar", + DestinationNS: "foo", + DestinationName: "qux", + SourceType: structs.IntentionSourceConsul, + }, + } + req.Token = "root" + var resp structs.IntentionQueryTestResponse + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp)) + require.False(resp.Allowed) +} + +// Test the Test method defaults to deny with blacklist ACLs. +func TestIntentionTest_defaultACLAllow(t *testing.T) { + t.Parallel() + + require := require.New(t) + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "allow" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Test + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Test: &structs.IntentionQueryTest{ + SourceNS: "foo", + SourceName: "bar", + DestinationNS: "foo", + DestinationName: "qux", + SourceType: structs.IntentionSourceConsul, + }, + } + req.Token = "root" + var resp structs.IntentionQueryTestResponse + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp)) + require.True(resp.Allowed) +} + +// Test the Test method requires service:read permission. 
+func TestIntentionTest_aclDeny(t *testing.T) { + t.Parallel() + + require := require.New(t) + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create an ACL with service read permissions. This will grant permission. + var token string + { + var rules = ` +service "bar" { + policy = "read" +}` + + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: rules, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + require.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token)) + } + + // Test + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Test: &structs.IntentionQueryTest{ + SourceNS: "foo", + SourceName: "qux", + DestinationNS: "foo", + DestinationName: "baz", + SourceType: structs.IntentionSourceConsul, + }, + } + req.Token = token + var resp structs.IntentionQueryTestResponse + err := msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp) + require.True(acl.IsErrPermissionDenied(err)) +} + +// Test the Test method returns allow/deny properly. +func TestIntentionTest_match(t *testing.T) { + t.Parallel() + + require := require.New(t) + dir1, s1 := testServerWithConfig(t, func(c *Config) { + c.ACLDatacenter = "dc1" + c.ACLMasterToken = "root" + c.ACLDefaultPolicy = "deny" + }) + defer os.RemoveAll(dir1) + defer s1.Shutdown() + codec := rpcClient(t, s1) + defer codec.Close() + + testrpc.WaitForLeader(t, s1.RPC, "dc1") + + // Create an ACL with service read permissions. This will grant permission. 
+ var token string + { + var rules = ` +service "bar" { + policy = "read" +}` + + req := structs.ACLRequest{ + Datacenter: "dc1", + Op: structs.ACLSet, + ACL: structs.ACL{ + Name: "User token", + Type: structs.ACLTypeClient, + Rules: rules, + }, + WriteRequest: structs.WriteRequest{Token: "root"}, + } + require.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token)) + } + + // Create some intentions + { + insert := [][]string{ + {"foo", "*", "foo", "*"}, + {"foo", "*", "foo", "bar"}, + {"bar", "*", "foo", "bar"}, // duplicate destination different source + } + + for _, v := range insert { + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: &structs.Intention{ + SourceNS: v[0], + SourceName: v[1], + DestinationNS: v[2], + DestinationName: v[3], + Action: structs.IntentionActionAllow, + }, + } + ixn.WriteRequest.Token = "root" + + // Create + var reply string + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Apply", &ixn, &reply)) + } + } + + // Test + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Test: &structs.IntentionQueryTest{ + SourceNS: "foo", + SourceName: "qux", + DestinationNS: "foo", + DestinationName: "bar", + SourceType: structs.IntentionSourceConsul, + }, + } + req.Token = token + var resp structs.IntentionQueryTestResponse + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp)) + require.True(resp.Allowed) + + // Test no match for sanity + { + req := &structs.IntentionQueryRequest{ + Datacenter: "dc1", + Test: &structs.IntentionQueryTest{ + SourceNS: "baz", + SourceName: "qux", + DestinationNS: "foo", + DestinationName: "bar", + SourceType: structs.IntentionSourceConsul, + }, + } + req.Token = token + var resp structs.IntentionQueryTestResponse + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp)) + require.False(resp.Allowed) + } +} diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 5c6b1e991..34d15d997 
100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -261,6 +261,10 @@ type IntentionQueryRequest struct { // resolving wildcards. Match *IntentionQueryMatch + // Test is non-nil if we're performing a test query. A test will + // return allowed/deny based on an exact match. + Test *IntentionQueryTest + // Options for queries QueryOptions } @@ -313,6 +317,30 @@ type IntentionMatchEntry struct { Name string } +// IntentionQueryTest are the parameters for performing a test request. +type IntentionQueryTest struct { + // SourceNS, SourceName, DestinationNS, and DestinationName are the + // source and namespace, respectively, for the test. These must be + // exact values. + SourceNS, SourceName string + DestinationNS, DestinationName string + + // SourceType is the type of the value for the source. + SourceType IntentionSourceType +} + +// GetACLPrefix returns the prefix to look up the ACL policy for this +// request, and a boolean noting whether the prefix is valid to check +// or not. You must check the ok value before using the prefix. +func (q *IntentionQueryTest) GetACLPrefix() (string, bool) { + return q.DestinationName, q.DestinationName != "" +} + +// IntentionQueryTestResponse is the response for a test request. +type IntentionQueryTestResponse struct { + Allowed bool +} + // IntentionPrecedenceSorter takes a list of intentions and sorts them // based on the match precedence rules for intentions. The intentions // closer to the head of the list have higher precedence. i.e. 
index 0 has From b02502be736261d09f8e376fa5cf50e207d1b7da Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 10 May 2018 22:37:02 -0700 Subject: [PATCH 280/627] agent: comments to point to differing logic --- agent/agent_endpoint.go | 3 +++ agent/consul/intention_endpoint.go | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 0342d1fd4..b52abc732 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1106,6 +1106,9 @@ func (s *HTTPServer) agentLocalBlockingQuery(resp http.ResponseWriter, hash stri // AgentConnectAuthorize // // POST /v1/agent/connect/authorize +// +// Note: when this logic changes, consider if the Intention.Test RPC method +// also needs to be updated. func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Fetch the token var token string diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 7662ea852..2bae56f5e 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -256,6 +256,10 @@ func (s *Intention) Match( // Test tests a source/destination and returns whether it would be allowed // or denied based on the current ACL configuration. +// +// Note: Whenever the logic for this method is changed, you should take +// a look at the agent authorize endpoint (agent/agent_endpoint.go) since +// the logic there is similar. 
func (s *Intention) Test( args *structs.IntentionQueryRequest, reply *structs.IntentionQueryTestResponse) error { From a48ff5431854062fdc9491722ff07fb835bb0974 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 10 May 2018 22:38:13 -0700 Subject: [PATCH 281/627] agent/consul: forward request if necessary --- agent/consul/intention_endpoint.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 2bae56f5e..378565241 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -263,6 +263,11 @@ func (s *Intention) Match( func (s *Intention) Test( args *structs.IntentionQueryRequest, reply *structs.IntentionQueryTestResponse) error { + // Forward maybe + if done, err := s.srv.forward("Intention.Test", args, args, reply); done { + return err + } + // Get the test args, and defensively guard against nil query := args.Test if query == nil { From b961bab08cde31abe68a55179a9c65ae11db1827 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Thu, 10 May 2018 22:52:57 -0700 Subject: [PATCH 282/627] agent: implement HTTP endpoint --- agent/http_oss.go | 1 + agent/intentions_endpoint.go | 53 ++++++++++++++++++ agent/intentions_endpoint_test.go | 91 +++++++++++++++++++++++++++++++ 3 files changed, 145 insertions(+) diff --git a/agent/http_oss.go b/agent/http_oss.go index 9b9857e40..92c7bf4c0 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -48,6 +48,7 @@ func init() { registerEndpoint("/v1/connect/ca/roots", []string{"GET"}, (*HTTPServer).ConnectCARoots) registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionEndpoint) registerEndpoint("/v1/connect/intentions/match", []string{"GET"}, (*HTTPServer).IntentionMatch) + registerEndpoint("/v1/connect/intentions/test", []string{"GET"}, (*HTTPServer).IntentionTest) registerEndpoint("/v1/connect/intentions/", []string{"GET", "PUT", "DELETE"}, 
(*HTTPServer).IntentionSpecific) registerEndpoint("/v1/coordinate/datacenters", []string{"GET"}, (*HTTPServer).CoordinateDatacenters) registerEndpoint("/v1/coordinate/nodes", []string{"GET"}, (*HTTPServer).CoordinateNodes) diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 5a2e0e809..cb846bc19 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -122,6 +122,59 @@ func (s *HTTPServer) IntentionMatch(resp http.ResponseWriter, req *http.Request) return response, nil } +// GET /v1/connect/intentions/test +func (s *HTTPServer) IntentionTest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Prepare args + args := &structs.IntentionQueryRequest{Test: &structs.IntentionQueryTest{}} + if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { + return nil, nil + } + + q := req.URL.Query() + + // Set the source type if set + args.Test.SourceType = structs.IntentionSourceConsul + if sourceType, ok := q["source-type"]; ok && len(sourceType) > 0 { + args.Test.SourceType = structs.IntentionSourceType(sourceType[0]) + } + + // Extract the source/destination + source, ok := q["source"] + if !ok || len(source) != 1 { + return nil, fmt.Errorf("required query parameter 'source' not set") + } + destination, ok := q["destination"] + if !ok || len(destination) != 1 { + return nil, fmt.Errorf("required query parameter 'destination' not set") + } + + // We parse them the same way as matches to extract namespace/name + args.Test.SourceName = source[0] + if args.Test.SourceType == structs.IntentionSourceConsul { + entry, err := parseIntentionMatchEntry(source[0]) + if err != nil { + return nil, fmt.Errorf("source %q is invalid: %s", source[0], err) + } + args.Test.SourceNS = entry.Namespace + args.Test.SourceName = entry.Name + } + + // The destination is always in the Consul format + entry, err := parseIntentionMatchEntry(destination[0]) + if err != nil { + return nil, fmt.Errorf("destination %q 
is invalid: %s", destination[0], err) + } + args.Test.DestinationNS = entry.Namespace + args.Test.DestinationName = entry.Name + + var reply structs.IntentionQueryTestResponse + if err := s.agent.RPC("Intention.Test", args, &reply); err != nil { + return nil, err + } + + return &reply, nil +} + // IntentionSpecific handles the endpoint for /v1/connection/intentions/:id func (s *HTTPServer) IntentionSpecific(resp http.ResponseWriter, req *http.Request) (interface{}, error) { id := strings.TrimPrefix(req.URL.Path, "/v1/connect/intentions/") diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index d4d68f26c..e669bcf5f 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/consul/agent/structs" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestIntentionsList_empty(t *testing.T) { @@ -180,6 +181,96 @@ func TestIntentionsMatch_noName(t *testing.T) { assert.Nil(obj) } +func TestIntentionsTest_basic(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Create some intentions + { + insert := [][]string{ + {"foo", "*", "foo", "*"}, + {"foo", "*", "foo", "bar"}, + {"bar", "*", "foo", "bar"}, + } + + for _, v := range insert { + ixn := structs.IntentionRequest{ + Datacenter: "dc1", + Op: structs.IntentionOpCreate, + Intention: structs.TestIntention(t), + } + ixn.Intention.SourceNS = v[0] + ixn.Intention.SourceName = v[1] + ixn.Intention.DestinationNS = v[2] + ixn.Intention.DestinationName = v[3] + ixn.Intention.Action = structs.IntentionActionDeny + + // Create + var reply string + require.Nil(a.RPC("Intention.Apply", &ixn, &reply)) + } + } + + // Request matching intention + { + req, _ := http.NewRequest("GET", + "/v1/connect/intentions/test?source=foo/bar&destination=foo/baz", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionTest(resp, req) + 
require.Nil(err) + value := obj.(*structs.IntentionQueryTestResponse) + require.False(value.Allowed) + } + + // Request non-matching intention + { + req, _ := http.NewRequest("GET", + "/v1/connect/intentions/test?source=foo/bar&destination=bar/qux", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionTest(resp, req) + require.Nil(err) + value := obj.(*structs.IntentionQueryTestResponse) + require.True(value.Allowed) + } +} + +func TestIntentionsTest_noSource(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Request + req, _ := http.NewRequest("GET", + "/v1/connect/intentions/test?destination=B", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionTest(resp, req) + require.NotNil(err) + require.Contains(err.Error(), "'source' not set") + require.Nil(obj) +} + +func TestIntentionsTest_noDestination(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // Request + req, _ := http.NewRequest("GET", + "/v1/connect/intentions/test?source=B", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.IntentionTest(resp, req) + require.NotNil(err) + require.Contains(err.Error(), "'destination' not set") + require.Nil(obj) +} + func TestIntentionsCreate_good(t *testing.T) { t.Parallel() From b5b29cd6afb594cff724d5d22e8211ce61ab5ef2 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 11 May 2018 09:19:22 -0700 Subject: [PATCH 283/627] agent: rename test to check --- agent/consul/intention_endpoint.go | 14 +++--- agent/consul/intention_endpoint_test.go | 64 ++++++++++++------------- agent/http_oss.go | 2 +- agent/intentions_endpoint.go | 24 +++++----- agent/intentions_endpoint_test.go | 18 +++---- agent/structs/intention.go | 14 +++--- 6 files changed, 68 insertions(+), 68 deletions(-) diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index 378565241..a0d88352f 100644 
--- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -254,24 +254,24 @@ func (s *Intention) Match( ) } -// Test tests a source/destination and returns whether it would be allowed +// Check tests a source/destination and returns whether it would be allowed // or denied based on the current ACL configuration. // // Note: Whenever the logic for this method is changed, you should take // a look at the agent authorize endpoint (agent/agent_endpoint.go) since // the logic there is similar. -func (s *Intention) Test( +func (s *Intention) Check( args *structs.IntentionQueryRequest, - reply *structs.IntentionQueryTestResponse) error { + reply *structs.IntentionQueryCheckResponse) error { // Forward maybe - if done, err := s.srv.forward("Intention.Test", args, args, reply); done { + if done, err := s.srv.forward("Intention.Check", args, args, reply); done { return err } // Get the test args, and defensively guard against nil - query := args.Test + query := args.Check if query == nil { - return errors.New("Test must be specified on args") + return errors.New("Check must be specified on args") } // Build the URI @@ -322,7 +322,7 @@ func (s *Intention) Test( return errors.New("internal error loading matches") } - // Test the authorization for each match + // Check the authorization for each match for _, ixn := range matches[0] { if auth, ok := uri.Authorize(ixn); ok { reply.Allowed = auth diff --git a/agent/consul/intention_endpoint_test.go b/agent/consul/intention_endpoint_test.go index b1f51a714..29db41f44 100644 --- a/agent/consul/intention_endpoint_test.go +++ b/agent/consul/intention_endpoint_test.go @@ -1009,8 +1009,8 @@ service "bar" { } } -// Test the Test method defaults to allow with no ACL set. -func TestIntentionTest_defaultNoACL(t *testing.T) { +// Test the Check method defaults to allow with no ACL set. 
+func TestIntentionCheck_defaultNoACL(t *testing.T) { t.Parallel() require := require.New(t) @@ -1025,7 +1025,7 @@ func TestIntentionTest_defaultNoACL(t *testing.T) { // Test req := &structs.IntentionQueryRequest{ Datacenter: "dc1", - Test: &structs.IntentionQueryTest{ + Check: &structs.IntentionQueryCheck{ SourceNS: "foo", SourceName: "bar", DestinationNS: "foo", @@ -1033,13 +1033,13 @@ func TestIntentionTest_defaultNoACL(t *testing.T) { SourceType: structs.IntentionSourceConsul, }, } - var resp structs.IntentionQueryTestResponse - require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp)) + var resp structs.IntentionQueryCheckResponse + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Check", req, &resp)) require.True(resp.Allowed) } -// Test the Test method defaults to deny with whitelist ACLs. -func TestIntentionTest_defaultACLDeny(t *testing.T) { +// Test the Check method defaults to deny with whitelist ACLs. +func TestIntentionCheck_defaultACLDeny(t *testing.T) { t.Parallel() require := require.New(t) @@ -1055,10 +1055,10 @@ func TestIntentionTest_defaultACLDeny(t *testing.T) { testrpc.WaitForLeader(t, s1.RPC, "dc1") - // Test + // Check req := &structs.IntentionQueryRequest{ Datacenter: "dc1", - Test: &structs.IntentionQueryTest{ + Check: &structs.IntentionQueryCheck{ SourceNS: "foo", SourceName: "bar", DestinationNS: "foo", @@ -1067,13 +1067,13 @@ func TestIntentionTest_defaultACLDeny(t *testing.T) { }, } req.Token = "root" - var resp structs.IntentionQueryTestResponse - require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp)) + var resp structs.IntentionQueryCheckResponse + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Check", req, &resp)) require.False(resp.Allowed) } -// Test the Test method defaults to deny with blacklist ACLs. -func TestIntentionTest_defaultACLAllow(t *testing.T) { +// Test the Check method defaults to deny with blacklist ACLs. 
+func TestIntentionCheck_defaultACLAllow(t *testing.T) { t.Parallel() require := require.New(t) @@ -1089,10 +1089,10 @@ func TestIntentionTest_defaultACLAllow(t *testing.T) { testrpc.WaitForLeader(t, s1.RPC, "dc1") - // Test + // Check req := &structs.IntentionQueryRequest{ Datacenter: "dc1", - Test: &structs.IntentionQueryTest{ + Check: &structs.IntentionQueryCheck{ SourceNS: "foo", SourceName: "bar", DestinationNS: "foo", @@ -1101,13 +1101,13 @@ func TestIntentionTest_defaultACLAllow(t *testing.T) { }, } req.Token = "root" - var resp structs.IntentionQueryTestResponse - require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp)) + var resp structs.IntentionQueryCheckResponse + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Check", req, &resp)) require.True(resp.Allowed) } -// Test the Test method requires service:read permission. -func TestIntentionTest_aclDeny(t *testing.T) { +// Test the Check method requires service:read permission. +func TestIntentionCheck_aclDeny(t *testing.T) { t.Parallel() require := require.New(t) @@ -1144,10 +1144,10 @@ service "bar" { require.Nil(msgpackrpc.CallWithCodec(codec, "ACL.Apply", &req, &token)) } - // Test + // Check req := &structs.IntentionQueryRequest{ Datacenter: "dc1", - Test: &structs.IntentionQueryTest{ + Check: &structs.IntentionQueryCheck{ SourceNS: "foo", SourceName: "qux", DestinationNS: "foo", @@ -1156,13 +1156,13 @@ service "bar" { }, } req.Token = token - var resp structs.IntentionQueryTestResponse - err := msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp) + var resp structs.IntentionQueryCheckResponse + err := msgpackrpc.CallWithCodec(codec, "Intention.Check", req, &resp) require.True(acl.IsErrPermissionDenied(err)) } -// Test the Test method returns allow/deny properly. -func TestIntentionTest_match(t *testing.T) { +// Test the Check method returns allow/deny properly. 
+func TestIntentionCheck_match(t *testing.T) { t.Parallel() require := require.New(t) @@ -1227,10 +1227,10 @@ service "bar" { } } - // Test + // Check req := &structs.IntentionQueryRequest{ Datacenter: "dc1", - Test: &structs.IntentionQueryTest{ + Check: &structs.IntentionQueryCheck{ SourceNS: "foo", SourceName: "qux", DestinationNS: "foo", @@ -1239,15 +1239,15 @@ service "bar" { }, } req.Token = token - var resp structs.IntentionQueryTestResponse - require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp)) + var resp structs.IntentionQueryCheckResponse + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Check", req, &resp)) require.True(resp.Allowed) // Test no match for sanity { req := &structs.IntentionQueryRequest{ Datacenter: "dc1", - Test: &structs.IntentionQueryTest{ + Check: &structs.IntentionQueryCheck{ SourceNS: "baz", SourceName: "qux", DestinationNS: "foo", @@ -1256,8 +1256,8 @@ service "bar" { }, } req.Token = token - var resp structs.IntentionQueryTestResponse - require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Test", req, &resp)) + var resp structs.IntentionQueryCheckResponse + require.Nil(msgpackrpc.CallWithCodec(codec, "Intention.Check", req, &resp)) require.False(resp.Allowed) } } diff --git a/agent/http_oss.go b/agent/http_oss.go index 92c7bf4c0..ac5eff335 100644 --- a/agent/http_oss.go +++ b/agent/http_oss.go @@ -48,7 +48,7 @@ func init() { registerEndpoint("/v1/connect/ca/roots", []string{"GET"}, (*HTTPServer).ConnectCARoots) registerEndpoint("/v1/connect/intentions", []string{"GET", "POST"}, (*HTTPServer).IntentionEndpoint) registerEndpoint("/v1/connect/intentions/match", []string{"GET"}, (*HTTPServer).IntentionMatch) - registerEndpoint("/v1/connect/intentions/test", []string{"GET"}, (*HTTPServer).IntentionTest) + registerEndpoint("/v1/connect/intentions/check", []string{"GET"}, (*HTTPServer).IntentionCheck) registerEndpoint("/v1/connect/intentions/", []string{"GET", "PUT", "DELETE"}, (*HTTPServer).IntentionSpecific) 
registerEndpoint("/v1/coordinate/datacenters", []string{"GET"}, (*HTTPServer).CoordinateDatacenters) registerEndpoint("/v1/coordinate/nodes", []string{"GET"}, (*HTTPServer).CoordinateNodes) diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index cb846bc19..80ddedf24 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -123,9 +123,9 @@ func (s *HTTPServer) IntentionMatch(resp http.ResponseWriter, req *http.Request) } // GET /v1/connect/intentions/test -func (s *HTTPServer) IntentionTest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { +func (s *HTTPServer) IntentionCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Prepare args - args := &structs.IntentionQueryRequest{Test: &structs.IntentionQueryTest{}} + args := &structs.IntentionQueryRequest{Check: &structs.IntentionQueryCheck{}} if done := s.parse(resp, req, &args.Datacenter, &args.QueryOptions); done { return nil, nil } @@ -133,9 +133,9 @@ func (s *HTTPServer) IntentionTest(resp http.ResponseWriter, req *http.Request) q := req.URL.Query() // Set the source type if set - args.Test.SourceType = structs.IntentionSourceConsul + args.Check.SourceType = structs.IntentionSourceConsul if sourceType, ok := q["source-type"]; ok && len(sourceType) > 0 { - args.Test.SourceType = structs.IntentionSourceType(sourceType[0]) + args.Check.SourceType = structs.IntentionSourceType(sourceType[0]) } // Extract the source/destination @@ -149,14 +149,14 @@ func (s *HTTPServer) IntentionTest(resp http.ResponseWriter, req *http.Request) } // We parse them the same way as matches to extract namespace/name - args.Test.SourceName = source[0] - if args.Test.SourceType == structs.IntentionSourceConsul { + args.Check.SourceName = source[0] + if args.Check.SourceType == structs.IntentionSourceConsul { entry, err := parseIntentionMatchEntry(source[0]) if err != nil { return nil, fmt.Errorf("source %q is invalid: %s", source[0], err) } - 
args.Test.SourceNS = entry.Namespace - args.Test.SourceName = entry.Name + args.Check.SourceNS = entry.Namespace + args.Check.SourceName = entry.Name } // The destination is always in the Consul format @@ -164,11 +164,11 @@ func (s *HTTPServer) IntentionTest(resp http.ResponseWriter, req *http.Request) if err != nil { return nil, fmt.Errorf("destination %q is invalid: %s", destination[0], err) } - args.Test.DestinationNS = entry.Namespace - args.Test.DestinationName = entry.Name + args.Check.DestinationNS = entry.Namespace + args.Check.DestinationName = entry.Name - var reply structs.IntentionQueryTestResponse - if err := s.agent.RPC("Intention.Test", args, &reply); err != nil { + var reply structs.IntentionQueryCheckResponse + if err := s.agent.RPC("Intention.Check", args, &reply); err != nil { return nil, err } diff --git a/agent/intentions_endpoint_test.go b/agent/intentions_endpoint_test.go index e669bcf5f..991ab9017 100644 --- a/agent/intentions_endpoint_test.go +++ b/agent/intentions_endpoint_test.go @@ -181,7 +181,7 @@ func TestIntentionsMatch_noName(t *testing.T) { assert.Nil(obj) } -func TestIntentionsTest_basic(t *testing.T) { +func TestIntentionsCheck_basic(t *testing.T) { t.Parallel() require := require.New(t) @@ -219,9 +219,9 @@ func TestIntentionsTest_basic(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/connect/intentions/test?source=foo/bar&destination=foo/baz", nil) resp := httptest.NewRecorder() - obj, err := a.srv.IntentionTest(resp, req) + obj, err := a.srv.IntentionCheck(resp, req) require.Nil(err) - value := obj.(*structs.IntentionQueryTestResponse) + value := obj.(*structs.IntentionQueryCheckResponse) require.False(value.Allowed) } @@ -230,14 +230,14 @@ func TestIntentionsTest_basic(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/connect/intentions/test?source=foo/bar&destination=bar/qux", nil) resp := httptest.NewRecorder() - obj, err := a.srv.IntentionTest(resp, req) + obj, err := a.srv.IntentionCheck(resp, req) require.Nil(err) 
- value := obj.(*structs.IntentionQueryTestResponse) + value := obj.(*structs.IntentionQueryCheckResponse) require.True(value.Allowed) } } -func TestIntentionsTest_noSource(t *testing.T) { +func TestIntentionsCheck_noSource(t *testing.T) { t.Parallel() require := require.New(t) @@ -248,13 +248,13 @@ func TestIntentionsTest_noSource(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/connect/intentions/test?destination=B", nil) resp := httptest.NewRecorder() - obj, err := a.srv.IntentionTest(resp, req) + obj, err := a.srv.IntentionCheck(resp, req) require.NotNil(err) require.Contains(err.Error(), "'source' not set") require.Nil(obj) } -func TestIntentionsTest_noDestination(t *testing.T) { +func TestIntentionsCheck_noDestination(t *testing.T) { t.Parallel() require := require.New(t) @@ -265,7 +265,7 @@ func TestIntentionsTest_noDestination(t *testing.T) { req, _ := http.NewRequest("GET", "/v1/connect/intentions/test?source=B", nil) resp := httptest.NewRecorder() - obj, err := a.srv.IntentionTest(resp, req) + obj, err := a.srv.IntentionCheck(resp, req) require.NotNil(err) require.Contains(err.Error(), "'destination' not set") require.Nil(obj) diff --git a/agent/structs/intention.go b/agent/structs/intention.go index 34d15d997..19a6402ab 100644 --- a/agent/structs/intention.go +++ b/agent/structs/intention.go @@ -261,9 +261,9 @@ type IntentionQueryRequest struct { // resolving wildcards. Match *IntentionQueryMatch - // Test is non-nil if we're performing a test query. A test will + // Check is non-nil if we're performing a test query. A test will // return allowed/deny based on an exact match. - Test *IntentionQueryTest + Check *IntentionQueryCheck // Options for queries QueryOptions @@ -317,8 +317,8 @@ type IntentionMatchEntry struct { Name string } -// IntentionQueryTest are the parameters for performing a test request. -type IntentionQueryTest struct { +// IntentionQueryCheck are the parameters for performing a test request. 
+type IntentionQueryCheck struct { // SourceNS, SourceName, DestinationNS, and DestinationName are the // source and namespace, respectively, for the test. These must be // exact values. @@ -332,12 +332,12 @@ type IntentionQueryTest struct { // GetACLPrefix returns the prefix to look up the ACL policy for this // request, and a boolean noting whether the prefix is valid to check // or not. You must check the ok value before using the prefix. -func (q *IntentionQueryTest) GetACLPrefix() (string, bool) { +func (q *IntentionQueryCheck) GetACLPrefix() (string, bool) { return q.DestinationName, q.DestinationName != "" } -// IntentionQueryTestResponse is the response for a test request. -type IntentionQueryTestResponse struct { +// IntentionQueryCheckResponse is the response for a test request. +type IntentionQueryCheckResponse struct { Allowed bool } From bf99a7f54ac8d0d50e4a5786faa43db853ddd3ae Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 11 May 2018 17:19:54 -0700 Subject: [PATCH 284/627] api: IntentionCheck --- api/agent_test.go | 2 +- api/connect_intention.go | 39 ++++++++++++++++++++++++++++ api/connect_intention_test.go | 49 +++++++++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 1 deletion(-) diff --git a/api/agent_test.go b/api/agent_test.go index ad236ba3a..1066a0b42 100644 --- a/api/agent_test.go +++ b/api/agent_test.go @@ -1159,7 +1159,7 @@ func TestAPI_AgentConnectProxyConfig(t *testing.T) { TargetServiceName: "foo", ContentHash: "93baee1d838888ae", ExecMode: "daemon", - Command: []string{"consul connect proxy"}, + Command: []string{"consul", "connect", "proxy"}, Config: map[string]interface{}{ "bind_address": "127.0.0.1", "bind_port": float64(20000), diff --git a/api/connect_intention.go b/api/connect_intention.go index aa2f82d3d..c28c55de1 100644 --- a/api/connect_intention.go +++ b/api/connect_intention.go @@ -83,6 +83,18 @@ const ( IntentionMatchDestination IntentionMatchType = "destination" ) +// IntentionCheck are the 
arguments for the intention check API. For +// more documentation see the IntentionCheck function. +type IntentionCheck struct { + // Source and Destination are the source and destination values to + // check. The destination is always a Consul service, but the source + // may be other values as defined by the SourceType. + Source, Destination string + + // SourceType is the type of the value for the source. + SourceType IntentionSourceType +} + // Intentions returns the list of intentions. func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) { r := h.c.newRequest("GET", "/v1/connect/intentions") @@ -156,6 +168,33 @@ func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[str return out, qm, nil } +// IntentionCheck returns whether a given source/destination would be allowed +// or not given the current set of intentions and the configuration of Consul. +func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/check") + r.setQueryOptions(q) + r.params.Set("source", args.Source) + r.params.Set("destination", args.Destination) + if args.SourceType != "" { + r.params.Set("source-type", string(args.SourceType)) + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out struct{ Allowed bool } + if err := decodeBody(resp, &out); err != nil { + return false, nil, err + } + return out.Allowed, qm, nil +} + // IntentionCreate will create a new intention. The ID in the given // structure must be empty and a generate ID will be returned on // success. 
diff --git a/api/connect_intention_test.go b/api/connect_intention_test.go index 0edcf4c49..e6e76071e 100644 --- a/api/connect_intention_test.go +++ b/api/connect_intention_test.go @@ -87,6 +87,55 @@ func TestAPI_ConnectIntentionMatch(t *testing.T) { require.Equal(expected, actual) } +func TestAPI_ConnectIntentionCheck(t *testing.T) { + t.Parallel() + + require := require.New(t) + c, s := makeClient(t) + defer s.Stop() + + connect := c.Connect() + + // Create + { + insert := [][]string{ + {"foo", "*", "foo", "bar"}, + } + + for _, v := range insert { + ixn := testIntention() + ixn.SourceNS = v[0] + ixn.SourceName = v[1] + ixn.DestinationNS = v[2] + ixn.DestinationName = v[3] + ixn.Action = IntentionActionDeny + id, _, err := connect.IntentionCreate(ixn, nil) + require.Nil(err) + require.NotEmpty(id) + } + } + + // Match it + { + result, _, err := connect.IntentionCheck(&IntentionCheck{ + Source: "foo/qux", + Destination: "foo/bar", + }, nil) + require.Nil(err) + require.False(result) + } + + // Match it (non-matching) + { + result, _, err := connect.IntentionCheck(&IntentionCheck{ + Source: "bar/qux", + Destination: "foo/bar", + }, nil) + require.Nil(err) + require.True(result) + } +} + func testIntention() *Intention { return &Intention{ SourceNS: "eng", From a1a7eaa8767e7f201802f9bb900e380a016c4688 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 11 May 2018 19:47:26 -0700 Subject: [PATCH 285/627] command/intention/create --- api/connect_intention.go | 16 ++ command/commands_oss.go | 4 + command/intention/create/create.go | 193 ++++++++++++++++++++++++ command/intention/create/create_test.go | 188 +++++++++++++++++++++++ command/intention/intention.go | 48 ++++++ command/intention/intention_test.go | 13 ++ 6 files changed, 462 insertions(+) create mode 100644 command/intention/create/create.go create mode 100644 command/intention/create/create_test.go create mode 100644 command/intention/intention.go create mode 100644 
command/intention/intention_test.go diff --git a/api/connect_intention.go b/api/connect_intention.go index c28c55de1..e43506a8a 100644 --- a/api/connect_intention.go +++ b/api/connect_intention.go @@ -1,6 +1,7 @@ package api import ( + "fmt" "time" ) @@ -50,6 +51,21 @@ type Intention struct { ModifyIndex uint64 } +// String returns human-friendly output describing ths intention. +func (i *Intention) String() string { + source := i.SourceName + if i.SourceNS != "" { + source = i.SourceNS + "/" + source + } + + dest := i.DestinationName + if i.DestinationNS != "" { + dest = i.DestinationNS + "/" + dest + } + + return fmt.Sprintf("%s => %s (%s)", source, dest, i.Action) +} + // IntentionAction is the action that the intention represents. This // can be "allow" or "deny" to whitelist or blacklist intentions. type IntentionAction string diff --git a/command/commands_oss.go b/command/commands_oss.go index c1e3e794a..79636c598 100644 --- a/command/commands_oss.go +++ b/command/commands_oss.go @@ -12,6 +12,8 @@ import ( "github.com/hashicorp/consul/command/exec" "github.com/hashicorp/consul/command/forceleave" "github.com/hashicorp/consul/command/info" + "github.com/hashicorp/consul/command/intention" + ixncreate "github.com/hashicorp/consul/command/intention/create" "github.com/hashicorp/consul/command/join" "github.com/hashicorp/consul/command/keygen" "github.com/hashicorp/consul/command/keyring" @@ -66,6 +68,8 @@ func init() { Register("exec", func(ui cli.Ui) (cli.Command, error) { return exec.New(ui, MakeShutdownCh()), nil }) Register("force-leave", func(ui cli.Ui) (cli.Command, error) { return forceleave.New(ui), nil }) Register("info", func(ui cli.Ui) (cli.Command, error) { return info.New(ui), nil }) + Register("intention", func(ui cli.Ui) (cli.Command, error) { return intention.New(), nil }) + Register("intention create", func(ui cli.Ui) (cli.Command, error) { return ixncreate.New(), nil }) Register("join", func(ui cli.Ui) (cli.Command, error) { return 
join.New(ui), nil }) Register("keygen", func(ui cli.Ui) (cli.Command, error) { return keygen.New(ui), nil }) Register("keyring", func(ui cli.Ui) (cli.Command, error) { return keyring.New(ui), nil }) diff --git a/command/intention/create/create.go b/command/intention/create/create.go new file mode 100644 index 000000000..b847117a1 --- /dev/null +++ b/command/intention/create/create.go @@ -0,0 +1,193 @@ +package create + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "os" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/command/flags" + "github.com/mitchellh/cli" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + // flags + flagAllow bool + flagDeny bool + flagFile bool + flagReplace bool + flagMeta map[string]string + + // testStdin is the input for testing. + testStdin io.Reader +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.flags.BoolVar(&c.flagAllow, "allow", false, + "Create an intention that allows when matched.") + c.flags.BoolVar(&c.flagDeny, "deny", false, + "Create an intention that denies when matched.") + c.flags.BoolVar(&c.flagFile, "file", false, + "Read intention data from one or more files.") + c.flags.BoolVar(&c.flagReplace, "replace", false, + "Replace matching intentions.") + c.flags.Var((*flags.FlagMapValue)(&c.flagMeta), "meta", + "Metadata to set on the intention, formatted as key=value. 
This flag "+ + "may be specified multiple times to set multiple meta fields.") + + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.ServerFlags()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 1 + } + + // Default to allow + if !c.flagAllow && !c.flagDeny { + c.flagAllow = true + } + + // If both are specified it is an error + if c.flagAllow && c.flagDeny { + c.UI.Error("Only one of -allow or -deny may be specified.") + return 1 + } + + // Check for arg validation + args = c.flags.Args() + ixns, err := c.ixnsFromArgs(args) + if err != nil { + c.UI.Error(fmt.Sprintf("Error: %s", err)) + return 1 + } + + // Create and test the HTTP client + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 1 + } + + // Go through and create each intention + for _, ixn := range ixns { + _, _, err := client.Connect().IntentionCreate(ixn, nil) + if err != nil { + c.UI.Error(fmt.Sprintf("Error creating intention %q: %s", ixn, err)) + return 1 + } + + c.UI.Output(fmt.Sprintf("Created: %s", ixn)) + } + + return 0 +} + +// ixnsFromArgs returns the set of intentions to create based on the arguments +// given and the flags set. This will call ixnsFromFiles if the -file flag +// was set. 
+func (c *cmd) ixnsFromArgs(args []string) ([]*api.Intention, error) { + // If we're in file mode, load from files + if c.flagFile { + return c.ixnsFromFiles(args) + } + + // From args we require exactly two + if len(args) != 2 { + return nil, fmt.Errorf("Must specify two arguments: source and destination") + } + + return []*api.Intention{&api.Intention{ + SourceName: args[0], + DestinationName: args[1], + SourceType: api.IntentionSourceConsul, + Action: c.ixnAction(), + Meta: c.flagMeta, + }}, nil +} + +func (c *cmd) ixnsFromFiles(args []string) ([]*api.Intention, error) { + var result []*api.Intention + for _, path := range args { + f, err := os.Open(path) + if err != nil { + return nil, err + } + + var ixn api.Intention + err = json.NewDecoder(f).Decode(&ixn) + f.Close() + if err != nil { + return nil, err + } + + result = append(result, &ixn) + } + + return result, nil +} + +// ixnAction returns the api.IntentionAction based on the flag set. +func (c *cmd) ixnAction() api.IntentionAction { + if c.flagAllow { + return api.IntentionActionAllow + } + + return api.IntentionActionDeny +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return c.help +} + +const synopsis = "Create intentions for service connections." +const help = ` +Usage: consul intention create [options] SRC DST +Usage: consul intention create [options] -file FILE... + + Create one or more intentions. The data can be specified as a single + source and destination pair or via a set of files when the "-file" flag + is specified. + + $ consul intention create web db + + To consume data from a set of files: + + $ consul intention create -file one.json two.json + + When specifying the "-file" flag, "-" may be used once to read from stdin: + + $ echo "{ ... }" | consul intention create -file - + + An "allow" intention is created by default (whitelist). To create a + "deny" intention, the "-deny" flag should be specified. 
+ + If a conflicting intention is found, creation will fail. To replace any + conflicting intentions, specify the "-replace" flag. This will replace any + conflicting intentions with the intention specified in this command. + Metadata and any other fields of the previous intention will not be + preserved. + + Additional flags and more advanced use cases are detailed below. +` diff --git a/command/intention/create/create_test.go b/command/intention/create/create_test.go new file mode 100644 index 000000000..963a3edc6 --- /dev/null +++ b/command/intention/create/create_test.go @@ -0,0 +1,188 @@ +package create + +import ( + "os" + "strings" + "testing" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/testutil" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +func TestCommand_noTabs(t *testing.T) { + t.Parallel() + if strings.ContainsRune(New(nil).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestCommand_Validation(t *testing.T) { + t.Parallel() + + ui := cli.NewMockUi() + c := New(ui) + + cases := map[string]struct { + args []string + output string + }{ + "-allow and -deny": { + []string{"-allow", "-deny", "foo", "bar"}, + "one of -allow", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + require := require.New(t) + + c.init() + + // Ensure our buffer is always clear + if ui.ErrorWriter != nil { + ui.ErrorWriter.Reset() + } + if ui.OutputWriter != nil { + ui.OutputWriter.Reset() + } + + require.Equal(1, c.Run(tc.args)) + output := ui.ErrorWriter.String() + require.Contains(output, tc.output) + }) + } +} + +func TestCommand(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "foo", "bar", + } + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + + ixns, 
_, err := client.Connect().Intentions(nil) + require.NoError(err) + require.Len(ixns, 1) + require.Equal("foo", ixns[0].SourceName) + require.Equal("bar", ixns[0].DestinationName) + require.Equal(api.IntentionActionAllow, ixns[0].Action) +} + +func TestCommand_deny(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-deny", + "foo", "bar", + } + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + + ixns, _, err := client.Connect().Intentions(nil) + require.NoError(err) + require.Len(ixns, 1) + require.Equal("foo", ixns[0].SourceName) + require.Equal("bar", ixns[0].DestinationName) + require.Equal(api.IntentionActionDeny, ixns[0].Action) +} + +func TestCommand_meta(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-meta", "hello=world", + "foo", "bar", + } + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + + ixns, _, err := client.Connect().Intentions(nil) + require.NoError(err) + require.Len(ixns, 1) + require.Equal("foo", ixns[0].SourceName) + require.Equal("bar", ixns[0].DestinationName) + require.Equal(map[string]string{"hello": "world"}, ixns[0].Meta) +} + +func TestCommand_File(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + ui := cli.NewMockUi() + c := New(ui) + + contents := `{ "SourceName": "foo", "DestinationName": "bar", "Action": "allow" }` + f := testutil.TempFile(t, "intention-create-command-file") + defer os.Remove(f.Name()) + if _, err := f.WriteString(contents); err != nil { + t.Fatalf("err: %#v", err) + } + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + 
"-file", + f.Name(), + } + + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + + ixns, _, err := client.Connect().Intentions(nil) + require.NoError(err) + require.Len(ixns, 1) + require.Equal("foo", ixns[0].SourceName) + require.Equal("bar", ixns[0].DestinationName) + require.Equal(api.IntentionActionAllow, ixns[0].Action) +} + +func TestCommand_FileNoExist(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-file", + "shouldnotexist.txt", + } + + require.Equal(1, c.Run(args), ui.ErrorWriter.String()) + require.Contains(ui.ErrorWriter.String(), "no such file") +} diff --git a/command/intention/intention.go b/command/intention/intention.go new file mode 100644 index 000000000..767e3ff1b --- /dev/null +++ b/command/intention/intention.go @@ -0,0 +1,48 @@ +package intention + +import ( + "github.com/hashicorp/consul/command/flags" + "github.com/mitchellh/cli" +) + +func New() *cmd { + return &cmd{} +} + +type cmd struct{} + +func (c *cmd) Run(args []string) int { + return cli.RunResultHelp +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return flags.Usage(help, nil) +} + +const synopsis = "Interact with Connect service intentions" +const help = ` +Usage: consul intention [options] [args] + + This command has subcommands for interacting with intentions. Intentions + are the permissions for what services are allowed to communicate via + Connect. Here are some simple examples, and more detailed examples are + available in the subcommands or the documentation. 
+ + Create an intention to allow "web" to talk to "db": + + $ consul intention create web db + + Test whether a "web" is allowed to connect to "db": + + $ consul intention check web db + + Find all intentions for communicating to the "db" service: + + $ consul intention match db + + For more examples, ask for subcommand help or view the documentation. +` diff --git a/command/intention/intention_test.go b/command/intention/intention_test.go new file mode 100644 index 000000000..e697f537f --- /dev/null +++ b/command/intention/intention_test.go @@ -0,0 +1,13 @@ +package intention + +import ( + "strings" + "testing" +) + +func TestCommand_noTabs(t *testing.T) { + t.Parallel() + if strings.ContainsRune(New().Help(), '\t') { + t.Fatal("help has tabs") + } +} From 77d0360de15e87ee456cf86e506fe211e2c7d913 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 11 May 2018 21:42:46 -0700 Subject: [PATCH 286/627] command/intention/finder: package for finding based on src/dst --- api/connect_intention.go | 31 +++++++++++----- command/intention/finder/finder.go | 46 +++++++++++++++++++++++ command/intention/finder/finder_test.go | 49 +++++++++++++++++++++++++ 3 files changed, 117 insertions(+), 9 deletions(-) create mode 100644 command/intention/finder/finder.go create mode 100644 command/intention/finder/finder_test.go diff --git a/api/connect_intention.go b/api/connect_intention.go index e43506a8a..b7af3163c 100644 --- a/api/connect_intention.go +++ b/api/connect_intention.go @@ -53,17 +53,30 @@ type Intention struct { // String returns human-friendly output describing ths intention. func (i *Intention) String() string { - source := i.SourceName - if i.SourceNS != "" { - source = i.SourceNS + "/" + source + return fmt.Sprintf("%s => %s (%s)", + i.SourceString(), + i.DestinationString(), + i.Action) +} + +// SourceString returns the namespace/name format for the source, or +// just "name" if the namespace is the default namespace. 
+func (i *Intention) SourceString() string { + return i.partString(i.SourceNS, i.SourceName) +} + +// DestinationString returns the namespace/name format for the source, or +// just "name" if the namespace is the default namespace. +func (i *Intention) DestinationString() string { + return i.partString(i.DestinationNS, i.DestinationName) +} + +func (i *Intention) partString(ns, n string) string { + if ns != "" { + n = ns + "/" + n } - dest := i.DestinationName - if i.DestinationNS != "" { - dest = i.DestinationNS + "/" + dest - } - - return fmt.Sprintf("%s => %s (%s)", source, dest, i.Action) + return n } // IntentionAction is the action that the intention represents. This diff --git a/command/intention/finder/finder.go b/command/intention/finder/finder.go new file mode 100644 index 000000000..c8db7ba5c --- /dev/null +++ b/command/intention/finder/finder.go @@ -0,0 +1,46 @@ +package finder + +import ( + "sync" + + "github.com/hashicorp/consul/api" +) + +// Finder finds intentions by a src/dst exact match. There is currently +// no direct API to do this so this struct downloads all intentions and +// caches them once, and searches in-memory for this. For now this works since +// even with a very large number of intentions, the size of the data gzipped +// over HTTP will be relatively small. +type Finder struct { + // Client is the API client to use for any requests. + Client *api.Client + + lock sync.Mutex + ixns []*api.Intention // cached list of intentions +} + +// Find finds the intention that matches the given src and dst. This will +// return nil when the result is not found. 
+func (f *Finder) Find(src, dst string) (*api.Intention, error) { + f.lock.Lock() + defer f.lock.Unlock() + + // If the list of ixns is nil, then we haven't fetched yet, so fetch + if f.ixns == nil { + ixns, _, err := f.Client.Connect().Intentions(nil) + if err != nil { + return nil, err + } + + f.ixns = ixns + } + + // Go through the intentions and find an exact match + for _, ixn := range f.ixns { + if ixn.SourceString() == src && ixn.DestinationString() == dst { + return ixn, nil + } + } + + return nil, nil +} diff --git a/command/intention/finder/finder_test.go b/command/intention/finder/finder_test.go new file mode 100644 index 000000000..eb8c5b99e --- /dev/null +++ b/command/intention/finder/finder_test.go @@ -0,0 +1,49 @@ +package finder + +import ( + "testing" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/stretchr/testify/require" +) + +func TestFinder(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + // Create a set of intentions + var ids []string + { + insert := [][]string{ + []string{"a", "b", "c", "d"}, + } + + for _, v := range insert { + ixn := &api.Intention{ + SourceNS: v[0], + SourceName: v[1], + DestinationNS: v[2], + DestinationName: v[3], + Action: api.IntentionActionAllow, + } + + id, _, err := client.Connect().IntentionCreate(ixn, nil) + require.NoError(err) + ids = append(ids, id) + } + } + + finder := &Finder{Client: client} + ixn, err := finder.Find("a/b", "c/d") + require.NoError(err) + require.Equal(ids[0], ixn.ID) + + ixn, err = finder.Find("a/c", "c/d") + require.NoError(err) + require.Nil(ixn) +} From aead9cd422c5cdff241063b983fb867fd28deb04 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 11 May 2018 22:07:58 -0700 Subject: [PATCH 287/627] command/intention/get: the get command without tests --- api/connect_intention.go | 7 +- command/commands_oss.go | 4 +- 
command/intention/finder/finder.go | 16 ++++ command/intention/get/get.go | 133 +++++++++++++++++++++++++++++ 4 files changed, 158 insertions(+), 2 deletions(-) create mode 100644 command/intention/get/get.go diff --git a/api/connect_intention.go b/api/connect_intention.go index b7af3163c..95ed335a8 100644 --- a/api/connect_intention.go +++ b/api/connect_intention.go @@ -72,13 +72,18 @@ func (i *Intention) DestinationString() string { } func (i *Intention) partString(ns, n string) string { - if ns != "" { + // For now we omit the default namespace from the output. In the future + // we might want to look at this and show this in a multi-namespace world. + if ns != "" && ns != IntentionDefaultNamespace { n = ns + "/" + n } return n } +// IntentionDefaultNamespace is the default namespace value. +const IntentionDefaultNamespace = "default" + // IntentionAction is the action that the intention represents. This // can be "allow" or "deny" to whitelist or blacklist intentions. type IntentionAction string diff --git a/command/commands_oss.go b/command/commands_oss.go index 79636c598..9eba97b09 100644 --- a/command/commands_oss.go +++ b/command/commands_oss.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/consul/command/info" "github.com/hashicorp/consul/command/intention" ixncreate "github.com/hashicorp/consul/command/intention/create" + ixnget "github.com/hashicorp/consul/command/intention/get" "github.com/hashicorp/consul/command/join" "github.com/hashicorp/consul/command/keygen" "github.com/hashicorp/consul/command/keyring" @@ -69,7 +70,8 @@ func init() { Register("force-leave", func(ui cli.Ui) (cli.Command, error) { return forceleave.New(ui), nil }) Register("info", func(ui cli.Ui) (cli.Command, error) { return info.New(ui), nil }) Register("intention", func(ui cli.Ui) (cli.Command, error) { return intention.New(), nil }) - Register("intention create", func(ui cli.Ui) (cli.Command, error) { return ixncreate.New(), nil }) + Register("intention create", func(ui 
cli.Ui) (cli.Command, error) { return ixncreate.New(ui), nil }) + Register("intention get", func(ui cli.Ui) (cli.Command, error) { return ixnget.New(ui), nil }) Register("join", func(ui cli.Ui) (cli.Command, error) { return join.New(ui), nil }) Register("keygen", func(ui cli.Ui) (cli.Command, error) { return keygen.New(ui), nil }) Register("keyring", func(ui cli.Ui) (cli.Command, error) { return keyring.New(ui), nil }) diff --git a/command/intention/finder/finder.go b/command/intention/finder/finder.go index c8db7ba5c..f4c6109a0 100644 --- a/command/intention/finder/finder.go +++ b/command/intention/finder/finder.go @@ -1,6 +1,7 @@ package finder import ( + "strings" "sync" "github.com/hashicorp/consul/api" @@ -22,6 +23,9 @@ type Finder struct { // Find finds the intention that matches the given src and dst. This will // return nil when the result is not found. func (f *Finder) Find(src, dst string) (*api.Intention, error) { + src = StripDefaultNS(src) + dst = StripDefaultNS(dst) + f.lock.Lock() defer f.lock.Unlock() @@ -44,3 +48,15 @@ func (f *Finder) Find(src, dst string) (*api.Intention, error) { return nil, nil } + +// StripDefaultNS strips the default namespace from an argument. For now, +// the API and lookups strip this value from string output so we strip it. 
+func StripDefaultNS(v string) string { + if idx := strings.IndexByte(v, '/'); idx > 0 { + if v[:idx] == api.IntentionDefaultNamespace { + return v[:idx+1] + } + } + + return v +} diff --git a/command/intention/get/get.go b/command/intention/get/get.go new file mode 100644 index 000000000..0c0eba77e --- /dev/null +++ b/command/intention/get/get.go @@ -0,0 +1,133 @@ +package create + +import ( + "flag" + "fmt" + "io" + "sort" + "time" + + "github.com/hashicorp/consul/command/flags" + "github.com/hashicorp/consul/command/intention/finder" + "github.com/mitchellh/cli" + "github.com/ryanuber/columnize" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + // testStdin is the input for testing. + testStdin io.Reader +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.ServerFlags()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 1 + } + + // Create and test the HTTP client + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 1 + } + + // Get the intention ID to load + var id string + args = c.flags.Args() + switch len(args) { + case 1: + id = args[0] + + case 2: + f := &finder.Finder{Client: client} + ixn, err := f.Find(args[0], args[1]) + if err != nil { + c.UI.Error(fmt.Sprintf("Error looking up intention: %s", err)) + return 1 + } + if ixn == nil { + c.UI.Error(fmt.Sprintf( + "Intention with source %q and destination %q not found.", + args[0], args[1])) + return 1 + } + + id = ixn.ID + + default: + c.UI.Error(fmt.Sprintf("Error: get requires exactly 1 or 2 arguments")) + return 1 + } + + // Read the intention + ixn, _, err := 
client.Connect().IntentionGet(id, nil) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading the intention: %s", err)) + return 1 + } + + // Format the tabular data + data := []string{ + fmt.Sprintf("Source:|%s", ixn.SourceString()), + fmt.Sprintf("Destination:|%s", ixn.DestinationString()), + fmt.Sprintf("Action:|%s", ixn.Action), + fmt.Sprintf("ID:|%s", ixn.ID), + } + if v := ixn.Description; v != "" { + data = append(data, fmt.Sprintf("Description:|%s", v)) + } + if len(ixn.Meta) > 0 { + var keys []string + for k := range ixn.Meta { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + data = append(data, fmt.Sprintf("Meta[%s]:|%s", k, ixn.Meta[k])) + } + } + data = append(data, + fmt.Sprintf("Created At:|%s", ixn.CreatedAt.Local().Format(time.RFC850)), + ) + + c.UI.Output(columnize.SimpleFormat(data)) + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return c.help +} + +const synopsis = "Show information about an intention." +const help = ` +Usage: consul intention get [options] SRC DST +Usage: consul intention get [options] ID + + Read and show the details about an intention. The intention can be looked + up via an exact source/destination match or via the unique intention ID. 
+ + $ consul intention get web db + +` From efa82278e205b113bec7bd5dce6a9c29c7e15e47 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 11 May 2018 22:19:21 -0700 Subject: [PATCH 288/627] api: IntentionDelete + tests --- api/connect_intention.go | 24 +++++++++++++++++++++++- api/connect_intention_test.go | 11 ++++++++++- 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/api/connect_intention.go b/api/connect_intention.go index 95ed335a8..3d10b219c 100644 --- a/api/connect_intention.go +++ b/api/connect_intention.go @@ -154,7 +154,7 @@ func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) { r := h.c.newRequest("GET", "/v1/connect/intentions/"+id) r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) + rtt, resp, err := h.c.doRequest(r) if err != nil { return nil, nil, err } @@ -164,6 +164,12 @@ func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMe parseQueryMeta(resp, qm) qm.RequestTime = rtt + if resp.StatusCode == 404 { + return nil, qm, nil + } else if resp.StatusCode != 200 { + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + var out Intention if err := decodeBody(resp, &out); err != nil { return nil, nil, err @@ -171,6 +177,22 @@ func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMe return &out, qm, nil } +// IntentionDelete deletes a single intention. +func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) { + r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + return qm, nil +} + // IntentionMatch returns the list of intentions that match a given source // or destination. 
The returned intentions are ordered by precedence where // result[0] is the highest precedence (if that matches, then that rule overrides diff --git a/api/connect_intention_test.go b/api/connect_intention_test.go index e6e76071e..436d0de0c 100644 --- a/api/connect_intention_test.go +++ b/api/connect_intention_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAPI_ConnectIntentionCreateListGet(t *testing.T) { +func TestAPI_ConnectIntentionCreateListGetDelete(t *testing.T) { t.Parallel() require := require.New(t) @@ -38,6 +38,15 @@ func TestAPI_ConnectIntentionCreateListGet(t *testing.T) { actual, _, err = connect.IntentionGet(id, nil) require.Nil(err) require.Equal(ixn, actual) + + // Delete it + _, err = connect.IntentionDelete(id, nil) + require.Nil(err) + + // Get it (should be gone) + actual, _, err = connect.IntentionGet(id, nil) + require.Nil(err) + require.Nil(actual) } func TestAPI_ConnectIntentionMatch(t *testing.T) { From 4caeaaaa218d17fca4c5c721894b0ab51dbc3b2b Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 11 May 2018 22:19:39 -0700 Subject: [PATCH 289/627] command/intentions/delete --- command/commands_oss.go | 2 + command/intention/delete/delete.go | 86 ++++++++++++++++++++++++++++++ command/intention/finder/finder.go | 26 +++++++++ command/intention/get/get.go | 30 ++--------- 4 files changed, 119 insertions(+), 25 deletions(-) create mode 100644 command/intention/delete/delete.go diff --git a/command/commands_oss.go b/command/commands_oss.go index 9eba97b09..81d44fd1a 100644 --- a/command/commands_oss.go +++ b/command/commands_oss.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/consul/command/info" "github.com/hashicorp/consul/command/intention" ixncreate "github.com/hashicorp/consul/command/intention/create" + ixndelete "github.com/hashicorp/consul/command/intention/delete" ixnget "github.com/hashicorp/consul/command/intention/get" "github.com/hashicorp/consul/command/join" 
"github.com/hashicorp/consul/command/keygen" @@ -71,6 +72,7 @@ func init() { Register("info", func(ui cli.Ui) (cli.Command, error) { return info.New(ui), nil }) Register("intention", func(ui cli.Ui) (cli.Command, error) { return intention.New(), nil }) Register("intention create", func(ui cli.Ui) (cli.Command, error) { return ixncreate.New(ui), nil }) + Register("intention delete", func(ui cli.Ui) (cli.Command, error) { return ixndelete.New(ui), nil }) Register("intention get", func(ui cli.Ui) (cli.Command, error) { return ixnget.New(ui), nil }) Register("join", func(ui cli.Ui) (cli.Command, error) { return join.New(ui), nil }) Register("keygen", func(ui cli.Ui) (cli.Command, error) { return keygen.New(ui), nil }) diff --git a/command/intention/delete/delete.go b/command/intention/delete/delete.go new file mode 100644 index 000000000..d7a928d9c --- /dev/null +++ b/command/intention/delete/delete.go @@ -0,0 +1,86 @@ +package delete + +import ( + "flag" + "fmt" + "io" + + "github.com/hashicorp/consul/command/flags" + "github.com/hashicorp/consul/command/intention/finder" + "github.com/mitchellh/cli" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + // testStdin is the input for testing. 
+ testStdin io.Reader +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.ServerFlags()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 1 + } + + // Create and test the HTTP client + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 1 + } + + // Get the intention ID to load + f := &finder.Finder{Client: client} + id, err := f.IDFromArgs(c.flags.Args()) + if err != nil { + c.UI.Error(fmt.Sprintf("Error: %s", err)) + return 1 + } + + // Read the intention + _, err = client.Connect().IntentionDelete(id, nil) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading the intention: %s", err)) + return 1 + } + + c.UI.Output(fmt.Sprintf("Intention deleted.")) + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return c.help +} + +const synopsis = "Delete an intention." +const help = ` +Usage: consul intention delete [options] SRC DST +Usage: consul intention delete [options] ID + + Delete an intention. This cannot be reversed. The intention can be looked + up via an exact source/destination match or via the unique intention ID. + + $ consul intention delete web db + +` diff --git a/command/intention/finder/finder.go b/command/intention/finder/finder.go index f4c6109a0..e16bc02dc 100644 --- a/command/intention/finder/finder.go +++ b/command/intention/finder/finder.go @@ -1,6 +1,7 @@ package finder import ( + "fmt" "strings" "sync" @@ -20,6 +21,31 @@ type Finder struct { ixns []*api.Intention // cached list of intentions } +// ID returns the intention ID for the given CLI args. An error is returned +// if args is not 1 or 2 elements. 
+func (f *Finder) IDFromArgs(args []string) (string, error) { + switch len(args) { + case 1: + return args[0], nil + + case 2: + ixn, err := f.Find(args[0], args[1]) + if err != nil { + return "", err + } + if ixn == nil { + return "", fmt.Errorf( + "Intention with source %q and destination %q not found.", + args[0], args[1]) + } + + return ixn.ID, nil + + default: + return "", fmt.Errorf("command requires exactly 1 or 2 arguments") + } +} + // Find finds the intention that matches the given src and dst. This will // return nil when the result is not found. func (f *Finder) Find(src, dst string) (*api.Intention, error) { diff --git a/command/intention/get/get.go b/command/intention/get/get.go index 0c0eba77e..bfe4c754c 100644 --- a/command/intention/get/get.go +++ b/command/intention/get/get.go @@ -1,4 +1,4 @@ -package create +package get import ( "flag" @@ -50,30 +50,10 @@ func (c *cmd) Run(args []string) int { } // Get the intention ID to load - var id string - args = c.flags.Args() - switch len(args) { - case 1: - id = args[0] - - case 2: - f := &finder.Finder{Client: client} - ixn, err := f.Find(args[0], args[1]) - if err != nil { - c.UI.Error(fmt.Sprintf("Error looking up intention: %s", err)) - return 1 - } - if ixn == nil { - c.UI.Error(fmt.Sprintf( - "Intention with source %q and destination %q not found.", - args[0], args[1])) - return 1 - } - - id = ixn.ID - - default: - c.UI.Error(fmt.Sprintf("Error: get requires exactly 1 or 2 arguments")) + f := &finder.Finder{Client: client} + id, err := f.IDFromArgs(c.flags.Args()) + if err != nil { + c.UI.Error(fmt.Sprintf("Error: %s", err)) return 1 } From e055f40612e27de7eb2b6fc663ae659013beda04 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 11 May 2018 22:28:59 -0700 Subject: [PATCH 290/627] command/intention/create: -replace flag, jank, we should change to PUT --- command/intention/create/create.go | 32 ++++++++++++- command/intention/create/create_test.go | 63 +++++++++++++++++++++++++ 2 files 
changed, 94 insertions(+), 1 deletion(-) diff --git a/command/intention/create/create.go b/command/intention/create/create.go index b847117a1..dd5f61565 100644 --- a/command/intention/create/create.go +++ b/command/intention/create/create.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/flags" + "github.com/hashicorp/consul/command/intention/finder" "github.com/mitchellh/cli" ) @@ -44,7 +45,8 @@ func (c *cmd) init() { c.flags.BoolVar(&c.flagFile, "file", false, "Read intention data from one or more files.") c.flags.BoolVar(&c.flagReplace, "replace", false, - "Replace matching intentions.") + "Replace matching intentions. This is not an atomic operation. "+ + "If the insert fails, then the previous intention will still be deleted.") c.flags.Var((*flags.FlagMapValue)(&c.flagMeta), "meta", "Metadata to set on the intention, formatted as key=value. This flag "+ "may be specified multiple times to set multiple meta fields.") @@ -86,8 +88,36 @@ func (c *cmd) Run(args []string) int { return 1 } + // Create the finder in case we need it + find := &finder.Finder{Client: client} + // Go through and create each intention for _, ixn := range ixns { + // If replace is set to true, then find this intention and delete it. 
+ if c.flagReplace { + ixn, err := find.Find(ixn.SourceString(), ixn.DestinationString()) + if err != nil { + c.UI.Error(fmt.Sprintf( + "Error looking up intention for replacement with source %q "+ + "and destination %q: %s", + ixn.SourceString(), + ixn.DestinationString(), + err)) + return 1 + } + if ixn != nil { + if _, err := client.Connect().IntentionDelete(ixn.ID, nil); err != nil { + c.UI.Error(fmt.Sprintf( + "Error deleting intention for replacement with source %q "+ + "and destination %q: %s", + ixn.SourceString(), + ixn.DestinationString(), + err)) + return 1 + } + } + } + _, _, err := client.Connect().IntentionCreate(ixn, nil) if err != nil { c.UI.Error(fmt.Sprintf("Error creating intention %q: %s", ixn, err)) diff --git a/command/intention/create/create_test.go b/command/intention/create/create_test.go index 963a3edc6..067d0d6a9 100644 --- a/command/intention/create/create_test.go +++ b/command/intention/create/create_test.go @@ -186,3 +186,66 @@ func TestCommand_FileNoExist(t *testing.T) { require.Equal(1, c.Run(args), ui.ErrorWriter.String()) require.Contains(ui.ErrorWriter.String(), "no such file") } + +func TestCommand_replace(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + // Create the first + { + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "foo", "bar", + } + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + + ixns, _, err := client.Connect().Intentions(nil) + require.NoError(err) + require.Len(ixns, 1) + require.Equal("foo", ixns[0].SourceName) + require.Equal("bar", ixns[0].DestinationName) + require.Equal(api.IntentionActionAllow, ixns[0].Action) + } + + // Don't replace, should be an error + { + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-deny", + "foo", "bar", + } + require.Equal(1, c.Run(args), ui.ErrorWriter.String()) + 
require.Contains(ui.ErrorWriter.String(), "duplicate") + } + + // Replace it + { + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-replace", + "-deny", + "foo", "bar", + } + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + + ixns, _, err := client.Connect().Intentions(nil) + require.NoError(err) + require.Len(ixns, 1) + require.Equal("foo", ixns[0].SourceName) + require.Equal("bar", ixns[0].DestinationName) + require.Equal(api.IntentionActionDeny, ixns[0].Action) + } +} From 5ed57b393c1c8f10e11760a6037b34607a8092bb Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 15 May 2018 06:58:56 -0700 Subject: [PATCH 291/627] command/intentions/check --- command/commands_oss.go | 2 + command/intention/check/check.go | 94 ++++++++++++++++++++++++++++++++ 2 files changed, 96 insertions(+) create mode 100644 command/intention/check/check.go diff --git a/command/commands_oss.go b/command/commands_oss.go index 81d44fd1a..6452a4f6f 100644 --- a/command/commands_oss.go +++ b/command/commands_oss.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/consul/command/forceleave" "github.com/hashicorp/consul/command/info" "github.com/hashicorp/consul/command/intention" + ixncheck "github.com/hashicorp/consul/command/intention/check" ixncreate "github.com/hashicorp/consul/command/intention/create" ixndelete "github.com/hashicorp/consul/command/intention/delete" ixnget "github.com/hashicorp/consul/command/intention/get" @@ -71,6 +72,7 @@ func init() { Register("force-leave", func(ui cli.Ui) (cli.Command, error) { return forceleave.New(ui), nil }) Register("info", func(ui cli.Ui) (cli.Command, error) { return info.New(ui), nil }) Register("intention", func(ui cli.Ui) (cli.Command, error) { return intention.New(), nil }) + Register("intention check", func(ui cli.Ui) (cli.Command, error) { return ixncheck.New(ui), nil }) Register("intention create", func(ui cli.Ui) (cli.Command, error) { return ixncreate.New(ui), nil }) 
Register("intention delete", func(ui cli.Ui) (cli.Command, error) { return ixndelete.New(ui), nil }) Register("intention get", func(ui cli.Ui) (cli.Command, error) { return ixnget.New(ui), nil }) diff --git a/command/intention/check/check.go b/command/intention/check/check.go new file mode 100644 index 000000000..af56a8973 --- /dev/null +++ b/command/intention/check/check.go @@ -0,0 +1,94 @@ +package check + +import ( + "flag" + "fmt" + "io" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/command/flags" + "github.com/mitchellh/cli" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + // testStdin is the input for testing. + testStdin io.Reader +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.ServerFlags()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 2 + } + + args = c.flags.Args() + if len(args) != 2 { + c.UI.Error(fmt.Sprintf("Error: command requires exactly two arguments: src and dst")) + return 2 + } + + // Create and test the HTTP client + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 2 + } + + // Check the intention + allowed, _, err := client.Connect().IntentionCheck(&api.IntentionCheck{ + Source: args[0], + Destination: args[1], + SourceType: api.IntentionSourceConsul, + }, nil) + if err != nil { + c.UI.Error(fmt.Sprintf("Error checking the connection: %s", err)) + return 2 + } + + if allowed { + c.UI.Output("Allowed") + return 0 + } else { + c.UI.Output("Denied") + return 1 + } + + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return c.help +} + 
+const synopsis = "Check whether a connection between two services is allowed." +const help = ` +Usage: consul intention check [options] SRC DST + + Check whether a connection between SRC and DST would be allowed by + Connect given the current Consul configuration. + + $ consul intention check web db + +` From 50e179c3af98f0ecd8219406cefeb1643d6af963 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 15 May 2018 08:44:58 -0700 Subject: [PATCH 292/627] command/intention/match --- command/commands_oss.go | 2 + command/intention/match/match.go | 100 +++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 command/intention/match/match.go diff --git a/command/commands_oss.go b/command/commands_oss.go index 6452a4f6f..f166e82d7 100644 --- a/command/commands_oss.go +++ b/command/commands_oss.go @@ -17,6 +17,7 @@ import ( ixncreate "github.com/hashicorp/consul/command/intention/create" ixndelete "github.com/hashicorp/consul/command/intention/delete" ixnget "github.com/hashicorp/consul/command/intention/get" + ixnmatch "github.com/hashicorp/consul/command/intention/match" "github.com/hashicorp/consul/command/join" "github.com/hashicorp/consul/command/keygen" "github.com/hashicorp/consul/command/keyring" @@ -76,6 +77,7 @@ func init() { Register("intention create", func(ui cli.Ui) (cli.Command, error) { return ixncreate.New(ui), nil }) Register("intention delete", func(ui cli.Ui) (cli.Command, error) { return ixndelete.New(ui), nil }) Register("intention get", func(ui cli.Ui) (cli.Command, error) { return ixnget.New(ui), nil }) + Register("intention match", func(ui cli.Ui) (cli.Command, error) { return ixnmatch.New(ui), nil }) Register("join", func(ui cli.Ui) (cli.Command, error) { return join.New(ui), nil }) Register("keygen", func(ui cli.Ui) (cli.Command, error) { return keygen.New(ui), nil }) Register("keyring", func(ui cli.Ui) (cli.Command, error) { return keyring.New(ui), nil }) diff --git a/command/intention/match/match.go 
b/command/intention/match/match.go new file mode 100644 index 000000000..651ff8fef --- /dev/null +++ b/command/intention/match/match.go @@ -0,0 +1,100 @@ +package match + +import ( + "flag" + "fmt" + "io" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/command/flags" + "github.com/mitchellh/cli" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + // flags + flagSource bool + flagDestination bool + + // testStdin is the input for testing. + testStdin io.Reader +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.flags.BoolVar(&c.flagSource, "source", false, + "Match intentions with the given source.") + c.flags.BoolVar(&c.flagDestination, "destination", false, + "Match intentions with the given destination.") + + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.ServerFlags()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + return 2 + } + + args = c.flags.Args() + if len(args) != 1 { + c.UI.Error(fmt.Sprintf("Error: command requires exactly one argument: src or dst")) + return 2 + } + + // Create and test the HTTP client + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) + return 2 + } + + // Match the intention + matches, _, err := client.Connect().IntentionMatch(&api.IntentionMatch{ + By: api.IntentionMatchDestination, + Names: []string{args[0]}, + }, nil) + if err != nil { + c.UI.Error(fmt.Sprintf("Error matching the connection: %s", err)) + return 2 + } + + for _, ixn := range matches[args[0]] { + c.UI.Output(ixn.String()) + } + + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return c.help +} + +const synopsis = "Show 
intentions that match a source or destination." +const help = ` +Usage: consul intention match [options] SRC|DST + + Show the list of intentions that would be enforced for a given source + or destination. The intentions are listed in the order they would be + evaluated. + + $ consul intention match db + $ consul intention match -source web + +` From 8df851c1eac0baae067c53d2bda05464da73991d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 15 May 2018 13:51:49 -0700 Subject: [PATCH 293/627] command/intention/get: tests --- command/intention/get/get_test.go | 124 ++++++++++++++++++++++++++++++ 1 file changed, 124 insertions(+) create mode 100644 command/intention/get/get_test.go diff --git a/command/intention/get/get_test.go b/command/intention/get/get_test.go new file mode 100644 index 000000000..2da243b43 --- /dev/null +++ b/command/intention/get/get_test.go @@ -0,0 +1,124 @@ +package get + +import ( + "strings" + "testing" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +func TestCommand_noTabs(t *testing.T) { + t.Parallel() + if strings.ContainsRune(New(nil).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestCommand_Validation(t *testing.T) { + t.Parallel() + + ui := cli.NewMockUi() + c := New(ui) + + cases := map[string]struct { + args []string + output string + }{ + "0 args": { + []string{}, + "requires exactly 1 or 2", + }, + + "3 args": { + []string{"a", "b", "c"}, + "requires exactly 1 or 2", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + require := require.New(t) + + c.init() + + // Ensure our buffer is always clear + if ui.ErrorWriter != nil { + ui.ErrorWriter.Reset() + } + if ui.OutputWriter != nil { + ui.OutputWriter.Reset() + } + + require.Equal(1, c.Run(tc.args)) + output := ui.ErrorWriter.String() + require.Contains(output, tc.output) + }) + } +} + +func TestCommand_id(t *testing.T) { + 
t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + // Create the intention + var id string + { + var err error + id, _, err = client.Connect().IntentionCreate(&api.Intention{ + SourceName: "web", + DestinationName: "db", + Action: api.IntentionActionAllow, + }, nil) + require.NoError(err) + } + + // Get it + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + id, + } + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + require.Contains(ui.OutputWriter.String(), id) +} + +func TestCommand_srcDst(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + // Create the intention + var id string + { + var err error + id, _, err = client.Connect().IntentionCreate(&api.Intention{ + SourceName: "web", + DestinationName: "db", + Action: api.IntentionActionAllow, + }, nil) + require.NoError(err) + } + + // Get it + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "web", "db", + } + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + require.Contains(ui.OutputWriter.String(), id) +} From 15ce2643e596102532cf6755cc70d9aa113f1398 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 15 May 2018 22:10:43 -0700 Subject: [PATCH 294/627] command/intention/check: check tests --- command/intention/check/check_test.go | 109 ++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) create mode 100644 command/intention/check/check_test.go diff --git a/command/intention/check/check_test.go b/command/intention/check/check_test.go new file mode 100644 index 000000000..d7ac1840a --- /dev/null +++ b/command/intention/check/check_test.go @@ -0,0 +1,109 @@ +package check + +import ( + "strings" + "testing" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/mitchellh/cli" + 
"github.com/stretchr/testify/require" +) + +func TestCommand_noTabs(t *testing.T) { + t.Parallel() + if strings.ContainsRune(New(nil).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestCommand_Validation(t *testing.T) { + t.Parallel() + + ui := cli.NewMockUi() + c := New(ui) + + cases := map[string]struct { + args []string + output string + }{ + "0 args": { + []string{}, + "requires exactly two", + }, + + "1 args": { + []string{"a"}, + "requires exactly two", + }, + + "3 args": { + []string{"a", "b", "c"}, + "requires exactly two", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + require := require.New(t) + + c.init() + + // Ensure our buffer is always clear + if ui.ErrorWriter != nil { + ui.ErrorWriter.Reset() + } + if ui.OutputWriter != nil { + ui.OutputWriter.Reset() + } + + require.Equal(2, c.Run(tc.args)) + output := ui.ErrorWriter.String() + require.Contains(output, tc.output) + }) + } +} + +func TestCommand(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + // Create the intention + { + _, _, err := client.Connect().IntentionCreate(&api.Intention{ + SourceName: "web", + DestinationName: "db", + Action: api.IntentionActionDeny, + }, nil) + require.NoError(err) + } + + // Get it + { + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "foo", "db", + } + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + require.Contains(ui.OutputWriter.String(), "Allow") + } + + { + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "web", "db", + } + require.Equal(1, c.Run(args), ui.ErrorWriter.String()) + require.Contains(ui.OutputWriter.String(), "Denied") + } +} From afbe0c3e6c67e48d5cf9c03180b7efc9ad68df4a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 16 May 2018 08:41:12 -0700 Subject: [PATCH 295/627] command/intention/delete: 
tests --- command/intention/delete/delete_test.go | 99 +++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 command/intention/delete/delete_test.go diff --git a/command/intention/delete/delete_test.go b/command/intention/delete/delete_test.go new file mode 100644 index 000000000..c5674f771 --- /dev/null +++ b/command/intention/delete/delete_test.go @@ -0,0 +1,99 @@ +package delete + +import ( + "strings" + "testing" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +func TestCommand_noTabs(t *testing.T) { + t.Parallel() + if strings.ContainsRune(New(nil).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestCommand_Validation(t *testing.T) { + t.Parallel() + + ui := cli.NewMockUi() + c := New(ui) + + cases := map[string]struct { + args []string + output string + }{ + "0 args": { + []string{}, + "requires exactly 1 or 2", + }, + + "3 args": { + []string{"a", "b", "c"}, + "requires exactly 1 or 2", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + require := require.New(t) + + c.init() + + // Ensure our buffer is always clear + if ui.ErrorWriter != nil { + ui.ErrorWriter.Reset() + } + if ui.OutputWriter != nil { + ui.OutputWriter.Reset() + } + + require.Equal(1, c.Run(tc.args)) + output := ui.ErrorWriter.String() + require.Contains(output, tc.output) + }) + } +} + +func TestCommand(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + // Create the intention + { + _, _, err := client.Connect().IntentionCreate(&api.Intention{ + SourceName: "web", + DestinationName: "db", + Action: api.IntentionActionDeny, + }, nil) + require.NoError(err) + } + + // Delete it + { + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "web", "db", + } + require.Equal(0, c.Run(args), 
ui.ErrorWriter.String()) + require.Contains(ui.OutputWriter.String(), "deleted") + } + + // Find it (should be gone) + { + ixns, _, err := client.Connect().Intentions(nil) + require.NoError(err) + require.Len(ixns, 0) + } +} From f03fa81e6a3a878506e0db5201bdbf78282ec43a Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 16 May 2018 08:47:32 -0700 Subject: [PATCH 296/627] command/intention/match --- command/intention/match/match.go | 18 ++- command/intention/match/match_test.go | 151 ++++++++++++++++++++++++++ 2 files changed, 165 insertions(+), 4 deletions(-) create mode 100644 command/intention/match/match_test.go diff --git a/command/intention/match/match.go b/command/intention/match/match.go index 651ff8fef..5ee6fb463 100644 --- a/command/intention/match/match.go +++ b/command/intention/match/match.go @@ -51,24 +51,34 @@ func (c *cmd) Run(args []string) int { args = c.flags.Args() if len(args) != 1 { c.UI.Error(fmt.Sprintf("Error: command requires exactly one argument: src or dst")) - return 2 + return 1 + } + + if c.flagSource && c.flagDestination { + c.UI.Error(fmt.Sprintf("Error: only one of -source or -destination may be specified")) + return 1 + } + + by := api.IntentionMatchDestination + if c.flagSource { + by = api.IntentionMatchSource } // Create and test the HTTP client client, err := c.http.APIClient() if err != nil { c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err)) - return 2 + return 1 } // Match the intention matches, _, err := client.Connect().IntentionMatch(&api.IntentionMatch{ - By: api.IntentionMatchDestination, + By: by, Names: []string{args[0]}, }, nil) if err != nil { c.UI.Error(fmt.Sprintf("Error matching the connection: %s", err)) - return 2 + return 1 } for _, ixn := range matches[args[0]] { diff --git a/command/intention/match/match_test.go b/command/intention/match/match_test.go new file mode 100644 index 000000000..937fda3ec --- /dev/null +++ b/command/intention/match/match_test.go @@ -0,0 +1,151 @@ 
+package match + +import ( + "strings" + "testing" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +func TestCommand_noTabs(t *testing.T) { + t.Parallel() + if strings.ContainsRune(New(nil).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestCommand_Validation(t *testing.T) { + t.Parallel() + + ui := cli.NewMockUi() + c := New(ui) + + cases := map[string]struct { + args []string + output string + }{ + "0 args": { + []string{}, + "requires exactly one", + }, + + "3 args": { + []string{"a", "b", "c"}, + "requires exactly one", + }, + + "both source and dest": { + []string{"-source", "-destination", "foo"}, + "only one of -source", + }, + } + + for name, tc := range cases { + t.Run(name, func(t *testing.T) { + require := require.New(t) + + c.init() + + // Ensure our buffer is always clear + if ui.ErrorWriter != nil { + ui.ErrorWriter.Reset() + } + if ui.OutputWriter != nil { + ui.OutputWriter.Reset() + } + + require.Equal(1, c.Run(tc.args)) + output := ui.ErrorWriter.String() + require.Contains(output, tc.output) + }) + } +} + +func TestCommand_matchDst(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + // Create some intentions + { + insert := [][]string{ + {"foo", "db"}, + {"web", "db"}, + {"*", "db"}, + } + + for _, v := range insert { + id, _, err := client.Connect().IntentionCreate(&api.Intention{ + SourceName: v[0], + DestinationName: v[1], + Action: api.IntentionActionDeny, + }, nil) + require.NoError(err) + require.NotEmpty(id) + } + } + + // Match it + { + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "db", + } + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + require.Contains(ui.OutputWriter.String(), "web") + require.Contains(ui.OutputWriter.String(), "db") + 
require.Contains(ui.OutputWriter.String(), "*") + } +} + +func TestCommand_matchSource(t *testing.T) { + t.Parallel() + + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + // Create some intentions + { + insert := [][]string{ + {"foo", "db"}, + {"web", "db"}, + {"*", "db"}, + } + + for _, v := range insert { + id, _, err := client.Connect().IntentionCreate(&api.Intention{ + SourceName: v[0], + DestinationName: v[1], + Action: api.IntentionActionDeny, + }, nil) + require.NoError(err) + require.NotEmpty(id) + } + } + + // Match it + { + ui := cli.NewMockUi() + c := New(ui) + + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-source", + "foo", + } + require.Equal(0, c.Run(args), ui.ErrorWriter.String()) + require.Contains(ui.OutputWriter.String(), "db") + require.NotContains(ui.OutputWriter.String(), "web") + } +} From a316ba7f39fcf72f2f1ef59f82c1879f0168ec53 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 16 May 2018 08:53:33 -0700 Subject: [PATCH 297/627] api: IntentionUpdate API --- api/connect_intention.go | 17 +++++++++++++++++ api/connect_intention_test.go | 14 +++++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/api/connect_intention.go b/api/connect_intention.go index 3d10b219c..eb7973100 100644 --- a/api/connect_intention.go +++ b/api/connect_intention.go @@ -273,3 +273,20 @@ func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *Wri } return out.ID, wm, nil } + +// IntentionUpdate will update an existing intention. The ID in the given +// structure must be non-empty. 
+func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID) + r.setWriteOptions(q) + r.obj = ixn + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} diff --git a/api/connect_intention_test.go b/api/connect_intention_test.go index 436d0de0c..83f86d3ab 100644 --- a/api/connect_intention_test.go +++ b/api/connect_intention_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestAPI_ConnectIntentionCreateListGetDelete(t *testing.T) { +func TestAPI_ConnectIntentionCreateListGetUpdateDelete(t *testing.T) { t.Parallel() require := require.New(t) @@ -39,6 +39,18 @@ func TestAPI_ConnectIntentionCreateListGetDelete(t *testing.T) { require.Nil(err) require.Equal(ixn, actual) + // Update it + ixn.SourceNS = ixn.SourceNS + "-different" + _, err = connect.IntentionUpdate(ixn, nil) + require.NoError(err) + + // Get it + actual, _, err = connect.IntentionGet(id, nil) + require.NoError(err) + ixn.UpdatedAt = actual.UpdatedAt + ixn.ModifyIndex = actual.ModifyIndex + require.Equal(ixn, actual) + // Delete it _, err = connect.IntentionDelete(id, nil) require.Nil(err) From 0fe99f4f1477e8599b4d4ae1ab6e2832b6345884 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Wed, 16 May 2018 08:55:33 -0700 Subject: [PATCH 298/627] command/intention/create: -replace does an atomic change --- command/intention/create/create.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/command/intention/create/create.go b/command/intention/create/create.go index dd5f61565..de53c743d 100644 --- a/command/intention/create/create.go +++ b/command/intention/create/create.go @@ -45,8 +45,7 @@ func (c *cmd) init() { c.flags.BoolVar(&c.flagFile, "file", false, "Read intention data from one or more files.") 
c.flags.BoolVar(&c.flagReplace, "replace", false, - "Replace matching intentions. This is not an atomic operation. "+ - "If the insert fails, then the previous intention will still be deleted.") + "Replace matching intentions.") c.flags.Var((*flags.FlagMapValue)(&c.flagMeta), "meta", "Metadata to set on the intention, formatted as key=value. This flag "+ "may be specified multiple times to set multiple meta fields.") @@ -95,7 +94,7 @@ func (c *cmd) Run(args []string) int { for _, ixn := range ixns { // If replace is set to true, then find this intention and delete it. if c.flagReplace { - ixn, err := find.Find(ixn.SourceString(), ixn.DestinationString()) + oldIxn, err := find.Find(ixn.SourceString(), ixn.DestinationString()) if err != nil { c.UI.Error(fmt.Sprintf( "Error looking up intention for replacement with source %q "+ @@ -105,16 +104,22 @@ func (c *cmd) Run(args []string) int { err)) return 1 } - if ixn != nil { - if _, err := client.Connect().IntentionDelete(ixn.ID, nil); err != nil { + if oldIxn != nil { + // We set the ID of our intention so we overwrite it + ixn.ID = oldIxn.ID + + if _, err := client.Connect().IntentionUpdate(ixn, nil); err != nil { c.UI.Error(fmt.Sprintf( - "Error deleting intention for replacement with source %q "+ + "Error replacing intention with source %q "+ "and destination %q: %s", ixn.SourceString(), ixn.DestinationString(), err)) return 1 } + + // Continue since we don't want to try to insert a new intention + continue } } From 787ce3b26996d44c469ee065ac92f2b550a8661c Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 18 May 2018 21:03:10 -0700 Subject: [PATCH 299/627] agent: address feedback --- agent/agent_endpoint.go | 2 +- agent/consul/intention_endpoint.go | 4 +++- agent/intentions_endpoint.go | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index b52abc732..5b9a5f8ec 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -1107,7 
+1107,7 @@ func (s *HTTPServer) agentLocalBlockingQuery(resp http.ResponseWriter, hash stri // // POST /v1/agent/connect/authorize // -// Note: when this logic changes, consider if the Intention.Test RPC method +// Note: when this logic changes, consider if the Intention.Check RPC method // also needs to be updated. func (s *HTTPServer) AgentConnectAuthorize(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Fetch the token diff --git a/agent/consul/intention_endpoint.go b/agent/consul/intention_endpoint.go index a0d88352f..f363cafd3 100644 --- a/agent/consul/intention_endpoint.go +++ b/agent/consul/intention_endpoint.go @@ -293,7 +293,9 @@ func (s *Intention) Check( return err } - // Perform the ACL check + // Perform the ACL check. For Check we only require ServiceRead and + // NOT IntentionRead because the Check API only returns pass/fail and + // returns no other information about the intentions used. if prefix, ok := query.GetACLPrefix(); ok { if rule != nil && !rule.ServiceRead(prefix) { s.srv.logger.Printf("[WARN] consul.intention: test on intention '%s' denied due to ACLs", prefix) diff --git a/agent/intentions_endpoint.go b/agent/intentions_endpoint.go index 80ddedf24..4720e6f31 100644 --- a/agent/intentions_endpoint.go +++ b/agent/intentions_endpoint.go @@ -122,7 +122,7 @@ func (s *HTTPServer) IntentionMatch(resp http.ResponseWriter, req *http.Request) return response, nil } -// GET /v1/connect/intentions/test +// GET /v1/connect/intentions/check func (s *HTTPServer) IntentionCheck(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Prepare args args := &structs.IntentionQueryRequest{Check: &structs.IntentionQueryCheck{}} From 1476745bdc912fefb3cc51710176e9b4dc8c642d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 22 May 2018 10:22:41 -0700 Subject: [PATCH 300/627] command/intention: address comment feedback --- command/intention/create/create.go | 2 +- command/intention/finder/finder.go | 4 ++++ 
command/intention/intention.go | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/command/intention/create/create.go b/command/intention/create/create.go index de53c743d..834e2125e 100644 --- a/command/intention/create/create.go +++ b/command/intention/create/create.go @@ -92,7 +92,7 @@ func (c *cmd) Run(args []string) int { // Go through and create each intention for _, ixn := range ixns { - // If replace is set to true, then find this intention and delete it. + // If replace is set to true, then perform an update operation. if c.flagReplace { oldIxn, err := find.Find(ixn.SourceString(), ixn.DestinationString()) if err != nil { diff --git a/command/intention/finder/finder.go b/command/intention/finder/finder.go index e16bc02dc..1f7810cd8 100644 --- a/command/intention/finder/finder.go +++ b/command/intention/finder/finder.go @@ -13,6 +13,10 @@ import ( // caches them once, and searches in-memory for this. For now this works since // even with a very large number of intentions, the size of the data gzipped // over HTTP will be relatively small. +// +// The Finder will only downlaod the intentions one time. This struct is +// not expected to be used over a long period of time. Though it may be +// reused multile times, the intentions list is only downloaded once. type Finder struct { // Client is the API client to use for any requests. Client *api.Client diff --git a/command/intention/intention.go b/command/intention/intention.go index 767e3ff1b..94de26472 100644 --- a/command/intention/intention.go +++ b/command/intention/intention.go @@ -28,7 +28,7 @@ const help = ` Usage: consul intention [options] [args] This command has subcommands for interacting with intentions. Intentions - are the permissions for what services are allowed to communicate via + are permissions describing which services are allowed to communicate via Connect. Here are some simple examples, and more detailed examples are available in the subcommands or the documentation. 
From 9249662c6c1703429d5d4e59ef6fcedbe937aa68 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Fri, 18 May 2018 23:27:02 -0700 Subject: [PATCH 301/627] agent: leaf endpoint accepts name, not service ID This change is important so that requests can made representing a service that may not be registered with the same local agent. --- agent/agent.go | 23 +++---- agent/agent_endpoint.go | 24 +++---- agent/agent_endpoint_test.go | 118 +++++++++++++++++++++++++++++++++-- 3 files changed, 133 insertions(+), 32 deletions(-) diff --git a/agent/agent.go b/agent/agent.go index 622a105e8..6f909e5c7 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -2177,21 +2177,22 @@ func (a *Agent) verifyProxyToken(token, targetService, targetProxy string) (stri // that the service name of the matching proxy matches our target // service. if proxy != nil { - if proxy.Proxy.TargetServiceID != targetService { + // Get the target service since we only have the name. The nil + // check below should never be true since a proxy token always + // represents the existence of a local service. + target := a.State.Service(proxy.Proxy.TargetServiceID) + if target == nil { + return "", fmt.Errorf("proxy target service not found: %q", + proxy.Proxy.TargetServiceID) + } + + if target.Service != targetService { return "", acl.ErrPermissionDenied } // Resolve the actual ACL token used to register the proxy/service and // return that for use in RPC calls. - return a.State.ServiceToken(targetService), nil - } - - // Retrieve the service specified. This should always exist because - // we only call this function for proxies and leaf certs and both can - // only be called for local services. - service := a.State.Service(targetService) - if service == nil { - return "", fmt.Errorf("unknown service ID: %s", targetService) + return a.State.ServiceToken(proxy.Proxy.TargetServiceID), nil } // Doesn't match, we have to do a full token resolution. 
The required @@ -2202,7 +2203,7 @@ func (a *Agent) verifyProxyToken(token, targetService, targetProxy string) (stri if err != nil { return "", err } - if rule != nil && !rule.ServiceWrite(service.Service, nil) { + if rule != nil && !rule.ServiceWrite(targetService, nil) { return "", acl.ErrPermissionDenied } diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 5b9a5f8ec..239f962a1 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -908,16 +908,10 @@ func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Req // instance. This supports blocking queries to update the returned bundle. func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Get the service ID. Note that this is the ID of a service instance. - id := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/") - - // Retrieve the service specified - service := s.agent.State.Service(id) - if service == nil { - return nil, fmt.Errorf("unknown service ID: %s", id) - } + serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/") args := cachetype.ConnectCALeafRequest{ - Service: service.Service, // Need name not ID + Service: serviceName, // Need name not ID } var qOpts structs.QueryOptions // Store DC in the ConnectCALeafRequest but query opts separately @@ -928,7 +922,7 @@ func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http. // Verify the proxy token. This will check both the local proxy token // as well as the ACL if the token isn't local. 
- effectiveToken, err := s.agent.verifyProxyToken(qOpts.Token, id, "") + effectiveToken, err := s.agent.verifyProxyToken(qOpts.Token, serviceName, "") if err != nil { return nil, err } @@ -983,12 +977,6 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http return "", nil, nil } - // Validate the ACL token - _, err := s.agent.verifyProxyToken(token, proxy.Proxy.TargetServiceID, id) - if err != nil { - return "", nil, err - } - // Lookup the target service as a convenience target := s.agent.State.Service(proxy.Proxy.TargetServiceID) if target == nil { @@ -999,6 +987,12 @@ func (s *HTTPServer) AgentConnectProxyConfig(resp http.ResponseWriter, req *http return "", nil, nil } + // Validate the ACL token + _, err := s.agent.verifyProxyToken(token, target.Service, id) + if err != nil { + return "", nil, err + } + // Watch the proxy for changes ws.Add(proxy.WatchCh) diff --git a/agent/agent_endpoint_test.go b/agent/agent_endpoint_test.go index 9c10a61ff..62d095fba 100644 --- a/agent/agent_endpoint_test.go +++ b/agent/agent_endpoint_test.go @@ -2238,7 +2238,7 @@ func TestAgentConnectCALeafCert_aclDefaultDeny(t *testing.T) { require.Equal(200, resp.Code, "body: %s", resp.Body.String()) } - req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test-id", nil) + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) resp := httptest.NewRecorder() _, err := a.srv.AgentConnectCALeafCert(resp, req) require.Error(err) @@ -2284,7 +2284,7 @@ func TestAgentConnectCALeafCert_aclProxyToken(t *testing.T) { token := proxy.ProxyToken require.NotEmpty(token) - req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test-id?token="+token, nil) + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) resp := httptest.NewRecorder() obj, err := a.srv.AgentConnectCALeafCert(resp, req) require.NoError(err) @@ -2355,7 +2355,7 @@ func TestAgentConnectCALeafCert_aclProxyTokenOther(t *testing.T) { token := 
proxy.ProxyToken require.NotEmpty(token) - req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test-id?token="+token, nil) + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) resp := httptest.NewRecorder() _, err := a.srv.AgentConnectCALeafCert(resp, req) require.Error(err) @@ -2413,7 +2413,7 @@ func TestAgentConnectCALeafCert_aclServiceWrite(t *testing.T) { token = aclResp.ID } - req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test-id?token="+token, nil) + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) resp := httptest.NewRecorder() obj, err := a.srv.AgentConnectCALeafCert(resp, req) require.NoError(err) @@ -2474,7 +2474,7 @@ func TestAgentConnectCALeafCert_aclServiceReadDeny(t *testing.T) { token = aclResp.ID } - req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test-id?token="+token, nil) + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test?token="+token, nil) resp := httptest.NewRecorder() _, err := a.srv.AgentConnectCALeafCert(resp, req) require.Error(err) @@ -2517,7 +2517,113 @@ func TestAgentConnectCALeafCert_good(t *testing.T) { } // List - req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/foo", nil) + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) + resp := httptest.NewRecorder() + obj, err := a.srv.AgentConnectCALeafCert(resp, req) + require.NoError(err) + + // Get the issued cert + issued, ok := obj.(*structs.IssuedCert) + assert.True(ok) + + // Verify that the cert is signed by the CA + requireLeafValidUnderCA(t, issued, ca1) + + // Verify blocking index + assert.True(issued.ModifyIndex > 0) + assert.Equal(fmt.Sprintf("%d", issued.ModifyIndex), + resp.Header().Get("X-Consul-Index")) + + // That should've been a cache miss, so no hit change + require.Equal(cacheHits, a.cache.Hits()) + + // Test caching + { + // Fetch it again + obj2, err := a.srv.AgentConnectCALeafCert(httptest.NewRecorder(), req) + 
require.NoError(err) + require.Equal(obj, obj2) + + // Should cache hit this time and not make request + require.Equal(cacheHits+1, a.cache.Hits()) + cacheHits++ + } + + // Test that caching is updated in the background + { + // Set a new CA + ca := connect.TestCAConfigSet(t, a, nil) + + retry.Run(t, func(r *retry.R) { + // Try and sign again (note no index/wait arg since cache should update in + // background even if we aren't actively blocking) + obj, err := a.srv.AgentConnectCALeafCert(httptest.NewRecorder(), req) + r.Check(err) + + issued2 := obj.(*structs.IssuedCert) + if issued.CertPEM == issued2.CertPEM { + r.Fatalf("leaf has not updated") + } + + // Got a new leaf. Sanity check it's a whole new key as well as differnt + // cert. + if issued.PrivateKeyPEM == issued2.PrivateKeyPEM { + r.Fatalf("new leaf has same private key as before") + } + + // Verify that the cert is signed by the new CA + requireLeafValidUnderCA(t, issued2, ca) + }) + + // Should be a cache hit! The data should've updated in the cache + // in the background so this should've been fetched directly from + // the cache. + if v := a.cache.Hits(); v < cacheHits+1 { + t.Fatalf("expected at least one more cache hit, still at %d", v) + } + cacheHits = a.cache.Hits() + } +} + +// Test we can request a leaf cert for a service we have permission for +// but is not local to this agent. +func TestAgentConnectCALeafCert_goodNotLocal(t *testing.T) { + t.Parallel() + + assert := assert.New(t) + require := require.New(t) + a := NewTestAgent(t.Name(), "") + defer a.Shutdown() + + // CA already setup by default by NewTestAgent but force a new one so we can + // verify it was signed easily. 
+ ca1 := connect.TestCAConfigSet(t, a, nil) + + // Grab the initial cache hit count + cacheHits := a.cache.Hits() + + { + // Register a non-local service (central catalog) + args := &structs.RegisterRequest{ + Node: "foo", + Address: "127.0.0.1", + Service: &structs.NodeService{ + Service: "test", + Address: "127.0.0.1", + Port: 8080, + }, + } + req, _ := http.NewRequest("PUT", "/v1/catalog/register", jsonReader(args)) + resp := httptest.NewRecorder() + _, err := a.srv.CatalogRegister(resp, req) + require.NoError(err) + if !assert.Equal(200, resp.Code) { + t.Log("Body: ", resp.Body.String()) + } + } + + // List + req, _ := http.NewRequest("GET", "/v1/agent/connect/ca/leaf/test", nil) resp := httptest.NewRecorder() obj, err := a.srv.AgentConnectCALeafCert(resp, req) require.NoError(err) From b28e2b862237773d00025078d9e8c121629673b6 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 19 May 2018 00:11:51 -0700 Subject: [PATCH 302/627] connect/proxy: don't require proxy ID --- command/connect/proxy/proxy.go | 46 ++++++++++------ connect/proxy/config.go | 47 +++++++++-------- connect/proxy/config_test.go | 7 ++- connect/proxy/proxy.go | 52 +++---------------- connect/proxy/testdata/config-kitchensink.hcl | 3 +- connect/service.go | 36 ++++++------- connect/tls.go | 13 ++--- watch/funcs.go | 6 +-- watch/funcs_test.go | 2 +- 9 files changed, 91 insertions(+), 121 deletions(-) diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index b797177c5..83406e0fb 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -10,6 +10,7 @@ import ( "os" proxyAgent "github.com/hashicorp/consul/agent/proxy" + "github.com/hashicorp/consul/api" "github.com/hashicorp/consul/command/flags" proxyImpl "github.com/hashicorp/consul/connect/proxy" @@ -112,22 +113,17 @@ func (c *cmd) Run(args []string) int { return 1 } - var p *proxyImpl.Proxy - if c.cfgFile != "" { - c.UI.Info("Configuring proxy locally from " + c.cfgFile) + // Get the 
proper configuration watcher + cfgWatcher, err := c.configWatcher(client) + if err != nil { + c.UI.Error(fmt.Sprintf("Error preparing configuration: %s", err)) + return 1 + } - p, err = proxyImpl.NewFromConfigFile(client, c.cfgFile, c.logger) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed configuring from file: %s", err)) - return 1 - } - - } else { - p, err = proxyImpl.New(client, c.proxyID, c.logger) - if err != nil { - c.UI.Error(fmt.Sprintf("Failed configuring from agent: %s", err)) - return 1 - } + p, err := proxyImpl.New(client, cfgWatcher, c.logger) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed initializing proxy: %s", err)) + return 1 } // Hook the shutdownCh up to close the proxy @@ -151,6 +147,26 @@ func (c *cmd) Run(args []string) int { return 0 } +func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) { + // Manual configuration file is specified. + if c.cfgFile != "" { + cfg, err := proxyImpl.ParseConfigFile(c.cfgFile) + if err != nil { + return nil, err + } + return proxyImpl.NewStaticConfigWatcher(cfg), nil + } + + // Use the configured proxy ID + if c.proxyID == "" { + return nil, fmt.Errorf( + "-service or -proxy-id must be specified so that proxy can " + + "configure itself.") + } + + return proxyImpl.NewAgentConfigWatcher(client, c.proxyID, c.logger) +} + func (c *cmd) Synopsis() string { return synopsis } diff --git a/connect/proxy/config.go b/connect/proxy/config.go index 840afa896..025809a7b 100644 --- a/connect/proxy/config.go +++ b/connect/proxy/config.go @@ -19,18 +19,16 @@ import ( // different locations (e.g. command line, agent config endpoint, agent // certificate endpoints). type Config struct { - // ProxyID is the identifier for this proxy as registered in Consul. It's only - // guaranteed to be unique per agent. - ProxyID string `json:"proxy_id" hcl:"proxy_id"` - // Token is the authentication token provided for queries to the local agent. 
Token string `json:"token" hcl:"token"` - // ProxiedServiceID is the identifier of the service this proxy is representing. - ProxiedServiceID string `json:"proxied_service_id" hcl:"proxied_service_id"` - + // ProxiedServiceName is the name of the service this proxy is representing. + // This is the service _name_ and not the service _id_. This allows the + // proxy to represent services not present in the local catalog. + // // ProxiedServiceNamespace is the namespace of the service this proxy is // representing. + ProxiedServiceName string `json:"proxied_service_name" hcl:"proxied_service_name"` ProxiedServiceNamespace string `json:"proxied_service_namespace" hcl:"proxied_service_namespace"` // PublicListener configures the mTLS listener. @@ -39,28 +37,34 @@ type Config struct { // Upstreams configures outgoing proxies for remote connect services. Upstreams []UpstreamConfig `json:"upstreams" hcl:"upstreams"` - // DevCAFile allows passing the file path to PEM encoded root certificate - // bundle to be used in development instead of the ones supplied by Connect. - DevCAFile string `json:"dev_ca_file" hcl:"dev_ca_file"` - - // DevServiceCertFile allows passing the file path to PEM encoded service - // certificate (client and server) to be used in development instead of the - // ones supplied by Connect. + // DevCAFile, DevServiceCertFile, and DevServiceKeyFile allow configuring + // the certificate information from a static file. This is only for testing + // purposes. All or none must be specified. + DevCAFile string `json:"dev_ca_file" hcl:"dev_ca_file"` DevServiceCertFile string `json:"dev_service_cert_file" hcl:"dev_service_cert_file"` + DevServiceKeyFile string `json:"dev_service_key_file" hcl:"dev_service_key_file"` +} - // DevServiceKeyFile allows passing the file path to PEM encoded service - // private key to be used in development instead of the ones supplied by - // Connect. 
- DevServiceKeyFile string `json:"dev_service_key_file" hcl:"dev_service_key_file"` +// Service returns the *connect.Service structure represented by this config. +func (c *Config) Service(client *api.Client, logger *log.Logger) (*connect.Service, error) { + // If we aren't in dev mode, then we return the configured service. + if c.DevCAFile == "" { + return connect.NewServiceWithLogger(c.ProxiedServiceName, client, logger) + } + + // Dev mode + return connect.NewDevServiceFromCertFiles(c.ProxiedServiceName, + logger, c.DevCAFile, c.DevServiceCertFile, c.DevServiceKeyFile) } // PublicListenerConfig contains the parameters needed for the incoming mTLS // listener. type PublicListenerConfig struct { // BindAddress is the host/IP the public mTLS listener will bind to. + // + // BindPort is the port the public listener will bind to. BindAddress string `json:"bind_address" hcl:"bind_address" mapstructure:"bind_address"` - - BindPort int `json:"bind_port" hcl:"bind_port" mapstructure:"bind_port"` + BindPort int `json:"bind_port" hcl:"bind_port" mapstructure:"bind_port"` // LocalServiceAddress is the host:port for the proxied application. This // should be on loopback or otherwise protected as it's plain TCP. 
@@ -265,9 +269,8 @@ func (w *AgentConfigWatcher) handler(blockVal watch.BlockingParamVal, // Create proxy config from the response cfg := &Config{ - ProxyID: w.proxyID, // Token should be already setup in the client - ProxiedServiceID: resp.TargetServiceID, + ProxiedServiceName: resp.TargetServiceName, ProxiedServiceNamespace: "default", } diff --git a/connect/proxy/config_test.go b/connect/proxy/config_test.go index 1473e8fea..87bb43c81 100644 --- a/connect/proxy/config_test.go +++ b/connect/proxy/config_test.go @@ -19,9 +19,8 @@ func TestParseConfigFile(t *testing.T) { require.Nil(t, err) expect := &Config{ - ProxyID: "foo", Token: "11111111-2222-3333-4444-555555555555", - ProxiedServiceID: "web", + ProxiedServiceName: "web", ProxiedServiceNamespace: "default", PublicListener: PublicListenerConfig{ BindAddress: "127.0.0.1", @@ -117,6 +116,7 @@ func TestUpstreamResolverFromClient(t *testing.T) { func TestAgentConfigWatcher(t *testing.T) { a := agent.NewTestAgent("agent_smith", "") + defer a.Shutdown() client := a.Client() agent := client.Agent() @@ -153,8 +153,7 @@ func TestAgentConfigWatcher(t *testing.T) { cfg := testGetConfigValTimeout(t, w, 500*time.Millisecond) expectCfg := &Config{ - ProxyID: w.proxyID, - ProxiedServiceID: "web", + ProxiedServiceName: "web", ProxiedServiceNamespace: "default", PublicListener: PublicListenerConfig{ BindAddress: "10.10.10.10", diff --git a/connect/proxy/proxy.go b/connect/proxy/proxy.go index 64e098825..fb382288f 100644 --- a/connect/proxy/proxy.go +++ b/connect/proxy/proxy.go @@ -11,7 +11,6 @@ import ( // Proxy implements the built-in connect proxy. type Proxy struct { - proxyID string client *api.Client cfgWatcher ConfigWatcher stopChan chan struct{} @@ -19,51 +18,17 @@ type Proxy struct { service *connect.Service } -// NewFromConfigFile returns a Proxy instance configured just from a local file. 
-// This is intended mostly for development and bypasses the normal mechanisms -// for fetching config and certificates from the local agent. -func NewFromConfigFile(client *api.Client, filename string, - logger *log.Logger) (*Proxy, error) { - cfg, err := ParseConfigFile(filename) - if err != nil { - return nil, err - } - - service, err := connect.NewDevServiceFromCertFiles(cfg.ProxiedServiceID, - logger, cfg.DevCAFile, cfg.DevServiceCertFile, - cfg.DevServiceKeyFile) - if err != nil { - return nil, err - } - - p := &Proxy{ - proxyID: cfg.ProxyID, - client: client, - cfgWatcher: NewStaticConfigWatcher(cfg), - stopChan: make(chan struct{}), - logger: logger, - service: service, - } - return p, nil -} - -// New returns a Proxy with the given id, consuming the provided (configured) -// agent. It is ready to Run(). -func New(client *api.Client, proxyID string, logger *log.Logger) (*Proxy, error) { - cw, err := NewAgentConfigWatcher(client, proxyID, logger) - if err != nil { - return nil, err - } - p := &Proxy{ - proxyID: proxyID, +// New returns a proxy with the given configuration source. +// +// The ConfigWatcher can be used to update the configuration of the proxy. +// Whenever a new configuration is detected, the proxy will reconfigure itself. +func New(client *api.Client, cw ConfigWatcher, logger *log.Logger) (*Proxy, error) { + return &Proxy{ client: client, cfgWatcher: cw, stopChan: make(chan struct{}), logger: logger, - // Can't load service yet as we only have the proxy's ID not the service's - // until initial config fetch happens. - } - return p, nil + }, nil } // Serve the proxy instance until a fatal error occurs or proxy is closed. 
@@ -80,8 +45,7 @@ func (p *Proxy) Serve() error { // Initial setup // Setup Service instance now we know target ID etc - service, err := connect.NewServiceWithLogger(newCfg.ProxiedServiceID, - p.client, p.logger) + service, err := cfg.Service(p.client, p.logger) if err != nil { return err } diff --git a/connect/proxy/testdata/config-kitchensink.hcl b/connect/proxy/testdata/config-kitchensink.hcl index fccfdffd0..de3472d0f 100644 --- a/connect/proxy/testdata/config-kitchensink.hcl +++ b/connect/proxy/testdata/config-kitchensink.hcl @@ -1,9 +1,8 @@ # Example proxy config with everything specified -proxy_id = "foo" token = "11111111-2222-3333-4444-555555555555" -proxied_service_id = "web" +proxied_service_name = "web" proxied_service_namespace = "default" # Assumes running consul in dev mode from the repo root... diff --git a/connect/service.go b/connect/service.go index af9fbfcb7..b00775c1c 100644 --- a/connect/service.go +++ b/connect/service.go @@ -27,16 +27,12 @@ import ( // service has been delivered valid certificates. Once built, document that here // too. type Service struct { - // serviceID is the unique ID for this service in the agent-local catalog. - // This is often but not always the service name. This is used to request - // Connect metadata. If the service with this ID doesn't exist on the local - // agent no error will be returned and the Service will retry periodically. - // This allows service startup and registration to happen in either order - // without coordination since they might be performed by separate processes. - serviceID string + // service is the name (not ID) for the Consul service. This is used to request + // Connect metadata. + service string // client is the Consul API client. It must be configured with an appropriate - // Token that has `service:write` policy on the provided ServiceID. If an + // Token that has `service:write` policy on the provided service. 
If an // insufficient token is provided, the Service will abort further attempts to // fetch certificates and print a loud error message. It will not Close() or // kill the process since that could lead to a crash loop in every service if @@ -74,13 +70,13 @@ func NewService(serviceID string, client *api.Client) (*Service, error) { } // NewServiceWithLogger starts the service with a specified log.Logger. -func NewServiceWithLogger(serviceID string, client *api.Client, +func NewServiceWithLogger(serviceName string, client *api.Client, logger *log.Logger) (*Service, error) { s := &Service{ - serviceID: serviceID, - client: client, - logger: logger, - tlsCfg: newDynamicTLSConfig(defaultTLSConfig()), + service: serviceName, + client: client, + logger: logger, + tlsCfg: newDynamicTLSConfig(defaultTLSConfig()), } // Set up root and leaf watches @@ -94,8 +90,8 @@ func NewServiceWithLogger(serviceID string, client *api.Client, s.rootsWatch.HybridHandler = s.rootsWatchHandler p, err = watch.Parse(map[string]interface{}{ - "type": "connect_leaf", - "service_id": s.serviceID, + "type": "connect_leaf", + "service": s.service, }) if err != nil { return nil, err @@ -123,12 +119,12 @@ func NewDevServiceFromCertFiles(serviceID string, logger *log.Logger, // NewDevServiceWithTLSConfig creates a Service using static TLS config passed. // It's mostly useful for testing. -func NewDevServiceWithTLSConfig(serviceID string, logger *log.Logger, +func NewDevServiceWithTLSConfig(serviceName string, logger *log.Logger, tlsCfg *tls.Config) (*Service, error) { s := &Service{ - serviceID: serviceID, - logger: logger, - tlsCfg: newDynamicTLSConfig(tlsCfg), + service: serviceName, + logger: logger, + tlsCfg: newDynamicTLSConfig(tlsCfg), } return s, nil } @@ -144,7 +140,7 @@ func NewDevServiceWithTLSConfig(serviceID string, logger *log.Logger, // error during renewal. The listener will be able to accept connections again // once connectivity is restored provided the client's Token is valid. 
func (s *Service) ServerTLSConfig() *tls.Config { - return s.tlsCfg.Get(newServerSideVerifier(s.client, s.serviceID)) + return s.tlsCfg.Get(newServerSideVerifier(s.client, s.service)) } // Dial connects to a remote Connect-enabled server. The passed Resolver is used diff --git a/connect/tls.go b/connect/tls.go index 6f14cd787..db96eb3de 100644 --- a/connect/tls.go +++ b/connect/tls.go @@ -112,9 +112,9 @@ func verifyServerCertMatchesURI(certs []*x509.Certificate, // newServerSideVerifier returns a verifierFunc that wraps the provided // api.Client to verify the TLS chain and perform AuthZ for the server end of -// the connection. The service name provided is used as the target serviceID +// the connection. The service name provided is used as the target service name // for the Authorization. -func newServerSideVerifier(client *api.Client, serviceID string) verifierFunc { +func newServerSideVerifier(client *api.Client, serviceName string) verifierFunc { return func(tlsCfg *tls.Config, rawCerts [][]byte) error { leaf, err := verifyChain(tlsCfg, rawCerts, false) if err != nil { @@ -142,14 +142,7 @@ func newServerSideVerifier(client *api.Client, serviceID string) verifierFunc { // Perform AuthZ req := &api.AgentAuthorizeParams{ - // TODO(banks): this is jank, we have a serviceID from the Service setup - // but this needs to be a service name as the target. For now we are - // relying on them usually being the same but this will break when they - // are not. We either need to make Authorize endpoint optionally accept - // IDs somehow or rethink this as it will require fetching the service - // name sometime ahead of accepting requests (maybe along with TLS certs?) - // which feels gross and will take extra plumbing to expose it to here. 
- Target: serviceID, + Target: serviceName, ClientCertURI: certURI.URI().String(), ClientCertSerial: connect.HexString(leaf.SerialNumber.Bytes()), } diff --git a/watch/funcs.go b/watch/funcs.go index 3b1b854ed..a87fd63f4 100644 --- a/watch/funcs.go +++ b/watch/funcs.go @@ -258,8 +258,8 @@ func connectRootsWatch(params map[string]interface{}) (WatcherFunc, error) { func connectLeafWatch(params map[string]interface{}) (WatcherFunc, error) { // We don't support stale since certs are cached locally in the agent. - var serviceID string - if err := assignValue(params, "service_id", &serviceID); err != nil { + var serviceName string + if err := assignValue(params, "service", &serviceName); err != nil { return nil, err } @@ -268,7 +268,7 @@ func connectLeafWatch(params map[string]interface{}) (WatcherFunc, error) { opts := makeQueryOptionsWithContext(p, false) defer p.cancelFunc() - leaf, meta, err := agent.ConnectCALeaf(serviceID, &opts) + leaf, meta, err := agent.ConnectCALeaf(serviceName, &opts) if err != nil { return nil, nil, err } diff --git a/watch/funcs_test.go b/watch/funcs_test.go index b304a803f..9632ef206 100644 --- a/watch/funcs_test.go +++ b/watch/funcs_test.go @@ -602,7 +602,7 @@ func TestConnectLeafWatch(t *testing.T) { //invoke := makeInvokeCh() invoke := make(chan error) - plan := mustParse(t, `{"type":"connect_leaf", "service_id":"web"}`) + plan := mustParse(t, `{"type":"connect_leaf", "service":"web"}`) plan.Handler = func(idx uint64, raw interface{}) { if raw == nil { return // ignore From 27aa0743ec6e6ca3912683ab7209c736bf98b5c0 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 19 May 2018 00:20:43 -0700 Subject: [PATCH 303/627] connect/proxy: use the right variable for loading the new service --- connect/proxy/proxy.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/connect/proxy/proxy.go b/connect/proxy/proxy.go index fb382288f..4b740e22f 100644 --- a/connect/proxy/proxy.go +++ b/connect/proxy/proxy.go @@ -45,7 +45,7 @@ 
func (p *Proxy) Serve() error { // Initial setup // Setup Service instance now we know target ID etc - service, err := cfg.Service(p.client, p.logger) + service, err := newCfg.Service(p.client, p.logger) if err != nil { return err } From 3e8ea58585a4bb603f9658cfe5ab96d4151dafc3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 19 May 2018 00:43:38 -0700 Subject: [PATCH 304/627] command/connect/proxy: accept -service and -upstream --- command/connect/proxy/flag_upstreams.go | 54 ++++++++++ command/connect/proxy/flag_upstreams_test.go | 106 +++++++++++++++++++ command/connect/proxy/proxy.go | 28 ++++- 3 files changed, 186 insertions(+), 2 deletions(-) create mode 100644 command/connect/proxy/flag_upstreams.go create mode 100644 command/connect/proxy/flag_upstreams_test.go diff --git a/command/connect/proxy/flag_upstreams.go b/command/connect/proxy/flag_upstreams.go new file mode 100644 index 000000000..02ce3a3bb --- /dev/null +++ b/command/connect/proxy/flag_upstreams.go @@ -0,0 +1,54 @@ +package proxy + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/consul/connect/proxy" +) + +// FlagUpstreams implements the flag.Value interface and allows specifying +// the -upstream flag multiple times and keeping track of the name of the +// upstream and the local port. +// +// The syntax of the value is "name:addr" where addr can be "port" or +// "host:port". Examples: "db:8181", "db:127.0.0.10:8282", etc. 
+type FlagUpstreams map[string]proxy.UpstreamConfig + +func (f *FlagUpstreams) String() string { + return fmt.Sprintf("%v", *f) +} + +func (f *FlagUpstreams) Set(value string) error { + idx := strings.Index(value, ":") + if idx == -1 { + return fmt.Errorf("Upstream value should be name:addr in %q", value) + } + + addr := "" + name := value[:idx] + portRaw := value[idx+1:] + if idx := strings.Index(portRaw, ":"); idx != -1 { + addr = portRaw[:idx] + portRaw = portRaw[idx+1:] + } + + port, err := strconv.ParseInt(portRaw, 0, 0) + if err != nil { + return err + } + + if *f == nil { + *f = make(map[string]proxy.UpstreamConfig) + } + + (*f)[name] = proxy.UpstreamConfig{ + LocalBindAddress: addr, + LocalBindPort: int(port), + DestinationName: name, + DestinationType: "service", + } + + return nil +} diff --git a/command/connect/proxy/flag_upstreams_test.go b/command/connect/proxy/flag_upstreams_test.go new file mode 100644 index 000000000..d43c49d03 --- /dev/null +++ b/command/connect/proxy/flag_upstreams_test.go @@ -0,0 +1,106 @@ +package proxy + +import ( + "flag" + "testing" + + "github.com/hashicorp/consul/connect/proxy" + "github.com/stretchr/testify/require" +) + +func TestFlagUpstreams_impl(t *testing.T) { + var _ flag.Value = new(FlagUpstreams) +} + +func TestFlagUpstreams(t *testing.T) { + cases := []struct { + Name string + Input []string + Expected map[string]proxy.UpstreamConfig + Error string + }{ + { + "bad format", + []string{"foo"}, + nil, + "should be name:addr", + }, + + { + "port not int", + []string{"db:hello"}, + nil, + "invalid syntax", + }, + + { + "4 parts", + []string{"db:127.0.0.1:8181:foo"}, + nil, + "invalid syntax", + }, + + { + "single value", + []string{"db:8181"}, + map[string]proxy.UpstreamConfig{ + "db": proxy.UpstreamConfig{ + LocalBindPort: 8181, + DestinationName: "db", + DestinationType: "service", + }, + }, + "", + }, + + { + "address specified", + []string{"db:127.0.0.55:8181"}, + map[string]proxy.UpstreamConfig{ + "db": 
proxy.UpstreamConfig{ + LocalBindAddress: "127.0.0.55", + LocalBindPort: 8181, + DestinationName: "db", + DestinationType: "service", + }, + }, + "", + }, + + { + "repeat value, overwrite", + []string{"db:8181", "db:8282"}, + map[string]proxy.UpstreamConfig{ + "db": proxy.UpstreamConfig{ + LocalBindPort: 8282, + DestinationName: "db", + DestinationType: "service", + }, + }, + "", + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + require := require.New(t) + + var actual map[string]proxy.UpstreamConfig + f := (*FlagUpstreams)(&actual) + + var err error + for _, input := range tc.Input { + err = f.Set(input) + // Note we only test the last error. This could make some + // test failures confusing but it shouldn't be too bad. + } + if tc.Error != "" { + require.Error(err) + require.Contains(err.Error(), tc.Error) + return + } + + require.Equal(tc.Expected, actual) + }) + } +} diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index 83406e0fb..fc9e65b86 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -42,6 +42,8 @@ type cmd struct { cfgFile string proxyID string pprofAddr string + service string + upstreams map[string]proxyImpl.UpstreamConfig } func (c *cmd) init() { @@ -66,6 +68,14 @@ func (c *cmd) init() { "Enable debugging via pprof. Providing a host:port (or just ':port') "+ "enables profiling HTTP endpoints on that address.") + c.flags.StringVar(&c.service, "service", "", + "Name of the service this proxy is representing.") + + c.flags.Var((*FlagUpstreams)(&c.upstreams), "upstream", + "Upstream service to support connecting to. The format should be "+ + "'name:addr', such as 'db:8181'. 
This will make 'db' available "+ + "on port 8181.") + c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ServerFlags()) @@ -158,13 +168,27 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) } // Use the configured proxy ID - if c.proxyID == "" { + if c.proxyID != "" { + return proxyImpl.NewAgentConfigWatcher(client, c.proxyID, c.logger) + } + + // Otherwise, we're representing a manually specified service. + if c.service == "" { return nil, fmt.Errorf( "-service or -proxy-id must be specified so that proxy can " + "configure itself.") } - return proxyImpl.NewAgentConfigWatcher(client, c.proxyID, c.logger) + // Convert our upstreams to a slice of configurations + upstreams := make([]proxyImpl.UpstreamConfig, 0, len(c.upstreams)) + for _, u := range c.upstreams { + upstreams = append(upstreams, u) + } + + return proxyImpl.NewStaticConfigWatcher(&proxyImpl.Config{ + ProxiedServiceName: c.service, + Upstreams: upstreams, + }), nil } func (c *cmd) Synopsis() string { From b88023c607a7f5355b3acd401db5dd4d808d25a8 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 19 May 2018 00:46:06 -0700 Subject: [PATCH 305/627] connect/proxy: don't start public listener if 0 port --- connect/proxy/proxy.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/connect/proxy/proxy.go b/connect/proxy/proxy.go index 4b740e22f..b92e4e0b5 100644 --- a/connect/proxy/proxy.go +++ b/connect/proxy/proxy.go @@ -60,11 +60,15 @@ func (p *Proxy) Serve() error { p.logger.Printf("[DEBUG] leaf: %s roots: %s", leaf.URIs[0], bytes.Join(tcfg.RootCAs.Subjects(), []byte(","))) }() - newCfg.PublicListener.applyDefaults() - l := NewPublicListener(p.service, newCfg.PublicListener, p.logger) - err = p.startListener("public listener", l) - if err != nil { - return err + // Only start a listener if we have a port set. This allows + // the configuration to disable our public listener. 
+ if newCfg.PublicListener.BindPort != 0 { + newCfg.PublicListener.applyDefaults() + l := NewPublicListener(p.service, newCfg.PublicListener, p.logger) + err = p.startListener("public listener", l) + if err != nil { + return err + } } } From 99094d70b0f6713334c1d6e911cae8e0824484f8 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 19 May 2018 10:47:30 -0700 Subject: [PATCH 306/627] connect/proxy: add a full proxy test, parallel --- connect/proxy/config_test.go | 6 +++ connect/proxy/conn_test.go | 6 +++ connect/proxy/listener_test.go | 16 ++++--- connect/proxy/proxy_test.go | 79 ++++++++++++++++++++++++++++++++++ connect/proxy/testing.go | 19 +++++--- 5 files changed, 114 insertions(+), 12 deletions(-) create mode 100644 connect/proxy/proxy_test.go diff --git a/connect/proxy/config_test.go b/connect/proxy/config_test.go index 87bb43c81..2e214c6dd 100644 --- a/connect/proxy/config_test.go +++ b/connect/proxy/config_test.go @@ -15,6 +15,8 @@ import ( ) func TestParseConfigFile(t *testing.T) { + t.Parallel() + cfg, err := ParseConfigFile("testdata/config-kitchensink.hcl") require.Nil(t, err) @@ -54,6 +56,8 @@ func TestParseConfigFile(t *testing.T) { } func TestUpstreamResolverFromClient(t *testing.T) { + t.Parallel() + tests := []struct { name string cfg UpstreamConfig @@ -115,6 +119,8 @@ func TestUpstreamResolverFromClient(t *testing.T) { } func TestAgentConfigWatcher(t *testing.T) { + t.Parallel() + a := agent.NewTestAgent("agent_smith", "") defer a.Shutdown() diff --git a/connect/proxy/conn_test.go b/connect/proxy/conn_test.go index 4de428ad0..29dcdd3e3 100644 --- a/connect/proxy/conn_test.go +++ b/connect/proxy/conn_test.go @@ -64,6 +64,8 @@ func testConnPipelineSetup(t *testing.T) (net.Conn, net.Conn, *Conn, func()) { } func TestConn(t *testing.T) { + t.Parallel() + src, dst, c, stop := testConnPipelineSetup(t) defer stop() @@ -117,6 +119,8 @@ func TestConn(t *testing.T) { } func TestConnSrcClosing(t *testing.T) { + t.Parallel() + src, dst, c, stop := 
testConnPipelineSetup(t) defer stop() @@ -155,6 +159,8 @@ func TestConnSrcClosing(t *testing.T) { } func TestConnDstClosing(t *testing.T) { + t.Parallel() + src, dst, c, stop := testConnPipelineSetup(t) defer stop() diff --git a/connect/proxy/listener_test.go b/connect/proxy/listener_test.go index a0bc640d7..d63f5818b 100644 --- a/connect/proxy/listener_test.go +++ b/connect/proxy/listener_test.go @@ -15,23 +15,23 @@ import ( ) func TestPublicListener(t *testing.T) { + t.Parallel() + ca := agConnect.TestCA(t, nil) - ports := freeport.GetT(t, 2) + ports := freeport.GetT(t, 1) + + testApp := NewTestTCPServer(t) + defer testApp.Close() cfg := PublicListenerConfig{ BindAddress: "127.0.0.1", BindPort: ports[0], - LocalServiceAddress: TestLocalAddr(ports[1]), + LocalServiceAddress: testApp.Addr().String(), HandshakeTimeoutMs: 100, LocalConnectTimeoutMs: 100, } - testApp, err := NewTestTCPServer(t, cfg.LocalServiceAddress) - require.NoError(t, err) - defer testApp.Close() - svc := connect.TestService(t, "db", ca) - l := NewPublicListener(svc, cfg, log.New(os.Stderr, "", log.LstdFlags)) // Run proxy @@ -53,6 +53,8 @@ func TestPublicListener(t *testing.T) { } func TestUpstreamListener(t *testing.T) { + t.Parallel() + ca := agConnect.TestCA(t, nil) ports := freeport.GetT(t, 1) diff --git a/connect/proxy/proxy_test.go b/connect/proxy/proxy_test.go new file mode 100644 index 000000000..681f802c4 --- /dev/null +++ b/connect/proxy/proxy_test.go @@ -0,0 +1,79 @@ +package proxy + +import ( + "context" + "log" + "net" + "os" + "testing" + + "github.com/hashicorp/consul/agent" + agConnect "github.com/hashicorp/consul/agent/connect" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/connect" + "github.com/hashicorp/consul/lib/freeport" + "github.com/hashicorp/consul/testutil/retry" + "github.com/stretchr/testify/require" +) + +func TestProxy_public(t *testing.T) { + t.Parallel() + + require := require.New(t) + ports := freeport.GetT(t, 1) + + a := 
agent.NewTestAgent(t.Name(), "") + defer a.Shutdown() + client := a.Client() + + // Register the service so we can get a leaf cert + _, err := client.Catalog().Register(&api.CatalogRegistration{ + Datacenter: "dc1", + Node: "local", + Address: "127.0.0.1", + Service: &api.AgentService{ + Service: "echo", + }, + }, nil) + require.NoError(err) + + // Start the backend service that is being proxied + testApp := NewTestTCPServer(t) + defer testApp.Close() + + // Start the proxy + p, err := New(client, NewStaticConfigWatcher(&Config{ + ProxiedServiceName: "echo", + PublicListener: PublicListenerConfig{ + BindAddress: "127.0.0.1", + BindPort: ports[0], + LocalServiceAddress: testApp.Addr().String(), + }, + }), testLogger(t)) + require.NoError(err) + defer p.Close() + go p.Serve() + + // Create a test connection to the proxy. We retry here a few times + // since this is dependent on the agent actually starting up and setting + // up the CA. + var conn net.Conn + svc, err := connect.NewService("echo", client) + require.NoError(err) + retry.Run(t, func(r *retry.R) { + conn, err = svc.Dial(context.Background(), &connect.StaticResolver{ + Addr: TestLocalAddr(ports[0]), + CertURI: agConnect.TestSpiffeIDService(t, "echo"), + }) + if err != nil { + r.Fatalf("err: %s", err) + } + }) + + // Connection works, test it is the right one + TestEchoConn(t, conn, "") +} + +func testLogger(t *testing.T) *log.Logger { + return log.New(os.Stderr, "", log.LstdFlags) +} diff --git a/connect/proxy/testing.go b/connect/proxy/testing.go index f986cfe50..6b1b2636f 100644 --- a/connect/proxy/testing.go +++ b/connect/proxy/testing.go @@ -7,6 +7,7 @@ import ( "net" "sync/atomic" + "github.com/hashicorp/consul/lib/freeport" "github.com/mitchellh/go-testing-interface" "github.com/stretchr/testify/require" ) @@ -26,17 +27,20 @@ type TestTCPServer struct { // NewTestTCPServer opens as a listening socket on the given address and returns // a TestTCPServer serving requests to it. 
The server is already started and can // be stopped by calling Close(). -func NewTestTCPServer(t testing.T, addr string) (*TestTCPServer, error) { +func NewTestTCPServer(t testing.T) *TestTCPServer { + port := freeport.GetT(t, 1) + addr := TestLocalAddr(port[0]) + l, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } + require.NoError(t, err) + log.Printf("test tcp server listening on %s", addr) s := &TestTCPServer{ l: l, } go s.accept() - return s, nil + + return s } // Close stops the server @@ -47,6 +51,11 @@ func (s *TestTCPServer) Close() { } } +// Addr returns the address that this server is listening on. +func (s *TestTCPServer) Addr() net.Addr { + return s.l.Addr() +} + func (s *TestTCPServer) accept() error { for { conn, err := s.l.Accept() From b531919181649b6b4983b8d84f9cd79717154ef1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 19 May 2018 11:04:47 -0700 Subject: [PATCH 307/627] command/connect/proxy: tests for configuration --- command/connect/proxy/proxy.go | 30 ++++++++--- command/connect/proxy/proxy_test.go | 83 +++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+), 7 deletions(-) create mode 100644 command/connect/proxy/proxy_test.go diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index fc9e65b86..5d219d3ef 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -8,6 +8,7 @@ import ( "net/http" _ "net/http/pprof" // Expose pprof if configured "os" + "sort" proxyAgent "github.com/hashicorp/consul/agent/proxy" "github.com/hashicorp/consul/api" @@ -44,6 +45,9 @@ type cmd struct { pprofAddr string service string upstreams map[string]proxyImpl.UpstreamConfig + + // test flags + testNoStart bool // don't start the proxy, just exit 0 } func (c *cmd) init() { @@ -86,6 +90,10 @@ func (c *cmd) Run(args []string) int { if err := c.flags.Parse(args); err != nil { return 1 } + if len(c.flags.Args()) > 0 { + c.UI.Error(fmt.Sprintf("Should have no non-flag 
arguments.")) + return 1 + } // Load the proxy ID and token from env vars if they're set if c.proxyID == "" { @@ -147,10 +155,11 @@ func (c *cmd) Run(args []string) int { c.UI.Output("Log data will now stream in as it occurs:\n") logGate.Flush() - // Run the proxy - err = p.Serve() - if err != nil { - c.UI.Error(fmt.Sprintf("Failed running proxy: %s", err)) + // Run the proxy unless our tests require we don't + if !c.testNoStart { + if err := p.Serve(); err != nil { + c.UI.Error(fmt.Sprintf("Failed running proxy: %s", err)) + } } c.UI.Output("Consul Connect proxy shutdown") @@ -179,10 +188,17 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) "configure itself.") } - // Convert our upstreams to a slice of configurations + // Convert our upstreams to a slice of configurations. We do this + // deterministically by alphabetizing the upstream keys. We do this so + // that tests can compare the upstream values. + upstreamKeys := make([]string, 0, len(c.upstreams)) + for k := range c.upstreams { + upstreamKeys = append(upstreamKeys, k) + } + sort.Strings(upstreamKeys) upstreams := make([]proxyImpl.UpstreamConfig, 0, len(c.upstreams)) - for _, u := range c.upstreams { - upstreams = append(upstreams, u) + for _, k := range upstreamKeys { + upstreams = append(upstreams, c.upstreams[k]) } return proxyImpl.NewStaticConfigWatcher(&proxyImpl.Config{ diff --git a/command/connect/proxy/proxy_test.go b/command/connect/proxy/proxy_test.go new file mode 100644 index 000000000..b99fbfae1 --- /dev/null +++ b/command/connect/proxy/proxy_test.go @@ -0,0 +1,83 @@ +package proxy + +import ( + "testing" + "time" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/connect/proxy" + "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" +) + +func TestCommandConfigWatcher(t *testing.T) { + t.Parallel() + + cases := []struct { + Name string + Flags []string + Test func(*testing.T, *proxy.Config) + }{ + { + "-service flag only", + 
[]string{"-service", "web"}, + func(t *testing.T, cfg *proxy.Config) { + require.Equal(t, 0, cfg.PublicListener.BindPort) + require.Len(t, cfg.Upstreams, 0) + }, + }, + + { + "-service flag with upstreams", + []string{ + "-service", "web", + "-upstream", "db:1234", + "-upstream", "db2:2345", + }, + func(t *testing.T, cfg *proxy.Config) { + require.Equal(t, 0, cfg.PublicListener.BindPort) + require.Len(t, cfg.Upstreams, 2) + require.Equal(t, 1234, cfg.Upstreams[0].LocalBindPort) + require.Equal(t, 2345, cfg.Upstreams[1].LocalBindPort) + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + require := require.New(t) + + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + ui := cli.NewMockUi() + c := New(ui, make(chan struct{})) + c.testNoStart = true + + // Run and purposely fail the command + code := c.Run(append([]string{ + "-http-addr=" + a.HTTPAddr(), + }, tc.Flags...)) + require.Equal(0, code, ui.ErrorWriter.String()) + + // Get the configuration watcher + cw, err := c.configWatcher(client) + require.NoError(err) + tc.Test(t, testConfig(t, cw)) + }) + } +} + +func testConfig(t *testing.T, cw proxy.ConfigWatcher) *proxy.Config { + t.Helper() + + select { + case cfg := <-cw.Watch(): + return cfg + + case <-time.After(1 * time.Second): + t.Fatal("no configuration loaded") + return nil // satisfy compiler + } +} From a750254b28714100c9477deac74f17bb67fa74a1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 19 May 2018 11:20:20 -0700 Subject: [PATCH 308/627] command/connect/proxy: can set public listener from flags --- command/connect/proxy/proxy.go | 45 ++++++++++++++++++++++++----- command/connect/proxy/proxy_test.go | 26 +++++++++++++++++ 2 files changed, 64 insertions(+), 7 deletions(-) diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index 5d219d3ef..0de7d6670 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -5,10 +5,12 @@ 
import ( "fmt" "io" "log" + "net" "net/http" _ "net/http/pprof" // Expose pprof if configured "os" "sort" + "strconv" proxyAgent "github.com/hashicorp/consul/agent/proxy" "github.com/hashicorp/consul/api" @@ -39,12 +41,14 @@ type cmd struct { logger *log.Logger // flags - logLevel string - cfgFile string - proxyID string - pprofAddr string - service string - upstreams map[string]proxyImpl.UpstreamConfig + logLevel string + cfgFile string + proxyID string + pprofAddr string + service string + serviceAddr string + upstreams map[string]proxyImpl.UpstreamConfig + listen string // test flags testNoStart bool // don't start the proxy, just exit 0 @@ -78,7 +82,15 @@ func (c *cmd) init() { c.flags.Var((*FlagUpstreams)(&c.upstreams), "upstream", "Upstream service to support connecting to. The format should be "+ "'name:addr', such as 'db:8181'. This will make 'db' available "+ - "on port 8181.") + "on port 8181. This can be repeated multiple times.") + + c.flags.StringVar(&c.serviceAddr, "service-addr", "", + "Address of the local service to proxy. Only useful if -listen "+ + "and -service are both set.") + + c.flags.StringVar(&c.listen, "listen", "", + "Address to listen for inbound connections to the proxied service. 
"+ + "Must be specified with -service and -service-addr.") c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.http.ClientFlags()) @@ -201,8 +213,27 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) upstreams = append(upstreams, c.upstreams[k]) } + // Parse out our listener if we have one + var listener proxyImpl.PublicListenerConfig + if c.listen != "" { + host, portRaw, err := net.SplitHostPort(c.listen) + if err != nil { + return nil, err + } + + port, err := strconv.ParseInt(portRaw, 0, 0) + if err != nil { + return nil, err + } + + listener.BindAddress = host + listener.BindPort = int(port) + listener.LocalServiceAddress = c.serviceAddr + } + return proxyImpl.NewStaticConfigWatcher(&proxyImpl.Config{ ProxiedServiceName: c.service, + PublicListener: listener, Upstreams: upstreams, }), nil } diff --git a/command/connect/proxy/proxy_test.go b/command/connect/proxy/proxy_test.go index b99fbfae1..9970a6d7a 100644 --- a/command/connect/proxy/proxy_test.go +++ b/command/connect/proxy/proxy_test.go @@ -41,6 +41,32 @@ func TestCommandConfigWatcher(t *testing.T) { require.Equal(t, 2345, cfg.Upstreams[1].LocalBindPort) }, }, + + { + "-service flag with -service-addr", + []string{"-service", "web"}, + func(t *testing.T, cfg *proxy.Config) { + // -service-addr has no affect since -listen isn't set + require.Equal(t, 0, cfg.PublicListener.BindPort) + require.Len(t, cfg.Upstreams, 0) + }, + }, + + { + "-service, -service-addr, -listen", + []string{ + "-service", "web", + "-service-addr", "127.0.0.1:1234", + "-listen", ":4567", + }, + func(t *testing.T, cfg *proxy.Config) { + require.Len(t, cfg.Upstreams, 0) + + require.Equal(t, "", cfg.PublicListener.BindAddress) + require.Equal(t, 4567, cfg.PublicListener.BindPort) + require.Equal(t, "127.0.0.1:1234", cfg.PublicListener.LocalServiceAddress) + }, + }, } for _, tc := range cases { From 01c35641584faf132f6ba1fefbd5b57365bd0126 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 19 May 
2018 11:22:00 -0700 Subject: [PATCH 309/627] command/connect/proxy: -service-addr required for -listen --- command/connect/proxy/proxy.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index 0de7d6670..fd6e9c3b3 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -226,6 +226,12 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) return nil, err } + if c.serviceAddr == "" { + return nil, fmt.Errorf( + "-service-addr must be specified with -listen so the proxy " + + "knows the backend service address.") + } + listener.BindAddress = host listener.BindPort = int(port) listener.LocalServiceAddress = c.serviceAddr From 82ba16775772e5ca51bd397dc62bddf8cca552c1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 19 May 2018 11:34:00 -0700 Subject: [PATCH 310/627] command/connect/proxy: detailed help --- command/connect/proxy/proxy.go | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index fd6e9c3b3..b6b5dcc30 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -254,7 +254,34 @@ func (c *cmd) Help() string { const synopsis = "Runs a Consul Connect proxy" const help = ` -Usage: consul proxy [options] +Usage: consul connect proxy [options] Starts a Consul Connect proxy and runs until an interrupt is received. + The proxy can be used to accept inbound connections for a service, + wrap outbound connections to upstream services, or both. This enables + a non-Connect-aware application to use Connect. + + The proxy requires service:write permissions for the service it represents. + + Consul can automatically start and manage this proxy by specifying the + "proxy" configuration within your service definition. 
+ + The example below shows how to start a local proxy for establishing outbound + connections to "db" representing the frontend service. Once running, any + process that creates a TCP connection to the specified port (8181) will + establish a mutual TLS connection to "db" identified as "frontend". + + $ consul connect proxy -service frontend -upstream db:8181 + + The next example starts a local proxy that also accepts inbound connections + on port 8443, authorizes the connection, then proxies it to port 8080: + + $ consul connect proxy \ + -service frontend \ + -service-addr 127.0.0.1:8080 \ + -listen ':8443' + + A proxy can accept both inbound connections as well as proxy to upstream + services by specifying both the "-listen" and "-upstream" flags. + ` From 351a9585e4cecd8622891857e7a9648bff7b7be7 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sat, 19 May 2018 15:47:25 -0700 Subject: [PATCH 311/627] command/connect/proxy: output information when starting similar to agent --- command/connect/proxy/proxy.go | 36 +++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index b6b5dcc30..f6a5576fa 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -23,6 +23,13 @@ import ( ) func New(ui cli.Ui, shutdownCh <-chan struct{}) *cmd { + ui = &cli.PrefixedUi{ + OutputPrefix: "==> ", + InfoPrefix: " ", + ErrorPrefix: "==> ", + Ui: ui, + } + c := &cmd{UI: ui, shutdownCh: shutdownCh} c.init() return c @@ -49,6 +56,7 @@ type cmd struct { serviceAddr string upstreams map[string]proxyImpl.UpstreamConfig listen string + register bool // test flags testNoStart bool // don't start the proxy, just exit 0 @@ -92,6 +100,10 @@ func (c *cmd) init() { "Address to listen for inbound connections to the proxied service. 
"+ "Must be specified with -service and -service-addr.") + c.flags.BoolVar(&c.register, "register", false, + "Self-register with the local Consul agent. Only useful with "+ + "-listen.") + c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ServerFlags()) @@ -143,6 +155,10 @@ func (c *cmd) Run(args []string) int { return 1 } + // Output this first since the config watcher below will output + // other information. + c.UI.Output("Consul Connect proxy starting...") + // Get the proper configuration watcher cfgWatcher, err := c.configWatcher(client) if err != nil { @@ -162,8 +178,7 @@ func (c *cmd) Run(args []string) int { p.Close() }() - c.UI.Output("Consul Connect proxy starting") - + c.UI.Info("") c.UI.Output("Log data will now stream in as it occurs:\n") logGate.Flush() @@ -185,11 +200,15 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) if err != nil { return nil, err } + + c.UI.Info("Configuration mode: File") return proxyImpl.NewStaticConfigWatcher(cfg), nil } // Use the configured proxy ID if c.proxyID != "" { + c.UI.Info("Configuration mode: Agent API") + c.UI.Info(fmt.Sprintf(" Proxy ID: %s", c.proxyID)) return proxyImpl.NewAgentConfigWatcher(client, c.proxyID, c.logger) } @@ -200,6 +219,9 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) "configure itself.") } + c.UI.Info("Configuration mode: Flags") + c.UI.Info(fmt.Sprintf(" Service: %s", c.service)) + // Convert our upstreams to a slice of configurations. We do this // deterministically by alphabetizing the upstream keys. We do this so // that tests can compare the upstream values. 
@@ -210,7 +232,12 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) sort.Strings(upstreamKeys) upstreams := make([]proxyImpl.UpstreamConfig, 0, len(c.upstreams)) for _, k := range upstreamKeys { - upstreams = append(upstreams, c.upstreams[k]) + config := c.upstreams[k] + + c.UI.Info(fmt.Sprintf( + " Upstream: %s => %s:%d", + k, config.LocalBindAddress, config.LocalBindPort)) + upstreams = append(upstreams, config) } // Parse out our listener if we have one @@ -232,9 +259,12 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) "knows the backend service address.") } + c.UI.Info(fmt.Sprintf(" Public listener: %s:%d => %s", host, int(port), c.serviceAddr)) listener.BindAddress = host listener.BindPort = int(port) listener.LocalServiceAddress = c.serviceAddr + } else { + c.UI.Info(fmt.Sprintf(" Public listener: Disabled")) } return proxyImpl.NewStaticConfigWatcher(&proxyImpl.Config{ From 021782c36b77028ead192795eae4885f5bd45aa3 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 20 May 2018 10:04:29 -0700 Subject: [PATCH 312/627] command/connect/proxy: register monitor and -register flag --- command/connect/proxy/proxy.go | 64 ++++++- command/connect/proxy/register.go | 293 ++++++++++++++++++++++++++++++ 2 files changed, 349 insertions(+), 8 deletions(-) create mode 100644 command/connect/proxy/register.go diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go index f6a5576fa..96c6a75cb 100644 --- a/command/connect/proxy/proxy.go +++ b/command/connect/proxy/proxy.go @@ -57,6 +57,7 @@ type cmd struct { upstreams map[string]proxyImpl.UpstreamConfig listen string register bool + registerId string // test flags testNoStart bool // don't start the proxy, just exit 0 @@ -104,6 +105,9 @@ func (c *cmd) init() { "Self-register with the local Consul agent. Only useful with "+ "-listen.") + c.flags.StringVar(&c.registerId, "register-id", "", + "ID suffix for the service. 
Use this to disambiguate with other proxies.") + c.http = &flags.HTTPFlags{} flags.Merge(c.flags, c.http.ClientFlags()) flags.Merge(c.flags, c.http.ServerFlags()) @@ -178,6 +182,18 @@ func (c *cmd) Run(args []string) int { p.Close() }() + // Register the service if we requested it + if c.register { + monitor, err := c.registerMonitor(client) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed initializing registration: %s", err)) + return 1 + } + + go monitor.Run() + defer monitor.Close() + } + c.UI.Info("") c.UI.Output("Log data will now stream in as it occurs:\n") logGate.Flush() @@ -243,12 +259,7 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) // Parse out our listener if we have one var listener proxyImpl.PublicListenerConfig if c.listen != "" { - host, portRaw, err := net.SplitHostPort(c.listen) - if err != nil { - return nil, err - } - - port, err := strconv.ParseInt(portRaw, 0, 0) + host, port, err := c.listenParts() if err != nil { return nil, err } @@ -259,9 +270,9 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) "knows the backend service address.") } - c.UI.Info(fmt.Sprintf(" Public listener: %s:%d => %s", host, int(port), c.serviceAddr)) + c.UI.Info(fmt.Sprintf(" Public listener: %s:%d => %s", host, port, c.serviceAddr)) listener.BindAddress = host - listener.BindPort = int(port) + listener.BindPort = port listener.LocalServiceAddress = c.serviceAddr } else { c.UI.Info(fmt.Sprintf(" Public listener: Disabled")) @@ -274,6 +285,43 @@ func (c *cmd) configWatcher(client *api.Client) (proxyImpl.ConfigWatcher, error) }), nil } +// registerMonitor returns the registration monitor ready to be started. 
+func (c *cmd) registerMonitor(client *api.Client) (*RegisterMonitor, error) {
+	if c.service == "" || c.listen == "" {
+		return nil, fmt.Errorf("-register may only be specified with -service and -listen")
+	}
+
+	host, port, err := c.listenParts()
+	if err != nil {
+		return nil, err
+	}
+
+	m := NewRegisterMonitor()
+	m.Logger = c.logger
+	m.Client = client
+	m.Service = c.service
+	m.IDSuffix = c.registerId
+	m.LocalAddress = host
+	m.LocalPort = port
+	return m, nil
+}
+
+// listenParts returns the host and port parts of the -listen flag. The
+// -listen flag must be non-empty prior to calling this.
+func (c *cmd) listenParts() (string, int, error) {
+	host, portRaw, err := net.SplitHostPort(c.listen)
+	if err != nil {
+		return "", 0, err
+	}
+
+	port, err := strconv.ParseInt(portRaw, 0, 0)
+	if err != nil {
+		return "", 0, err
+	}
+
+	return host, int(port), nil
+}
+
 func (c *cmd) Synopsis() string {
 	return synopsis
 }
diff --git a/command/connect/proxy/register.go b/command/connect/proxy/register.go
new file mode 100644
index 000000000..09f7e0fd4
--- /dev/null
+++ b/command/connect/proxy/register.go
@@ -0,0 +1,293 @@
+package proxy
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/consul/api"
+)
+
+const (
+	// RegisterReconcilePeriod is how often the monitor will attempt to
+	// reconcile the expected service state with the remote Consul server.
+	RegisterReconcilePeriod = 30 * time.Second
+
+	// RegisterTTLPeriod is the TTL setting for the health check of the
+	// service. The monitor will automatically pass the health check
+	// three times per this period to be more resilient to failures.
+	RegisterTTLPeriod = 30 * time.Second
+)
+
+// RegisterMonitor registers the proxy with the local Consul agent with a TTL
+// health check that is kept alive.
+//
+// This struct should be initialized with NewRegisterMonitor instead of being
+// allocated directly. Using this struct without calling NewRegisterMonitor
+// will result in panics.
+type RegisterMonitor struct { + // Logger is the logger for the monitor. + Logger *log.Logger + + // Client is the API client to a specific Consul agent. This agent is + // where the service will be registered. + Client *api.Client + + // Service is the name of the service being proxied. + Service string + + // LocalAddress and LocalPort are the address and port of the proxy + // itself, NOT the service being proxied. + LocalAddress string + LocalPort int + + // IDSuffix is a unique ID that is appended to the end of the service + // name. This helps the service be unique. By default the service ID + // is just the proxied service name followed by "-proxy". + IDSuffix string + + // The fields below are related to timing settings. See the default + // constants for more documentation on what they set. + ReconcilePeriod time.Duration + TTLPeriod time.Duration + + // lock is held while reading/writing any internal state of the monitor. + // cond is a condition variable on lock that is broadcasted for runState + // changes. + lock *sync.Mutex + cond *sync.Cond + + // runState is the current state of the monitor. To read this the + // lock must be held. The condition variable cond can be waited on + // for changes to this value. + runState registerRunState +} + +// registerState is the state of the RegisterMonitor. +// +// This is a basic state machine with the following transitions: +// +// * idle => running, stopped +// * running => stopping, stopped +// * stopping => stopped +// * stopped => <> +// +type registerRunState uint8 + +const ( + registerStateIdle registerRunState = iota + registerStateRunning + registerStateStopping + registerStateStopped +) + +// NewRegisterMonitor initializes a RegisterMonitor. After initialization, +// the exported fields should be configured as desired. To start the monitor, +// execute Run in a goroutine. 
+func NewRegisterMonitor() *RegisterMonitor { + var lock sync.Mutex + return &RegisterMonitor{ + Logger: log.New(os.Stderr, "", log.LstdFlags), // default logger + ReconcilePeriod: RegisterReconcilePeriod, + TTLPeriod: RegisterTTLPeriod, + lock: &lock, + cond: sync.NewCond(&lock), + } +} + +// Run should be started in a goroutine and will keep Consul updated +// in the background with the state of this proxy. If registration fails +// this will continue to retry. +func (r *RegisterMonitor) Run() { + // Grab the lock and set our state. If we're not idle, then we return + // immediately since the monitor is only allowed to run once. + r.lock.Lock() + if r.runState != registerStateIdle { + r.lock.Unlock() + return + } + r.runState = registerStateRunning + r.lock.Unlock() + + // Start a goroutine that just waits for a stop request + stopCh := make(chan struct{}) + go func() { + defer close(stopCh) + r.lock.Lock() + defer r.lock.Unlock() + + // We wait for anything not running, just so we're more resilient + // in the face of state machine issues. Basically any state change + // will cause us to quit. + for r.runState == registerStateRunning { + r.cond.Wait() + } + }() + + // When we exit, we set the state to stopped and broadcast to any + // waiting Close functions that they can return. + defer func() { + r.lock.Lock() + r.runState = registerStateStopped + r.cond.Broadcast() + r.lock.Unlock() + }() + + // Run the first registration optimistically. If this fails then its + // okay since we'll just retry shortly. + r.register() + + // Create the timers for trigger events. We don't use tickers because + // we don't want the events to pile on. 
+ reconcileTimer := time.NewTimer(r.ReconcilePeriod) + heartbeatTimer := time.NewTimer(r.TTLPeriod / 3) + + for { + select { + case <-reconcileTimer.C: + r.register() + reconcileTimer.Reset(r.ReconcilePeriod) + + case <-heartbeatTimer.C: + r.heartbeat() + heartbeatTimer.Reset(r.TTLPeriod / 3) + + case <-stopCh: + r.Logger.Printf("[INFO] proxy: stop request received, deregistering") + r.deregister() + return + } + } +} + +// register queries the Consul agent to determine if we've already registered. +// If we haven't or the registered service differs from what we're trying to +// register, then we attempt to register our service. +func (r *RegisterMonitor) register() { + catalog := r.Client.Catalog() + serviceID := r.serviceID() + serviceName := r.serviceName() + + // Determine the current state of this service in Consul + var currentService *api.CatalogService + services, _, err := catalog.Service( + serviceName, "", + &api.QueryOptions{AllowStale: true}) + if err == nil { + for _, service := range services { + if serviceID == service.ServiceID { + currentService = service + break + } + } + } + + // If we have a matching service, then do nothing + if currentService != nil { + r.Logger.Printf("[DEBUG] proxy: service already registered, not re-registering") + return + } + + // If we're here, then we're registering the service. 
+ err = r.Client.Agent().ServiceRegister(&api.AgentServiceRegistration{ + Kind: api.ServiceKindConnectProxy, + ProxyDestination: r.Service, + ID: serviceID, + Name: serviceName, + Address: r.LocalAddress, + Port: r.LocalPort, + Check: &api.AgentServiceCheck{ + CheckID: r.checkID(), + Name: "proxy heartbeat", + TTL: "30s", + Notes: "Built-in proxy will heartbeat this check.", + Status: "passing", + }, + }) + if err != nil { + r.Logger.Printf("[WARN] proxy: Failed to register Consul service: %s", err) + return + } + + r.Logger.Printf("[INFO] proxy: registered Consul service: %s", serviceID) +} + +// heartbeat just pings the TTL check for our service. +func (r *RegisterMonitor) heartbeat() { + // Trigger the health check passing. We don't need to retry this + // since we do a couple tries within the TTL period. + if err := r.Client.Agent().PassTTL(r.checkID(), ""); err != nil { + r.Logger.Printf("[WARN] proxy: heartbeat failed: %s", err) + } +} + +// deregister deregisters the service. +func (r *RegisterMonitor) deregister() { + // Basic retry loop, no backoff for now. But we want to retry a few + // times just in case there are basic ephemeral issues. + for i := 0; i < 3; i++ { + err := r.Client.Agent().ServiceDeregister(r.serviceID()) + if err == nil { + return + } + + r.Logger.Printf("[WARN] proxy: service deregister failed: %s", err) + time.Sleep(500 * time.Millisecond) + } +} + +// Close stops the register goroutines and deregisters the service. Once +// Close is called, the monitor can no longer be used again. It is safe to +// call Close multiple times and concurrently. +func (r *RegisterMonitor) Close() error { + r.lock.Lock() + defer r.lock.Unlock() + + for { + switch r.runState { + case registerStateIdle: + // Idle so just set it to stopped and return. We notify + // the condition variable in case others are waiting. 
+ r.runState = registerStateStopped + r.cond.Broadcast() + return nil + + case registerStateRunning: + // Set the state to stopping and broadcast to all waiters, + // since Run is sitting on cond.Wait. + r.runState = registerStateStopping + r.cond.Broadcast() + r.cond.Wait() // Wait on the stopping event + + case registerStateStopping: + // Still stopping, wait... + r.cond.Wait() + + case registerStateStopped: + // Stopped, target state reached + return nil + } + } +} + +// serviceID returns the unique ID for this proxy service. +func (r *RegisterMonitor) serviceID() string { + id := fmt.Sprintf("%s-proxy", r.Service) + if r.IDSuffix != "" { + id += "-" + r.IDSuffix + } + + return id +} + +// serviceName returns the non-unique name of this proxy service. +func (r *RegisterMonitor) serviceName() string { + return fmt.Sprintf("%s-proxy", r.Service) +} + +// checkID is the unique ID for the registered health check. +func (r *RegisterMonitor) checkID() string { + return fmt.Sprintf("%s-ttl", r.serviceID()) +} From 771842255abf9a321de813d2fac677962ced6551 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 22 May 2018 10:33:14 -0700 Subject: [PATCH 313/627] address comment feedback --- agent/agent_endpoint.go | 3 ++- command/connect/proxy/proxy.go | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/agent/agent_endpoint.go b/agent/agent_endpoint.go index 239f962a1..1318e8f86 100644 --- a/agent/agent_endpoint.go +++ b/agent/agent_endpoint.go @@ -907,7 +907,8 @@ func (s *HTTPServer) AgentConnectCARoots(resp http.ResponseWriter, req *http.Req // AgentConnectCALeafCert returns the certificate bundle for a service // instance. This supports blocking queries to update the returned bundle. func (s *HTTPServer) AgentConnectCALeafCert(resp http.ResponseWriter, req *http.Request) (interface{}, error) { - // Get the service ID. Note that this is the ID of a service instance. + // Get the service name. 
Note that this is the name of the service,
+	// not the ID of the service instance.
 	serviceName := strings.TrimPrefix(req.URL.Path, "/v1/agent/connect/ca/leaf/")
 
 	args := cachetype.ConnectCALeafRequest{
diff --git a/command/connect/proxy/proxy.go b/command/connect/proxy/proxy.go
index 96c6a75cb..91df04025 100644
--- a/command/connect/proxy/proxy.go
+++ b/command/connect/proxy/proxy.go
@@ -340,6 +340,8 @@ Usage: consul connect proxy [options]
   a non-Connect-aware application to use Connect. The proxy requires
   service:write permissions for the service it represents.
 
+  The token may be passed via the CLI or the CONSUL_TOKEN environment
+  variable.
 
   Consul can automatically start and manage this proxy by specifying the
   "proxy" configuration within your service definition.

From 510a8a6a6c76121d66e26f9b085a73077b9b65f9 Mon Sep 17 00:00:00 2001
From: Mitchell Hashimoto
Date: Tue, 22 May 2018 10:45:37 -0700
Subject: [PATCH 314/627] connect/proxy: remove dev CA settings

---
 connect/proxy/config.go | 16 +---------------
 connect/proxy/config_test.go | 3 ---
 connect/proxy/testdata/config-kitchensink.hcl | 5 -----
 3 files changed, 1 insertion(+), 23 deletions(-)

diff --git a/connect/proxy/config.go b/connect/proxy/config.go
index 025809a7b..b0e020ab2 100644
--- a/connect/proxy/config.go
+++ b/connect/proxy/config.go
@@ -36,25 +36,11 @@ type Config struct {
 
 	// Upstreams configures outgoing proxies for remote connect services.
 	Upstreams []UpstreamConfig `json:"upstreams" hcl:"upstreams"`
-
-	// DevCAFile, DevServiceCertFile, and DevServiceKeyFile allow configuring
-	// the certificate information from a static file. This is only for testing
-	// purposes. All or none must be specified.
- DevCAFile string `json:"dev_ca_file" hcl:"dev_ca_file"` - DevServiceCertFile string `json:"dev_service_cert_file" hcl:"dev_service_cert_file"` - DevServiceKeyFile string `json:"dev_service_key_file" hcl:"dev_service_key_file"` } // Service returns the *connect.Service structure represented by this config. func (c *Config) Service(client *api.Client, logger *log.Logger) (*connect.Service, error) { - // If we aren't in dev mode, then we return the configured service. - if c.DevCAFile == "" { - return connect.NewServiceWithLogger(c.ProxiedServiceName, client, logger) - } - - // Dev mode - return connect.NewDevServiceFromCertFiles(c.ProxiedServiceName, - logger, c.DevCAFile, c.DevServiceCertFile, c.DevServiceKeyFile) + return connect.NewServiceWithLogger(c.ProxiedServiceName, client, logger) } // PublicListenerConfig contains the parameters needed for the incoming mTLS diff --git a/connect/proxy/config_test.go b/connect/proxy/config_test.go index 2e214c6dd..d395c9767 100644 --- a/connect/proxy/config_test.go +++ b/connect/proxy/config_test.go @@ -47,9 +47,6 @@ func TestParseConfigFile(t *testing.T) { ConnectTimeoutMs: 10000, }, }, - DevCAFile: "connect/testdata/ca1-ca-consul-internal.cert.pem", - DevServiceCertFile: "connect/testdata/ca1-svc-web.cert.pem", - DevServiceKeyFile: "connect/testdata/ca1-svc-web.key.pem", } require.Equal(t, expect, cfg) diff --git a/connect/proxy/testdata/config-kitchensink.hcl b/connect/proxy/testdata/config-kitchensink.hcl index de3472d0f..2276190ca 100644 --- a/connect/proxy/testdata/config-kitchensink.hcl +++ b/connect/proxy/testdata/config-kitchensink.hcl @@ -5,11 +5,6 @@ token = "11111111-2222-3333-4444-555555555555" proxied_service_name = "web" proxied_service_namespace = "default" -# Assumes running consul in dev mode from the repo root... 
-dev_ca_file = "connect/testdata/ca1-ca-consul-internal.cert.pem" -dev_service_cert_file = "connect/testdata/ca1-svc-web.cert.pem" -dev_service_key_file = "connect/testdata/ca1-svc-web.key.pem" - public_listener { bind_address = "127.0.0.1" bind_port= "9999" From 118aa0f00a7381b3ae2c12b059aeafa2771f4a53 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 22 May 2018 11:40:08 -0700 Subject: [PATCH 315/627] command/connect/proxy: register monitor tests --- command/connect/proxy/register.go | 7 +- command/connect/proxy/register_test.go | 106 +++++++++++++++++++++++++ 2 files changed, 111 insertions(+), 2 deletions(-) create mode 100644 command/connect/proxy/register_test.go diff --git a/command/connect/proxy/register.go b/command/connect/proxy/register.go index 09f7e0fd4..332607b8a 100644 --- a/command/connect/proxy/register.go +++ b/command/connect/proxy/register.go @@ -184,8 +184,11 @@ func (r *RegisterMonitor) register() { } } - // If we have a matching service, then do nothing - if currentService != nil { + // If we have a matching service, then we verify if we need to reregister + // by comparing if it matches what we expect. 
+ if currentService != nil && + currentService.ServiceAddress == r.LocalAddress && + currentService.ServicePort == r.LocalPort { r.Logger.Printf("[DEBUG] proxy: service already registered, not re-registering") return } diff --git a/command/connect/proxy/register_test.go b/command/connect/proxy/register_test.go new file mode 100644 index 000000000..3a7354247 --- /dev/null +++ b/command/connect/proxy/register_test.go @@ -0,0 +1,106 @@ +package proxy + +import ( + "testing" + "time" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/testutil/retry" + "github.com/stretchr/testify/require" +) + +func TestRegisterMonitor_good(t *testing.T) { + t.Parallel() + require := require.New(t) + + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + m, service := testMonitor(t, client) + defer m.Close() + + // Verify the settings + require.Equal(api.ServiceKindConnectProxy, service.Kind) + require.Equal("foo", service.ProxyDestination) + require.Equal("127.0.0.1", service.Address) + require.Equal(1234, service.Port) + + // Stop should deregister the service + require.NoError(m.Close()) + services, err := client.Agent().Services() + require.NoError(err) + require.NotContains(services, m.serviceID()) +} + +func TestRegisterMonitor_heartbeat(t *testing.T) { + t.Parallel() + require := require.New(t) + + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + client := a.Client() + + m, _ := testMonitor(t, client) + defer m.Close() + + // Get the check and verify that it is passing + checks, err := client.Agent().Checks() + require.NoError(err) + require.Contains(checks, m.checkID()) + require.Equal("passing", checks[m.checkID()].Status) + + // Purposely fail the TTL check, verify it becomes healthy again + require.NoError(client.Agent().FailTTL(m.checkID(), "")) + retry.Run(t, func(r *retry.R) { + checks, err := client.Agent().Checks() + if err != nil { + r.Fatalf("err: %s", err) + } + + 
check, ok := checks[m.checkID()] + if !ok { + r.Fatal("check not found") + } + + if check.Status != "passing" { + r.Fatalf("check status is bad: %s", check.Status) + } + }) +} + +// testMonitor creates a RegisterMonitor, configures it, and starts it. +// It waits until the service appears in the catalog and then returns. +func testMonitor(t *testing.T, client *api.Client) (*RegisterMonitor, *api.AgentService) { + // Setup the monitor + m := NewRegisterMonitor() + m.Client = client + m.Service = "foo" + m.LocalAddress = "127.0.0.1" + m.LocalPort = 1234 + + // We want shorter periods so we can test things + m.ReconcilePeriod = 400 * time.Millisecond + m.TTLPeriod = 200 * time.Millisecond + + // Start the monitor + go m.Run() + + // The service should be registered + var service *api.AgentService + retry.Run(t, func(r *retry.R) { + services, err := client.Agent().Services() + if err != nil { + r.Fatalf("err: %s", err) + } + + var ok bool + service, ok = services[m.serviceID()] + if !ok { + r.Fatal("service not found") + } + }) + + return m, service +} From 4d46bba2c4ed26236349ad7a8c65480d2df611af Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Wed, 23 May 2018 14:43:40 -0700 Subject: [PATCH 316/627] Support giving the duration as a string in CA config --- agent/connect/ca/provider.go | 2 +- agent/connect/ca/provider_consul.go | 16 +---- agent/connect/ca/provider_consul_config.go | 77 ++++++++++++++++++++++ agent/connect/ca/provider_consul_test.go | 2 +- agent/connect_ca_endpoint.go | 23 +++++++ 5 files changed, 103 insertions(+), 17 deletions(-) create mode 100644 agent/connect/ca/provider_consul_config.go diff --git a/agent/connect/ca/provider.go b/agent/connect/ca/provider.go index d557d289c..0fdd3e41b 100644 --- a/agent/connect/ca/provider.go +++ b/agent/connect/ca/provider.go @@ -1,4 +1,4 @@ -package connect +package ca import ( "crypto/x509" diff --git a/agent/connect/ca/provider_consul.go b/agent/connect/ca/provider_consul.go index 20641a16c..afe99db79 
100644 --- a/agent/connect/ca/provider_consul.go +++ b/agent/connect/ca/provider_consul.go @@ -1,4 +1,4 @@ -package connect +package ca import ( "bytes" @@ -15,7 +15,6 @@ import ( "github.com/hashicorp/consul/agent/connect" "github.com/hashicorp/consul/agent/consul/state" "github.com/hashicorp/consul/agent/structs" - "github.com/mitchellh/mapstructure" ) type ConsulProvider struct { @@ -111,19 +110,6 @@ func NewConsulProvider(rawConfig map[string]interface{}, delegate ConsulProvider return provider, nil } -func ParseConsulCAConfig(raw map[string]interface{}) (*structs.ConsulCAProviderConfig, error) { - var config structs.ConsulCAProviderConfig - if err := mapstructure.WeakDecode(raw, &config); err != nil { - return nil, fmt.Errorf("error decoding config: %s", err) - } - - if config.PrivateKey == "" && config.RootCert != "" { - return nil, fmt.Errorf("must provide a private key when providing a root cert") - } - - return &config, nil -} - // Return the active root CA and generate a new one if needed func (c *ConsulProvider) ActiveRoot() (string, error) { state := c.delegate.State() diff --git a/agent/connect/ca/provider_consul_config.go b/agent/connect/ca/provider_consul_config.go new file mode 100644 index 000000000..e0112b3e2 --- /dev/null +++ b/agent/connect/ca/provider_consul_config.go @@ -0,0 +1,77 @@ +package ca + +import ( + "fmt" + "reflect" + "time" + + "github.com/hashicorp/consul/agent/structs" + "github.com/mitchellh/mapstructure" +) + +func ParseConsulCAConfig(raw map[string]interface{}) (*structs.ConsulCAProviderConfig, error) { + var config structs.ConsulCAProviderConfig + decodeConf := &mapstructure.DecoderConfig{ + DecodeHook: ParseDurationFunc(), + ErrorUnused: true, + Result: &config, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + if err := decoder.Decode(raw); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + if config.PrivateKey == "" 
&& config.RootCert != "" { + return nil, fmt.Errorf("must provide a private key when providing a root cert") + } + + return &config, nil +} + +// ParseDurationFunc is a mapstructure hook for decoding a string or +// []uint8 into a time.Duration value. +func ParseDurationFunc() mapstructure.DecodeHookFunc { + uint8ToString := func(bs []uint8) string { + b := make([]byte, len(bs)) + for i, v := range bs { + b[i] = byte(v) + } + return string(b) + } + + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var v time.Duration + if t != reflect.TypeOf(v) { + return data, nil + } + + switch { + case f.Kind() == reflect.String: + if dur, err := time.ParseDuration(data.(string)); err != nil { + return nil, err + } else { + v = dur + } + return v, nil + case f == reflect.SliceOf(reflect.TypeOf(uint8(0))): + s := uint8ToString(data.([]uint8)) + if dur, err := time.ParseDuration(s); err != nil { + return nil, err + } else { + v = dur + } + return v, nil + default: + return data, nil + } + } +} diff --git a/agent/connect/ca/provider_consul_test.go b/agent/connect/ca/provider_consul_test.go index 9f8cc04b4..c3b375fc2 100644 --- a/agent/connect/ca/provider_consul_test.go +++ b/agent/connect/ca/provider_consul_test.go @@ -1,4 +1,4 @@ -package connect +package ca import ( "fmt" diff --git a/agent/connect_ca_endpoint.go b/agent/connect_ca_endpoint.go index 979005df1..f7f83b13a 100644 --- a/agent/connect_ca_endpoint.go +++ b/agent/connect_ca_endpoint.go @@ -47,6 +47,7 @@ func (s *HTTPServer) ConnectCAConfigurationGet(resp http.ResponseWriter, req *ht var reply structs.CAConfiguration err := s.agent.RPC("ConnectCA.ConfigurationGet", &args, &reply) + fixupConfig(&reply) return reply, err } @@ -67,3 +68,25 @@ func (s *HTTPServer) ConnectCAConfigurationSet(resp http.ResponseWriter, req *ht err := s.agent.RPC("ConnectCA.ConfigurationSet", &args, &reply) return nil, err } + +// A hack to fix up the config types inside of the map[string]interface{} 
+// so that they get formatted correctly during json.Marshal. Without this, +// duration values given as text like "24h" end up getting output back +// to the user in base64-encoded form. +func fixupConfig(conf *structs.CAConfiguration) { + if conf.Provider == structs.ConsulCAProvider { + if v, ok := conf.Config["RotationPeriod"]; ok { + if raw, ok := v.([]uint8); ok { + conf.Config["RotationPeriod"] = uint8ToString(raw) + } + } + } +} + +func uint8ToString(bs []uint8) string { + b := make([]byte, len(bs)) + for i, v := range bs { + b[i] = byte(v) + } + return string(b) +} From 1a1090aebfef1b0141530a5ba13404fb6be5b66c Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Wed, 23 May 2018 14:44:24 -0700 Subject: [PATCH 317/627] Add client api support for CA config endpoints --- api/connect_ca.go | 72 ++++++++++++++++++++++++++++++++++++++++++ api/connect_ca_test.go | 41 ++++++++++++++++++++++++ 2 files changed, 113 insertions(+) diff --git a/api/connect_ca.go b/api/connect_ca.go index ed0ac5e8f..c43339969 100644 --- a/api/connect_ca.go +++ b/api/connect_ca.go @@ -1,9 +1,44 @@ package api import ( + "fmt" "time" + + "github.com/mitchellh/mapstructure" ) +// CAConfig is the structure for the Connect CA configuration. +type CAConfig struct { + // Provider is the CA provider implementation to use. + Provider string + + // Configuration is arbitrary configuration for the provider. This + // should only contain primitive values and containers (such as lists + // and maps). + Config map[string]interface{} + + CreateIndex uint64 + ModifyIndex uint64 +} + +// ConsulCAProviderConfig is the config for the built-in Consul CA provider. +type ConsulCAProviderConfig struct { + PrivateKey string + RootCert string + RotationPeriod time.Duration +} + +// ParseConsulCAConfig takes a raw config map and returns a parsed +// ConsulCAProviderConfig. 
+func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { + var config ConsulCAProviderConfig + if err := mapstructure.WeakDecode(raw, &config); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + return &config, nil +} + // CARootList is the structure for the results of listing roots. type CARootList struct { ActiveRootID string @@ -79,3 +114,40 @@ func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { } return &out, qm, nil } + +// CAGetConfig returns the current CA configuration. +func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/ca/configuration") + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CAConfig + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// CASetConfig sets the current CA configuration. 
+func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) { + r := h.c.newRequest("PUT", "/v1/connect/ca/configuration") + r.setWriteOptions(q) + r.obj = conf + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} diff --git a/api/connect_ca_test.go b/api/connect_ca_test.go index 36fb12b56..9cfaa9a0a 100644 --- a/api/connect_ca_test.go +++ b/api/connect_ca_test.go @@ -2,6 +2,9 @@ package api import ( "testing" + "time" + + "github.com/pascaldekloe/goe/verify" "github.com/hashicorp/consul/testutil" "github.com/hashicorp/consul/testutil/retry" @@ -51,3 +54,41 @@ func TestAPI_ConnectCARoots_list(t *testing.T) { }) } + +func TestAPI_ConnectCAConfig_get_set(t *testing.T) { + t.Parallel() + + c, s := makeClient(t) + defer s.Stop() + + expected := &ConsulCAProviderConfig{ + RotationPeriod: 90 * 24 * time.Hour, + } + + // This fails occasionally if server doesn't have time to bootstrap CA so + // retry + retry.Run(t, func(r *retry.R) { + connect := c.Connect() + + conf, _, err := connect.CAGetConfig(nil) + r.Check(err) + if conf.Provider != "consul" { + r.Fatalf("expected default provider, got %q", conf.Provider) + } + parsed, err := ParseConsulCAConfig(conf.Config) + r.Check(err) + verify.Values(r, "", parsed, expected) + + // Change a config value and update + conf.Config["RotationPeriod"] = 120 * 24 * time.Hour + _, err = connect.CASetConfig(conf, nil) + r.Check(err) + + updated, _, err := connect.CAGetConfig(nil) + r.Check(err) + expected.RotationPeriod = 120 * 24 * time.Hour + parsed, err = ParseConsulCAConfig(updated.Config) + r.Check(err) + verify.Values(r, "", parsed, expected) + }) +} From 96f4ff961cf9cc513caaa59be602bd635a5a8ebe Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Wed, 23 May 2018 14:44:41 -0700 Subject: [PATCH 318/627] Add CA CLI commands for getting/setting config --- command/commands_oss.go | 6 
++ command/connect/ca/ca.go | 44 +++++++++ command/connect/ca/ca_test.go | 13 +++ command/connect/ca/get/connect_ca_get.go | 81 ++++++++++++++++ command/connect/ca/get/connect_ca_get_test.go | 35 +++++++ command/connect/ca/set/connect_ca_set.go | 96 +++++++++++++++++++ command/connect/ca/set/connect_ca_set_test.go | 51 ++++++++++ .../ca/set/test-fixtures/ca_config.json | 8 ++ 8 files changed, 334 insertions(+) create mode 100644 command/connect/ca/ca.go create mode 100644 command/connect/ca/ca_test.go create mode 100644 command/connect/ca/get/connect_ca_get.go create mode 100644 command/connect/ca/get/connect_ca_get_test.go create mode 100644 command/connect/ca/set/connect_ca_set.go create mode 100644 command/connect/ca/set/connect_ca_set_test.go create mode 100644 command/connect/ca/set/test-fixtures/ca_config.json diff --git a/command/commands_oss.go b/command/commands_oss.go index f166e82d7..8e95282aa 100644 --- a/command/commands_oss.go +++ b/command/commands_oss.go @@ -7,6 +7,9 @@ import ( catlistnodes "github.com/hashicorp/consul/command/catalog/list/nodes" catlistsvc "github.com/hashicorp/consul/command/catalog/list/services" "github.com/hashicorp/consul/command/connect" + "github.com/hashicorp/consul/command/connect/ca" + caget "github.com/hashicorp/consul/command/connect/ca/get" + caset "github.com/hashicorp/consul/command/connect/ca/set" "github.com/hashicorp/consul/command/connect/proxy" "github.com/hashicorp/consul/command/event" "github.com/hashicorp/consul/command/exec" @@ -67,6 +70,9 @@ func init() { Register("catalog nodes", func(ui cli.Ui) (cli.Command, error) { return catlistnodes.New(ui), nil }) Register("catalog services", func(ui cli.Ui) (cli.Command, error) { return catlistsvc.New(ui), nil }) Register("connect", func(ui cli.Ui) (cli.Command, error) { return connect.New(), nil }) + Register("connect ca", func(ui cli.Ui) (cli.Command, error) { return ca.New(), nil }) + Register("connect ca get-config", func(ui cli.Ui) (cli.Command, error) { 
return caget.New(ui), nil }) + Register("connect ca set-config", func(ui cli.Ui) (cli.Command, error) { return caset.New(ui), nil }) Register("connect proxy", func(ui cli.Ui) (cli.Command, error) { return proxy.New(ui, MakeShutdownCh()), nil }) Register("event", func(ui cli.Ui) (cli.Command, error) { return event.New(ui), nil }) Register("exec", func(ui cli.Ui) (cli.Command, error) { return exec.New(ui, MakeShutdownCh()), nil }) diff --git a/command/connect/ca/ca.go b/command/connect/ca/ca.go new file mode 100644 index 000000000..918f4a254 --- /dev/null +++ b/command/connect/ca/ca.go @@ -0,0 +1,44 @@ +package ca + +import ( + "github.com/hashicorp/consul/command/flags" + "github.com/mitchellh/cli" +) + +func New() *cmd { + return &cmd{} +} + +type cmd struct{} + +func (c *cmd) Run(args []string) int { + return cli.RunResultHelp +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return flags.Usage(help, nil) +} + +const synopsis = "Interact with the Consul Connect CA" +const help = ` +Usage: consul connect ca [options] [args] + + This command has subcommands for interacting with Consul Connect's CA. + + Here are some simple examples, and more detailed examples are available + in the subcommands or the documentation. + + Get the configuration: + + $ consul connect ca get-config + + Update the configuration: + + $ consul connect ca set-config -config-file ca.json + + For more examples, ask for subcommand help or view the documentation. 
+` diff --git a/command/connect/ca/ca_test.go b/command/connect/ca/ca_test.go new file mode 100644 index 000000000..31febd342 --- /dev/null +++ b/command/connect/ca/ca_test.go @@ -0,0 +1,13 @@ +package ca + +import ( + "strings" + "testing" +) + +func TestCatalogCommand_noTabs(t *testing.T) { + t.Parallel() + if strings.ContainsRune(New().Help(), '\t') { + t.Fatal("help has tabs") + } +} diff --git a/command/connect/ca/get/connect_ca_get.go b/command/connect/ca/get/connect_ca_get.go new file mode 100644 index 000000000..c255bbcb8 --- /dev/null +++ b/command/connect/ca/get/connect_ca_get.go @@ -0,0 +1,81 @@ +package get + +import ( + "encoding/json" + "flag" + "fmt" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/command/flags" + "github.com/mitchellh/cli" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.ServerFlags()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + if err == flag.ErrHelp { + return 0 + } + c.UI.Error(fmt.Sprintf("Failed to parse args: %v", err)) + return 1 + } + + // Set up a client. + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + // Fetch the current configuration. 
+ opts := &api.QueryOptions{ + AllowStale: c.http.Stale(), + } + config, _, err := client.Connect().CAGetConfig(opts) + if err != nil { + c.UI.Error(fmt.Sprintf("Error querying CA configuration: %s", err)) + return 1 + } + output, err := json.MarshalIndent(config, "", "\t") + if err != nil { + c.UI.Error(fmt.Sprintf("Error formatting CA configuration: %s", err)) + } + c.UI.Output(string(output)) + + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return c.help +} + +const synopsis = "Display the current Connect CA configuration" +const help = ` +Usage: consul connect ca get-config [options] + + Displays the current Connect CA configuration. +` diff --git a/command/connect/ca/get/connect_ca_get_test.go b/command/connect/ca/get/connect_ca_get_test.go new file mode 100644 index 000000000..660c6a29b --- /dev/null +++ b/command/connect/ca/get/connect_ca_get_test.go @@ -0,0 +1,35 @@ +package get + +import ( + "strings" + "testing" + + "github.com/hashicorp/consul/agent" + "github.com/mitchellh/cli" +) + +func TestConnectCAGetConfigCommand_noTabs(t *testing.T) { + t.Parallel() + if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestConnectCAGetConfigCommand(t *testing.T) { + t.Parallel() + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + + ui := cli.NewMockUi() + c := New(ui) + args := []string{"-http-addr=" + a.HTTPAddr()} + + code := c.Run(args) + if code != 0 { + t.Fatalf("bad: %d. 
%#v", code, ui.ErrorWriter.String()) + } + output := strings.TrimSpace(ui.OutputWriter.String()) + if !strings.Contains(output, `"Provider": "consul"`) { + t.Fatalf("bad: %s", output) + } +} diff --git a/command/connect/ca/set/connect_ca_set.go b/command/connect/ca/set/connect_ca_set.go new file mode 100644 index 000000000..e98d5923e --- /dev/null +++ b/command/connect/ca/set/connect_ca_set.go @@ -0,0 +1,96 @@ +package set + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + + "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/command/flags" + "github.com/mitchellh/cli" +) + +func New(ui cli.Ui) *cmd { + c := &cmd{UI: ui} + c.init() + return c +} + +type cmd struct { + UI cli.Ui + flags *flag.FlagSet + http *flags.HTTPFlags + help string + + // flags + configFile flags.StringValue +} + +func (c *cmd) init() { + c.flags = flag.NewFlagSet("", flag.ContinueOnError) + c.flags.Var(&c.configFile, "config-file", + "The path to the config file to use.") + + c.http = &flags.HTTPFlags{} + flags.Merge(c.flags, c.http.ClientFlags()) + flags.Merge(c.flags, c.http.ServerFlags()) + c.help = flags.Usage(help, c.flags) +} + +func (c *cmd) Run(args []string) int { + if err := c.flags.Parse(args); err != nil { + if err == flag.ErrHelp { + return 0 + } + c.UI.Error(fmt.Sprintf("Failed to parse args: %v", err)) + return 1 + } + + // Set up a client. + client, err := c.http.APIClient() + if err != nil { + c.UI.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + if c.configFile.String() == "" { + c.UI.Error("The -config-file flag is required") + return 1 + } + + bytes, err := ioutil.ReadFile(c.configFile.String()) + if err != nil { + c.UI.Error(fmt.Sprintf("Error reading config file: %s", err)) + return 1 + } + + var config api.CAConfig + if err := json.Unmarshal(bytes, &config); err != nil { + c.UI.Error(fmt.Sprintf("Error parsing config file: %s", err)) + return 1 + } + + // Set the new configuration. 
+ if _, err := client.Connect().CASetConfig(&config, nil); err != nil { + c.UI.Error(fmt.Sprintf("Error setting CA configuration: %s", err)) + return 1 + } + c.UI.Output("Configuration updated!") + return 0 +} + +func (c *cmd) Synopsis() string { + return synopsis +} + +func (c *cmd) Help() string { + return c.help +} + +const synopsis = "Modify the current Connect CA configuration" +const help = ` +Usage: consul connect ca set-config [options] + + Modifies the current Connect CA configuration. +` diff --git a/command/connect/ca/set/connect_ca_set_test.go b/command/connect/ca/set/connect_ca_set_test.go new file mode 100644 index 000000000..095d21d17 --- /dev/null +++ b/command/connect/ca/set/connect_ca_set_test.go @@ -0,0 +1,51 @@ +package set + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/hashicorp/consul/agent" + "github.com/hashicorp/consul/agent/connect/ca" + "github.com/hashicorp/consul/agent/structs" + "github.com/mitchellh/cli" +) + +func TestConnectCASetConfigCommand_noTabs(t *testing.T) { + t.Parallel() + if strings.ContainsRune(New(cli.NewMockUi()).Help(), '\t') { + t.Fatal("help has tabs") + } +} + +func TestConnectCASetConfigCommand(t *testing.T) { + t.Parallel() + require := require.New(t) + a := agent.NewTestAgent(t.Name(), ``) + defer a.Shutdown() + + ui := cli.NewMockUi() + c := New(ui) + args := []string{ + "-http-addr=" + a.HTTPAddr(), + "-config-file=test-fixtures/ca_config.json", + } + + code := c.Run(args) + if code != 0 { + t.Fatalf("bad: %d. 
%#v", code, ui.ErrorWriter.String()) + } + + req := structs.DCSpecificRequest{ + Datacenter: "dc1", + } + var reply structs.CAConfiguration + require.NoError(a.RPC("ConnectCA.ConfigurationGet", &req, &reply)) + require.Equal("consul", reply.Provider) + + parsed, err := ca.ParseConsulCAConfig(reply.Config) + require.NoError(err) + require.Equal(24*time.Hour, parsed.RotationPeriod) +} diff --git a/command/connect/ca/set/test-fixtures/ca_config.json b/command/connect/ca/set/test-fixtures/ca_config.json new file mode 100644 index 000000000..d29b25e8d --- /dev/null +++ b/command/connect/ca/set/test-fixtures/ca_config.json @@ -0,0 +1,8 @@ +{ + "Provider": "consul", + "Config": { + "PrivateKey": "", + "RootCert": "", + "RotationPeriod": "24h" + } +} \ No newline at end of file From 33d1d01374fbb1da8998d692d84f58f150eedc63 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 25 May 2018 10:27:04 -0700 Subject: [PATCH 319/627] Clarify CA commands' help text --- command/connect/ca/ca.go | 7 ++++--- command/connect/ca/get/connect_ca_get.go | 4 ++-- command/connect/ca/set/connect_ca_set.go | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/command/connect/ca/ca.go b/command/connect/ca/ca.go index 918f4a254..9e9df7ad6 100644 --- a/command/connect/ca/ca.go +++ b/command/connect/ca/ca.go @@ -23,11 +23,12 @@ func (c *cmd) Help() string { return flags.Usage(help, nil) } -const synopsis = "Interact with the Consul Connect CA" +const synopsis = "Interact with the Consul Connect Certificate Authority (CA)" const help = ` -Usage: consul connect ca [options] [args] +Usage: consul connect ca [options] [args] - This command has subcommands for interacting with Consul Connect's CA. + This command has subcommands for interacting with Consul Connect's + Certificate Authority (CA). Here are some simple examples, and more detailed examples are available in the subcommands or the documentation. 
diff --git a/command/connect/ca/get/connect_ca_get.go b/command/connect/ca/get/connect_ca_get.go index c255bbcb8..26bcb5824 100644 --- a/command/connect/ca/get/connect_ca_get.go +++ b/command/connect/ca/get/connect_ca_get.go @@ -73,9 +73,9 @@ func (c *cmd) Help() string { return c.help } -const synopsis = "Display the current Connect CA configuration" +const synopsis = "Display the current Connect Certificate Authority (CA) configuration" const help = ` Usage: consul connect ca get-config [options] - Displays the current Connect CA configuration. + Displays the current Connect Certificate Authority (CA) configuration. ` diff --git a/command/connect/ca/set/connect_ca_set.go b/command/connect/ca/set/connect_ca_set.go index e98d5923e..696b894c0 100644 --- a/command/connect/ca/set/connect_ca_set.go +++ b/command/connect/ca/set/connect_ca_set.go @@ -92,5 +92,5 @@ const synopsis = "Modify the current Connect CA configuration" const help = ` Usage: consul connect ca set-config [options] - Modifies the current Connect CA configuration. + Modifies the current Connect Certificate Authority (CA) configuration. ` From 54bc937feda62c17f09f452650e599821b594432 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Fri, 25 May 2018 10:28:18 -0700 Subject: [PATCH 320/627] Re-use uint8ToString --- agent/connect/ca/provider_consul_config.go | 18 +++++++++--------- agent/connect_ca_endpoint.go | 17 +++++++---------- 2 files changed, 16 insertions(+), 19 deletions(-) diff --git a/agent/connect/ca/provider_consul_config.go b/agent/connect/ca/provider_consul_config.go index e0112b3e2..9eae88610 100644 --- a/agent/connect/ca/provider_consul_config.go +++ b/agent/connect/ca/provider_consul_config.go @@ -37,14 +37,6 @@ func ParseConsulCAConfig(raw map[string]interface{}) (*structs.ConsulCAProviderC // ParseDurationFunc is a mapstructure hook for decoding a string or // []uint8 into a time.Duration value. 
func ParseDurationFunc() mapstructure.DecodeHookFunc { - uint8ToString := func(bs []uint8) string { - b := make([]byte, len(bs)) - for i, v := range bs { - b[i] = byte(v) - } - return string(b) - } - return func( f reflect.Type, t reflect.Type, @@ -63,7 +55,7 @@ func ParseDurationFunc() mapstructure.DecodeHookFunc { } return v, nil case f == reflect.SliceOf(reflect.TypeOf(uint8(0))): - s := uint8ToString(data.([]uint8)) + s := Uint8ToString(data.([]uint8)) if dur, err := time.ParseDuration(s); err != nil { return nil, err } else { @@ -75,3 +67,11 @@ func ParseDurationFunc() mapstructure.DecodeHookFunc { } } } + +func Uint8ToString(bs []uint8) string { + b := make([]byte, len(bs)) + for i, v := range bs { + b[i] = byte(v) + } + return string(b) +} diff --git a/agent/connect_ca_endpoint.go b/agent/connect_ca_endpoint.go index f7f83b13a..49851baac 100644 --- a/agent/connect_ca_endpoint.go +++ b/agent/connect_ca_endpoint.go @@ -4,6 +4,7 @@ import ( "fmt" "net/http" + "github.com/hashicorp/consul/agent/connect/ca" "github.com/hashicorp/consul/agent/structs" ) @@ -47,8 +48,12 @@ func (s *HTTPServer) ConnectCAConfigurationGet(resp http.ResponseWriter, req *ht var reply structs.CAConfiguration err := s.agent.RPC("ConnectCA.ConfigurationGet", &args, &reply) + if err != nil { + return nil, err + } + fixupConfig(&reply) - return reply, err + return reply, nil } // PUT /v1/connect/ca/configuration @@ -77,16 +82,8 @@ func fixupConfig(conf *structs.CAConfiguration) { if conf.Provider == structs.ConsulCAProvider { if v, ok := conf.Config["RotationPeriod"]; ok { if raw, ok := v.([]uint8); ok { - conf.Config["RotationPeriod"] = uint8ToString(raw) + conf.Config["RotationPeriod"] = ca.Uint8ToString(raw) } } } } - -func uint8ToString(bs []uint8) string { - b := make([]byte, len(bs)) - for i, v := range bs { - b[i] = byte(v) - } - return string(b) -} From 57aa7384166e9527052a123f6aeeab62e711b2c3 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Thu, 14 Jun 2018 21:25:59 -0400 Subject: 
[PATCH 321/627] Update the scripting Automated putting the source tree into release mode. --- .dockerignore | 3 + GNUmakefile | 67 ++++++-- build-support/docker/Consul-Dev.dockerfile | 13 ++ build-support/docker/Makefile | 28 --- build-support/functions/00-vars.sh | 15 +- build-support/functions/01-util.sh | 191 ++++++++++++++++++++- build-support/functions/02-build.sh | 6 +- build-support/functions/03-release.sh | 58 +++++-- build-support/scripts/build-docker.sh | 141 +++++++++++++++ build-support/scripts/build-local.sh | 102 +++++++++++ build-support/scripts/functions.sh | 0 build-support/scripts/publish.sh | 87 ++++++++++ build-support/scripts/release.sh | 131 ++++++++++++++ build-support/scripts/version.sh | 87 ++++++++++ 14 files changed, 862 insertions(+), 67 deletions(-) create mode 100644 .dockerignore create mode 100644 build-support/docker/Consul-Dev.dockerfile delete mode 100644 build-support/docker/Makefile create mode 100644 build-support/scripts/build-docker.sh create mode 100644 build-support/scripts/build-local.sh mode change 100644 => 100755 build-support/scripts/functions.sh create mode 100755 build-support/scripts/publish.sh create mode 100755 build-support/scripts/release.sh create mode 100755 build-support/scripts/version.sh diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..47f5d44b5 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,3 @@ +pkg/ +.git +bin/ diff --git a/GNUmakefile b/GNUmakefile index 4872efa34..a86899ea0 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -28,6 +28,20 @@ GIT_DESCRIBE?=$(shell git describe --tags --always) GIT_IMPORT=github.com/hashicorp/consul/version GOLDFLAGS=-X $(GIT_IMPORT).GitCommit=$(GIT_COMMIT)$(GIT_DIRTY) -X $(GIT_IMPORT).GitDescribe=$(GIT_DESCRIBE) +ifeq ($(FORCE_REBUILD),1) +NOCACHE=--no-cache +else +NOCACHE= +endif + +DOCKER_BUILD_QUIET?=1 +ifeq (${DOCKER_BUILD_QUIET},1) +QUIET=-q +else +QUIET= +endif + +CONSUL_DEV_IMAGE?=consul-dev GO_BUILD_TAG?=consul-build-go 
UI_BUILD_TAG?=consul-build-ui UI_LEGACY_BUILD_TAG?=consul-build-ui-legacy @@ -37,6 +51,18 @@ DIST_TAG?=1 DIST_BUILD?=1 DIST_SIGN?=1 +ifdef DIST_VERSION +DIST_VERSION_ARG=-v $(DIST_VERSION) +else +DIST_VERSION_ARG= +endif + +ifdef DIST_RELEASE_DATE +DIST_DATE_ARG=-d $(DIST_RELEASE_DATE) +else +DIST_DATE_ARG= +endif + export GO_BUILD_TAG export UI_BUILD_TAG export UI_LEGACY_BUILD_TAG @@ -57,7 +83,11 @@ bin: tools dev: changelogfmt vendorfmt dev-build dev-build: - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh consul-local -o '$(GOOS)' -a '$(GOARCH)' + @$(SHELL) $(CURDIR)/build-support/scripts/build-local.sh + +dev-docker: + @$(SHELL) + @docker build -t '$(CONSUL_DEV_IMAGE)' --build-arg 'GIT_COMMIT=$(GIT_COMMIT)' --build-arg 'GIT_DIRTY=$(GIT_DIRTY)' --build-arg 'GIT_DESCRIBE=$(GIT_DESCRIBE)' -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile $(CURDIR) vendorfmt: @echo "--> Formatting vendor/vendor.json" @@ -70,14 +100,14 @@ changelogfmt: # linux builds a linux package independent of the source platform linux: - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh consul-local -o linux -a amd64 + @$(SHELL) $(CURDIR)/build-support/scripts/build-local.sh -o linux -a amd64 # dist builds binaries for all platforms and packages them for distribution dist: - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh release -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' + @$(SHELL) $(CURDIR)/build-support/scripts/release.sh -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' '$(DIST_VERSION_ARG)' '$(DIST_DATE_ARG)' publish: - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh publish + @$(SHELL) $(CURDIR)/build-support/scripts/publish.sh -g -w cov: gocov test $(GOFILES) | gocov-html > /tmp/coverage.html @@ -143,37 +173,40 @@ tools: version: @echo -n "Version: " - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh version + @$(SHELL) $(CURDIR)/build-support/scripts/version.sh @echo -n "Version + release: " - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh version 
-R + @$(SHELL) $(CURDIR)/build-support/scripts/version.sh -r @echo -n "Version + git: " - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh version -G + @$(SHELL) $(CURDIR)/build-support/scripts/version.sh -g @echo -n "Version + release + git: " - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh version -R -G + @$(SHELL) $(CURDIR)/build-support/scripts/version.sh -r -g -docker-images: - @$(MAKE) -C build-support/docker images + +docker-images: go-build-image ui-build-image ui-legacy-build-image go-build-image: - @$(MAKE) -C build-support/docker go-build-image + @echo "Building Golang build container" + @docker build $(NOCACHE) $(QUIET) --build-arg 'GOTOOLS=$(GOTOOLS)' -t $(GO_BUILD_TAG) - < build-support/docker/Build-Go.dockerfile ui-build-image: - @$(MAKE) -C build-support/docker ui-build-image + @echo "Building UI build container" + @docker build $(NOCACHE) $(QUIET) -t $(UI_BUILD_TAG) - < build-support/docker/Build-UI.dockerfile ui-legacy-build-image: - @$(MAKE) -C build-support/docker ui-legacy-build-image + @echo "Building Legacy UI build container" + @docker build $(NOCACHE) $(QUIET) -t $(UI_LEGACY_BUILD_TAG) - < build-support/docker/Build-UI-Legacy.dockerfile static-assets-docker: go-build-image - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh assetfs + @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh static-assets consul-docker: go-build-image - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh consul + @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh consul ui-docker: ui-build-image - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh ui + @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui ui-legacy-docker: ui-legacy-build-image - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh ui-legacy + @$(SHELL) $(CURDIR)/build-support/scripts/build-docker.sh ui-legacy .PHONY: all ci bin dev dist cov test cover format vet ui static-assets tools vendorfmt diff --git a/build-support/docker/Consul-Dev.dockerfile 
b/build-support/docker/Consul-Dev.dockerfile new file mode 100644 index 000000000..2b581f44a --- /dev/null +++ b/build-support/docker/Consul-Dev.dockerfile @@ -0,0 +1,13 @@ +FROM golang:latest as builder +ARG GIT_COMMIT +ARG GIT_DIRTY +ARG GIT_DESCRIBE +WORKDIR /go/src/github.com/hashicorp/consul +ENV CONSUL_DEV=1 +ENV COLORIZE=0 +Add . /go/src/github.com/hashicorp/consul/ +RUN make + +FROM consul:latest + +COPY --from=builder /go/src/github.com/hashicorp/consul/bin/consul /bin diff --git a/build-support/docker/Makefile b/build-support/docker/Makefile deleted file mode 100644 index 01c468959..000000000 --- a/build-support/docker/Makefile +++ /dev/null @@ -1,28 +0,0 @@ -ifeq ($(FORCE_REBUILD),1) -NOCACHE=--no-cache -else -NOCACHE= -endif -GO_BUILD_TAG?=consul-build-go -UI_BUILD_TAG?=consul-build-ui -UI_LEGACY_BUILD_TAG?=consul-build-ui-legacy - -DOCKER_BUILD_QUIET?=1 -ifeq (${DOCKER_BUILD_QUIET},1) -QUIET=-q -else -QUIET= -endif - -images: go-build-image ui-build-image ui-legacy-build-image - -go-build-image: - docker build $(NOCACHE) $(QUIET) -t $(GO_BUILD_TAG) -f Build-Go.dockerfile . - -ui-build-image: - docker build $(NOCACHE) $(QUIET) -t $(UI_BUILD_TAG) -f Build-UI.dockerfile . - -ui-legacy-build-image: - docker build $(NOCACHE) $(QUIET) -t $(UI_LEGACY_BUILD_TAG) -f Build-UI-Legacy.dockerfile . 
- -.PHONY: images go-build-image ui-build-image ui-legacy-build-image diff --git a/build-support/functions/00-vars.sh b/build-support/functions/00-vars.sh index b039b658a..e65ee9c54 100644 --- a/build-support/functions/00-vars.sh +++ b/build-support/functions/00-vars.sh @@ -7,7 +7,10 @@ UI_LEGACY_BUILD_CONTAINER_DEFAULT="consul-build-ui-legacy" GO_BUILD_CONTAINER_DEFAULT="consul-build-go" # Whether to colorize shell output -COLORIZE=1 +if test -z "${COLORIZE}" +then + COLORIZE=1 +fi # determine GOPATH and the first GOPATH to use for intalling binaries @@ -36,4 +39,12 @@ fi if test -z "${PUBLISH_GIT_REPO}" then PUBLISH_GIT_REPO=hashicorp/consul.git -fi \ No newline at end of file +fi + +if test "$(uname)" == "Darwin" +then + SED_EXT="-E" +else + SED_EXT="" +fi + \ No newline at end of file diff --git a/build-support/functions/01-util.sh b/build-support/functions/01-util.sh index 9e15005d8..dfefc9dfd 100644 --- a/build-support/functions/01-util.sh +++ b/build-support/functions/01-util.sh @@ -313,7 +313,7 @@ function normalize_git_url { url="${1#https://}" url="${url#git@}" url="${url%.git}" - url="$(sed -e 's/\([^\/:]*\)[:\/]\(.*\)/\1:\2/' <<< "${url}")" + url="$(sed ${SED_EXT} -e 's/\([^\/:]*\)[:\/]\(.*\)/\1:\2/' <<< "${url}")" echo "$url" return 0 } @@ -388,4 +388,193 @@ function is_git_clean { fi popd > /dev/null return ${ret} +} + +function update_version { + # Arguments: + # $1 - Path to the version file + # $2 - Version string + # $3 - PreRelease version (if unset will become an empty string) + # + # Returns: + # 0 - success + # * - error + + if ! test -f "$1" + then + err "ERROR: '$1' is not a regular file. 
update_version must be called with the path to a go version file" + return 1 + fi + + if test -z "$2" + then + err "ERROR: The version specified was empty" + return 1 + fi + + local vfile="$1" + local version="$2" + local prerelease="$3" + + sed ${SED_EXT} -i "" -e "s/(Version[[:space:]]*=[[:space:]]*)\"[^\"]*\"/\1\"${version}\"/g" -e "s/(VersionPrerelease[[:space:]]*=[[:space:]]*)\"[^\"]*\"/\1\"${prerelease}\"/g" "${vfile}" + return $? +} + +function set_changelog_version { + # Arguments: + # $1 - Path to top level Consul source + # $2 - Version to put into the Changelog + # $3 - Release Date + # + # Returns: + # 0 - success + # * - error + + local changelog="${1}/CHANGELOG.md" + local version="$2" + local rel_date="$3" + + if ! test -f "${changelog}" + then + err "ERROR: File not found: ${changelog}" + return 1 + fi + + if test -z "${version}" + then + err "ERROR: Must specify a version to put into the changelog" + return 1 + fi + + if test -z "${rel_date}" + then + rel_date=$(date +"%B %d, %Y") + fi + + sed ${SED_EXT} -i "" -e "s/## UNRELEASED/## ${version} (${rel_date})/" "${changelog}" + return $? +} + +function unset_changelog_version { + # Arguments: + # $1 - Path to top level Consul source + # + # Returns: + # 0 - success + # * - error + + local changelog="${1}/CHANGELOG.md" + + if ! test -f "${changelog}" + then + err "ERROR: File not found: ${changelog}" + return 1 + fi + + sed ${SED_EXT} -i "" -e "1 s/^## [0-9]+\.[0-9]+\.[0-9]+ \([^)]*\)/## UNRELEASED/" "${changelog}" + return $? +} + +function add_unreleased_to_changelog { + # Arguments: + # $1 - Path to top level Consul source + # + # Returns: + # 0 - success + # * - error + + local changelog="${1}/CHANGELOG.md" + + if ! 
test -f "${changelog}" + then + err "ERROR: File not found: ${changelog}" + return 1 + fi + + # Check if we are already in unreleased mode + if head -n 1 "${changelog}" | grep -q -c UNRELEASED + then + return 0 + fi + + local tfile="$(mktemp) -t "CHANGELOG.md_")" + ( + echo -e "## UNRELEASED\n" > "${tfile}" && + cat "${changelog}" >> "${tfile}" && + cp "${tfile}" "${changelog}" + ) + local ret=$? + rm "${tfile}" + return $ret +} + +function set_release_mode { + # Arguments: + # $1 - Path to top level Consul source + # $2 - The version of the release + # $3 - The release date + # + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. set_release_mode must be called with the path to a git repo as the first argument" + return 1 + fi + + if test -z "$2" + then + err "ERROR: The version specified was empty" + return 1 + fi + + local sdir="$1" + local vers="$2" + local rel_date="$(date +"%B %d, %Y")" + + if test -n "$3" + then + rel_date="$3" + fi + + status_stage "==> Updating CHANGELOG.md with release info: ${vers} (${rel_date})" + set_changelog_version "${sdir}" "${vers}" "${rel_date}" || return 1 + + status_stage "==> Updating version/version.go" + if ! update_version "${sdir}/version/version.go" "${vers}" + then + unset_changelog_version "${sdir}" + return 1 + fi + + return 0 +} + +function set_dev_mode { + # Arguments: + # $1 - Path to top level Consul source + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. 
set_dev_mode must be called with the path to a git repo as the first argument'" + return 1 + fi + + local sdir="$1" + local vers="$(parse_version "${sdir}" false false)" + + status_stage "==> Setting VersionPreRelease back to 'dev'" + update_version "${sdir}/version/version.go" "${vers}" dev || return 1 + + status_stage "==> Adding new UNRELEASED label in CHANGELOG.md" + add_unreleased_to_changelog "${sdir}" || return 1 + + return 0 } \ No newline at end of file diff --git a/build-support/functions/02-build.sh b/build-support/functions/02-build.sh index c980b74df..6205db50e 100644 --- a/build-support/functions/02-build.sh +++ b/build-support/functions/02-build.sh @@ -16,9 +16,9 @@ function refresh_docker_images { local sdir="$1" local targets="$2" - test -n "${targets}" || targets="images" + test -n "${targets}" || targets="docker-images" - make -C "${sdir}/build-support/docker" $targets + make -C "${sdir}" ${targets} return $? } @@ -164,7 +164,7 @@ function build_assetfs { local ret=$? 
if test $ret -eq 0 then - status "Copying the sources from '${sdir}/(pkg|GNUmakefile)' to /go/src/github.com/hashicorp/consul/pkg" + status "Copying the sources from '${sdir}/(pkg/web_ui|GNUmakefile)' to /go/src/github.com/hashicorp/consul/pkg" ( tar -c pkg/web_ui GNUmakefile | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul && status "Running build in container" && docker start -i ${container_id} && diff --git a/build-support/functions/03-release.sh b/build-support/functions/03-release.sh index 4d762f858..8858867ac 100644 --- a/build-support/functions/03-release.sh +++ b/build-support/functions/03-release.sh @@ -258,18 +258,30 @@ function build_consul_release { build_consul "$1" "" "$2" } + + function build_release { - # Arguments: + # Arguments: (yeah there are lots) # $1 - Path to the top level Consul source # $2 - boolean whether to tag the release yet # $3 - boolean whether to build the binaries # $4 - boolean whether to generate the sha256 sums - # $5 - alternative gpg key to use for signing operations (optional) + # $5 - version to set within version.go and the changelog + # $6 - release date to set within the changelog + # $7 - alternative gpg key to use for signing operations (optional) # # Returns: # 0 - success # * - error + debug "Source Dir: $1" + debug "Tag Release: $2" + debug "Build Release: $3" + debug "Sign Release: $4" + debug "Version: $5" + debug "Release Date: $6" + debug "GPG Key: $7" + if ! test -d "$1" then err "ERROR: '$1' is not a directory. build_release must be called with the path to the top level source as the first argument'" @@ -286,26 +298,13 @@ function build_release { local do_tag="$2" local do_build="$3" local do_sha256="$4" - local gpg_key="$5" + local gpg_key="$7" if test -z "${gpg_key}" then gpg_key=${HASHICORP_GPG_KEY} fi - local vers="$(get_version ${sdir} true false)" - if test $? -ne 0 - then - err "Please specify a version (couldn't find one based on build tags)." - return 1 - fi - - if ! 
is_git_clean "${sdir}" true && ! is_set "${ALLOW_DIRTY_GIT}" - then - err "ERROR: Refusing to build because Git is dirty. Set ALLOW_DIRTY_GIT=1 in the environment to proceed anyways" - return 1 - fi - if ! is_set "${RELEASE_UNSIGNED}" then if ! have_gpg_key "${gpg_key}" @@ -315,6 +314,33 @@ function build_release { fi fi + if ! is_git_clean "${sdir}" true && ! is_set "${ALLOW_DIRTY_GIT}" + then + err "ERROR: Refusing to build because Git is dirty. Set ALLOW_DIRTY_GIT=1 in the environment to proceed anyways" + return 1 + fi + + local set_vers="$5" + local set_date="$6" + + if test -z "${set_vers}" + then + set_vers=$(get_version "${sdir}" false false) + fi + + if ! set_release_mode "${sdir}" "${set_vers}" "${set_date}" + then + err "ERROR: Failed to put source into release mode" + return 1 + fi + + local vers="$(get_version ${sdir} true false)" + if test $? -ne 0 + then + err "Please specify a version (couldn't find one based on build tags)." + return 1 + fi + # Make sure we arent in dev mode unset CONSUL_DEV diff --git a/build-support/scripts/build-docker.sh b/build-support/scripts/build-docker.sh new file mode 100644 index 000000000..7b6330844 --- /dev/null +++ b/build-support/scripts/build-docker.sh @@ -0,0 +1,141 @@ +#!/bin/bash +SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null +SCRIPT_DIR=$(pwd) +pushd ../.. > /dev/null +SOURCE_DIR=$(pwd) +popd > /dev/null +pushd ../functions > /dev/null +FN_DIR=$(pwd) +popd > /dev/null +popd > /dev/null + +source "${SCRIPT_DIR}/functions.sh" + +function usage { +cat <<-EOF +Usage: ${SCRIPT_NAME} (consul|ui|ui-legacy|static-assets) [] + +Options: + -i | --image IMAGE Alternative Docker image to run the build within. + + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" + + -r | --refresh Enables refreshing the docker image prior to building. + + -h | --help Print this help text. 
+EOF +} + +function err_usage { + err "$1" + err "" + err "$(usage)" +} + +function main { + declare image= + declare sdir="${SOURCE_DIR}" + declare -i refresh=0 + declare command="$1" + + # get rid of the subcommand + shift + + while test $# -gt 0 + do + case "$1" in + -h | --help ) + usage + return 0 + ;; + -i | --image ) + if test -z "$2" + then + err_usage "ERROR: option -i/--image requires an argument" + return 1 + fi + + image="$2" + shift 2 + ;; + -s | --source ) + if test -z "$2" + then + err_usage "ERROR: option -s/--source requires an argument" + return 1 + fi + + if ! test -d "$2" + then + err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source" + return 1 + fi + + sdir="$2" + shift 2 + ;; + -r | --refresh ) + refresh=1 + shift + ;; + * ) + err_usage "ERROR: Unknown argument '$1'" + return 1 + ;; + esac + done + + case "${command}" in + consul ) + if is_set "${refresh}" + then + status_stage "==> Refreshing Consul build container image" + export GO_BUILD_TAG="${image:-${GO_BUILD_CONTAINER_DEFAULT}}" + refresh_docker_images "${sdir}" go-build-image || return 1 + fi + status_stage "==> Building Consul" + build_consul "${sdir}" "" "${image}" || return 1 + ;; + static-assets ) + if is_set "${refresh}" + then + status_stage "==> Refreshing Consul build container image" + export GO_BUILD_TAG="${image:-${GO_BUILD_CONTAINER_DEFAULT}}" + refresh_docker_images "${sdir}" go-build-image || return 1 + fi + status_stage "==> Building Static Assets" + build_assetfs "${sdir}" "${image}" || return 1 + ;; + ui ) + if is_set "${refresh}" + then + status_stage "==> Refreshing UI build container image" + export UI_BUILD_TAG="${image:-${UI_BUILD_CONTAINER_DEFAULT}}" + refresh_docker_images "${sdir}" ui-build-image || return 1 + fi + status_stage "==> Building UI" + build_ui "${sdir}" "${image}" || return 1 + ;; + ui-legacy ) + if is_set "${refresh}" + then + status_stage "==> Refreshing Legacy UI build container image" + export 
UI_LEGACY_BUILD_TAG="${image:-${UI_LEGACY_BUILD_CONTAINER_DEFAULT}}"
+            refresh_docker_images "${sdir}" ui-legacy-build-image || return 1
+         fi
+         status_stage "==> Building Legacy UI"
+         build_ui_legacy "${sdir}" "${image}" || return 1
+         ;;
+      * )
+         err_usage "ERROR: Unknown command: '${command}'"
+         return 1
+         ;;
+   esac
+
+   return 0
+}
+
+main $@
+exit $?
\ No newline at end of file
diff --git a/build-support/scripts/build-local.sh b/build-support/scripts/build-local.sh
new file mode 100644
index 000000000..ba65e1b53
--- /dev/null
+++ b/build-support/scripts/build-local.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})"
+pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null
+SCRIPT_DIR=$(pwd)
+pushd ../.. > /dev/null
+SOURCE_DIR=$(pwd)
+popd > /dev/null
+pushd ../functions > /dev/null
+FN_DIR=$(pwd)
+popd > /dev/null
+popd > /dev/null
+
+source "${SCRIPT_DIR}/functions.sh"
+
+function usage {
+cat <<-EOF
+Usage: ${SCRIPT_NAME} (consul|ui|ui-legacy|static-assets) []
+
+Options:
+
+   -s | --source DIR    Path to source to build.
+                        Defaults to "${SOURCE_DIR}"
+
+   -o | --os OSES       Space separated string of OS
+                        platforms to build.
+
+   -a | --arch ARCH     Space separated string of
+                        architectures to build.
+
+   -h | --help          Print this help text.
+EOF
+}
+
+function err_usage {
+   err "$1"
+   err ""
+   err "$(usage)"
+}
+
+function main {
+   declare sdir="${SOURCE_DIR}"
+   declare build_os=""
+   declare build_arch=""
+
+
+   while test $# -gt 0
+   do
+      case "$1" in
+         -h | --help )
+            usage
+            return 0
+            ;;
+         -s | --source )
+            if test -z "$2"
+            then
+               err_usage "ERROR: option -s/--source requires an argument"
+               return 1
+            fi
+
+            if ! 
test -d "$2" + then + err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source" + return 1 + fi + + sdir="$2" + shift 2 + ;; + -o | --os ) + if test -z "$2" + then + err_usage "ERROR: option -o/--os requires an argument" + return 1 + fi + + build_os="$2" + shift 2 + ;; + -a | --arch ) + if test -z "$2" + then + err_usage "ERROR: option -a/--arch requires an argument" + return 1 + fi + + build_arch="$2" + shift 2 + ;; + * ) + err_usage "ERROR: Unknown argument: '$1'" + return 1 + ;; + esac + done + + build_consul_local "${sdir}" "${build_os}" "${build_arch}" || return 1 + + return 0 +} + +main $@ +exit $? \ No newline at end of file diff --git a/build-support/scripts/functions.sh b/build-support/scripts/functions.sh old mode 100644 new mode 100755 diff --git a/build-support/scripts/publish.sh b/build-support/scripts/publish.sh new file mode 100755 index 000000000..e6f0a481b --- /dev/null +++ b/build-support/scripts/publish.sh @@ -0,0 +1,87 @@ +#!/bin/bash +SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null +SCRIPT_DIR=$(pwd) +pushd ../.. > /dev/null +SOURCE_DIR=$(pwd) +popd > /dev/null +pushd ../functions > /dev/null +FN_DIR=$(pwd) +popd > /dev/null +popd > /dev/null + +source "${SCRIPT_DIR}/functions.sh" + +function usage { +cat <<-EOF +Usage: ${SCRIPT_NAME} [] + +Options: + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" + + -w | --website Publish to releases.hashicorp.com + + -g | --git Push release commit and tag to Git + + -h | --help Print this help text. +EOF +} + +function err_usage { + err "$1" + err "" + err "$(usage)" +} + +function main { + declare sdir="${SOURCE_DIR}" + declare -i website=0 + declare -i git_push=0 + + while test $# -gt 0 + do + case "$1" in + -h | --help ) + usage + return 0 + ;; + -s | --source ) + if test -z "$2" + then + err_usage "ERROR: option -s/--source requires an argument" + return 1 + fi + + if ! 
test -d "$2" + then + err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source" + return 1 + fi + + sdir="$2" + shift 2 + ;; + -w | --website ) + website=1 + shift + ;; + -g | --git ) + git_push=1 + shift + ;; + *) + err_usage "ERROR: Unknown argument: '$1'" + return 1 + ;; + esac + done + + publish_release "${sdir}" "${git_push}" "${website}" || return 1 + + return 0 +} + +main $@ +exit $? + \ No newline at end of file diff --git a/build-support/scripts/release.sh b/build-support/scripts/release.sh new file mode 100755 index 000000000..a7fa95900 --- /dev/null +++ b/build-support/scripts/release.sh @@ -0,0 +1,131 @@ +#!/bin/bash +SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null +SCRIPT_DIR=$(pwd) +pushd ../.. > /dev/null +SOURCE_DIR=$(pwd) +popd > /dev/null +pushd ../functions > /dev/null +FN_DIR=$(pwd) +popd > /dev/null +popd > /dev/null + +source "${SCRIPT_DIR}/functions.sh" + +function usage { +cat <<-EOF +Usage: ${SCRIPT_NAME} [] + +Options: + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" + + -t | --tag BOOL Whether to add a release commit and tag the build + Defaults to 1. + + -b | --build BOOL Whether to perform the build of the ui's, assetfs and + binaries. Defaults to 1. + + -S | --sign BOOL Whether to sign the generated SHA256SUMS file. + Defaults to 1. + + -g | --gpg-key KEY Alternative GPG key to use for signing operations. + Defaults to ${HASHICORP_GPG_KEY} + + -v | --version VERSION The version of Consul to be built. If not specified + the version will be parsed from the source. + + -d | --date DATE The release date. Defaults to today. + + -h | --help Print this help text. 
+EOF +} + +function err_usage { + err "$1" + err "" + err "$(usage)" +} + +function ensure_arg { + if test -z "$2" + then + err_usage "ERROR: option $1 requires an argument" + return 1 + fi + + return 0 +} + +function main { + declare sdir="${SOURCE_DIR}" + declare -i do_tag=1 + declare -i do_build=1 + declare -i do_sign=1 + declare gpg_key="${HASHICORP_GPG_KEY}" + declare version="" + declare release_date=$(date +"%B %d, %Y") + + while test $# -gt 0 + do + case "$1" in + -h | --help ) + usage + return 0 + ;; + -s | --source ) + ensure_arg "-s/--source" "$2" || return 1 + + if ! test -d "$2" + then + err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source" + return 1 + fi + + sdir="$2" + shift 2 + ;; + -t | --tag ) + ensure_arg "-t/--tag" "$2" || return 1 + do_tag="$2" + shift 2 + ;; + -b | --build ) + ensure_arg "-b/--build" "$2" || return 1 + do_build="$2" + shift 2 + ;; + -S | --sign ) + ensure_arg "-s/--sign" "$2" || return 1 + do_sign="$2" + shift 2 + ;; + -g | --gpg-key ) + ensure_arg "-g/--gpg-key" "$2" || return 1 + gpg_key="$2" + shift 2 + ;; + -v | --version ) + ensure_arg "-v/--version" "$2" || return 1 + version="$2" + shift 2 + ;; + -d | --date) + ensure_arg "-d/--date" "$2" || return 1 + release_date="$2" + shift 2 + ;; + *) + err_usage "ERROR: Unknown argument: '$1'" + return 1 + ;; + esac + done + + build_release "${sdir}" "${do_tag}" "${do_build}" "${do_sign}" "${version}" "${release_date}" "${gpg_key}" + return $? +} + +main $@ +exit $? + \ No newline at end of file diff --git a/build-support/scripts/version.sh b/build-support/scripts/version.sh new file mode 100755 index 000000000..c0b4c51ab --- /dev/null +++ b/build-support/scripts/version.sh @@ -0,0 +1,87 @@ +#!/bin/bash +SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null +SCRIPT_DIR=$(pwd) +pushd ../.. 
> /dev/null +SOURCE_DIR=$(pwd) +popd > /dev/null +pushd ../functions > /dev/null +FN_DIR=$(pwd) +popd > /dev/null +popd > /dev/null + +source "${SCRIPT_DIR}/functions.sh" + +function usage { +cat <<-EOF +Usage: ${SCRIPT_NAME} [] + +Options: + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" + + -r | --release Include the release in the version + + -g | --git Take git variables into account + + -h | --help Print this help text. +EOF +} + +function err_usage { + err "$1" + err "" + err "$(usage)" +} + +function main { + declare sdir="${SOURCE_DIR}" + declare -i release=0 + declare -i git_info=0 + + while test $# -gt 0 + do + case "$1" in + -h | --help ) + usage + return 0 + ;; + -s | --source ) + if test -z "$2" + then + err_usage "ERROR: option -s/--source requires an argument" + return 1 + fi + + if ! test -d "$2" + then + err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source" + return 1 + fi + + sdir="$2" + shift 2 + ;; + -r | --release ) + release=1 + shift + ;; + -g | --git ) + git_info=1 + shift + ;; + *) + err_usage "ERROR: Unknown argument: '$1'" + return 1 + ;; + esac + done + + parse_version "${sdir}" "${release}" "${git_info}" || return 1 + + return 0 +} + +main $@ +exit $? 
+ \ No newline at end of file From 1aac7c7081bfe71016e49740da2b2bde11a5266d Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Fri, 15 Jun 2018 08:00:12 -0400 Subject: [PATCH 322/627] Add capability to put tree back into dev mode via make dev-tree --- GNUmakefile | 18 +- build-support/scripts/build-local.sh | 2 +- build-support/scripts/build.sh | 403 --------------------------- build-support/scripts/dev.sh | 76 +++++ 4 files changed, 86 insertions(+), 413 deletions(-) delete mode 100755 build-support/scripts/build.sh create mode 100644 build-support/scripts/dev.sh diff --git a/GNUmakefile b/GNUmakefile index a86899ea0..0b9502907 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -76,8 +76,7 @@ export GOLDFLAGS # all builds binaries for all targets all: bin -bin: tools - @$(SHELL) $(CURDIR)/build-support/scripts/build.sh consul-local +bin: tools dev-build # dev creates binaries for testing locally - these are put into ./bin and $GOPATH dev: changelogfmt vendorfmt dev-build @@ -86,7 +85,6 @@ dev-build: @$(SHELL) $(CURDIR)/build-support/scripts/build-local.sh dev-docker: - @$(SHELL) @docker build -t '$(CONSUL_DEV_IMAGE)' --build-arg 'GIT_COMMIT=$(GIT_COMMIT)' --build-arg 'GIT_DIRTY=$(GIT_DIRTY)' --build-arg 'GIT_DESCRIBE=$(GIT_DESCRIBE)' -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile $(CURDIR) vendorfmt: @@ -109,6 +107,9 @@ dist: publish: @$(SHELL) $(CURDIR)/build-support/scripts/publish.sh -g -w +dev-tree: + @$(SHELL) $(CURDIR)/build-support/scripts/dev.sh + cov: gocov test $(GOFILES) | gocov-html > /tmp/coverage.html open /tmp/coverage.html @@ -156,11 +157,6 @@ vet: exit 1; \ fi -# Build the static web ui and build static assets inside a Docker container, the -# same way a release build works. This implicitly does a "make static-assets" at -# the end. -ui: ui-legacy-docker ui-docker static-assets - # If you've run "make ui" manually then this will get called for you. 
This is # also run as part of the release build script when it verifies that there are no # changes to the UI assets that aren't checked in. @@ -168,6 +164,10 @@ static-assets: @go-bindata-assetfs -pkg agent -prefix pkg -o $(ASSETFS_PATH) ./pkg/web_ui/... $(MAKE) format + +# Build the static web ui and build static assets inside a Docker container +ui: ui-legacy-docker ui-docker static-assets-docker + tools: go get -u -v $(GOTOOLS) @@ -179,7 +179,7 @@ version: @echo -n "Version + git: " @$(SHELL) $(CURDIR)/build-support/scripts/version.sh -g @echo -n "Version + release + git: " - @$(SHELL) $(CURDIR)/build-support/scripts/version.sh -r -g + @$(SHELL) $(CURDIR)/build-support/scripts/version.sh -r -g docker-images: go-build-image ui-build-image ui-legacy-build-image diff --git a/build-support/scripts/build-local.sh b/build-support/scripts/build-local.sh index ba65e1b53..0ecf1c8a4 100644 --- a/build-support/scripts/build-local.sh +++ b/build-support/scripts/build-local.sh @@ -14,7 +14,7 @@ source "${SCRIPT_DIR}/functions.sh" function usage { cat <<-EOF -Usage: ${SCRIPT_NAME} (consul|ui|ui-legacy|static-assets) [] +Usage: ${SCRIPT_NAME} [] Options: diff --git a/build-support/scripts/build.sh b/build-support/scripts/build.sh deleted file mode 100755 index ab2e938c3..000000000 --- a/build-support/scripts/build.sh +++ /dev/null @@ -1,403 +0,0 @@ -#!/bin/bash -SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" -pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null -SCRIPT_DIR=$(pwd) -pushd ../.. 
> /dev/null -SOURCE_DIR=$(pwd) -popd > /dev/null -pushd ../functions > /dev/null -FN_DIR=$(pwd) -popd > /dev/null -popd > /dev/null - -source "${SCRIPT_DIR}/functions.sh" - -function can_parse_option { - local allowed="$1" - local command="$2" - local options="$3" - - if test ${allowed} -ne 1 - then - err "ERROR: subcommand ${command} does not support the ${options} options" - return 1 - fi - return 0 -} - -function check_duplicate { - local is_dup="$1" - local command="$2" - local options="$3" - - if test ${is_dup} -ne 0 - then - err "ERROR: options ${options} may not be given more than once to the subcommand ${command}" - return 1 - fi - return 0 -} - -function option_check { - can_parse_option "$1" "$3" "$4" && check_duplicate "$2" "$3" "$4" - return $? -} - -function get_option_value { - # Arguments: - # $1 - bool whether the option should be allowed - # $2 - bool whether the option has been specified already - # $3 - the option value - # $4 - the command being executed - # $5 - the option names to use for logging - # - # Returns: - # 0 - success - # * - failure - - option_check "$1" "$2" "$4" "$5" || return 1 - - if test -z "$3" - then - err "ERROR: options ${5} for subcommand ${4} require an argument but none was provided" - return 1 - fi - - echo "$3" - return 0 -} - -function usage { -cat <<-EOF -Usage: ${SCRIPT_NAME} [] - -Subcommands: - assetfs: Builds the bindata_assetfs.go file from previously build UI artifacts - - Options: - -i | --image IMAGE Alternative Docker image to run the build within. - Defaults to ${GO_BUILD_CONTAINER_DEFAULT} - - -s | --source DIR Path to source to build. - Defaults to "${SOURCE_DIR}" - - -r | --refresh Enables refreshing the docker image prior to building. - - consul: Builds the main Consul binary. This assumes the assetfs is up to date: - - Options: - -i | --image IMAGE Alternative Docker image to run the build within. - Defaults to ${GO_BUILD_CONTAINER_DEFAULT} - - -s | --source DIR Path to source to build. 
- Defaults to "${SOURCE_DIR}" - - -r | --refresh Enables refreshing the docker image prior to building. - - consul-local: Builds the main Consul binary on the local system (no docker) - - -s | --source DIR Path to source to build. - Defaults to "${SOURCE_DIR}" - - -o | --build-os OS Space separated string of OSes to build - - -a | --build-arch ARCH Space separated string of architectures to build - - publish: Publishes a release build. - - -s | --source DIR Path to the source to build. - Defaults to "${SOURCE_DIR}" - - release: Performs a release build. - - Options: - -s | --source DIR Path to source to build. - Defaults to "${SOURCE_DIR}" - - -t | --tag BOOL Whether to add a release commit and tag the build - Defaults to 1. - - -b | --build BOOL Whether to perform the build of the ui's, assetfs and - binaries. Defaults to 1. - - -S | --sign BOOL Whether to sign the generated SHA256SUMS file. - Defaults to 1. - - -g | --gpg-key KEY Alternative GPG key to use for signing operations. - Defaults to ${HASHICORP_GPG_KEY} - - ui: Builds the latest UI. - - Options: - -i | --image IMAGE Alternative Docker image to run the build within. - Defaults to ${UI_BUILD_CONTAINER_DEFAULT} - - -s | --source DIR Path to source to build. - Defaults to "${SOURCE_DIR}" - - -r | --refresh Enables refreshing the docker image prior to building. - - ui-legacy: Builds the legacy UI - - Options: - -i | --image IMAGE Alternative Docker image to run the build within. - Defaults to ${UI_LEGACY_BUILD_CONTAINER_DEFAULT} - - -s | --source DIR Path to source to build. - Defaults to "${SOURCE_DIR}" - - -r | --refresh Enables refreshing the docker image prior to building. - - version: Prints out the version parsed from source. - - Options: - -s | --source DIR Path to source to build. 
- Defaults to "${SOURCE_DIR}" -EOF -} - -function main { - declare build_fn - declare sdir - declare image - declare -i refresh_docker=0 - declare -i rel_tag - declare -i rel_build - declare -i rel_sign - declare rel_gpg_key="" - declare build_os - declare build_arch - declare -i vers_release - declare -i vers_git - - declare -i use_refresh=1 - declare -i default_refresh=0 - declare -i use_sdir=1 - declare default_sdir="${SOURCE_DIR}" - declare -i use_image=0 - declare default_image="" - declare -i use_rel=0 - declare -i default_rel_tag=1 - declare -i default_rel_build=1 - declare -i default_rel_sign=1 - declare default_rel_gpg_key="${HASHICORP_GPG_KEY}" - declare -i use_xc=0 - declare default_build_os="" - declare default_build_arch="" - declare -i use_version_args - declare -i default_vers_rel=0 - declare -i default_vers_git=0 - - declare command="$1" - shift - - case "${command}" in - assetfs ) - use_image=1 - default_image="${GO_BUILD_CONTAINER_DEFAULT}" - ;; - consul ) - use_image=1 - default_image="${GO_BUILD_CONTAINER_DEFAULT}" - ;; - consul-local ) - use_xc=1 - ;; - publish ) - use_refresh=0 - ;; - release ) - use_rel=1 - use_refresh=0 - ;; - ui ) - use_image=1 - default_image="${UI_BUILD_CONTAINER_DEFAULT}" - ;; - ui-legacy ) - use_image=1 - default_image="${UI_LEGACY_BUILD_CONTAINER_DEFAULT}" - ;; - version ) - use_refresh=0 - use_version_args=1 - ;; - -h | --help) - usage - return 0 - ;; - *) - err "Unkown subcommand: '$1' - possible values are 'consul', 'ui', 'ui-legacy', 'assetfs', version' and 'release'" - return 1 - ;; - esac - - declare -i have_image_arg=0 - declare -i have_sdir_arg=0 - declare -i have_rel_tag_arg=0 - declare -i have_rel_build_arg=0 - declare -i have_rel_sign_arg=0 - declare -i have_rel_gpg_key_arg=0 - declare -i have_refresh_arg=0 - declare -i have_build_os_arg=0 - declare -i have_build_arch_arg=0 - declare -i have_vers_rel_arg=0 - declare -i have_vers_git_arg=0 - - while test $# -gt 0 - do - case $1 in - -h | --help ) - usage - 
return 0 - ;; - -o | --build-os ) - build_os=$(get_option_value "${use_xc}" "${have_build_os_arg}" "$2" "${command}" "-o/--xc-os") || return 1 - have_build_os_arg=1 - shift 2 - ;; - -a | --build-arch) - build_arch=$(get_option_value "${use_xc}" "${have_build_arch_arg}" "$2" "${command}" "-o/--xc-arch") || return 1 - have_build_arch_arg=1 - shift 2 - ;; - -R | --release ) - option_check "${use_version_args}" "${have_vers_rel_arg}" "${command}" "-R/--release" || return 1 - have_vers_rel_arg=1 - vers_release=1 - shift - ;; - -G | --git ) - option_check "${use_version_args}" "${have_vers_git_arg}" "${command}" "-G/--git" || return 1 - have_vers_git_arg=1 - vers_git=1 - shift - ;; - -r | --refresh) - option_check "${use_refresh}" "${have_refresh_arg}" "${command}" "-r/--refresh" || return 1 - have_refresh_arg=1 - refresh_docker=1 - shift - ;; - -i | --image ) - image=$(get_option_value "${use_image}" "${have_image_arg}" "$2" "${command}" "-i/--image") || return 1 - have_image_arg=1 - shift 2 - ;; - -s | --source ) - sdir=$(get_option_value "${use_sdir}" "${have_sdir_arg}" "$2" "${command}" "-s/--source") || return 1 - if ! 
test -d "${sdir}" - then - err "ERROR: -s/--source is not a path to a top level directory" - return 1 - fi - have_sdir_arg=1 - shift 2 - ;; - -t | --tag ) - rel_tag=$(get_option_value "${use_rel}" "${have_rel_tag_arg}" "$2" "${command}" "-t/--tag") || return 1 - have_rel_tag_arg=1 - shift 2 - ;; - -b | --build ) - rel_build=$(get_option_value "${use_rel}" "${have_rel_build_arg}" "$2" "${command}" "-b/--build") || return 1 - have_rel_build_arg=1 - shift 2 - ;; - -S | --sign ) - rel_sign=$(get_option_value "${use_rel}" "${have_rel_sign_arg}" "$2" "${command}" "-S/--sign") || return 1 - have_rel_sign_arg=1 - shift 2 - ;; - -g | --gpg-key ) - rel_gpg_key=$(get_option_value "${use_rel}" "${have_rel_gpg_key_arg}" "$2" "${command}" "-g/--gpg-key") || return 1 - shift 2 - ;; - *) - err "ERROR: Unknown option '$1' for subcommand ${command}" - return 1 - ;; - esac - done - - test $have_image_arg -ne 1 && image="${default_image}" - test $have_sdir_arg -ne 1 && sdir="${default_sdir}" - test $have_rel_tag_arg -ne 1 && rel_tag="${default_rel_tag}" - test $have_rel_build_arg -ne 1 && rel_build="${default_rel_build}" - test $have_rel_sign_arg -ne 1 && rel_sign="${default_rel_sign}" - test $have_rel_gpg_key_arg -ne 1 && rel_gpg_key="${default_rel_gpg_key}" - test $have_refresh_arg -ne 1 && refresh_docker="${default_refresh}" - test $have_build_os_arg -ne 1 && build_os="${default_build_os}" - test $have_build_arch_arg -ne 1 && build_arch="${default_build_os}" - test $have_vers_rel_arg -ne 1 && vers_release="${default_vers_rel}" - test $have_vers_git_arg -ne 1 && vers_git="${default_vers_git}" - - case "${command}" in - assetfs ) - if is_set "${refresh_docker}" - then - status_stage "==> Refreshing Consul build container image" - export GO_BUILD_TAG="${image}" - refresh_docker_images ${sdir} go-build-image || return 1 - fi - status_stage "==> Build Static Assets" - build_assetfs "${sdir}" "${image}" || return 1 - ;; - consul ) - if is_set "${refresh_docker}" - then - status_stage 
"==> Refreshing Consul build container image" - export GO_BUILD_TAG=${image} - refresh_docker_images ${sdir} go-build-image || return 1 - fi - status_stage "==> Building Consul" - build_consul "${sdir}" "" "${image}" || return 1 - ;; - consul-local ) - build_consul_local "${sdir}" "${build_os}" "${build_arch}" "" || return 1 - ;; - publish ) - publish_release "${sdir}" true true || return 1 - ;; - release ) - if is_set "${refresh_docker}" - then - refresh_docker_images ${sdir} || return 1 - fi - build_release "${sdir}" "${rel_tag}" "${rel_build}" "${rel_sign}" "${rel_gpg_key}" || return 1 - ;; - ui ) - - if is_set "${refresh_docker}" - then - status_stage "==> Refreshing UI build container image" - export UI_BUILD_TAG=${image} - refresh_docker_images ${sdir} ui-build-image || return 1 - fi - status_stage "==> Building UI" - build_ui "${sdir}" "${image}" || return 1 - ;; - ui-legacy ) - if is_set "${refresh_docker}" - then - status_stage "==> Refreshing Legacy UI build container image" - export UI_LEGACY_BUILD_TAG=${image} - refresh_docker_images ${sdir} ui-legacy-build-image || return 1 - fi - status_stage "==> Building Legacy UI" - build_ui_legacy "${sdir}" "${image}" || return 1 - ;; - version ) - parse_version "${sdir}" "${vers_release}" "${vers_git}" || return 1 - ;; - *) - err "Unkown subcommand: '$1' - possible values are 'assetfs', consul', 'consul-local' 'publish', 'release', 'ui', 'ui-legacy' and 'version'" - return 1 - ;; - esac - - return 0 -} - -main $@ -exit $? \ No newline at end of file diff --git a/build-support/scripts/dev.sh b/build-support/scripts/dev.sh new file mode 100644 index 000000000..6ea057ccd --- /dev/null +++ b/build-support/scripts/dev.sh @@ -0,0 +1,76 @@ +#!/bin/bash +SCRIPT_NAME="$(basename ${BASH_SOURCE[0]})" +pushd $(dirname ${BASH_SOURCE[0]}) > /dev/null +SCRIPT_DIR=$(pwd) +pushd ../.. 
> /dev/null +SOURCE_DIR=$(pwd) +popd > /dev/null +pushd ../functions > /dev/null +FN_DIR=$(pwd) +popd > /dev/null +popd > /dev/null + +source "${SCRIPT_DIR}/functions.sh" + +function usage { +cat <<-EOF +Usage: ${SCRIPT_NAME} [] + +Options: + + -s | --source DIR Path to source to build. + Defaults to "${SOURCE_DIR}" + + -h | --help Print this help text. +EOF +} + +function err_usage { + err "$1" + err "" + err "$(usage)" +} + +function main { + declare sdir="${SOURCE_DIR}" + declare build_os="" + declare build_arch="" + + + while test $# -gt 0 + do + case "$1" in + -h | --help ) + usage + return 0 + ;; + -s | --source ) + if test -z "$2" + then + err_usage "ERROR: option -s/--source requires an argument" + return 1 + fi + + if ! test -d "$2" + then + err_usage "ERROR: '$2' is not a directory and not suitable for the value of -s/--source" + return 1 + fi + + sdir="$2" + shift 2 + ;; + * ) + err_usage "ERROR: Unknown argument: '$1'" + return 1 + ;; + esac + done + + set_dev_mode "${sdir}" || return 1 + + return 0 +} + +main $@ +exit $? \ No newline at end of file From 3d1f4e59a83b4c2950d121bc02d0a034003c90a0 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Fri, 15 Jun 2018 13:56:45 +0100 Subject: [PATCH 323/627] Find a way to test pre-ember load 1. Also add index.html things to test/index.html 2. Use content-for to hedge against keeping content in sync (requires an addon) 3. Test passes but only when run on its own, as we need to rely on content in the QUnit runner, theoretically it is not running our test in isolation. 
Skipping the test for the moment so we don't have a filaing test when all run together --- ui-v2/app/index.html | 3 +-- ui-v2/lib/.eslintrc.js | 6 ++++++ ui-v2/lib/startup/index.js | 17 +++++++++++++++++ ui-v2/lib/startup/package.json | 6 ++++++ ui-v2/package.json | 5 +++++ ui-v2/tests/acceptance/startup.feature | 18 ++++++++++++++++++ ui-v2/tests/acceptance/steps/startup-steps.js | 10 ++++++++++ ui-v2/tests/index.html | 2 +- ui-v2/tests/steps.js | 15 ++++++++++++++- 9 files changed, 78 insertions(+), 4 deletions(-) create mode 100644 ui-v2/lib/.eslintrc.js create mode 100644 ui-v2/lib/startup/index.js create mode 100644 ui-v2/lib/startup/package.json create mode 100644 ui-v2/tests/acceptance/startup.feature create mode 100644 ui-v2/tests/acceptance/steps/startup-steps.js diff --git a/ui-v2/app/index.html b/ui-v2/app/index.html index 5029c9906..1cc468375 100644 --- a/ui-v2/app/index.html +++ b/ui-v2/app/index.html @@ -1,5 +1,5 @@ - + @@ -24,7 +24,6 @@ {{content-for "body"}} - diff --git a/ui-v2/lib/.eslintrc.js b/ui-v2/lib/.eslintrc.js new file mode 100644 index 000000000..548ea343c --- /dev/null +++ b/ui-v2/lib/.eslintrc.js @@ -0,0 +1,6 @@ +module.exports = { + env: { + node: true, + browser: false, + }, +}; diff --git a/ui-v2/lib/startup/index.js b/ui-v2/lib/startup/index.js new file mode 100644 index 000000000..64280e1a5 --- /dev/null +++ b/ui-v2/lib/startup/index.js @@ -0,0 +1,17 @@ +/* eslint-env node */ +'use strict'; + +module.exports = { + name: 'startup', + isDevelopingAddon: function() { + return true; + }, + contentFor: function(type, config) { + switch (type) { + case 'body': + return ``; + case 'root-class': + return 'ember-loading'; + } + }, +}; diff --git a/ui-v2/lib/startup/package.json b/ui-v2/lib/startup/package.json new file mode 100644 index 000000000..b6b50fef9 --- /dev/null +++ b/ui-v2/lib/startup/package.json @@ -0,0 +1,6 @@ +{ + "name": "startup", + "keywords": [ + "ember-addon" + ] +} diff --git a/ui-v2/package.json b/ui-v2/package.json 
index 13642f549..210c0c8a1 100644 --- a/ui-v2/package.json +++ b/ui-v2/package.json @@ -89,5 +89,10 @@ }, "engines": { "node": "^4.5 || 6.* || >= 7.*" + }, + "ember-addon": { + "paths": [ + "lib/startup" + ] } } diff --git a/ui-v2/tests/acceptance/startup.feature b/ui-v2/tests/acceptance/startup.feature new file mode 100644 index 000000000..70e54705c --- /dev/null +++ b/ui-v2/tests/acceptance/startup.feature @@ -0,0 +1,18 @@ +@setupApplicationTest +Feature: startup + In order to give users an indication as early as possible that they are at the right place + As a user + I should be able to see a startup logo +@ignore + Scenario: When loading the index.html file into a browser + Given 1 datacenter model with the value "dc-1" + Then the url should be '' + Then "html" has the "ember-loading" class + When I visit the services page for yaml + --- + dc: dc-1 + --- + Then the url should be /dc-1/services + Then "html" doesn't have the "ember-loading" class + + diff --git a/ui-v2/tests/acceptance/steps/startup-steps.js b/ui-v2/tests/acceptance/steps/startup-steps.js new file mode 100644 index 000000000..c5f07c804 --- /dev/null +++ b/ui-v2/tests/acceptance/steps/startup-steps.js @@ -0,0 +1,10 @@ +import steps from './steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/index.html b/ui-v2/tests/index.html index 1a85be183..bb4c9e254 100644 --- a/ui-v2/tests/index.html +++ b/ui-v2/tests/index.html @@ -1,5 +1,5 @@ - + diff --git a/ui-v2/tests/steps.js b/ui-v2/tests/steps.js index 4a429f6bb..c12af6878 100644 --- a/ui-v2/tests/steps.js +++ b/ui-v2/tests/steps.js @@ -179,7 +179,11 @@ export default function(assert) { assert.equal(request.url, url, `Expected the request url to be ${url}, was ${request.url}`); }) .then('the url should be $url', 
function(url) { - const current = currentURL(); + // TODO: nice! $url should be wrapped in "" + if (url === "''") { + url = ''; + } + const current = currentURL() || ''; assert.equal(current, url, `Expected the url to be ${url} was ${current}`); }) .then(['I see $num $model', 'I see $num $model model', 'I see $num $model models'], function( @@ -273,6 +277,15 @@ export default function(assert) { .then(['I see $property'], function(property, component) { assert.ok(currentPage[property], `Expected to see ${property}`); }) + // TODO: Think of better language + // TODO: These should be mergeable + .then(['"$selector" has the "$class" class'], function(selector, cls) { + // because `find` doesn't work, guessing its sandboxed to ember's container + assert.ok(document.querySelector(selector).classList.contains(cls)); + }) + .then(['"$selector" doesn\'t have the "$class" class'], function(selector, cls) { + assert.ok(!document.querySelector(selector).classList.contains(cls)); + }) .then('ok', function() { assert.ok(true); }) From 9cb81dc47ecf0b34f49eb45076116ea8436e0931 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Fri, 15 Jun 2018 09:20:13 -0400 Subject: [PATCH 324/627] Switch over to defaulting to the new UI --- agent/http.go | 12 ++++++------ website/source/intro/getting-started/ui.html.md | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/agent/http.go b/agent/http.go index b9791232d..1f149a227 100644 --- a/agent/http.go +++ b/agent/http.go @@ -157,9 +157,9 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler { } if s.IsUIEnabled() { - new_ui, err := strconv.ParseBool(os.Getenv("CONSUL_UI_BETA")) + legacy_ui, err := strconv.ParseBool(os.Getenv("CONSUL_UI_LEGACY")) if err != nil { - new_ui = false + legacy_ui = false } var uifs http.FileSystem @@ -169,15 +169,15 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler { } else { fs := assetFS() - if new_ui { - fs.Prefix += "/v2/" - } else { + if legacy_ui { fs.Prefix += 
"/v1/" + } else { + fs.Prefix += "/v2/" } uifs = fs } - if new_ui { + if !legacy_ui { uifs = &redirectFS{fs: uifs} } diff --git a/website/source/intro/getting-started/ui.html.md b/website/source/intro/getting-started/ui.html.md index e478bc2bf..2a185c245 100644 --- a/website/source/intro/getting-started/ui.html.md +++ b/website/source/intro/getting-started/ui.html.md @@ -27,13 +27,13 @@ By default this is `http://localhost:8500/ui`. You can view a live demo of the Consul Web UI [here](http://demo.consul.io). -## How to Use the New UI +## How to Use the Legacy UI -On May 11, 2018, our redesign of the web UI went into beta. You can use it with -Consul 1.1.0 by setting the environment variable `CONSUL_UI_BETA` to `true`. -Without this environment variable, the web UI will default to the old version. To -use the old UI version, either set `CONSUL_UI_BETA` to false, or don't include -that environment variable at all. +As of Consul version 1.2.0 the original Consul UI is deprecated. You can +still enable it by setting the environment variable `CONSUL_UI_LEGACY` to `true`. +Without this environment variable, the web UI will default to the latest version. +To use the latest UI version, either set `CONSUL_UI_LEGACY` to false or don't +include that environment variable at all. 
## Next Steps From 3884d10de42b478f72e8713c16080fe65ef87826 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Fri, 15 Jun 2018 14:44:14 -0400 Subject: [PATCH 325/627] Add rsync so the ui can build again Also add back the init target to the ui-v2 makefile --- build-support/docker/Build-UI.dockerfile | 2 +- ui-v2/GNUmakefile | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/build-support/docker/Build-UI.dockerfile b/build-support/docker/Build-UI.dockerfile index 9a4a0198b..72a80d2ba 100644 --- a/build-support/docker/Build-UI.dockerfile +++ b/build-support/docker/Build-UI.dockerfile @@ -6,7 +6,7 @@ ARG MAKE_VERSION=4.2.1-r0 ARG YARN_VERSION=1.7.0 RUN apk update && \ - apk add nodejs=${NODEJS_VERSION} nodejs-npm=${NODEJS_VERSION} make=${MAKE_VERSION} && \ + apk add nodejs=${NODEJS_VERSION} nodejs-npm=${NODEJS_VERSION} make=${MAKE_VERSION} rsync && \ npm install --global yarn@${YARN_VERSION} && \ mkdir /consul-src diff --git a/ui-v2/GNUmakefile b/ui-v2/GNUmakefile index 49ac33456..c72072781 100644 --- a/ui-v2/GNUmakefile +++ b/ui-v2/GNUmakefile @@ -7,6 +7,9 @@ dist: yarn run build mv dist ../pkg/web_ui/v2 +init: + yarn install + lint: yarn run lint:js format: From e542e6303112b1aab762d368ab2834056e26b298 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Fri, 15 Jun 2018 15:23:26 -0400 Subject: [PATCH 326/627] Generalize git pushing in a bash function --- GNUmakefile | 17 ++- build-support/functions/01-util.sh | 180 +++++++++++++++++++++++++- build-support/functions/03-release.sh | 6 +- build-support/functions/04-publish.sh | 56 +------- build-support/scripts/build-docker.sh | 19 ++- build-support/scripts/build-local.sh | 5 + build-support/scripts/dev.sh | 30 ++++- build-support/scripts/publish.sh | 7 + build-support/scripts/release.sh | 19 ++- build-support/scripts/version.sh | 5 + 10 files changed, 276 insertions(+), 68 deletions(-) mode change 100644 => 100755 build-support/scripts/build-docker.sh mode change 100644 => 100755 
build-support/scripts/build-local.sh mode change 100644 => 100755 build-support/scripts/dev.sh diff --git a/GNUmakefile b/GNUmakefile index 0b9502907..99083ee0e 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -63,6 +63,21 @@ else DIST_DATE_ARG= endif +PUB_GIT?=1 +PUB_WEBSITE?=1 + +ifeq ($(PUB_GIT),1) +PUB_GIT_ARG=-g +else +PUB_GIT_ARG= +endif + +ifeq ($(PUB_WEBSITE),1) +PUB_WEBSITE_ARG=-g +else +PUB_WEBSITE_ARG= +endif + export GO_BUILD_TAG export UI_BUILD_TAG export UI_LEGACY_BUILD_TAG @@ -105,7 +120,7 @@ dist: @$(SHELL) $(CURDIR)/build-support/scripts/release.sh -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' '$(DIST_VERSION_ARG)' '$(DIST_DATE_ARG)' publish: - @$(SHELL) $(CURDIR)/build-support/scripts/publish.sh -g -w + @$(SHELL) $(CURDIR)/build-support/scripts/publish.sh '$(PUB_GIT_ARG)' '$(PUB_WEBSITE_ARG)' dev-tree: @$(SHELL) $(CURDIR)/build-support/scripts/dev.sh diff --git a/build-support/functions/01-util.sh b/build-support/functions/01-util.sh index dfefc9dfd..102592b73 100644 --- a/build-support/functions/01-util.sh +++ b/build-support/functions/01-util.sh @@ -313,7 +313,7 @@ function normalize_git_url { url="${1#https://}" url="${url#git@}" url="${url%.git}" - url="$(sed ${SED_EXT} -e 's/\([^\/:]*\)[:\/]\(.*\)/\1:\2/' <<< "${url}")" + url="$(sed ${SED_EXT} -e 's/([^\/:]*)[:\/](.*)/\1:\2/' <<< "${url}")" echo "$url" return 0 } @@ -336,6 +336,7 @@ function find_git_remote { fi need_url=$(normalize_git_url "${PUBLISH_GIT_HOST}:${PUBLISH_GIT_REPO}") + debug "Required normalized remote: ${need_url}" pushd "$1" > /dev/null @@ -345,6 +346,7 @@ function find_git_remote { url=$(git remote get-url --push ${remote}) || continue url=$(normalize_git_url "${url}") + debug "Testing Remote: ${remote}: ${url}" if test "${url}" == "${need_url}" then echo "${remote}" @@ -354,7 +356,7 @@ function find_git_remote { done popd > /dev/null - return $ret + return ${ret} } function is_git_clean { @@ -390,6 +392,113 @@ function is_git_clean { return ${ret} } +function 
update_git_env { + # Arguments: + # $1 - Path to git repo + # + # Returns: + # 0 - success + # * - error + # + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. is_git_clean must be called with the path to a git repo as the first argument'" + return 1 + fi + + export GIT_COMMIT=$(git rev-parse --short HEAD) + export GIT_DIRTY=$(test -n "$(git status --porcelain)" && echo "+CHANGES") + export GIT_DESCRIBE=$(git describe --tags --always) + export GIT_IMPORT=github.com/hashicorp/consul/version + export GOLDFLAGS="-X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}${GIT_DIRTY} -X ${GIT_IMPORT}.GitDescribe=${GIT_DESCRIBE}" + return 0 +} + +function git_push_ref { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - Git ref (optional) + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. push_git_release must be called with the path to the top level source as the first argument'" + return 1 + fi + + local sdir="$1" + local ret=0 + + # find the correct remote corresponding to the desired repo (basically prevent pushing enterprise to oss or oss to enterprise) + local remote=$(find_git_remote "${sdir}") || return 1 + status "Using git remote: ${remote}" + + local ref="" + + pushd "${sdir}" > /dev/null + + if test -z "$2" + then + # If no git ref was provided we lookup the current local branch and its tracking branch + # It must have a tracking upstream and it must be tracking the sanctioned git remote + local head=$(git_branch "${sdir}") || return 1 + local upstream=$(git_upstream "${sdir}") || return 1 + + # upstream branch for this branch does not track the remote we need to push to + # basically this checks that the upstream (could be something like origin/master) references the correct remote + # if it doesn't then the string modification wont apply and the var will reamin unchanged and equal to itself. 
+ if test "${upstream#${remote}/}" == "${upstream}" + then + err "ERROR: Upstream branch '${upstream}' does not track the correct remote '${remote}' - cannot push" + ret=1 + fi + ref="refs/heads/${head}" + else + # A git ref was provided - get the full ref and make sure it isn't ambiguous and also to + # be able to determine whether its a branch or tag we are pushing + ref_out=$(git rev-parse --symbolic-full-name "$2" --) + + # -ne 2 because it should have the ref on one line followed by a line with '--' + if test "$(wc -l <<< "${ref_out}")" -ne 2 + then + err "ERROR: Git ref '$2' is ambiguous" + debug "${ref_out}" + ret=1 + else + ref=$(head -n 1 <<< "${ref_out}") + fi + fi + + if test ${ret} -eq 0 + then + case "${ref}" in + refs/tags/*) + status "Pushing tag ${ref#refs/tags/} to ${remote}" + ;; + refs/heads/*) + status "Pushing local branch ${ref#refs/tags/} to ${remote}" + ;; + *) + err "ERROR: git_push_ref func is refusing to push ref that isn't a branch or tag" + return 1 + esac + + if ! git push "${remote}" "${ref}" + then + err "ERROR: Failed to push ${ref} to remote: ${remote}" + ret=1 + fi + fi + + popd > /dev/null + + return $ret +} + function update_version { # Arguments: # $1 - Path to the version file @@ -577,4 +686,71 @@ function set_dev_mode { add_unreleased_to_changelog "${sdir}" || return 1 return 0 +} + +function git_staging_empty { + # Arguments: + # $1 - Path to git repo + # + # Returns: + # 0 - success (nothing staged) + # * - error (staged files) + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. commit_dev_mode must be called with the path to a git repo as the first argument'" + return 1 + fi + + pushd "$1" > /dev/null + + declare -i ret=0 + + for status in $(git status --porcelain=v2 | awk '{print $2}' | cut -b 1) + do + if test "${status}" != "." 
+ then + ret=1 + break + fi + done + + popd > /dev/null + return ${ret} +} + +function commit_dev_mode { + # Arguments: + # $1 - Path to top level Consul source + # + # Returns: + # 0 - success + # * - error + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. commit_dev_mode must be called with the path to a git repo as the first argument'" + return 1 + fi + + status "Checking for previously staged files" + git_staging_empty "$1" || return 1 + + declare -i ret=0 + + pushd "$1" > /dev/null + + status "Staging CHANGELOG.md and version_*.go files" + git add CHANGELOG.md && git add version/version_*.go + ret=$? + + if test ${ret} -eq 0 + then + status "Adding Commit" + git commit -m "Putting source back into Dev Mode" + ret=$? + fi + + popd >/dev/null + return ${ret} } \ No newline at end of file diff --git a/build-support/functions/03-release.sh b/build-support/functions/03-release.sh index 8858867ac..0429adbce 100644 --- a/build-support/functions/03-release.sh +++ b/build-support/functions/03-release.sh @@ -258,8 +258,6 @@ function build_consul_release { build_consul "$1" "" "$2" } - - function build_release { # Arguments: (yeah there are lots) # $1 - Path to the top level Consul source @@ -328,7 +326,7 @@ function build_release { set_vers=$(get_version "${sdir}" false false) fi - if ! set_release_mode "${sdir}" "${set_vers}" "${set_date}" + if is_set "${do_tag}" && ! 
set_release_mode "${sdir}" "${set_vers}" "${set_date}" then err "ERROR: Failed to put source into release mode" return 1 @@ -398,6 +396,8 @@ function build_release { err "ERROR: Failed to tag the release" return 1 fi + + update_git_env "${sdir}" fi if is_set "${do_build}" diff --git a/build-support/functions/04-publish.sh b/build-support/functions/04-publish.sh index 02abdcdc6..60efe8deb 100644 --- a/build-support/functions/04-publish.sh +++ b/build-support/functions/04-publish.sh @@ -19,59 +19,6 @@ function hashicorp_release { return 0 } -function push_git_release { - # Arguments: - # $1 - Path to the top level Consul source - # $2 - Tag to push - # - # Returns: - # 0 - success - # * - error - - if ! test -d "$1" - then - err "ERROR: '$1' is not a directory. push_git_release must be called with the path to the top level source as the first argument'" - return 1 - fi - - local sdir="$1" - local ret=0 - - # find the correct remote corresponding to the desired repo (basically prevent pushing enterprise to oss or oss to enterprise) - local remote=$(find_git_remote "${sdir}") || return 1 - local head=$(git_branch "${sdir}") || return 1 - local upstream=$(git_upstream "${sdir}") || return 1 - status "Using git remote: ${remote}" - - # upstream branch for this branch does not track the remote we need to push to - if test "${upstream#${remote}}" == "${upstream}" - then - err "ERROR: Upstream branch '${upstream}' does not track the correct remote '${remote}'" - return 1 - fi - - pushd "${sdir}" > /dev/null - - status "Pushing local branch ${head} to ${upstream}" - if ! git push "${remote}" - then - err "ERROR: Failed to push to remote: ${remote}" - ret=1 - fi - - status "Pushing tag ${2} to ${remote}" - if test "${ret}" -eq 0 && ! 
git push "${remote}" "${2}" - then - err "ERROR: Failed to push tag ${2} to ${remote}" - ret = 1 - fi - - popd > /dev/null - - - return $ret -} - function confirm_git_push_changes { # Arguments: # $1 - Path to git repo @@ -234,7 +181,8 @@ function publish_release { if is_set "${pub_git}" then status_stage "==> Pushing to Git" - push_git_release "$1" "v${vers}" || return 1 + git_push_ref "$1" || return 1 + git_push_ref "$1" "v${vers}" || return 1 fi if is_set "${pub_hc_releases}" diff --git a/build-support/scripts/build-docker.sh b/build-support/scripts/build-docker.sh old mode 100644 new mode 100755 index 7b6330844..f7736b423 --- a/build-support/scripts/build-docker.sh +++ b/build-support/scripts/build-docker.sh @@ -16,6 +16,10 @@ function usage { cat <<-EOF Usage: ${SCRIPT_NAME} (consul|ui|ui-legacy|static-assets) [] +Description: + This script will build the various Consul components within docker containers + and copy all the relevant artifacts out of the containers back to the source. + Options: -i | --image IMAGE Alternative Docker image to run the build within. 
@@ -38,10 +42,7 @@ function main { declare image= declare sdir="${SOURCE_DIR}" declare -i refresh=0 - declare command="$1" - - # get rid of the subcommand - shift + declare command="" while test $# -gt 0 do @@ -80,6 +81,10 @@ function main { refresh=1 shift ;; + consul | ui | ui-legacy | static-assets ) + command="$1" + shift + ;; * ) err_usage "ERROR: Unknown argument '$1'" return 1 @@ -87,6 +92,12 @@ function main { esac done + if test -z "${command}" + then + err_usage "ERROR: No command specified" + return 1 + fi + case "${command}" in consul ) if is_set "${refresh}" diff --git a/build-support/scripts/build-local.sh b/build-support/scripts/build-local.sh old mode 100644 new mode 100755 index 0ecf1c8a4..276fd3041 --- a/build-support/scripts/build-local.sh +++ b/build-support/scripts/build-local.sh @@ -16,6 +16,11 @@ function usage { cat <<-EOF Usage: ${SCRIPT_NAME} [] +Description: + This script will build the Consul binary on the local system. + All the requisite tooling must be installed for this to be + successful. + Options: -s | --source DIR Path to source to build. diff --git a/build-support/scripts/dev.sh b/build-support/scripts/dev.sh old mode 100644 new mode 100755 index 6ea057ccd..6046ef3cc --- a/build-support/scripts/dev.sh +++ b/build-support/scripts/dev.sh @@ -16,11 +16,18 @@ function usage { cat <<-EOF Usage: ${SCRIPT_NAME} [] +Description: + + This script will put the source back into dev mode after a release. + Options: -s | --source DIR Path to source to build. Defaults to "${SOURCE_DIR}" + --no-git Do not commit or attempt to push + the changes back to the upstream. + -h | --help Print this help text. 
EOF } @@ -32,9 +39,10 @@ function err_usage { } function main { - declare sdir="${SOURCE_DIR}" - declare build_os="" - declare build_arch="" + declare sdir="${SOURCE_DIR}" + declare build_os="" + declare build_arch="" + declare -i do_git=1 while test $# -gt 0 @@ -60,6 +68,10 @@ function main { sdir="$2" shift 2 ;; + --no-git ) + do_git=0 + shift + ;; * ) err_usage "ERROR: Unknown argument: '$1'" return 1 @@ -69,6 +81,18 @@ function main { set_dev_mode "${sdir}" || return 1 + if is_set "${do_git}" + then + status_stage "==> Commiting Dev Mode Changes" + commit_dev_mode "${sdir}" || return 1 + + status_stage "==> Confirming Git Changes" + confirm_git_push_changes "${sdir}" + + status_stage "==> Pushing to Git" + git_push_ref "$1" || return 1 + fi + return 0 } diff --git a/build-support/scripts/publish.sh b/build-support/scripts/publish.sh index e6f0a481b..034b37861 100755 --- a/build-support/scripts/publish.sh +++ b/build-support/scripts/publish.sh @@ -16,6 +16,13 @@ function usage { cat <<-EOF Usage: ${SCRIPT_NAME} [] +Description: + + This script will "publish" a Consul release. It expects a prebuilt release in + pkg/dist matching the version in the repo and a clean git status. It will + prompt you to confirm the consul version and git changes you are going to + publish prior to pushing to git and to releases.hashicorp.com. + Options: -s | --source DIR Path to source to build. Defaults to "${SOURCE_DIR}" diff --git a/build-support/scripts/release.sh b/build-support/scripts/release.sh index a7fa95900..2cb6ea3e6 100755 --- a/build-support/scripts/release.sh +++ b/build-support/scripts/release.sh @@ -16,11 +16,28 @@ function usage { cat <<-EOF Usage: ${SCRIPT_NAME} [] +Description: + + This script will do a full release build of Consul. Building each component + is done within a docker container. In addition to building Consul this + script will do a few more things. 
+ + * Update version/version*.go files + * Update CHANGELOG.md to put things into release mode + * Create a release commit. It changes in the commit include the CHANGELOG.md + version files and the assetfs. + * Tag the release + * Generate the SHA256SUMS file for the binaries + * Sign the SHA256SUMS file with a GPG key + + Options: -s | --source DIR Path to source to build. Defaults to "${SOURCE_DIR}" - -t | --tag BOOL Whether to add a release commit and tag the build + -t | --tag BOOL Whether to add a release commit and tag the build. + This also controls whether we put the tree into + release mode Defaults to 1. -b | --build BOOL Whether to perform the build of the ui's, assetfs and diff --git a/build-support/scripts/version.sh b/build-support/scripts/version.sh index c0b4c51ab..30a51cfbf 100755 --- a/build-support/scripts/version.sh +++ b/build-support/scripts/version.sh @@ -16,6 +16,11 @@ function usage { cat <<-EOF Usage: ${SCRIPT_NAME} [] +Description: + + This script is just a convenience around discover what the Consul + version would be if you were to build it. + Options: -s | --source DIR Path to source to build. 
Defaults to "${SOURCE_DIR}" From f6d1aa8fc3b787cc9fd2be434cddf00292e758e0 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 09:06:57 -0400 Subject: [PATCH 327/627] Make some room for overrides of build system functions --- build-support/functions/{01-util.sh => 10-util.sh} | 0 build-support/functions/{02-build.sh => 20-util.sh} | 0 build-support/functions/{03-release.sh => 30-release.sh} | 0 build-support/functions/{04-publish.sh => 40-publish.sh} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename build-support/functions/{01-util.sh => 10-util.sh} (100%) rename build-support/functions/{02-build.sh => 20-util.sh} (100%) rename build-support/functions/{03-release.sh => 30-release.sh} (100%) rename build-support/functions/{04-publish.sh => 40-publish.sh} (100%) diff --git a/build-support/functions/01-util.sh b/build-support/functions/10-util.sh similarity index 100% rename from build-support/functions/01-util.sh rename to build-support/functions/10-util.sh diff --git a/build-support/functions/02-build.sh b/build-support/functions/20-util.sh similarity index 100% rename from build-support/functions/02-build.sh rename to build-support/functions/20-util.sh diff --git a/build-support/functions/03-release.sh b/build-support/functions/30-release.sh similarity index 100% rename from build-support/functions/03-release.sh rename to build-support/functions/30-release.sh diff --git a/build-support/functions/04-publish.sh b/build-support/functions/40-publish.sh similarity index 100% rename from build-support/functions/04-publish.sh rename to build-support/functions/40-publish.sh From ade900f2b6766eb685911e9f24c69f070b66f14e Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 09:15:01 -0400 Subject: [PATCH 328/627] Dont override set but null vars --- build-support/functions/00-vars.sh | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/build-support/functions/00-vars.sh 
b/build-support/functions/00-vars.sh index e65ee9c54..03e800b01 100644 --- a/build-support/functions/00-vars.sh +++ b/build-support/functions/00-vars.sh @@ -7,11 +7,7 @@ UI_LEGACY_BUILD_CONTAINER_DEFAULT="consul-build-ui-legacy" GO_BUILD_CONTAINER_DEFAULT="consul-build-go" # Whether to colorize shell output -if test -z "${COLORIZE}" -then - COLORIZE=1 -fi - +COLORIZE=${COLORIZE-1} # determine GOPATH and the first GOPATH to use for intalling binaries GOPATH=${GOPATH:-$(go env GOPATH)} @@ -22,24 +18,14 @@ case $(uname) in esac MAIN_GOPATH=$(cut -d: -f1 <<< "${GOPATH}") - # Build debugging output is off by default -if test -z "${BUILD_DEBUG}" -then - BUILD_DEBUG=0 -fi +BUILD_DEBUG=${BUILD_DEBUG-0} # default publish host is github.com - only really useful to use something else for testing -if test -z "${PUBLISH_GIT_HOST}" -then - PUBLISH_GIT_HOST=github.com -fi +PUBLISH_GIT_HOST="${PUBLISH_GIT_HOST-github.com}" # default publish repo is hashicorp/consul - useful to override for testing as well as in the enterprise repo -if test -z "${PUBLISH_GIT_REPO}" -then - PUBLISH_GIT_REPO=hashicorp/consul.git -fi +PUBLISH_GIT_REPO="${PUBLISH_GIT_REPO-hashicorp/consul.git}" if test "$(uname)" == "Darwin" then @@ -47,4 +33,3 @@ then else SED_EXT="" fi - \ No newline at end of file From 7d2c8689f5ed664467902b12f55615d416e39f43 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 09:21:38 -0400 Subject: [PATCH 329/627] Update the name of a function source --- build-support/functions/{20-util.sh => 20-build.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename build-support/functions/{20-util.sh => 20-build.sh} (100%) diff --git a/build-support/functions/20-util.sh b/build-support/functions/20-build.sh similarity index 100% rename from build-support/functions/20-util.sh rename to build-support/functions/20-build.sh From 551cf5a77b5fc0270a503eb47acd9810912b4d55 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 09:22:31 -0400 Subject: [PATCH 330/627] 
Make sure to commit all version*.go files --- build-support/functions/10-util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-support/functions/10-util.sh b/build-support/functions/10-util.sh index 102592b73..57b542b36 100644 --- a/build-support/functions/10-util.sh +++ b/build-support/functions/10-util.sh @@ -741,7 +741,7 @@ function commit_dev_mode { pushd "$1" > /dev/null status "Staging CHANGELOG.md and version_*.go files" - git add CHANGELOG.md && git add version/version_*.go + git add CHANGELOG.md && git add version/version*.go ret=$? if test ${ret} -eq 0 From 588cfbb3599195156daf18ebd93ba51447bce60c Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 09:32:00 -0400 Subject: [PATCH 331/627] Add notion of a consul pkg name to coexist with enterprise --- build-support/functions/00-vars.sh | 2 ++ build-support/functions/30-release.sh | 32 +++++++++++++-------------- build-support/functions/40-publish.sh | 4 ++-- 3 files changed, 20 insertions(+), 18 deletions(-) diff --git a/build-support/functions/00-vars.sh b/build-support/functions/00-vars.sh index 03e800b01..c53cffece 100644 --- a/build-support/functions/00-vars.sh +++ b/build-support/functions/00-vars.sh @@ -27,6 +27,8 @@ PUBLISH_GIT_HOST="${PUBLISH_GIT_HOST-github.com}" # default publish repo is hashicorp/consul - useful to override for testing as well as in the enterprise repo PUBLISH_GIT_REPO="${PUBLISH_GIT_REPO-hashicorp/consul.git}" +CONSUL_PKG_NAME="consul" + if test "$(uname)" == "Darwin" then SED_EXT="-E" diff --git a/build-support/functions/30-release.sh b/build-support/functions/30-release.sh index 0429adbce..edf783a6c 100644 --- a/build-support/functions/30-release.sh +++ b/build-support/functions/30-release.sh @@ -177,25 +177,25 @@ function check_release { declare -a expected_files - expected_files+=("consul_${2}_SHA256SUMS") + expected_files+=("${CONSUL_PKG_NAME}_${2}_SHA256SUMS") echo "check sig: $3" if is_set "$3" then - 
expected_files+=("consul_${2}_SHA256SUMS.sig") + expected_files+=("${CONSUL_PKG_NAME}_${2}_SHA256SUMS.sig") fi - expected_files+=("consul_${2}_darwin_386.zip") - expected_files+=("consul_${2}_darwin_amd64.zip") - expected_files+=("consul_${2}_freebsd_386.zip") - expected_files+=("consul_${2}_freebsd_amd64.zip") - expected_files+=("consul_${2}_freebsd_arm.zip") - expected_files+=("consul_${2}_linux_386.zip") - expected_files+=("consul_${2}_linux_amd64.zip") - expected_files+=("consul_${2}_linux_arm.zip") - expected_files+=("consul_${2}_linux_arm64.zip") - expected_files+=("consul_${2}_solaris_amd64.zip") - expected_files+=("consul_${2}_windows_386.zip") - expected_files+=("consul_${2}_windows_amd64.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_darwin_386.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_darwin_amd64.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_freebsd_386.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_freebsd_amd64.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_freebsd_arm.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_linux_386.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_linux_amd64.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_linux_arm.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_linux_arm64.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_solaris_amd64.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_windows_386.zip") + expected_files+=("${CONSUL_PKG_NAME}_${2}_windows_amd64.zip") declare -a found_files @@ -420,7 +420,7 @@ function build_release { fi status_stage "==> Generating SHA 256 Hashes for Binaries" - shasum_release "${sdir}/pkg/dist" "consul_${vers}_SHA256SUMS" + shasum_release "${sdir}/pkg/dist" "${CONSUL_PKG_NAME}_${vers}_SHA256SUMS" if test $? 
-ne 0 then err "ERROR: Failed to generate SHA 256 hashes for the release" @@ -429,7 +429,7 @@ function build_release { if is_set "${do_sha256}" then - sign_release "${sdir}/pkg/dist/consul_${vers}_SHA256SUMS" "${gpg_key}" + sign_release "${sdir}/pkg/dist/${CONSUL_PKG_NAME}_${vers}_SHA256SUMS" "${gpg_key}" if test $? -ne 0 then err "ERROR: Failed to sign the SHA 256 hashes file" diff --git a/build-support/functions/40-publish.sh b/build-support/functions/40-publish.sh index 60efe8deb..bbc3dc1ad 100644 --- a/build-support/functions/40-publish.sh +++ b/build-support/functions/40-publish.sh @@ -79,7 +79,7 @@ function confirm_consul_version { # 0 - success # * - error - local zfile="${1}/consul_${2}_$(go env GOOS)_$(go env GOARCH).zip" + local zfile="${1}/${CONSUL_PKG_NAME}_${2}_$(go env GOOS)_$(go env GOARCH).zip" if ! test -f "${zfile}" then @@ -88,7 +88,7 @@ function confirm_consul_version { fi local ret=0 - local tfile="$(mktemp) -t "consul_")" + local tfile="$(mktemp) -t "${CONSUL_PKG_NAME}_")" unzip -p "${zfile}" "consul" > "${tfile}" if test $? 
-eq 0 From e7b0a729eae4a8188b9970007d70671ae7caf71e Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 09:57:12 -0400 Subject: [PATCH 332/627] Only remove builds with the same suffix --- build-support/functions/20-build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build-support/functions/20-build.sh b/build-support/functions/20-build.sh index 6205db50e..7e5d198a7 100644 --- a/build-support/functions/20-build.sh +++ b/build-support/functions/20-build.sh @@ -202,7 +202,7 @@ function build_consul_post { pushd "${sdir}" > /dev/null # recreate the pkg dir - rm -r pkg/bin/* 2> /dev/null + rm -r pkg/bin/*${2} 2> /dev/null mkdir -p pkg/bin 2> /dev/null # move all files in pkg.new into pkg @@ -379,4 +379,4 @@ function build_consul_local { return 1 fi return 0 -} \ No newline at end of file +} From c94c212e97844e516d4037948e582d24d559fff7 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 10:07:29 -0400 Subject: [PATCH 333/627] Fix a bug in parse_version that prevented using auxillary version files --- build-support/functions/10-util.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build-support/functions/10-util.sh b/build-support/functions/10-util.sh index 57b542b36..4bc3d2567 100644 --- a/build-support/functions/10-util.sh +++ b/build-support/functions/10-util.sh @@ -141,9 +141,9 @@ function parse_version { # try to determine the version if we have build tags for tag in "$GOTAGS" do - for vfile in $(ls "${1}/version/version_*.go" 2> /dev/null| sort) + for vfile in $(find "${1}/version" -name "version_*.go" 2> /dev/null| sort) do - if grep -q "// +build $tag" $file + if grep -q "// +build $tag" "${vfile}" then version_main=$(awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) release_main=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) From 74d2806cc13a6e84e79b50eb97e464442fa1cbad Mon Sep 17 00:00:00 2001 From: Matt Keeler 
Date: Mon, 18 Jun 2018 10:26:04 -0400 Subject: [PATCH 334/627] Use CONSUL_PKG_NAME in package_release --- build-support/functions/30-release.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build-support/functions/30-release.sh b/build-support/functions/30-release.sh index edf783a6c..93c8f99f8 100644 --- a/build-support/functions/30-release.sh +++ b/build-support/functions/30-release.sh @@ -98,10 +98,10 @@ function package_release { for platform in $(find "${sdir}/pkg/bin" -mindepth 1 -maxdepth 1 -type d) do local os_arch=$(basename $platform) - local dest="${sdir}/pkg/dist/consul_${vers}_${os_arch}.zip" + local dest="${sdir}/pkg/dist/${CONSUL_PKG_NAME}_${vers}_${os_arch}.zip" status "Compressing ${os_arch} directory into ${dest}" pushd "${platform}" > /dev/null - zip "${sdir}/pkg/dist/consul_${vers}_${os_arch}.zip" ./* + zip "${sdir}/pkg/dist/${CONSUL_PKG_NAME}_${vers}_${os_arch}.zip" ./* ret=$? popd > /dev/null From b7a15e66164dee5b5fb7bab9f230a40010f8cf0c Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 18 Jun 2018 15:38:52 +0100 Subject: [PATCH 335/627] Ensure all tags for a service are listed on the service detail page --- ui-v2/app/services/services.js | 6 +++++ ui-v2/app/templates/dc/services/show.hbs | 6 ++--- .../tests/acceptance/dc/services/show.feature | 22 +++++++++++++++++++ .../steps/dc/services/show-steps.js | 10 +++++++++ ui-v2/tests/helpers/type-to-url.js | 22 +++++++++++++------ ui-v2/tests/steps.js | 14 ++++++++---- ui-v2/yarn.lock | 17 +++++++++----- 7 files changed, 78 insertions(+), 19 deletions(-) create mode 100644 ui-v2/tests/acceptance/dc/services/show.feature create mode 100644 ui-v2/tests/acceptance/steps/dc/services/show-steps.js diff --git a/ui-v2/app/services/services.js b/ui-v2/app/services/services.js index 62cf2fa4d..7e92a418a 100644 --- a/ui-v2/app/services/services.js +++ b/ui-v2/app/services/services.js @@ -21,6 +21,12 @@ export default Service.extend({ .then(function(item) { const nodes = get(item, 
'Nodes'); const service = get(nodes, 'firstObject'); + const tags = nodes + .reduce(function(prev, item) { + return prev.concat(get(item, 'Service.Tags') || []); + }, []) + .uniq(); + set(service, 'Tags', tags); set(service, 'Nodes', nodes); return service; }); diff --git a/ui-v2/app/templates/dc/services/show.hbs b/ui-v2/app/templates/dc/services/show.hbs index 5f6ba6a21..23dc23af9 100644 --- a/ui-v2/app/templates/dc/services/show.hbs +++ b/ui-v2/app/templates/dc/services/show.hbs @@ -15,11 +15,11 @@ {{/if}} {{/block-slot}} {{#block-slot 'content'}} -{{#if (gt item.Service.Tags.length 0)}} +{{#if (gt item.Tags.length 0)}}
Tags
-
- {{join ', ' item.Service.Tags}} +
+ {{join ', ' item.Tags}}
{{/if}} diff --git a/ui-v2/tests/acceptance/dc/services/show.feature b/ui-v2/tests/acceptance/dc/services/show.feature new file mode 100644 index 000000000..29daba7b6 --- /dev/null +++ b/ui-v2/tests/acceptance/dc/services/show.feature @@ -0,0 +1,22 @@ +@setupApplicationTest +Feature: dc / services / show: Show Service + Scenario: Given various service with various tags, all tags are displayed + Given 1 datacenter model with the value "dc1" + And 3 node models + And 1 service model from yaml + --- + - Service: + Tags: ['Tag1', 'Tag2'] + - Service: + Tags: ['Tag3', 'Tag1'] + - Service: + Tags: ['Tag2', 'Tag3'] + --- + When I visit the service page for yaml + --- + dc: dc1 + service: service-0 + --- + Then I see the text "Tag1, Tag2, Tag3" in "[data-test-tags]" + Then ok + diff --git a/ui-v2/tests/acceptance/steps/dc/services/show-steps.js b/ui-v2/tests/acceptance/steps/dc/services/show-steps.js new file mode 100644 index 000000000..a7eff3228 --- /dev/null +++ b/ui-v2/tests/acceptance/steps/dc/services/show-steps.js @@ -0,0 +1,10 @@ +import steps from '../../steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/helpers/type-to-url.js b/ui-v2/tests/helpers/type-to-url.js index 68446a8b7..6fa84bbbb 100644 --- a/ui-v2/tests/helpers/type-to-url.js +++ b/ui-v2/tests/helpers/type-to-url.js @@ -2,22 +2,30 @@ export default function(type) { let url = null; switch (type) { case 'dc': - url = '/v1/catalog/datacenters'; + url = ['/v1/catalog/datacenters']; break; case 'service': - url = '/v1/internal/ui/services'; + url = ['/v1/internal/ui/services', '/v1/health/service/']; break; case 'node': - url = '/v1/internal/ui/nodes'; - // url = '/v1/health/service/_'; + url = ['/v1/internal/ui/nodes']; break; case 'kv': url = '/v1/kv/'; break; case 
'acl': - url = '/v1/acl/list'; - // url = '/v1/acl/info/_'; + url = ['/v1/acl/list']; break; } - return url; + return function(actual) { + if (url === null) { + return false; + } + if (typeof url === 'string') { + return url === actual; + } + return url.some(function(item) { + return actual.indexOf(item) === 0; + }); + }; } diff --git a/ui-v2/tests/steps.js b/ui-v2/tests/steps.js index 4a429f6bb..990c313bf 100644 --- a/ui-v2/tests/steps.js +++ b/ui-v2/tests/steps.js @@ -1,6 +1,6 @@ /* eslint no-console: "off" */ import yadda from './helpers/yadda'; -import { currentURL, click, triggerKeyEvent } from '@ember/test-helpers'; +import { currentURL, click, triggerKeyEvent, find } from '@ember/test-helpers'; import getDictionary from '@hashicorp/ember-cli-api-double/dictionary'; import pages from 'consul-ui/tests/pages'; import api from 'consul-ui/tests/helpers/api'; @@ -38,14 +38,14 @@ export default function(assert) { }, yadda) ) // doubles - .given(['$number $model model', '$number $model models'], function(number, model) { + .given(['$number $model model[s]?', '$number $model models'], function(number, model) { return create(number, model); }) - .given(['$number $model model with the value "$value"'], function(number, model, value) { + .given(['$number $model model[s]? with the value "$value"'], function(number, model, value) { return create(number, model, value); }) .given( - ['$number $model model[s]? from yaml\n$yaml', '$number $model model from json\n$json'], + ['$number $model model[s]? from yaml\n$yaml', '$number $model model[s]? 
from json\n$json'], function(number, model, data) { return create(number, model, data); } @@ -273,6 +273,12 @@ export default function(assert) { .then(['I see $property'], function(property, component) { assert.ok(currentPage[property], `Expected to see ${property}`); }) + .then(['I see the text "$text" in "$selector"'], function(text, selector) { + assert.ok( + find(selector).textContent.indexOf(text) !== -1, + `Expected to see "${text}" in "${selector}"` + ); + }) .then('ok', function() { assert.ok(true); }) diff --git a/ui-v2/yarn.lock b/ui-v2/yarn.lock index c760ef53b..c712625d4 100644 --- a/ui-v2/yarn.lock +++ b/ui-v2/yarn.lock @@ -70,8 +70,8 @@ "@glimmer/di" "^0.2.0" "@hashicorp/api-double@^1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@hashicorp/api-double/-/api-double-1.1.0.tgz#299d3c560090dfe9c335db64d63c3ef0c5da79c4" + version "1.2.0" + resolved "https://registry.yarnpkg.com/@hashicorp/api-double/-/api-double-1.2.0.tgz#d2846f79d086ac009673ae755da15301e0f2f7c3" dependencies: "@gardenhq/o" "^8.0.1" "@gardenhq/tick-control" "^2.0.0" @@ -5704,7 +5704,14 @@ js-yaml@0.3.x: version "0.3.7" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-0.3.7.tgz#d739d8ee86461e54b354d6a7d7d1f2ad9a167f62" -js-yaml@^3.10.0, js-yaml@^3.11.0, js-yaml@^3.2.5, js-yaml@^3.2.7, js-yaml@^3.6.1, js-yaml@^3.7.0, js-yaml@^3.8.4, js-yaml@^3.9.0, js-yaml@^3.9.1: +js-yaml@^3.10.0, js-yaml@^3.11.0, js-yaml@^3.8.4: + version "3.12.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.12.0.tgz#eaed656ec8344f10f527c6bfa1b6e2244de167d1" + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +js-yaml@^3.2.5, js-yaml@^3.2.7, js-yaml@^3.6.1, js-yaml@^3.7.0, js-yaml@^3.9.0, js-yaml@^3.9.1: version "3.11.0" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.11.0.tgz#597c1a8bd57152f26d622ce4117851a51f5ebaef" dependencies: @@ -6711,8 +6718,8 @@ morgan@^1.8.1: on-headers "~1.0.1" mousetrap@^1.6.1: - version "1.6.1" - resolved 
"https://registry.yarnpkg.com/mousetrap/-/mousetrap-1.6.1.tgz#2a085f5c751294c75e7e81f6ec2545b29cbf42d9" + version "1.6.2" + resolved "https://registry.yarnpkg.com/mousetrap/-/mousetrap-1.6.2.tgz#caadd9cf886db0986fb2fee59a82f6bd37527587" mout@^1.0.0: version "1.1.0" From b6aecb2d7785d43351811d6eac7a133d210932aa Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 11:23:32 -0400 Subject: [PATCH 336/627] Refactor a little to accomodate overriding packaging using directories --- build-support/functions/10-util.sh | 51 +++++++- build-support/functions/20-build.sh | 50 +++++--- build-support/functions/30-release.sh | 169 +++++++++++++++++++------- 3 files changed, 211 insertions(+), 59 deletions(-) diff --git a/build-support/functions/10-util.sh b/build-support/functions/10-util.sh index 4bc3d2567..03a94de5f 100644 --- a/build-support/functions/10-util.sh +++ b/build-support/functions/10-util.sh @@ -753,4 +753,53 @@ function commit_dev_mode { popd >/dev/null return ${ret} -} \ No newline at end of file +} + +function gpg_detach_sign { + # Arguments: + # $1 - File to sign + # $2 - Alternative GPG key to use for signing + # + # Returns: + # 0 - success + # * - failure + + # determine whether the gpg key to use is being overridden + local gpg_key=${HASHICORP_GPG_KEY} + if test -n "$2" + then + gpg_key=$2 + fi + + gpg --default-key "${gpg_key}" --detach-sig --yes -v "$1" + return $? +} + +function shasum_directory { + # Arguments: + # $1 - Path to directory containing the files to shasum + # $2 - File to output sha sums to + # + # Returns: + # 0 - success + # * - failure + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory and shasum_release requires passing a directory as the first argument" + return 1 + fi + + if test -z "$2" + then + err "ERROR: shasum_release requires a second argument to be the filename to output the shasums to but none was given" + return 1 + fi + + pushd $1 > /dev/null + shasum -a256 * > "$2" + ret=$? 
+ popd >/dev/null + + return $ret +} diff --git a/build-support/functions/20-build.sh b/build-support/functions/20-build.sh index 7e5d198a7..346535354 100644 --- a/build-support/functions/20-build.sh +++ b/build-support/functions/20-build.sh @@ -180,7 +180,7 @@ function build_assetfs { function build_consul_post { # Arguments # $1 - Path to the top level Consul source - # $2 - build suffix (Optional) + # $2 - Subdirectory under pkg/bin (Optional) # # Returns: # 0 - success @@ -199,17 +199,25 @@ function build_consul_post { local sdir="$1" + local extra_dir_name="$2" + local extra_dir="" + + if test -n "${extra_dir_name}" + then + extra_dir="${extra_dir_name}/" + fi + pushd "${sdir}" > /dev/null # recreate the pkg dir - rm -r pkg/bin/*${2} 2> /dev/null - mkdir -p pkg/bin 2> /dev/null + rm -r pkg/bin/${extra_dir}* 2> /dev/null + mkdir -p pkg/bin/${extra_dir} 2> /dev/null # move all files in pkg.new into pkg - cp -r pkg.bin.new/* pkg/bin/ + cp -r pkg.bin.new/${extra_dir}* pkg/bin/${extra_dir} rm -r pkg.bin.new - DEV_PLATFORM="./pkg/bin/$(go env GOOS)_$(go env GOARCH)${2}" + DEV_PLATFORM="./pkg/bin/${extra_dir}$(go env GOOS)_$(go env GOARCH)" for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f) do # recreate the bin dir @@ -228,7 +236,7 @@ function build_consul_post { function build_consul { # Arguments: # $1 - Path to the top level Consul source - # $2 - build suffix (optional - must specify if needing to specify the docker image) + # $2 - Subdirectory to put binaries in under pkg/bin (optional - must specify if needing to specify the docker image) # $3 - The docker image to run the build within (optional) # # Returns: @@ -248,7 +256,8 @@ function build_consul { fi local sdir="$1" - local build_suffix="$2" + local extra_dir_name="$2" + local extra_dir="" local image_name=${GO_BUILD_CONTAINER_DEFAULT} if test -n "$3" then @@ -271,15 +280,20 @@ function build_consul { fi XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"} XC_ARCH=${XC_ARCH:-"386 amd64 arm 
arm64"} + + if test -n "${extra_dir_name}" + then + extra_dir="${extra_dir_name}/" + fi - local container_id=$(docker create -it -e CGO_ENABLED=0 ${image_name} gox -os="${XC_OS}" -arch="${XC_ARCH}" -osarch="!darwin/arm !darwin/arm64" -ldflags "${GOLDFLAGS}" -output "pkg/bin/{{.OS}}_{{.Arch}}${build_suffix}/consul" -tags="${GOTAGS}") + local container_id=$(docker create -it -e CGO_ENABLED=0 ${image_name} gox -os="${XC_OS}" -arch="${XC_ARCH}" -osarch="!darwin/arm !darwin/arm64" -ldflags "${GOLDFLAGS}" -output "pkg/bin/${extra_dir}{{.OS}}_{{.Arch}}/consul" -tags="${GOTAGS}") ret=$? if test $ret -eq 0 then - status "Copying the source from '${sdir}' to /go/src/github.com/hashicorp/consul/pkg" + status "Copying the source from '${sdir}' to /go/src/github.com/hashicorp/consul" ( - tar -c $(ls | grep -v "ui\|ui-v2\|website\|bin\|.git") | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul && + tar -c $(ls | grep -v "^(ui\|ui-v2\|website\|bin\|pkg\|.git)") | docker cp - ${container_id}:/go/src/github.com/hashicorp/consul && status "Running build in container" && docker start -i ${container_id} && status "Copying back artifacts" && @@ -290,7 +304,7 @@ function build_consul { if test $ret -eq 0 then - build_consul_post "${sdir}" "${build_suffix}" + build_consul_post "${sdir}" "${extra_dir_name}" ret=$? else rm -r pkg.bin.new 2> /dev/null @@ -305,7 +319,7 @@ function build_consul_local { # $1 - Path to the top level Consul source # $2 - Space separated string of OSes to build. If empty will use env vars for determination. # $3 - Space separated string of architectures to build. If empty will use env vars for determination. 
- # $4 - build suffix (optional) + # $4 - Subdirectory to put binaries in under pkg/bin (optional) # # Returns: # 0 - success @@ -326,7 +340,13 @@ function build_consul_local { local sdir="$1" local build_os="$2" local build_arch="$3" - local build_suffix="$4" + local extra_dir_name="$4" + local extra_dir="" + + if test -n "${extra_dir_name}" + then + extra_dir="${extra_dir_name}/" + fi pushd ${sdir} > /dev/null if is_set "${CONSUL_DEV}" @@ -361,7 +381,7 @@ function build_consul_local { -arch="${build_arch}" \ -osarch="!darwin/arm !darwin/arm64" \ -ldflags="${GOLDFLAGS}" \ - -output "pkg.bin.new/{{.OS}}_{{.Arch}}${build_suffix}/consul" \ + -output "pkg.bin.new/${extra_dir}{{.OS}}_{{.Arch}}${build_suffix}/consul" \ -tags="${GOTAGS}" \ . @@ -372,7 +392,7 @@ function build_consul_local { return 1 fi - build_consul_post "${sdir}" "${build_suffix}" + build_consul_post "${sdir}" "${extra_dir_name}" if test $? -ne 0 then err "ERROR: Failed postprocessing Consul binaries" diff --git a/build-support/functions/30-release.sh b/build-support/functions/30-release.sh index 93c8f99f8..95df573d1 100644 --- a/build-support/functions/30-release.sh +++ b/build-support/functions/30-release.sh @@ -63,10 +63,54 @@ function tag_release { return $ret } -function package_release { +function package_binaries { + # Arguments: + # $1 - Path to the directory containing the built binaries + # $2 - Destination path of the packaged binaries + # $3 - Version + # + # Returns: + # 0 - success + # * - error + + local sdir="$1" + local ddir="$2" + local vers="$3" + local ret=0 + + + if ! test -d "${sdir}" + then + err "ERROR: '$1' is not a directory. 
package_binaries must be called with the path to the directory containing the binaries" + return 1 + fi + + rm -rf "${ddir}" > /dev/null 2>&1 + mkdir -p "${ddir}" >/dev/null 2>&1 + for platform in $(find "${sdir}" -mindepth 1 -maxdepth 1 -type d) + do + local os_arch=$(basename $platform) + local dest="${ddir}/${CONSUL_PKG_NAME}_${vers}_${os_arch}.zip" + status "Compressing ${os_arch} directory into ${dest}" + pushd "${platform}" > /dev/null + zip "${ddir}/${CONSUL_PKG_NAME}_${vers}_${os_arch}.zip" ./* + ret=$? + popd > /dev/null + + if test "$ret" -ne 0 + then + break + fi + done + + return ${ret} +} + +function package_release_one { # Arguments: # $1 - Path to the top level Consul source # $2 - Version to use in the names of the zip files (optional) + # $3 - Subdirectory under pkg/dist to use (optional) # # Returns: # 0 - success @@ -78,9 +122,16 @@ function package_release { return 1 fi - local sdir="${1}" + local sdir="$1" local ret=0 - local vers="${2}" + local vers="$2" + local extra_dir_name="$3" + local extra_dir="" + + if test -n "${extra_dir_name}" + then + extra_dir="${extra_dir_name}/" + fi if test -z "${vers}" then @@ -93,81 +144,91 @@ function package_release { fi fi - rm -rf "${sdir}/pkg/dist" > /dev/null 2>&1 - mkdir -p "${sdir}/pkg/dist" >/dev/null 2>&1 - for platform in $(find "${sdir}/pkg/bin" -mindepth 1 -maxdepth 1 -type d) - do - local os_arch=$(basename $platform) - local dest="${sdir}/pkg/dist/${CONSUL_PKG_NAME}_${vers}_${os_arch}.zip" - status "Compressing ${os_arch} directory into ${dest}" - pushd "${platform}" > /dev/null - zip "${sdir}/pkg/dist/${CONSUL_PKG_NAME}_${vers}_${os_arch}.zip" ./* - ret=$? - popd > /dev/null - - if test "$ret" -ne 0 - then - break - fi - done + package_binaries "${sdir}/pkg/bin/${extra_dir}" "${sdir}/pkg/dist/${extra_dir}" "${vers}" + return $? 
+} + +function package_release { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - Version to use in the names of the zip files (optional) + # + # Returns: + # 0 - success + # * - error - return $ret + package_release_one "$1" "$2" "" + return $? } function shasum_release { # Arguments: - # $1 - Path to directory containing the files to shasum - # $2 - File to output sha sums to + # $1 - Path to the dist directory + # $2 - Version of the release # # Returns: # 0 - success # * - failure + local sdir="$1" + local vers="$2" + if ! test -d "$1" then - err "ERROR: '$1' is not a directory and shasum_release requires passing a directory as the first argument" + err "ERROR: sign_release requires a path to the dist dir as the first argument" return 1 fi - if test -z "$2" + if test -z "${vers}" then - err "ERROR: shasum_release requires a second argument to be the filename to output the shasums to but none was given" - return 1 + err "ERROR: sign_release requires a version to be specified as the second argument" + return 1 fi - pushd $1 > /dev/null - shasum -a256 * > "$2" - ret=$? - popd >/dev/null + local hfile="${CONSUL_PKG_NAME}_${vers}_SHA256SUMS" - return $ret + shasum_directory "${sdir}" "${sdir}/${hfile}" + return $? } function sign_release { # Arguments: - # $1 - File to sign + # $1 - Path to distribution directory + # $2 - Version # $2 - Alternative GPG key to use for signing # # Returns: # 0 - success # * - failure - # determine whether the gpg key to use is being overridden - local gpg_key=${HASHICORP_GPG_KEY} - if test -n "$2" + local sdir="$1" + local vers="$2" + + if ! test -d "${sdir}" then - gpg_key=$2 + err "ERROR: sign_release requires a path to the dist dir as the first argument" + return 1 fi - gpg --default-key "${gpg_key}" --detach-sig --yes -v "$1" - return $? 
+ if test -z "${vers}" + then + err "ERROR: sign_release requires a version to be specified as the second argument" + return 1 + fi + + local hfile="${CONSUL_PKG_NAME}_${vers}_SHA256SUMS" + + status_stage "==> Signing ${hfile}" + gpg_detach_sign "${1}/${hfile}" "$2" || return 1 + return 0 } -function check_release { +function check_release_one { # Arguments: # $1 - Path to the release files # $2 - Version to expect # $3 - boolean whether to expect the signature file + # $4 - Release Name (optional) # # Returns: # 0 - success @@ -177,6 +238,13 @@ function check_release { declare -a expected_files + declare log_extra="" + + if test -n "$4" + then + log_extra="for $4 " + fi + expected_files+=("${CONSUL_PKG_NAME}_${2}_SHA256SUMS") echo "check sig: $3" if is_set "$3" @@ -199,7 +267,7 @@ function check_release { declare -a found_files - status_stage "==> Verifying release contents - ${2}" + status_stage "==> Verifying release contents ${log_extra}- ${2}" debug "Expecting Files:" for fname in "${expected_files[@]}" do @@ -254,6 +322,21 @@ function check_release { return $ret } +function check_release { + # Arguments: + # $1 - Path to the release files + # $2 - Version to expect + # $3 - boolean whether to expect the signature file + # + # Returns: + # 0 - success + # * - failure + + check_release_one "$1" "$2" "$3" + return ${ret} +} + + function build_consul_release { build_consul "$1" "" "$2" } @@ -420,7 +503,7 @@ function build_release { fi status_stage "==> Generating SHA 256 Hashes for Binaries" - shasum_release "${sdir}/pkg/dist" "${CONSUL_PKG_NAME}_${vers}_SHA256SUMS" + shasum_release "${sdir}/pkg/dist" "${vers}" if test $? -ne 0 then err "ERROR: Failed to generate SHA 256 hashes for the release" @@ -429,7 +512,7 @@ function build_release { if is_set "${do_sha256}" then - sign_release "${sdir}/pkg/dist/${CONSUL_PKG_NAME}_${vers}_SHA256SUMS" "${gpg_key}" + sign_release "${sdir}/pkg/dist" "${vers}" "${gpg_key}" if test $? 
-ne 0 then err "ERROR: Failed to sign the SHA 256 hashes file" From b4d8d9b9eae5885f263056dce98f67e38925e24a Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 11:57:35 -0400 Subject: [PATCH 337/627] Fixup version release detection --- build-support/functions/10-util.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/build-support/functions/10-util.sh b/build-support/functions/10-util.sh index 03a94de5f..d4d5d7956 100644 --- a/build-support/functions/10-util.sh +++ b/build-support/functions/10-util.sh @@ -137,6 +137,7 @@ function parse_version { # Get the main version out of the source file version_main=$(awk '$1 == "Version" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) release_main=$(awk '$1 == "VersionPrerelease" && $2 == "=" { gsub(/"/, "", $3); print $3 }' < ${vfile}) + # try to determine the version if we have build tags for tag in "$GOTAGS" @@ -150,6 +151,7 @@ function parse_version { fi done done + # override the version from source with the value of the GIT_DESCRIBE env var if present if test -n "${git_version}" @@ -162,19 +164,19 @@ function parse_version { if is_set "${include_release}" then # Get the release version out of the source file - release="${release_main}" + rel_ver="${release_main}" # When no GIT_DESCRIBE env var is present and no release is in the source then we # are definitely in dev mode - if test -z "${git_version}" -a -z "$release" + if test -z "${git_version}" -a -z "${rel_ver}" then - release="dev" + rel_ver="dev" fi # Add the release to the version - if test -n "$release" + if test -n "${rel_ver}" then - version="${version}-${release}" + version="${version}-${rel_ver}" # add the git commit to the version if test -n "${git_commit}" From c59afc119a66f386cbd7b6298510bb3f7d793804 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 12:33:59 -0400 Subject: [PATCH 338/627] Return early when git changes are denied --- build-support/scripts/dev.sh | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/build-support/scripts/dev.sh b/build-support/scripts/dev.sh index 6046ef3cc..999d631db 100755 --- a/build-support/scripts/dev.sh +++ b/build-support/scripts/dev.sh @@ -87,7 +87,7 @@ function main { commit_dev_mode "${sdir}" || return 1 status_stage "==> Confirming Git Changes" - confirm_git_push_changes "${sdir}" + confirm_git_push_changes "${sdir}" || return 1 status_stage "==> Pushing to Git" git_push_ref "$1" || return 1 From 1031a2bbf84eb0fbcad981349405c2b7ca668b65 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 12:34:30 -0400 Subject: [PATCH 339/627] Pass the right var to git_push_ref --- build-support/scripts/dev.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-support/scripts/dev.sh b/build-support/scripts/dev.sh index 999d631db..8200835b5 100755 --- a/build-support/scripts/dev.sh +++ b/build-support/scripts/dev.sh @@ -90,7 +90,7 @@ function main { confirm_git_push_changes "${sdir}" || return 1 status_stage "==> Pushing to Git" - git_push_ref "$1" || return 1 + git_push_ref "${sdir}" || return 1 fi return 0 From 0563be5d2dbf0ef3229aeef6822d06db465695f4 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 13:09:41 -0400 Subject: [PATCH 340/627] Set the binary type --- build-support/functions/00-vars.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/build-support/functions/00-vars.sh b/build-support/functions/00-vars.sh index c53cffece..7fe4a7d80 100644 --- a/build-support/functions/00-vars.sh +++ b/build-support/functions/00-vars.sh @@ -35,3 +35,5 @@ then else SED_EXT="" fi + +CONSUL_BINARY_TYPE=oss \ No newline at end of file From 0b42917150d593925f19f0a670fdb3e429d73848 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 13:20:12 -0400 Subject: [PATCH 341/627] =?UTF-8?q?Don=E2=80=99t=20automatically=20populat?= =?UTF-8?q?e=20release=20with=20dev=20when=20not=20generating=20a=20versio?= =?UTF-8?q?n=20based=20off=20git=20vars?= MIME-Version: 
1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- build-support/functions/10-util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build-support/functions/10-util.sh b/build-support/functions/10-util.sh index d4d5d7956..d884c79c1 100644 --- a/build-support/functions/10-util.sh +++ b/build-support/functions/10-util.sh @@ -168,7 +168,7 @@ function parse_version { # When no GIT_DESCRIBE env var is present and no release is in the source then we # are definitely in dev mode - if test -z "${git_version}" -a -z "${rel_ver}" + if test -z "${git_version}" -a -z "${rel_ver}" && is_set "${use_git_env}" then rel_ver="dev" fi From 5f6f48bf638f1f0ffad908d85f77398888dc45a0 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 13:42:46 -0400 Subject: [PATCH 342/627] Fix gpg_key usage, also make the location of the binary to check the version of overridable --- build-support/functions/30-release.sh | 2 +- build-support/functions/40-publish.sh | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/build-support/functions/30-release.sh b/build-support/functions/30-release.sh index 95df573d1..df1c51212 100644 --- a/build-support/functions/30-release.sh +++ b/build-support/functions/30-release.sh @@ -219,7 +219,7 @@ function sign_release { local hfile="${CONSUL_PKG_NAME}_${vers}_SHA256SUMS" status_stage "==> Signing ${hfile}" - gpg_detach_sign "${1}/${hfile}" "$2" || return 1 + gpg_detach_sign "${1}/${hfile}" "$3" || return 1 return 0 } diff --git a/build-support/functions/40-publish.sh b/build-support/functions/40-publish.sh index bbc3dc1ad..ec1494e28 100644 --- a/build-support/functions/40-publish.sh +++ b/build-support/functions/40-publish.sh @@ -70,9 +70,9 @@ function confirm_git_push_changes { return $ret } -function confirm_consul_version { +function confirm_consul_version_zip { # Arguments: - # $1 - Path to the release files + # $1 - Path to the zipped binary to test # $2 - Version to look for # 
# Returns: @@ -128,6 +128,11 @@ function confirm_consul_version { return ${ret} } +function confirm_consul_version { + confirm_consul_version_zip "$1" "$2" + return $? +} + function publish_release { # Arguments: From 7cd5e413ee57f35bab594aa172106d4235e7368a Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 18 Jun 2018 19:07:57 +0100 Subject: [PATCH 343/627] Sets code editor height to be auto, with a reasonable min height --- ui-v2/app/styles/components/code-editor.scss | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ui-v2/app/styles/components/code-editor.scss b/ui-v2/app/styles/components/code-editor.scss index 4fecca03f..1132925cc 100644 --- a/ui-v2/app/styles/components/code-editor.scss +++ b/ui-v2/app/styles/components/code-editor.scss @@ -19,7 +19,10 @@ $syntax-dark-gray: #535f73; $syntax-gutter-grey: #2a2f36; $syntax-yellow: $yellow; - +.CodeMirror { + min-height: 300px; + height: auto; +} .CodeMirror-lint-tooltip { background-color: #f9f9fa; border: 1px solid $syntax-light-gray; From c550eb899dfc02f28ec76675662180b224f55bf4 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 14:36:24 -0400 Subject: [PATCH 344/627] Quote some make vars --- GNUmakefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index 99083ee0e..e848ba100 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -52,13 +52,13 @@ DIST_BUILD?=1 DIST_SIGN?=1 ifdef DIST_VERSION -DIST_VERSION_ARG=-v $(DIST_VERSION) +DIST_VERSION_ARG=-v "$(DIST_VERSION)" else DIST_VERSION_ARG= endif ifdef DIST_RELEASE_DATE -DIST_DATE_ARG=-d $(DIST_RELEASE_DATE) +DIST_DATE_ARG=-d "$(DIST_RELEASE_DATE)" else DIST_DATE_ARG= endif @@ -117,10 +117,10 @@ linux: # dist builds binaries for all platforms and packages them for distribution dist: - @$(SHELL) $(CURDIR)/build-support/scripts/release.sh -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' '$(DIST_VERSION_ARG)' '$(DIST_DATE_ARG)' + @$(SHELL) 
$(CURDIR)/build-support/scripts/release.sh -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' $(DIST_VERSION_ARG) $(DIST_DATE_ARG) publish: - @$(SHELL) $(CURDIR)/build-support/scripts/publish.sh '$(PUB_GIT_ARG)' '$(PUB_WEBSITE_ARG)' + @$(SHELL) $(CURDIR)/build-support/scripts/publish.sh $(PUB_GIT_ARG) $(PUB_WEBSITE_ARG) dev-tree: @$(SHELL) $(CURDIR)/build-support/scripts/dev.sh From 51731e13bef2ec261e890e473bb52c15e0c5ad58 Mon Sep 17 00:00:00 2001 From: Stanley Shilov Date: Mon, 18 Jun 2018 13:41:22 -0500 Subject: [PATCH 345/627] Sessions TTL should be of type string in ui-v2 --- ui-v2/app/models/session.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui-v2/app/models/session.js b/ui-v2/app/models/session.js index 1ce2ba997..d569c051b 100644 --- a/ui-v2/app/models/session.js +++ b/ui-v2/app/models/session.js @@ -13,7 +13,7 @@ export default Model.extend({ ModifyIndex: attr('number'), LockDelay: attr('number'), Behavior: attr('string'), - TTL: attr('number'), + TTL: attr('string'), Checks: attr({ defaultValue: function() { return []; From 7f4e18d3d10f30ee9568252c695539ed9ffa8ed7 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 14:53:50 -0400 Subject: [PATCH 346/627] Quote $@ when passing around --- build-support/scripts/build-docker.sh | 2 +- build-support/scripts/build-local.sh | 2 +- build-support/scripts/dev.sh | 2 +- build-support/scripts/publish.sh | 2 +- build-support/scripts/release.sh | 3 ++- build-support/scripts/version.sh | 2 +- 6 files changed, 7 insertions(+), 6 deletions(-) diff --git a/build-support/scripts/build-docker.sh b/build-support/scripts/build-docker.sh index f7736b423..472324a97 100755 --- a/build-support/scripts/build-docker.sh +++ b/build-support/scripts/build-docker.sh @@ -148,5 +148,5 @@ function main { return 0 } -main $@ +main "$@" exit $? 
\ No newline at end of file diff --git a/build-support/scripts/build-local.sh b/build-support/scripts/build-local.sh index 276fd3041..45f33282d 100755 --- a/build-support/scripts/build-local.sh +++ b/build-support/scripts/build-local.sh @@ -103,5 +103,5 @@ function main { return 0 } -main $@ +main "$@" exit $? \ No newline at end of file diff --git a/build-support/scripts/dev.sh b/build-support/scripts/dev.sh index 8200835b5..222002d99 100755 --- a/build-support/scripts/dev.sh +++ b/build-support/scripts/dev.sh @@ -96,5 +96,5 @@ function main { return 0 } -main $@ +main "$@" exit $? \ No newline at end of file diff --git a/build-support/scripts/publish.sh b/build-support/scripts/publish.sh index 034b37861..3d6faf01f 100755 --- a/build-support/scripts/publish.sh +++ b/build-support/scripts/publish.sh @@ -89,6 +89,6 @@ function main { return 0 } -main $@ +main "$@" exit $? \ No newline at end of file diff --git a/build-support/scripts/release.sh b/build-support/scripts/release.sh index 2cb6ea3e6..581d019b3 100755 --- a/build-support/scripts/release.sh +++ b/build-support/scripts/release.sh @@ -128,6 +128,7 @@ function main { shift 2 ;; -d | --date) + echo "$2" ensure_arg "-d/--date" "$2" || return 1 release_date="$2" shift 2 @@ -143,6 +144,6 @@ function main { return $? } -main $@ +main "$@" exit $? \ No newline at end of file diff --git a/build-support/scripts/version.sh b/build-support/scripts/version.sh index 30a51cfbf..d7c166f0f 100755 --- a/build-support/scripts/version.sh +++ b/build-support/scripts/version.sh @@ -87,6 +87,6 @@ function main { return 0 } -main $@ +main "$@" exit $? 
\ No newline at end of file From da12d8a88c460a69f0d29bf395caeec1263ed379 Mon Sep 17 00:00:00 2001 From: Omar Khawaja Date: Mon, 18 Jun 2018 15:25:35 -0400 Subject: [PATCH 347/627] update encryption doc and add guide for creating certificates (#4238) * update encryption doc and add guide for creating certificates in consul with cfssl * add details about CLI and disabling HTTP * delete $ symbols and add guide elements * add missing periods and steps heading --- website/source/docs/agent/encryption.html.md | 8 +- .../docs/guides/creating-certificates.html.md | 198 ++++++++++++++++++ website/source/layouts/docs.erb | 4 + 3 files changed, 206 insertions(+), 4 deletions(-) create mode 100644 website/source/docs/guides/creating-certificates.html.md diff --git a/website/source/docs/agent/encryption.html.md b/website/source/docs/agent/encryption.html.md index 284d918c9..148647f6f 100644 --- a/website/source/docs/agent/encryption.html.md +++ b/website/source/docs/agent/encryption.html.md @@ -78,10 +78,8 @@ Consul supports using TLS to verify the authenticity of servers and clients. To Consul requires that all clients and servers have key pairs that are generated by a single Certificate Authority. This can be a private CA, used only internally. The CA then signs keys for each of the agents, as in -[this tutorial on generating both a CA and signing keys](http://russellsimpkins.blogspot.com/2015/10/consul-adding-tls-using-self-signed.html) -using OpenSSL. - --> **Note:** Client certificates must have [Extended Key Usage](https://www.openssl.org/docs/manmaster/man5/x509v3_config.html#Extended-Key-Usage) enabled for client and server authentication. +[this tutorial on generating both a CA and signing keys](/docs/guides/creating-certificates.html) +using [cfssl][cfssl]. TLS can be used to verify the authenticity of the servers or verify the authenticity of clients. 
These modes are controlled by the [`verify_outgoing`](/docs/agent/options.html#verify_outgoing), @@ -133,3 +131,5 @@ if applicable) to `true`. 5. Perform another rolling restart of each agent in the cluster. At this point, full TLS encryption for RPC communication should be enabled. + +[cfssl]: https://cfssl.org/ diff --git a/website/source/docs/guides/creating-certificates.html.md b/website/source/docs/guides/creating-certificates.html.md new file mode 100644 index 000000000..34708ac38 --- /dev/null +++ b/website/source/docs/guides/creating-certificates.html.md @@ -0,0 +1,198 @@ +--- +layout: "docs" +page_title: "Creating Certificates" +sidebar_current: "docs-guides-creating-certificates" +description: |- + Learn how to create certificates for Consul. +--- + +# Creating Certificates + +Correctly configuring TLS can be a complex process, especially given the wide +range of deployment methodologies. This guide will provide you with a +production ready TLS configuration. + +~> Note that while Consul's TLS configuration will be production ready, key + management and rotation is a complex subject not covered by this guide. + [Vault][vault] is the suggested solution for key generation and management. + +The first step to configuring TLS for Consul is generating certificates. In +order to prevent unauthorized cluster access, Consul requires all certificates +be signed by the same Certificate Authority (CA). This should be a _private_ CA +and not a public one like [Let's Encrypt][letsencrypt] as any certificate +signed by this CA will be allowed to communicate with the cluster. + +~> Consul certificates may be signed by intermediate CAs as long as the root CA + is the same. Append all intermediate CAs to the `cert_file`. + + +## Reference Material + +- [Encryption](/docs/agent/encryption.html) + +## Estimated Time to Complete + +20 minutes + +## Prerequisites + +This guide assumes you have [cfssl][cfssl] installed (be sure to install +cfssljson as well). 
+ +## Steps + +### Step 1: Create Certificate Authority + +There are a variety of tools for managing your own CA, [like the PKI secret +backend in Vault][vault-pki], but for the sake of simplicity this guide will +use [cfssl][cfssl]. You can generate a private CA certificate and key with +[cfssl][cfssl]: + +```shell +# Generate a default CSR +$ cfssl print-defaults csr > ca-csr.json +``` +Change the `key` field to use RSA with a size of 2048 + +```json +{ + "CN": "example.net", + "hosts": [ + "example.net", + "www.example.net" + ], + "key": { + "algo": "rsa", + "size": 2048 + }, + "names": [ + { + "C": "US", + "ST": "CA", + "L": "San Francisco" + } + ] +} +``` + +```shell +# Generate the CA's private key and certificate +$ cfssl gencert -initca ca-csr.json | cfssljson -bare consul-ca +``` + +The CA key (`consul-ca-key.pem`) will be used to sign certificates for Consul +nodes and must be kept private. The CA certificate (`consul-ca.pem`) contains +the public key necessary to validate Consul certificates and therefore must be +distributed to every node that requires access. + +### Step 2: Generate and Sign Node Certificates + +Once you have a CA certificate and key you can generate and sign the +certificates Consul will use directly. TLS certificates commonly use the +fully-qualified domain name of the system being identified as the certificate's +Common Name (CN). However, hosts (and therefore hostnames and IPs) are often +ephemeral in Consul clusters. Not only would signing a new certificate per +Consul node be difficult, but using a hostname provides no security or +functional benefits to Consul. 
To fulfill the desired security properties +(above) Consul certificates are signed with their region and role such as: + +* `client.node.global.consul` for a client node in the `global` region +* `server.node.us-west.consul` for a server node in the `us-west` region + +To create certificates for the client and server in the cluster with +[cfssl][cfssl], create the following configuration file as `cfssl.json` to increase the default certificate expiration time: + +```json +{ + "signing": { + "default": { + "expiry": "87600h", + "usages": [ + "signing", + "key encipherment", + "server auth", + "client auth" + ] + } + } +} +``` + +```shell +# Generate a certificate for the Consul server +$ echo '{"key":{"algo":"rsa","size":2048}}' | cfssl gencert -ca=consul-ca.pem -ca-key=consul-ca-key.pem -config=cfssl.json \ + -hostname="server.node.global.consul,localhost,127.0.0.1" - | cfssljson -bare server + +# Generate a certificate for the Consul client +$ echo '{"key":{"algo":"rsa","size":2048}}' | cfssl gencert -ca=consul-ca.pem -ca-key=consul-ca-key.pem -config=cfssl.json \ + -hostname="client.node.global.consul,localhost,127.0.0.1" - | cfssljson -bare client + +# Generate a certificate for the CLI +$ echo '{"key":{"algo":"rsa","size":2048}}' | cfssl gencert -ca=consul-ca.pem -ca-key=consul-ca-key.pem -profile=client \ + - | cfssljson -bare cli +``` + +Using `localhost` and `127.0.0.1` as subject alternate names (SANs) allows +tools like `curl` to be able to communicate with Consul's HTTP API when run on +the same host. Other SANs may be added including a DNS resolvable hostname to +allow remote HTTP requests from third party tools. + +You should now have the following files: + +* `cfssl.json` - cfssl configuration. +* `consul-ca.csr` - CA signing request. +* `consul-ca-key.pem` - CA private key. Keep safe! +* `consul-ca.pem` - CA public certificate. +* `cli.csr` - Consul CLI certificate signing request. +* `cli-key.pem` - Consul CLI private key. 
+* `cli.pem` - Consul CLI certificate. +* `client.csr` - Consul client node certificate signing request for the `global` region. +* `client-key.pem` - Consul client node private key for the `global` region. +* `client.pem` - Consul client node public certificate for the `global` region. +* `server.csr` - Consul server node certificate signing request for the `global` region. +* `server-key.pem` - Consul server node private key for the `global` region. +* `server.pem` - Consul server node public certificate for the `global` region. + +Each Consul node should have the appropriate key (`-key.pem`) and certificate +(`.pem`) file for its region and role. In addition each node needs the CA's +public certificate (`consul-ca.pem`). + +Please note you will need the keys for the CLI if you choose to disable +HTTP (in which case running the command `consul members` will return an error). +This is because the Consul CLI defaults to communicating via HTTP instead of +HTTPS. We can configure the local Consul client to connect using TLS and specify +our custom keys and certificates using the command line: + +```shell +$ consul members -ca-file=consul-ca.pem -client-cert=cli.pem -client-key=cli-key.pem -http-addr="https://localhost:9090" +``` +(The command is assuming HTTPS is configured to use port 9090. To see how +you can change this, visit the [Configuration](/docs/agent/options.html) page) + +This process can be cumbersome to type each time, so the Consul CLI also +searches environment variables for default values. Set the following +environment variables in your shell: + +```shell +$ export CONSUL_HTTP_ADDR=https://localhost:9090 +$ export CONSUL_CACERT=consul-ca.pem +$ export CONSUL_CLIENT_CERT=cli.pem +$ export CONSUL_CLIENT_KEY=cli-key.pem +``` + +* `CONSUL_HTTP_ADDR` is the URL of the Consul agent and sets the default for + `-http-addr`. +* `CONSUL_CACERT` is the location of your CA certificate and sets the default + for `-ca-file`. 
+* `CONSUL_CLIENT_CERT` is the location of your CLI certificate and sets the + default for `-client-cert`. +* `CONSUL_CLIENT_KEY` is the location of your CLI key and sets the default for + `-client-key`. + +After these environment variables are correctly configured, the CLI will +respond as expected. + +[cfssl]: https://cfssl.org/ +[letsencrypt]: https://letsencrypt.org/ +[vault]: https://www.vaultproject.io/ +[vault-pki]: https://www.vaultproject.io/docs/secrets/pki/index.html diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index d503bdb5d..901d6a210 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -236,6 +236,10 @@ > Consul with Containers + > + Creating +Certificates + > DNS Caching From 5fc30a4e6fa3fe13e1b61e446865dd4bc30e7834 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Mon, 18 Jun 2018 17:01:20 -0400 Subject: [PATCH 348/627] Allow for building pre-releases/rcs/betas --- GNUmakefile | 8 +++- build-support/functions/10-util.sh | 53 +++++++++++++++++---------- build-support/functions/30-release.sh | 12 ++++-- build-support/scripts/release.sh | 11 +++++- 4 files changed, 58 insertions(+), 26 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index e848ba100..f0f915c7d 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -63,6 +63,12 @@ else DIST_DATE_ARG= endif +ifdef DIST_PRERELEASE +DIST_REL_ARG=-r "$(DIST_PRERELEASE)" +else +DIST_REL_ARG= +endif + PUB_GIT?=1 PUB_WEBSITE?=1 @@ -117,7 +123,7 @@ linux: # dist builds binaries for all platforms and packages them for distribution dist: - @$(SHELL) $(CURDIR)/build-support/scripts/release.sh -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' $(DIST_VERSION_ARG) $(DIST_DATE_ARG) + @$(SHELL) $(CURDIR)/build-support/scripts/release.sh -t '$(DIST_TAG)' -b '$(DIST_BUILD)' -S '$(DIST_SIGN)' $(DIST_VERSION_ARG) $(DIST_DATE_ARG) $(DIST_REL_ARG) publish: @$(SHELL) $(CURDIR)/build-support/scripts/publish.sh $(PUB_GIT_ARG) $(PUB_WEBSITE_ARG) diff --git 
a/build-support/functions/10-util.sh b/build-support/functions/10-util.sh index d884c79c1..6289dcb03 100644 --- a/build-support/functions/10-util.sh +++ b/build-support/functions/10-util.sh @@ -94,6 +94,7 @@ function parse_version { # $1 - Path to the top level Consul source # $2 - boolean value for whether the release version should be parsed from the source # $3 - boolean whether to use GIT_DESCRIBE and GIT_COMMIT environment variables + # $4 - boolean whether to omit the version part of the version string. (optional) # # Return: # 0 - success (will write the version to stdout) @@ -114,6 +115,7 @@ function parse_version { local include_release="$2" local use_git_env="$3" + local omit_version="$4" local git_version="" local git_commit="" @@ -152,18 +154,17 @@ function parse_version { done done - + local version="${version_main}" # override the version from source with the value of the GIT_DESCRIBE env var if present if test -n "${git_version}" then version="${git_version}" - else - version="${version_main}" fi - + + local rel_ver="" if is_set "${include_release}" then - # Get the release version out of the source file + # Default to pre-release from the source rel_ver="${release_main}" # When no GIT_DESCRIBE env var is present and no release is in the source then we @@ -174,21 +175,28 @@ function parse_version { fi # Add the release to the version - if test -n "${rel_ver}" + if test -n "${rel_ver}" -a -n "${git_commit}" then - version="${version}-${rel_ver}" - - # add the git commit to the version - if test -n "${git_commit}" - then - version="${version} (${git_commit})" - fi + rel_ver="${rel_ver} (${git_commit})" fi fi - # Output the version - echo "$version" | tr -d "'" - return 0 + if test -n "${rel_ver}" + then + if is_set "${omit_version}" + then + echo "${rel_ver}" | tr -d "'" + else + echo "${version}-${rel_ver}" | tr -d "'" + fi + return 0 + elif ! 
is_set "${omit_version}" + then + echo "${version}" | tr -d "'" + return 0 + else + return 1 + fi } function get_version { @@ -624,6 +632,7 @@ function set_release_mode { # $1 - Path to top level Consul source # $2 - The version of the release # $3 - The release date + # $4 - The pre-release version # # # Returns: @@ -651,11 +660,17 @@ function set_release_mode { rel_date="$3" fi - status_stage "==> Updating CHANGELOG.md with release info: ${vers} (${rel_date})" - set_changelog_version "${sdir}" "${vers}" "${rel_date}" || return 1 + local changelog_vers="${vers}" + if test -n "$4" + then + changelog_vers="${vers}-$4" + fi + + status_stage "==> Updating CHANGELOG.md with release info: ${changelog_vers} (${rel_date})" + set_changelog_version "${sdir}" "${changelog_vers}" "${rel_date}" || return 1 status_stage "==> Updating version/version.go" - if ! update_version "${sdir}/version/version.go" "${vers}" + if ! update_version "${sdir}/version/version.go" "${vers}" "$4" then unset_changelog_version "${sdir}" return 1 diff --git a/build-support/functions/30-release.sh b/build-support/functions/30-release.sh index df1c51212..a006a0791 100644 --- a/build-support/functions/30-release.sh +++ b/build-support/functions/30-release.sh @@ -349,7 +349,8 @@ function build_release { # $4 - boolean whether to generate the sha256 sums # $5 - version to set within version.go and the changelog # $6 - release date to set within the changelog - # $7 - alternative gpg key to use for signing operations (optional) + # $7 - release version to set + # $8 - alternative gpg key to use for signing operations (optional) # # Returns: # 0 - success @@ -361,7 +362,8 @@ function build_release { debug "Sign Release: $4" debug "Version: $5" debug "Release Date: $6" - debug "GPG Key: $7" + debug "Release Vers: $7" + debug "GPG Key: $8" if ! 
test -d "$1" then @@ -379,7 +381,7 @@ function build_release { local do_tag="$2" local do_build="$3" local do_sha256="$4" - local gpg_key="$7" + local gpg_key="$8" if test -z "${gpg_key}" then @@ -403,13 +405,15 @@ function build_release { local set_vers="$5" local set_date="$6" + local set_release="$7" if test -z "${set_vers}" then set_vers=$(get_version "${sdir}" false false) + set_release=$(parse_version "${sdir}" true false true) fi - if is_set "${do_tag}" && ! set_release_mode "${sdir}" "${set_vers}" "${set_date}" + if is_set "${do_tag}" && ! set_release_mode "${sdir}" "${set_vers}" "${set_date}" "${set_release}" then err "ERROR: Failed to put source into release mode" return 1 diff --git a/build-support/scripts/release.sh b/build-support/scripts/release.sh index 581d019b3..879fe4320 100755 --- a/build-support/scripts/release.sh +++ b/build-support/scripts/release.sh @@ -53,6 +53,8 @@ Options: the version will be parsed from the source. -d | --date DATE The release date. Defaults to today. + + -r | --release STRING The prerelease version. Defaults to an empty pre-release. -h | --help Print this help text. EOF @@ -81,6 +83,7 @@ function main { declare -i do_sign=1 declare gpg_key="${HASHICORP_GPG_KEY}" declare version="" + declare release_ver="" declare release_date=$(date +"%B %d, %Y") while test $# -gt 0 @@ -128,11 +131,15 @@ function main { shift 2 ;; -d | --date) - echo "$2" ensure_arg "-d/--date" "$2" || return 1 release_date="$2" shift 2 ;; + -r | --release) + ensure_arg "-r/--release" "$2" || return 1 + release_ver="$2" + shift 2 + ;; *) err_usage "ERROR: Unknown argument: '$1'" return 1 @@ -140,7 +147,7 @@ function main { esac done - build_release "${sdir}" "${do_tag}" "${do_build}" "${do_sign}" "${version}" "${release_date}" "${gpg_key}" + build_release "${sdir}" "${do_tag}" "${do_build}" "${do_sign}" "${version}" "${release_date}" "${release_ver}" "${gpg_key}" return $? 
} From 6bae5e431c92e3f40d706b892ac8c31030ea00c2 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Tue, 19 Jun 2018 10:25:41 +0100 Subject: [PATCH 349/627] Make sure the body background is white --- ui-v2/app/styles/core/reset.css | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ui-v2/app/styles/core/reset.css b/ui-v2/app/styles/core/reset.css index 24f405788..5e37b0d81 100644 --- a/ui-v2/app/styles/core/reset.css +++ b/ui-v2/app/styles/core/reset.css @@ -1,3 +1,6 @@ +body { + background-color: $white; +} fieldset { border: 0; width: 100%; From 0e3d8557349811002b478e9b72c66353d137130d Mon Sep 17 00:00:00 2001 From: John Cowen Date: Tue, 19 Jun 2018 10:51:31 +0100 Subject: [PATCH 350/627] Sort ACL tokens by name --- ui-v2/app/templates/dc/acls/index.hbs | 2 +- .../acceptance/dc/acls/list-order.feature | 38 +++++++++++++++++++ .../steps/dc/acls/list-order-steps.js | 10 +++++ ui-v2/tests/steps.js | 7 +++- 4 files changed, 55 insertions(+), 2 deletions(-) create mode 100644 ui-v2/tests/acceptance/dc/acls/list-order.feature create mode 100644 ui-v2/tests/acceptance/steps/dc/acls/list-order-steps.js diff --git a/ui-v2/app/templates/dc/acls/index.hbs b/ui-v2/app/templates/dc/acls/index.hbs index f4668373c..1ca1224ed 100644 --- a/ui-v2/app/templates/dc/acls/index.hbs +++ b/ui-v2/app/templates/dc/acls/index.hbs @@ -15,7 +15,7 @@ {{#block-slot 'content'}} {{#if (gt filtered.length 0)}} {{#tabular-collection - items=filtered as |item index| + items=(sort-by 'Name:asc' filtered) as |item index| }} {{#block-slot 'header'}} Name diff --git a/ui-v2/tests/acceptance/dc/acls/list-order.feature b/ui-v2/tests/acceptance/dc/acls/list-order.feature new file mode 100644 index 000000000..a7c75d901 --- /dev/null +++ b/ui-v2/tests/acceptance/dc/acls/list-order.feature @@ -0,0 +1,38 @@ +@setupApplicationTest +Feature: dc / acls / list-order + In order to be able to find ACL tokens easier + As a user + I want to see the ACL listed alphabetically by Name + + Scenario: I have 10 randomly 
sorted tokens + Given 1 datacenter model with the value "datacenter" + And 10 acl model from yaml + --- + - Name: zz + - Name: 123 + - Name: aa + - Name: 9857 + - Name: sfgr + - Name: foo + - Name: bar + - Name: xft + - Name: z-35y + - Name: __acl + --- + When I visit the acls page for yaml + --- + dc: datacenter + --- + Then I see name on the acls like yaml + --- + - __acl + - 123 + - 9857 + - aa + - bar + - foo + - sfgr + - xft + - z-35y + - zz + --- diff --git a/ui-v2/tests/acceptance/steps/dc/acls/list-order-steps.js b/ui-v2/tests/acceptance/steps/dc/acls/list-order-steps.js new file mode 100644 index 000000000..a7eff3228 --- /dev/null +++ b/ui-v2/tests/acceptance/steps/dc/acls/list-order-steps.js @@ -0,0 +1,10 @@ +import steps from '../../steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/steps.js b/ui-v2/tests/steps.js index 4a429f6bb..74281d163 100644 --- a/ui-v2/tests/steps.js +++ b/ui-v2/tests/steps.js @@ -216,7 +216,12 @@ export default function(assert) { const iterator = new Array(_component.length).fill(true); iterator.forEach(function(item, i, arr) { const actual = _component.objectAt(i)[property]; - const expected = yaml[i]; + // anything coming from the DOM is going to be text/strings + // if the yaml has numbers, cast them to strings + // TODO: This would get problematic for deeper objects + // will have to look to do this recursively + const expected = typeof yaml[i] === 'number' ? 
yaml[i].toString() : yaml[i]; + assert.deepEqual( actual, expected, From b9d1e7042a7bf466d18a1d63a2844528570d2143 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Tue, 19 Jun 2018 10:08:16 -0400 Subject: [PATCH 351/627] Make filtering out TXT RRs only apply when they would end up in Additional section ANY queries are no longer affected. --- agent/config/config.go | 2 +- agent/dns.go | 24 +++++++++++------------ agent/dns_test.go | 15 ++++++++------ website/source/docs/agent/options.html.md | 9 +++++---- 4 files changed, 27 insertions(+), 23 deletions(-) diff --git a/agent/config/config.go b/agent/config/config.go index 48227d955..13f7db6a9 100644 --- a/agent/config/config.go +++ b/agent/config/config.go @@ -360,7 +360,7 @@ type DNS struct { RecursorTimeout *string `json:"recursor_timeout,omitempty" hcl:"recursor_timeout" mapstructure:"recursor_timeout"` ServiceTTL map[string]string `json:"service_ttl,omitempty" hcl:"service_ttl" mapstructure:"service_ttl"` UDPAnswerLimit *int `json:"udp_answer_limit,omitempty" hcl:"udp_answer_limit" mapstructure:"udp_answer_limit"` - NodeMetaTXT *bool `json:"additional_node_meta_txt,omitempty" hcl:"additional_node_meta_txt" mapstructure:"additional_node_meta_txt"` + NodeMetaTXT *bool `json:"enable_additional_node_meta_txt,omitempty" hcl:"enable_additional_node_meta_txt" mapstructure:"enable_additional_node_meta_txt"` } type HTTPConfig struct { diff --git a/agent/dns.go b/agent/dns.go index 1b8c2e20c..993511d0d 100644 --- a/agent/dns.go +++ b/agent/dns.go @@ -376,7 +376,7 @@ func (d *DNSServer) nameservers(edns bool) (ns []dns.RR, extra []dns.RR) { } ns = append(ns, nsrr) - glue := d.formatNodeRecord(nil, addr, fqdn, dns.TypeANY, d.config.NodeTTL, edns) + glue := d.formatNodeRecord(nil, addr, fqdn, dns.TypeANY, d.config.NodeTTL, edns, false) extra = append(extra, glue...) 
// don't provide more than 3 servers @@ -584,7 +584,7 @@ RPC: n := out.NodeServices.Node edns := req.IsEdns0() != nil addr := d.agent.TranslateAddress(datacenter, n.Address, n.TaggedAddresses) - records := d.formatNodeRecord(out.NodeServices.Node, addr, req.Question[0].Name, qType, d.config.NodeTTL, edns) + records := d.formatNodeRecord(out.NodeServices.Node, addr, req.Question[0].Name, qType, d.config.NodeTTL, edns, true) if records != nil { resp.Answer = append(resp.Answer, records...) } @@ -612,7 +612,7 @@ func encodeKVasRFC1464(key, value string) (txt string) { } // formatNodeRecord takes a Node and returns an A, AAAA, TXT or CNAME record -func (d *DNSServer) formatNodeRecord(node *structs.Node, addr, qName string, qType uint16, ttl time.Duration, edns bool) (records []dns.RR) { +func (d *DNSServer) formatNodeRecord(node *structs.Node, addr, qName string, qType uint16, ttl time.Duration, edns, answer bool) (records []dns.RR) { // Parse the IP ip := net.ParseIP(addr) var ipv4 net.IP @@ -673,17 +673,17 @@ func (d *DNSServer) formatNodeRecord(node *structs.Node, addr, qName string, qTy } } - node_meta_txt := true + node_meta_txt := false if node == nil { node_meta_txt = false - } else if qType == dns.TypeANY { - // Since any RR type is requested allow the configuration to - // determine whether or not node meta gets added as TXT records + } else if answer { + node_meta_txt = true + } else { + // Use configuration when the TXT RR would + // end up in the Additional section of the + // DNS response node_meta_txt = d.config.NodeMetaTXT - } else if qType != dns.TypeTXT { - // qType isn't TXT or ANY so avoid emitting the TXT records - node_meta_txt = false } if node_meta_txt { @@ -1158,7 +1158,7 @@ func (d *DNSServer) serviceNodeRecords(dc string, nodes structs.CheckServiceNode handled[addr] = struct{}{} // Add the node record - records := d.formatNodeRecord(node.Node, addr, qName, qType, ttl, edns) + records := d.formatNodeRecord(node.Node, addr, qName, qType, ttl, 
edns, true) if records != nil { resp.Answer = append(resp.Answer, records...) count++ @@ -1207,7 +1207,7 @@ func (d *DNSServer) serviceSRVRecords(dc string, nodes structs.CheckServiceNodes } // Add the extra record - records := d.formatNodeRecord(node.Node, addr, srvRec.Target, dns.TypeANY, ttl, edns) + records := d.formatNodeRecord(node.Node, addr, srvRec.Target, dns.TypeANY, ttl, edns, false) if len(records) > 0 { // Use the node address if it doesn't differ from the service address if addr == node.Node.Address { diff --git a/agent/dns_test.go b/agent/dns_test.go index 454d598c3..a171132e2 100644 --- a/agent/dns_test.go +++ b/agent/dns_test.go @@ -473,7 +473,7 @@ func TestDNS_NodeLookup_TXT(t *testing.T) { } func TestDNS_NodeLookup_TXT_DontSuppress(t *testing.T) { - a := NewTestAgent(t.Name(), `dns_config = { additional_node_meta_txt = false }`) + a := NewTestAgent(t.Name(), `dns_config = { enable_additional_node_meta_txt = false }`) defer a.Shutdown() args := &structs.RegisterRequest{ @@ -555,11 +555,10 @@ func TestDNS_NodeLookup_ANY(t *testing.T) { }, } verify.Values(t, "answer", in.Answer, wantAnswer) - } -func TestDNS_NodeLookup_ANY_SuppressTXT(t *testing.T) { - a := NewTestAgent(t.Name(), `dns_config = { additional_node_meta_txt = false }`) +func TestDNS_NodeLookup_ANY_DontSuppressTXT(t *testing.T) { + a := NewTestAgent(t.Name(), `dns_config = { enable_additional_node_meta_txt = false }`) defer a.Shutdown() args := &structs.RegisterRequest{ @@ -590,6 +589,10 @@ func TestDNS_NodeLookup_ANY_SuppressTXT(t *testing.T) { Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeA, Class: dns.ClassINET, Rdlength: 0x4}, A: []byte{0x7f, 0x0, 0x0, 0x1}, // 127.0.0.1 }, + &dns.TXT{ + Hdr: dns.RR_Header{Name: "bar.node.consul.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Rdlength: 0xa}, + Txt: []string{"key=value"}, + }, } verify.Values(t, "answer", in.Answer, wantAnswer) } @@ -4695,7 +4698,7 @@ func TestDNS_ServiceLookup_FilterACL(t *testing.T) { } func 
TestDNS_ServiceLookup_MetaTXT(t *testing.T) { - a := NewTestAgent(t.Name(), `dns_config = { additional_node_meta_txt = true }`) + a := NewTestAgent(t.Name(), `dns_config = { enable_additional_node_meta_txt = true }`) defer a.Shutdown() args := &structs.RegisterRequest{ @@ -4740,7 +4743,7 @@ func TestDNS_ServiceLookup_MetaTXT(t *testing.T) { } func TestDNS_ServiceLookup_SuppressTXT(t *testing.T) { - a := NewTestAgent(t.Name(), `dns_config = { additional_node_meta_txt = false }`) + a := NewTestAgent(t.Name(), `dns_config = { enable_additional_node_meta_txt = false }`) defer a.Shutdown() // Register a node with a service. diff --git a/website/source/docs/agent/options.html.md b/website/source/docs/agent/options.html.md index 1e1e274b9..3f259e9ef 100644 --- a/website/source/docs/agent/options.html.md +++ b/website/source/docs/agent/options.html.md @@ -778,10 +778,11 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass be increasingly uncommon to need to change this value with modern resolvers). - * `additional_node_meta_txt` - If set - to false, node metadata will not be synthesized into TXT records and returned except for queries specifically for - TXT records. By default, TXT records will be generated for node queries with an ANY query type or for SRV queries - of services. + * `enable_additional_node_meta_txt` - + When set to true, Consul will add TXT records for Node metadata into the Additional section of the DNS responses for several + query types such as SRV queries. When set to false those records are not emitted. This does not impact the behavior of those + same TXT records when they would be added to the Answer section of the response like when querying with type TXT or ANY. This + defaults to true. * `domain` Equivalent to the [`-domain` command-line flag](#_domain). 
From 36e789e95782658d3848b90f110177335a3b62f9 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Tue, 19 Jun 2018 10:49:07 -0400 Subject: [PATCH 352/627] Fix a couple find warnings on linux MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Additionally add the ability to use go install for dev builds rather than gox (travis doesn’t have gox) --- GNUmakefile | 6 +-- build-support/functions/20-build.sh | 62 +++++++++++++++++++++------ build-support/functions/30-release.sh | 2 +- 3 files changed, 52 insertions(+), 18 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index f0f915c7d..f64405688 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -16,8 +16,8 @@ GOTEST_PKGS ?= "./..." else GOTEST_PKGS=$(shell go list ./... | sed 's/github.com\/hashicorp\/consul/./' | egrep -v "^($(GOTEST_PKGS_EXCLUDE))$$") endif -GOOS=$(shell go env GOOS) -GOARCH=$(shell go env GOARCH) +GOOS?=$(shell go env GOOS) +GOARCH?=$(shell go env GOARCH) GOPATH=$(shell go env GOPATH) ASSETFS_PATH?=agent/bindata_assetfs.go @@ -103,7 +103,7 @@ bin: tools dev-build dev: changelogfmt vendorfmt dev-build dev-build: - @$(SHELL) $(CURDIR)/build-support/scripts/build-local.sh + @$(SHELL) $(CURDIR)/build-support/scripts/build-local.sh -o $(GOOS) -a $(GOARCH) dev-docker: @docker build -t '$(CONSUL_DEV_IMAGE)' --build-arg 'GIT_COMMIT=$(GIT_COMMIT)' --build-arg 'GIT_DIRTY=$(GIT_DIRTY)' --build-arg 'GIT_DESCRIBE=$(GIT_DESCRIBE)' -f $(CURDIR)/build-support/docker/Consul-Dev.dockerfile $(CURDIR) diff --git a/build-support/functions/20-build.sh b/build-support/functions/20-build.sh index 346535354..80e572ef3 100644 --- a/build-support/functions/20-build.sh +++ b/build-support/functions/20-build.sh @@ -218,7 +218,7 @@ function build_consul_post { rm -r pkg.bin.new DEV_PLATFORM="./pkg/bin/${extra_dir}$(go env GOOS)_$(go env GOARCH)" - for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f) + for F in $(find ${DEV_PLATFORM} -type f -mindepth 1 -maxdepth 1 ) do # recreate the 
bin dir rm -r bin/* 2> /dev/null @@ -330,6 +330,8 @@ function build_consul_local { # If the CONSUL_DEV environment var is truthy only the local platform/architecture is built. # If the XC_OS or the XC_ARCH environment vars are present then only those platforms/architectures # will be built. Otherwise all supported platform/architectures are built + # The NOGOX environment variable will be used if present. This will prevent using gox and instead + # build with go install if ! test -d "$1" then @@ -374,22 +376,54 @@ function build_consul_local { build_arch="${XC_ARCH}" fi + local use_gox=1 + is_set "${NOGOX}" && use_gox=0 + which gox > /dev/null || use_gox=0 + status_stage "==> Building Consul - OSes: ${build_os}, Architectures: ${build_arch}" mkdir pkg.bin.new 2> /dev/null - CGO_ENABLED=0 gox \ - -os="${build_os}" \ - -arch="${build_arch}" \ - -osarch="!darwin/arm !darwin/arm64" \ - -ldflags="${GOLDFLAGS}" \ - -output "pkg.bin.new/${extra_dir}{{.OS}}_{{.Arch}}${build_suffix}/consul" \ - -tags="${GOTAGS}" \ - . + if is_set "${use_gox}" + then + status "Using gox for concurrent compilation" + + CGO_ENABLED=0 gox \ + -os="${build_os}" \ + -arch="${build_arch}" \ + -osarch="!darwin/arm !darwin/arm64" \ + -ldflags="${GOLDFLAGS}" \ + -output "pkg.bin.new/${extra_dir}{{.OS}}_{{.Arch}}/consul" \ + -tags="${GOTAGS}" \ + . - if test $? -ne 0 - then - err "ERROR: Failed to build Consul" - rm -r pkg.bin.new - return 1 + if test $? -ne 0 + then + err "ERROR: Failed to build Consul" + rm -r pkg.bin.new + return 1 + fi + else + status "Building sequentially with go install" + for os in ${build_os} + do + for arch in ${build_arch} + do + outdir="pkg.bin.new/${extra_dir}${os}_${arch}" + osarch="${os}/${arch}" + if test "${osarch}" == "darwin/arm" -o "${osarch}" == "darwin/arm64" + then + continue + fi + + mkdir -p "${outdir}" + GOOS=${os} GOARCH=${arch} go install -ldflags "${GOLDFLAGS}" -tags "${GOTAGS}" && cp "${MAIN_GOPATH}/bin/consul" "${outdir}/consul" + if test $? 
-ne 0 + then + err "ERROR: Failed to build Consul for ${osarch}" + rm -r pkg.bin.new + return 1 + fi + done + done fi build_consul_post "${sdir}" "${extra_dir_name}" diff --git a/build-support/functions/30-release.sh b/build-support/functions/30-release.sh index a006a0791..5b7aee668 100644 --- a/build-support/functions/30-release.sh +++ b/build-support/functions/30-release.sh @@ -87,7 +87,7 @@ function package_binaries { rm -rf "${ddir}" > /dev/null 2>&1 mkdir -p "${ddir}" >/dev/null 2>&1 - for platform in $(find "${sdir}" -mindepth 1 -maxdepth 1 -type d) + for platform in $(find -type d "${sdir}" -mindepth 1 -maxdepth 1 ) do local os_arch=$(basename $platform) local dest="${ddir}/${CONSUL_PKG_NAME}_${vers}_${os_arch}.zip" From 2d863b8d4cac9ecd83416c2b3118e2bc40641447 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Tue, 19 Jun 2018 12:41:24 -0400 Subject: [PATCH 353/627] PR Updates --- build-support/functions/20-build.sh | 2 +- build-support/functions/30-release.sh | 2 +- build-support/scripts/build-docker.sh | 2 +- build-support/scripts/functions.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/build-support/functions/20-build.sh b/build-support/functions/20-build.sh index 80e572ef3..1a96272f7 100644 --- a/build-support/functions/20-build.sh +++ b/build-support/functions/20-build.sh @@ -218,7 +218,7 @@ function build_consul_post { rm -r pkg.bin.new DEV_PLATFORM="./pkg/bin/${extra_dir}$(go env GOOS)_$(go env GOARCH)" - for F in $(find ${DEV_PLATFORM} -type f -mindepth 1 -maxdepth 1 ) + for F in $(find ${DEV_PLATFORM} -mindepth 1 -maxdepth 1 -type f ) do # recreate the bin dir rm -r bin/* 2> /dev/null diff --git a/build-support/functions/30-release.sh b/build-support/functions/30-release.sh index 5b7aee668..8021d73f1 100644 --- a/build-support/functions/30-release.sh +++ b/build-support/functions/30-release.sh @@ -87,7 +87,7 @@ function package_binaries { rm -rf "${ddir}" > /dev/null 2>&1 mkdir -p "${ddir}" >/dev/null 2>&1 - for platform in 
$(find -type d "${sdir}" -mindepth 1 -maxdepth 1 ) + for platform in $(find "${sdir}" -mindepth 1 -maxdepth 1 -type d ) do local os_arch=$(basename $platform) local dest="${ddir}/${CONSUL_PKG_NAME}_${vers}_${os_arch}.zip" diff --git a/build-support/scripts/build-docker.sh b/build-support/scripts/build-docker.sh index 472324a97..d741ef243 100755 --- a/build-support/scripts/build-docker.sh +++ b/build-support/scripts/build-docker.sh @@ -133,7 +133,7 @@ function main { if is_set "${refresh}" then status_stage "==> Refreshing Legacy UI build container image" - export UI_LEAGCY_BUILD_TAG="${image:-${UI_LEGACY_BUILD_CONTAINER_DEFAULT}}" + export UI_LEGACY_BUILD_TAG="${image:-${UI_LEGACY_BUILD_CONTAINER_DEFAULT}}" refresh_docker_images "${sdir}" ui-legacy-build-image || return 1 fi status_stage "==> Building UI" diff --git a/build-support/scripts/functions.sh b/build-support/scripts/functions.sh index ec473662c..2ddae96f2 100755 --- a/build-support/scripts/functions.sh +++ b/build-support/scripts/functions.sh @@ -9,7 +9,7 @@ FUNC_DIR=$(pwd) popd > /dev/null popd > /dev/null -func_sources=$(find ${FUNC_DIR} -type f -mindepth 1 -maxdepth 1 -name "*.sh" | sort -n) +func_sources=$(find ${FUNC_DIR} -mindepth 1 -maxdepth 1 -name "*.sh" -type f | sort -n) for src in $func_sources do From 5c9275c60b304485649b44bd917cedac8b6f2f05 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Tue, 19 Jun 2018 13:51:49 -0400 Subject: [PATCH 354/627] Update ui-v2 makefile to handle updating node_modules when needed Also dont include the dist and node_modules folders in the build context. 
--- build-support/docker/Build-UI.dockerfile | 2 +- build-support/functions/20-build.sh | 2 +- ui-v2/GNUmakefile | 49 +++++++++++------------- 3 files changed, 24 insertions(+), 29 deletions(-) diff --git a/build-support/docker/Build-UI.dockerfile b/build-support/docker/Build-UI.dockerfile index 72a80d2ba..666a1369c 100644 --- a/build-support/docker/Build-UI.dockerfile +++ b/build-support/docker/Build-UI.dockerfile @@ -11,4 +11,4 @@ RUN apk update && \ mkdir /consul-src WORKDIR /consul-src -CMD make init build +CMD make diff --git a/build-support/functions/20-build.sh b/build-support/functions/20-build.sh index 1a96272f7..6297ead7f 100644 --- a/build-support/functions/20-build.sh +++ b/build-support/functions/20-build.sh @@ -68,7 +68,7 @@ function build_ui { then status "Copying the source from '${ui_dir}' to /consul-src within the container" ( - docker cp . ${container_id}:/consul-src && + tar -c $(ls | grep -v "^(node_modules\|dist)") | docker cp - ${container_id}:/consul-src && status "Running build in container" && docker start -i ${container_id} && rm -rf ${1}/ui-v2/dist && status "Copying back artifacts" && docker cp ${container_id}:/consul-src/dist ${1}/ui-v2/dist diff --git a/ui-v2/GNUmakefile b/ui-v2/GNUmakefile index c72072781..f703de4f4 100644 --- a/ui-v2/GNUmakefile +++ b/ui-v2/GNUmakefile @@ -1,33 +1,28 @@ ROOT:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) -server: - yarn run start - -dist: +all: build + +deps: node_modules + +build: deps yarn run build - mv dist ../pkg/web_ui/v2 - -init: - yarn install - -lint: - yarn run lint:js -format: - yarn run format:js - -.PHONY: server dist lint format - -.DEFAULT_GOAL=all -.PHONY: deps test all build start -all: deps -deps: node_modules yarn.lock package.json -node_modules: - yarn -build: - yarn run build -start: + +start: deps yarn run start -test: + +test: deps yarn run test -test-view: + +test-view: deps yarn run test:view + +lint: deps + yarn run lint:js + +format: deps + yarn run format:js + 
+node_modules: yarn.lock package.json + yarn install + +.PHONY: all deps build start test test-view lint format From bfe2fcbdf1df7995a79b4ff13ed407a31a6754d1 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Tue, 19 Jun 2018 13:59:26 -0400 Subject: [PATCH 355/627] Update the runtime tests --- agent/config/runtime_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/agent/config/runtime_test.go b/agent/config/runtime_test.go index 060215c35..191e8d1ea 100644 --- a/agent/config/runtime_test.go +++ b/agent/config/runtime_test.go @@ -3371,6 +3371,7 @@ func TestFullConfig(t *testing.T) { DNSRecursors: []string{"63.38.39.58", "92.49.18.18"}, DNSServiceTTL: map[string]time.Duration{"*": 32030 * time.Second}, DNSUDPAnswerLimit: 29909, + DNSNodeMetaTXT: true, DataDir: dataDir, Datacenter: "rzo029wg", DevMode: true, @@ -4043,6 +4044,7 @@ func TestSanitize(t *testing.T) { "DNSDomain": "", "DNSEnableTruncate": false, "DNSMaxStale": "0s", + "DNSNodeMetaTXT": false, "DNSNodeTTL": "0s", "DNSOnlyPassing": false, "DNSPort": 0, From 2aaacd3ff4685c73c5e3fd3d41c93ba83a645aed Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 20 Jun 2018 10:22:38 +0100 Subject: [PATCH 356/627] Ensure a blank token is sent if the localStorage kv doesn't exist --- ui-v2/app/services/settings.js | 6 +++- ui-v2/app/templates/settings.hbs | 2 +- .../acceptance/steps/token-header-steps.js | 10 ++++++ ui-v2/tests/acceptance/token-header.feature | 36 +++++++++++++++++++ ui-v2/tests/helpers/yadda-annotations.js | 1 + ui-v2/tests/pages.js | 2 ++ ui-v2/tests/pages/settings.js | 6 ++++ ui-v2/tests/steps.js | 31 ++++++++++++++++ 8 files changed, 92 insertions(+), 2 deletions(-) create mode 100644 ui-v2/tests/acceptance/steps/token-header-steps.js create mode 100644 ui-v2/tests/acceptance/token-header.feature create mode 100644 ui-v2/tests/pages/settings.js diff --git a/ui-v2/app/services/settings.js b/ui-v2/app/services/settings.js index bc7c6456d..cc3d8021e 100644 --- a/ui-v2/app/services/settings.js +++ 
b/ui-v2/app/services/settings.js @@ -7,8 +7,12 @@ export default Service.extend({ storage: window.localStorage, findHeaders: function() { // TODO: if possible this should be a promise + const token = get(this, 'storage').getItem('token'); + // TODO: The old UI always sent ?token= + // replicate the old functionality here + // but remove this to be cleaner if its not necessary return { - 'X-Consul-Token': get(this, 'storage').getItem('token'), + 'X-Consul-Token': token === null ? '' : token, }; }, findAll: function(key) { diff --git a/ui-v2/app/templates/settings.hbs b/ui-v2/app/templates/settings.hbs index 8b84f3e86..8406d57a0 100644 --- a/ui-v2/app/templates/settings.hbs +++ b/ui-v2/app/templates/settings.hbs @@ -13,7 +13,7 @@
diff --git a/ui-v2/tests/acceptance/steps/token-header-steps.js b/ui-v2/tests/acceptance/steps/token-header-steps.js new file mode 100644 index 000000000..c5f07c804 --- /dev/null +++ b/ui-v2/tests/acceptance/steps/token-header-steps.js @@ -0,0 +1,10 @@ +import steps from './steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/acceptance/token-header.feature b/ui-v2/tests/acceptance/token-header.feature new file mode 100644 index 000000000..a1c35b80a --- /dev/null +++ b/ui-v2/tests/acceptance/token-header.feature @@ -0,0 +1,36 @@ +@setupApplicationTest +Feature: token headers + In order to authenticate with tokens + As a user + I need to be able to specify a ACL token AND/OR leave it blank to authenticate with the API + Scenario: Arriving at the index page having not set a token previously + Given 1 datacenter model with the value "datacenter" + When I visit the index page + Then the url should be /datacenter/services + And a GET request is made to "/v1/catalog/datacenters" from yaml + --- + headers: + X-Consul-Token: '' + --- + Scenario: Set a token and then navigate to the index page + Given 1 datacenter model with the value "datacenter" + When I visit the settings page + Then the url should be /settings + Then I type with yaml + --- + token: [Token] + --- + And I submit + When I visit the index page + Then the url should be /datacenter/services + And a GET request is made to "/v1/catalog/datacenters" from yaml + --- + headers: + X-Consul-Token: [Token] + --- + Where: + --------- + | Token | + | token | + | '' | + --------- diff --git a/ui-v2/tests/helpers/yadda-annotations.js b/ui-v2/tests/helpers/yadda-annotations.js index bb7b427c1..7c972601e 100644 --- a/ui-v2/tests/helpers/yadda-annotations.js +++ 
b/ui-v2/tests/helpers/yadda-annotations.js @@ -64,6 +64,7 @@ function setupScenario(featureAnnotations, scenarioAnnotations) { } return function(model) { model.afterEach(function() { + window.localStorage.clear(); api.server.reset(); }); }; diff --git a/ui-v2/tests/pages.js b/ui-v2/tests/pages.js index 1c96d8b94..e89056fa1 100644 --- a/ui-v2/tests/pages.js +++ b/ui-v2/tests/pages.js @@ -1,5 +1,6 @@ import index from 'consul-ui/tests/pages/index'; import dcs from 'consul-ui/tests/pages/dc'; +import settings from 'consul-ui/tests/pages/settings'; import services from 'consul-ui/tests/pages/dc/services/index'; import service from 'consul-ui/tests/pages/dc/services/show'; import nodes from 'consul-ui/tests/pages/dc/nodes/index'; @@ -12,6 +13,7 @@ import acl from 'consul-ui/tests/pages/dc/acls/edit'; export default { index, dcs, + settings, services, service, nodes, diff --git a/ui-v2/tests/pages/settings.js b/ui-v2/tests/pages/settings.js new file mode 100644 index 000000000..0418b0385 --- /dev/null +++ b/ui-v2/tests/pages/settings.js @@ -0,0 +1,6 @@ +import { create, visitable, clickable } from 'ember-cli-page-object'; + +export default create({ + visit: visitable('/settings'), + submit: clickable('[type=submit]'), +}); diff --git a/ui-v2/tests/steps.js b/ui-v2/tests/steps.js index a75d11499..4f8b3db00 100644 --- a/ui-v2/tests/steps.js +++ b/ui-v2/tests/steps.js @@ -150,6 +150,37 @@ export default function(assert) { ); }); }) + // TODO: This one can replace the above one, it covers more use cases + // also DRY it out a bit + .then('a $method request is made to "$url" from yaml\n$yaml', function(method, url, yaml) { + const request = api.server.history[api.server.history.length - 2]; + assert.equal( + request.method, + method, + `Expected the request method to be ${method}, was ${request.method}` + ); + assert.equal(request.url, url, `Expected the request url to be ${url}, was ${request.url}`); + let data = yaml.body || {}; + const body = 
JSON.parse(request.requestBody); + Object.keys(data).forEach(function(key, i, arr) { + assert.equal( + body[key], + data[key], + `Expected the payload to contain ${key} to equal ${body[key]}, ${key} was ${data[key]}` + ); + }); + data = yaml.headers || {}; + const headers = request.requestHeaders; + Object.keys(data).forEach(function(key, i, arr) { + assert.equal( + headers[key], + data[key], + `Expected the payload to contain ${key} to equal ${headers[key]}, ${key} was ${ + data[key] + }` + ); + }); + }) .then('a $method request is made to "$url" with the body "$body"', function( method, url, From cd45c420c6689e616089a8135c10d48ba33503bf Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 20 Jun 2018 13:08:59 +0100 Subject: [PATCH 357/627] Add port the service is running on for each node in the service view --- ui-v2/app/templates/dc/services/show.hbs | 8 ++-- .../tests/acceptance/dc/services/show.feature | 37 ++++++++++++++++++- ui-v2/tests/pages/dc/services/show.js | 10 ++++- ui-v2/tests/steps.js | 1 + ui-v2/yarn.lock | 15 +++++++- 5 files changed, 62 insertions(+), 9 deletions(-) diff --git a/ui-v2/app/templates/dc/services/show.hbs b/ui-v2/app/templates/dc/services/show.hbs index 23dc23af9..f791b8cd9 100644 --- a/ui-v2/app/templates/dc/services/show.hbs +++ b/ui-v2/app/templates/dc/services/show.hbs @@ -24,7 +24,7 @@ {{/if}} {{#if (gt unhealthy.length 0) }} -
+

Unhealthy Nodes

    @@ -34,7 +34,7 @@ data-test-node=item.Node.Node href=(href-to 'dc.nodes.show' item.Node.Node) name=item.Node.Node - address=item.Node.Address + address=(concat item.Node.Address ':' item.Service.Port) checks=item.Checks }} {{/each}} @@ -43,7 +43,7 @@
{{/if}} {{#if (gt healthy.length 0) }} -
+

Healthy Nodes

{{#list-collection items=healthy @@ -53,7 +53,7 @@ href=(href-to 'dc.nodes.show' item.Node.Node) data-test-node=item.Node.Node name=item.Node.Node - address=item.Node.Address + address=(concat item.Node.Address ':' item.Service.Port) checks=item.Checks status=item.Checks.[0].Status }} diff --git a/ui-v2/tests/acceptance/dc/services/show.feature b/ui-v2/tests/acceptance/dc/services/show.feature index 29daba7b6..c99371753 100644 --- a/ui-v2/tests/acceptance/dc/services/show.feature +++ b/ui-v2/tests/acceptance/dc/services/show.feature @@ -1,6 +1,6 @@ @setupApplicationTest Feature: dc / services / show: Show Service - Scenario: Given various service with various tags, all tags are displayed + Scenario: Given various services with various tags, all tags are displayed Given 1 datacenter model with the value "dc1" And 3 node models And 1 service model from yaml @@ -18,5 +18,38 @@ Feature: dc / services / show: Show Service service: service-0 --- Then I see the text "Tag1, Tag2, Tag3" in "[data-test-tags]" - Then ok + Scenario: Given various services the various ports on their nodes are displayed + Given 1 datacenter model with the value "dc1" + And 3 node models + And 1 service model from yaml + --- + - Checks: + - Status: passing + Service: + Port: 8080 + Node: + Address: 1.1.1.1 + - Service: + Port: 8000 + Node: + Address: 2.2.2.2 + - Service: + Port: 8888 + Node: + Address: 3.3.3.3 + --- + When I visit the service page for yaml + --- + dc: dc1 + service: service-0 + --- + Then I see address on the healthy like yaml + --- + - "1.1.1.1:8080" + --- + Then I see address on the unhealthy like yaml + --- + - "2.2.2.2:8000" + - "3.3.3.3:8888" + --- diff --git a/ui-v2/tests/pages/dc/services/show.js b/ui-v2/tests/pages/dc/services/show.js index 136db28e7..b98a3655e 100644 --- a/ui-v2/tests/pages/dc/services/show.js +++ b/ui-v2/tests/pages/dc/services/show.js @@ -1,4 +1,4 @@ -import { create, visitable, collection, attribute } from 'ember-cli-page-object'; +import { create, 
visitable, collection, attribute, text } from 'ember-cli-page-object'; import filter from 'consul-ui/tests/pages/components/catalog-filter'; export default create({ @@ -6,5 +6,13 @@ export default create({ nodes: collection('[data-test-node]', { name: attribute('data-test-node'), }), + healthy: collection('[data-test-healthy] [data-test-node]', { + name: attribute('data-test-node'), + address: text('header strong'), + }), + unhealthy: collection('[data-test-unhealthy] [data-test-node]', { + name: attribute('data-test-node'), + address: text('header strong'), + }), filter: filter, }); diff --git a/ui-v2/tests/steps.js b/ui-v2/tests/steps.js index a75d11499..b0b268ffb 100644 --- a/ui-v2/tests/steps.js +++ b/ui-v2/tests/steps.js @@ -211,6 +211,7 @@ export default function(assert) { `Expected ${num} ${model}s with ${property} set to "${value}", saw ${len}` ); }) + // TODO: Make this accept a 'contains' word so you can search for text containing also .then('I see $property on the $component like yaml\n$yaml', function( property, component, diff --git a/ui-v2/yarn.lock b/ui-v2/yarn.lock index c712625d4..77ef62c14 100644 --- a/ui-v2/yarn.lock +++ b/ui-v2/yarn.lock @@ -86,13 +86,14 @@ resolved "https://registry.yarnpkg.com/@hashicorp/consul-api-double/-/consul-api-double-1.1.0.tgz#658f9e89208fa23f251ca66c66aeb7241a13f23f" "@hashicorp/ember-cli-api-double@^1.0.2": - version "1.0.2" - resolved "https://registry.yarnpkg.com/@hashicorp/ember-cli-api-double/-/ember-cli-api-double-1.0.2.tgz#684d418cc2a981254cc23035ceb452c86f0cd934" + version "1.1.1" + resolved "https://registry.yarnpkg.com/@hashicorp/ember-cli-api-double/-/ember-cli-api-double-1.1.1.tgz#9380fdcf404f30f9d2e2a6422bfd83fe0dbe413f" dependencies: "@hashicorp/api-double" "^1.1.0" array-range "^1.0.1" ember-cli-babel "^6.6.0" js-yaml "^3.11.0" + merge-options "^1.0.1" pretender "^2.0.0" "@sinonjs/formatio@^2.0.0": @@ -5483,6 +5484,10 @@ is-path-inside@^1.0.0: dependencies: path-is-inside "^1.0.1" +is-plain-obj@^1.1: + 
version "1.1.0" + resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e" + is-plain-object@^2.0.1, is-plain-object@^2.0.3, is-plain-object@^2.0.4: version "2.0.4" resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" @@ -6544,6 +6549,12 @@ merge-descriptors@1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/merge-descriptors/-/merge-descriptors-1.0.1.tgz#b00aaa556dd8b44568150ec9d1b953f3f90cbb61" +merge-options@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/merge-options/-/merge-options-1.0.1.tgz#2a64b24457becd4e4dc608283247e94ce589aa32" + dependencies: + is-plain-obj "^1.1" + merge-trees@^1.0.1: version "1.0.1" resolved "https://registry.yarnpkg.com/merge-trees/-/merge-trees-1.0.1.tgz#ccbe674569787f9def17fd46e6525f5700bbd23e" From edc67aba44a23336f8e7ef83470ce2fc155eb5ba Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 20 Jun 2018 13:44:36 +0100 Subject: [PATCH 358/627] Make sure the colon doesn't wrap with lots of tags --- ui-v2/app/styles/routes/dc/service/index.scss | 1 - 1 file changed, 1 deletion(-) diff --git a/ui-v2/app/styles/routes/dc/service/index.scss b/ui-v2/app/styles/routes/dc/service/index.scss index 975da4ad7..214b12937 100644 --- a/ui-v2/app/styles/routes/dc/service/index.scss +++ b/ui-v2/app/styles/routes/dc/service/index.scss @@ -4,6 +4,5 @@ html.template-service.template-show main dl { } html.template-service.template-show main dt::after { content: ':'; - display: inline-block; margin-right: 0.2em; } From 4606db8c04e9eb063eeaa492c8c7042d9227c0ac Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Wed, 20 Jun 2018 08:55:11 -0400 Subject: [PATCH 359/627] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c0c1abcab..2b467ad47 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ IMPROVEMENTS: * agent: A 
Consul user-agent string is now sent to providers when making retry-join requests [GH-4013](https://github.com/hashicorp/consul/pull/4013) * client: Add metrics for failed RPCs [PR-4220](https://github.com/hashicorp/consul/pull/4220) +* agent: Add configuration entry to control including TXT records for node meta in DNS responses [PR-4215](https://github.com/hashicorp/consul/pull/4215) BUG FIXES: From fb5c2f737b07d07e7a3ee2a66e9f07848ee523ba Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 20 Jun 2018 14:01:42 +0100 Subject: [PATCH 360/627] Add a reusable %tag and use it for the ACL 'tags' ready to use elsewhere --- ui-v2/app/styles/components/table.scss | 11 +++++------ ui-v2/app/styles/components/tag.scss | 5 +++++ ui-v2/app/styles/routes/dc/service/index.scss | 11 ++++++++--- ui-v2/app/templates/dc/services/show.hbs | 4 +++- 4 files changed, 21 insertions(+), 10 deletions(-) create mode 100644 ui-v2/app/styles/components/tag.scss diff --git a/ui-v2/app/styles/components/table.scss b/ui-v2/app/styles/components/table.scss index 49578096e..c78786af2 100644 --- a/ui-v2/app/styles/components/table.scss +++ b/ui-v2/app/styles/components/table.scss @@ -1,4 +1,9 @@ @import './icons'; +@import './tag'; +td strong { + @extend %tag; + background-color: $gray; +} th { color: $text-light !important; } @@ -53,12 +58,6 @@ table td a { tbody { overflow-x: visible !important; } -td strong { - display: inline-block; - background-color: $gray; - padding: 1px 5px; - border-radius: $radius-small; -} th, td:not(.actions), td:not(.actions) a { diff --git a/ui-v2/app/styles/components/tag.scss b/ui-v2/app/styles/components/tag.scss new file mode 100644 index 000000000..f31eb7be8 --- /dev/null +++ b/ui-v2/app/styles/components/tag.scss @@ -0,0 +1,5 @@ +%tag { + display: inline-block; + padding: 1px 5px; + border-radius: $radius-small; +} diff --git a/ui-v2/app/styles/routes/dc/service/index.scss b/ui-v2/app/styles/routes/dc/service/index.scss index 214b12937..e8883041c 100644 --- 
a/ui-v2/app/styles/routes/dc/service/index.scss +++ b/ui-v2/app/styles/routes/dc/service/index.scss @@ -1,8 +1,13 @@ +@import '../../../components/tag'; html.template-service.template-show main dl { display: flex; margin-bottom: 1.4em; } -html.template-service.template-show main dt::after { - content: ':'; - margin-right: 0.2em; +html.template-service.template-show main dt { + display: none; +} +html.template-service.template-show main dd span { + @extend %tag; + background-color: $gray; + margin-bottom: 0.5em; } diff --git a/ui-v2/app/templates/dc/services/show.hbs b/ui-v2/app/templates/dc/services/show.hbs index 23dc23af9..acccf3468 100644 --- a/ui-v2/app/templates/dc/services/show.hbs +++ b/ui-v2/app/templates/dc/services/show.hbs @@ -19,7 +19,9 @@
Tags
- {{join ', ' item.Tags}} + {{#each item.Tags as |item|}} + {{item}} + {{/each}}
{{/if}} From fd65b75ba3fe576f6b9072d6dc8bcbaab36bea3b Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Wed, 20 Jun 2018 09:07:40 -0400 Subject: [PATCH 361/627] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b467ad47..30314ef13 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ IMPROVEMENTS: * agent: A Consul user-agent string is now sent to providers when making retry-join requests [GH-4013](https://github.com/hashicorp/consul/pull/4013) * client: Add metrics for failed RPCs [PR-4220](https://github.com/hashicorp/consul/pull/4220) * agent: Add configuration entry to control including TXT records for node meta in DNS responses [PR-4215](https://github.com/hashicorp/consul/pull/4215) +* client: Make RPC rate limit configuration reloadable [GH-4012](https://github.com/hashicorp/consul/issues/4012) BUG FIXES: From 1ab5a36721afd541007ef4f2b1b03e5f3230bde5 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Wed, 20 Jun 2018 09:12:22 -0400 Subject: [PATCH 362/627] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 30314ef13..1f760f386 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ FEATURES: * dns: Enable PTR record lookups for services with IPs that have no registered node [[PR-4083](https://github.com/hashicorp/consul/pull/4083)] +* ui: Default to serving the new UI. 
Setting the `CONSUL_UI_LEGACY` environment variable to `1` of `true` will revert to serving the old UI IMPROVEMENTS: From 4de710f0ecf9809dfc8aa4b69d1007229604fa03 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 20 Jun 2018 14:38:54 +0100 Subject: [PATCH 363/627] Make sure token is set to blank if nothing is typed in settings --- ui-v2/app/services/settings.js | 6 +++++- ui-v2/tests/acceptance/settings/update.feature | 15 +++++++++++++++ .../acceptance/steps/settings/update-steps.js | 10 ++++++++++ ui-v2/tests/acceptance/token-header.feature | 2 +- ui-v2/tests/steps.js | 9 +++++++++ 5 files changed, 40 insertions(+), 2 deletions(-) create mode 100644 ui-v2/tests/acceptance/settings/update.feature create mode 100644 ui-v2/tests/acceptance/steps/settings/update-steps.js diff --git a/ui-v2/app/services/settings.js b/ui-v2/app/services/settings.js index cc3d8021e..2e51de89a 100644 --- a/ui-v2/app/services/settings.js +++ b/ui-v2/app/services/settings.js @@ -16,14 +16,18 @@ export default Service.extend({ }; }, findAll: function(key) { - return Promise.resolve({ token: get(this, 'storage').getItem('token') }); + const token = get(this, 'storage').getItem('token'); + return Promise.resolve({ token: token === null ? '' : token }); }, findBySlug: function(slug) { + // TODO: Force localStorage to always be strings... 
+ // const value = get(this, 'storage').getItem(slug); return Promise.resolve(get(this, 'storage').getItem(slug)); }, persist: function(obj) { const storage = get(this, 'storage'); Object.keys(obj).forEach((item, i) => { + // TODO: ...everywhere storage.setItem(item, obj[item]); }); return Promise.resolve(obj); diff --git a/ui-v2/tests/acceptance/settings/update.feature b/ui-v2/tests/acceptance/settings/update.feature new file mode 100644 index 000000000..98ac584d0 --- /dev/null +++ b/ui-v2/tests/acceptance/settings/update.feature @@ -0,0 +1,15 @@ +@setupApplicationTest +Feature: settings / update: Update Settings + In order to authenticate with an ACL token + As a user + I need to be able to add my token via the UI + Scenario: I click Save without actually typing anything + Given 1 datacenter model with the value "datacenter" + When I visit the settings page + Then the url should be /settings + And I submit + Then I have settings like yaml + --- + token: '' + --- + diff --git a/ui-v2/tests/acceptance/steps/settings/update-steps.js b/ui-v2/tests/acceptance/steps/settings/update-steps.js new file mode 100644 index 000000000..960cdf533 --- /dev/null +++ b/ui-v2/tests/acceptance/steps/settings/update-steps.js @@ -0,0 +1,10 @@ +import steps from '../steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/acceptance/token-header.feature b/ui-v2/tests/acceptance/token-header.feature index a1c35b80a..750a3fe74 100644 --- a/ui-v2/tests/acceptance/token-header.feature +++ b/ui-v2/tests/acceptance/token-header.feature @@ -12,7 +12,7 @@ Feature: token headers headers: X-Consul-Token: '' --- - Scenario: Set a token and then navigate to the index page + Scenario: Set the token to [Token] and then navigate to the index page Given 1 datacenter 
model with the value "datacenter" When I visit the settings page Then the url should be /settings diff --git a/ui-v2/tests/steps.js b/ui-v2/tests/steps.js index 4f8b3db00..459d8fd95 100644 --- a/ui-v2/tests/steps.js +++ b/ui-v2/tests/steps.js @@ -242,6 +242,15 @@ export default function(assert) { `Expected ${num} ${model}s with ${property} set to "${value}", saw ${len}` ); }) + .then('I have settings like yaml\n$yaml', function(data) { + // TODO: Inject this + const settings = window.localStorage; + Object.keys(data).forEach(function(prop) { + const actual = settings.getItem(prop); + const expected = data[prop]; + assert.strictEqual(actual, expected, `Expected settings to be ${expected} was ${actual}`); + }); + }) .then('I see $property on the $component like yaml\n$yaml', function( property, component, From d64528665d1284a7e92b3c4af2a294dae907cebf Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 20 Jun 2018 15:14:16 +0100 Subject: [PATCH 364/627] Use the pill styling elsewhere... TODO: Probably change the %tag to %pill now I've remembered the word I was looking for! 
--- ui-v2/app/styles/app.scss | 1 + ui-v2/app/styles/routes/dc/service/index.scss | 2 ++ ui-v2/app/templates/dc/nodes/-services.hbs | 6 ++++-- ui-v2/app/templates/dc/services/index.hbs | 6 ++++-- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/ui-v2/app/styles/app.scss b/ui-v2/app/styles/app.scss index cebc321f1..9fa550d5f 100644 --- a/ui-v2/app/styles/app.scss +++ b/ui-v2/app/styles/app.scss @@ -45,6 +45,7 @@ @import 'components/notice'; @import 'routes/dc/service/index'; +@import 'routes/dc/nodes/index'; @import 'routes/dc/kv/index'; main a { diff --git a/ui-v2/app/styles/routes/dc/service/index.scss b/ui-v2/app/styles/routes/dc/service/index.scss index e8883041c..d60c5c77a 100644 --- a/ui-v2/app/styles/routes/dc/service/index.scss +++ b/ui-v2/app/styles/routes/dc/service/index.scss @@ -6,6 +6,8 @@ html.template-service.template-show main dl { html.template-service.template-show main dt { display: none; } +// TODO: Generalize this, also see nodes/index +html.template-service.template-list td.tags span, html.template-service.template-show main dd span { @extend %tag; background-color: $gray; diff --git a/ui-v2/app/templates/dc/nodes/-services.hbs b/ui-v2/app/templates/dc/nodes/-services.hbs index 185875f3c..1aeaa01be 100644 --- a/ui-v2/app/templates/dc/nodes/-services.hbs +++ b/ui-v2/app/templates/dc/nodes/-services.hbs @@ -20,9 +20,11 @@ {{item.Port}} - + {{#if (gt item.Tags.length 0)}} - {{join ', ' item.Tags}} + {{#each item.Tags as |item|}} + {{item}} + {{/each}} {{/if}} {{/block-slot}} diff --git a/ui-v2/app/templates/dc/services/index.hbs b/ui-v2/app/templates/dc/services/index.hbs index 067c63107..5267078b6 100644 --- a/ui-v2/app/templates/dc/services/index.hbs +++ b/ui-v2/app/templates/dc/services/index.hbs @@ -35,9 +35,11 @@
{{format_number item.ChecksCritical}}
- + {{#if (gt item.Tags.length 0)}} - {{join ', ' item.Tags}} + {{#each item.Tags as |item|}} + {{item}} + {{/each}} {{/if}} {{/block-slot}} From 5dd79165b8b98f9de1619dc6fc350dc01c630ca6 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 20 Jun 2018 15:26:22 +0100 Subject: [PATCH 365/627] Make sure the updated tag view doesn't break the tests --- ui-v2/tests/acceptance/dc/services/show.feature | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ui-v2/tests/acceptance/dc/services/show.feature b/ui-v2/tests/acceptance/dc/services/show.feature index 29daba7b6..78e2554a3 100644 --- a/ui-v2/tests/acceptance/dc/services/show.feature +++ b/ui-v2/tests/acceptance/dc/services/show.feature @@ -17,6 +17,8 @@ Feature: dc / services / show: Show Service dc: dc1 service: service-0 --- - Then I see the text "Tag1, Tag2, Tag3" in "[data-test-tags]" - Then ok + Then I see the text "Tag1" in "[data-test-tags] span:nth-child(1)" + Then I see the text "Tag2" in "[data-test-tags] span:nth-child(2)" + Then I see the text "Tag3" in "[data-test-tags] span:nth-child(3)" + Then pause for 5000 From 224aa2733d2feceeaf897ee99dfa8ba8f35a57ef Mon Sep 17 00:00:00 2001 From: John Cowen Date: Wed, 20 Jun 2018 15:26:55 +0100 Subject: [PATCH 366/627] Add missing scss file --- ui-v2/app/styles/routes/dc/nodes/index.scss | 6 ++++++ ui-v2/tests/acceptance/dc/services/show.feature | 1 - 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 ui-v2/app/styles/routes/dc/nodes/index.scss diff --git a/ui-v2/app/styles/routes/dc/nodes/index.scss b/ui-v2/app/styles/routes/dc/nodes/index.scss new file mode 100644 index 000000000..f7c3003be --- /dev/null +++ b/ui-v2/app/styles/routes/dc/nodes/index.scss @@ -0,0 +1,6 @@ +// TODO: Generalize this, also see services/index +html.template-node.template-show td.tags span { + @extend %tag; + background-color: $gray; + margin-bottom: 0.5em; +} diff --git a/ui-v2/tests/acceptance/dc/services/show.feature 
b/ui-v2/tests/acceptance/dc/services/show.feature index 78e2554a3..bf64a5d5b 100644 --- a/ui-v2/tests/acceptance/dc/services/show.feature +++ b/ui-v2/tests/acceptance/dc/services/show.feature @@ -20,5 +20,4 @@ Feature: dc / services / show: Show Service Then I see the text "Tag1" in "[data-test-tags] span:nth-child(1)" Then I see the text "Tag2" in "[data-test-tags] span:nth-child(2)" Then I see the text "Tag3" in "[data-test-tags] span:nth-child(3)" - Then pause for 5000 From c361a23d42633d345ebeafbb1ce5be55faa14b20 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Wed, 20 Jun 2018 15:22:43 -0400 Subject: [PATCH 367/627] Added more validation during publishing We verify the git remote/url with whoever is running (in addition to other automated checks) We also now run consul agent -dev, check is first 25 lines of output, consul info output and that consul leave works. --- build-support/functions/10-util.sh | 48 ++++++- build-support/functions/40-publish.sh | 178 +++++++++++++++++++++++--- 2 files changed, 203 insertions(+), 23 deletions(-) diff --git a/build-support/functions/10-util.sh b/build-support/functions/10-util.sh index 6289dcb03..e796a76f2 100644 --- a/build-support/functions/10-util.sh +++ b/build-support/functions/10-util.sh @@ -328,6 +328,45 @@ function normalize_git_url { return 0 } +function git_remote_url { + # Arguments: + # $1 - Path to the top level Consul source + # $2 - Remote name + # + # Returns: + # 0 - success + # * - error + # + # Note: + # The push url for the git remote will be echoed to stdout + + if ! test -d "$1" + then + err "ERROR: '$1' is not a directory. 
git_remote_url must be called with the path to the top level source as the first argument'" + return 1 + fi + + if test -z "$2" + then + err "ERROR: git_remote_url must be called with a second argument that is the name of the remote" + return 1 + fi + + local ret=0 + + pushd "$1" > /dev/null + + local url=$(git remote get-url --push $2 2>&1) || ret=1 + + popd > /dev/null + + if test "${ret}" -eq 0 + then + echo "${url}" + return 0 + fi +} + function find_git_remote { # Arguments: # $1 - Path to the top level Consul source @@ -429,6 +468,7 @@ function git_push_ref { # Arguments: # $1 - Path to the top level Consul source # $2 - Git ref (optional) + # $3 - remote (optional - if not specified we will try to determine it) # # Returns: # 0 - success @@ -442,10 +482,14 @@ function git_push_ref { local sdir="$1" local ret=0 + local remote="$3" # find the correct remote corresponding to the desired repo (basically prevent pushing enterprise to oss or oss to enterprise) - local remote=$(find_git_remote "${sdir}") || return 1 - status "Using git remote: ${remote}" + if test -z "${remote}" + then + local remote=$(find_git_remote "${sdir}") || return 1 + status "Using git remote: ${remote}" + fi local ref="" diff --git a/build-support/functions/40-publish.sh b/build-support/functions/40-publish.sh index ec1494e28..2e5546cef 100644 --- a/build-support/functions/40-publish.sh +++ b/build-support/functions/40-publish.sh @@ -19,6 +19,44 @@ function hashicorp_release { return 0 } +function confirm_git_remote { + # Arguments: + # $1 - Path to git repo + # $2 - remote name + # + # Returns: + # 0 - success + # * - error + # + + local remote="$2" + local url=$(git_remote_url "$1" "${remote}") + + echo -e "\n\nConfigured Git Remote: ${remote}" + echo -e "Configured Git URL: ${url}\n" + + local answer="" + + while true + do + case "${answer}" in + [yY]* ) + status "Remote Accepted" + return 0 + break + ;; + [nN]* ) + err "Remote Rejected" + return 1 + break + ;; + * ) + read -p "Is this 
Git Remote correct to push ${CONSUL_PKG_NAME} to? [y/n]: " answer + ;; + esac + done +} + function confirm_git_push_changes { # Arguments: # $1 - Path to git repo @@ -70,7 +108,7 @@ function confirm_git_push_changes { return $ret } -function confirm_consul_version_zip { +function extract_consul_local { # Arguments: # $1 - Path to the zipped binary to test # $2 - Version to look for @@ -94,43 +132,129 @@ function confirm_consul_version_zip { if test $? -eq 0 then chmod +x "${tfile}" - "${tfile}" version - - # put a empty line between the version output and the prompt + echo "${tfile}" + return 0 + else + err "ERROR: Failed to extract consul binary from the zip file" + return 1 + fi +} + +function confirm_consul_version { + # Arguments: + # $1 - consul exe to use + # + # Returns: + # 0 - success + # * - error + local consul_exe="$1" + + if ! test -x "${consul_exe}" + then + err "ERROR: '${consul_exe} is not an executable" + return 1 + fi + + "${consul_exe}" version + + # put a empty line between the version output and the prompt + echo "" + + local answer="" + + while true + do + case "${answer}" in + [yY]* ) + status "Version Accepted" + ret=0 + break + ;; + [nN]* ) + err "Version Rejected" + ret=1 + break + ;; + * ) + read -p "Is this Consul version correct? [y/n]: " answer + ;; + esac + done +} + +function confirm_consul_info { + # Arguments: + # $1 - Path to a consul exe that can be run on this system + # + # Returns: + # 0 - success + # * - error + + local consul_exe="$1" + local log_file="$(mktemp) -t "consul_log_")" + "${consul_exe}" agent -dev > "${log_file}" 2>&1 & + local consul_pid=$! + sleep 1 + status "First 25 lines/1s of the agents output:" + head -n 25 "${log_file}" + + echo "" + local ret=0 + local answer="" + + while true + do + case "${answer}" in + [yY]* ) + status "Consul Agent Output Accepted" + break + ;; + [nN]* ) + err "Consul Agent Output Rejected" + ret=1 + break + ;; + * ) + read -p "Is this Consul Agent Output correct? 
[y/n]: " answer + ;; + esac + done + + if test "${ret}" -eq 0 + then + status "Consul Info Output" + "${consul_exe}" info echo "" - local answer="" while true do case "${answer}" in [yY]* ) - status "Version Accepted" - ret=0 + status "Consul Info Output Accepted" break ;; [nN]* ) - err "Version Rejected" - ret=1 + err "Consul Info Output Rejected" + return 1 break ;; * ) - read -p "Is this Consul version correct? [y/n]: " answer + read -p "Is this Consul Info Output correct? [y/n]: " answer ;; esac done - else - err "ERROR: Failed to extract consul binary from the zip file" - ret=1 fi - rm "${tfile}" > /dev/null 2>&1 - return ${ret} + status "Requesting Consul to leave the cluster / shutdown" + "${consul_exe}" leave + wait ${consul_pid} > /dev/null 2>&1 + + return $? } -function confirm_consul_version { - confirm_consul_version_zip "$1" "$2" - return $? +function extract_consul { + extract_consul_local "$1" "$2" } @@ -174,8 +298,16 @@ function publish_release { status_stage "==> Verifying release files" check_release "${sdir}/pkg/dist" "${vers}" true || return 1 + status_stage "==> Extracting Consul version for local system" + local consul_exe=$(extract_consul "${sdir}/pkg/dist" "${vers}") || return 1 + # make sure to remove the temp file + trap "rm '${consul_exe}'" EXIT + status_stage "==> Confirming Consul Version" - confirm_consul_version "${sdir}/pkg/dist" "${vers}" || return 1 + confirm_consul_version "${consul_exe}" || return 1 + + status_stage "==> Confirming Consul Agent Info" + confirm_consul_info "${consul_exe}" || return 1 status_stage "==> Confirming Git is clean" is_git_clean "$1" true || return 1 @@ -183,11 +315,15 @@ function publish_release { status_stage "==> Confirming Git Changes" confirm_git_push_changes "$1" || return 1 + status_stage "==> Confirming Git Remote" + local remote=$(find_git_remote "${sdir}") || return 1 + confirm_git_remote "${sdir}" "${remote}" || return 1 + if is_set "${pub_git}" then status_stage "==> Pushing to Git" - 
git_push_ref "$1" || return 1 - git_push_ref "$1" "v${vers}" || return 1 + git_push_ref "$1" "" "${remote}" || return 1 + git_push_ref "$1" "v${vers}" "${remote}" || return 1 fi if is_set "${pub_hc_releases}" From 74fe2fae8bb93b0010b06b019f08e8b95b392d74 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Wed, 20 Jun 2018 15:44:19 -0400 Subject: [PATCH 368/627] Dont duplicate release versions in prereleases --- version/version.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 5477d0ae3..b3d984069 100644 --- a/version/version.go +++ b/version/version.go @@ -35,8 +35,12 @@ func GetHumanVersion() string { if GitDescribe == "" && release == "" { release = "dev" } + if release != "" { - version += fmt.Sprintf("-%s", release) + if !strings.HasSuffix(version, "-"+release) { + // if we tagged a prerelease version then the release is in the version already + version += fmt.Sprintf("-%s", release) + } if GitCommit != "" { version += fmt.Sprintf(" (%s)", GitCommit) } From a127f167ee28eaab6b8f3c11b8e425f16764d6bf Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Wed, 20 Jun 2018 16:06:43 -0400 Subject: [PATCH 369/627] Allow showing git diff interactively to inspect release commits. --- build-support/functions/10-util.sh | 33 +++++++++++++++++++++++++++ build-support/functions/40-publish.sh | 7 +++++- 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/build-support/functions/10-util.sh b/build-support/functions/10-util.sh index e796a76f2..e997ccd2c 100644 --- a/build-support/functions/10-util.sh +++ b/build-support/functions/10-util.sh @@ -319,6 +319,39 @@ function git_log_summary { return $ret } +function git_diff { + # Arguments: + # $1 - Path to the git repo (optional - assumes pwd is git repo otherwise) + # $2 .. 
$N - Optional path specification + # + # Returns: + # 0 - success + # * - failure + # + + local gdir="$(pwd)" + if test -d "$1" + then + gdir="$1" + fi + + shift + + pushd "${gdir}" > /dev/null + + local ret=0 + + local head=$(git_branch) || ret=1 + local upstream=$(git_upstream) || ret=1 + + if test ${ret} -eq 0 + then + status "Git Diff - Paths: $@" + git diff ${HEAD} ${upstream} -- "$@" || ret=1 + fi + return $ret +} + function normalize_git_url { url="${1#https://}" url="${url#git@}" diff --git a/build-support/functions/40-publish.sh b/build-support/functions/40-publish.sh index 2e5546cef..3376ef168 100644 --- a/build-support/functions/40-publish.sh +++ b/build-support/functions/40-publish.sh @@ -97,8 +97,13 @@ function confirm_git_push_changes { ret=1 break ;; + ?) + # bindata_assetfs.go will make these meaningless + git_diff "$(pwd)" ":!agent/bindata_assetfs.go"|| ret 1 + answer="" + ;; * ) - read -p "Are these changes correct? [y/n]: " answer + read -p "Are these changes correct? [y/n] (or type ? 
to show the diff output): " answer ;; esac done From 9c836b0cb935d06371fe8d8f499c8fb14e20018c Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Wed, 20 Jun 2018 16:35:54 -0400 Subject: [PATCH 370/627] Add override capability to blacklist a remote --- build-support/functions/10-util.sh | 11 +++++++++++ build-support/functions/40-publish.sh | 5 ++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/build-support/functions/10-util.sh b/build-support/functions/10-util.sh index e997ccd2c..2e89cbd15 100644 --- a/build-support/functions/10-util.sh +++ b/build-support/functions/10-util.sh @@ -441,6 +441,17 @@ function find_git_remote { return ${ret} } +function git_remote_not_blacklisted { + # Arguments: + # $1 - path to the repo + # $2 - the remote name + # + # Returns: + # 0 - not blacklisted + # * - blacklisted + return 0 +} + function is_git_clean { # Arguments: # $1 - Path to git repo diff --git a/build-support/functions/40-publish.sh b/build-support/functions/40-publish.sh index 3376ef168..ffb8e64bf 100644 --- a/build-support/functions/40-publish.sh +++ b/build-support/functions/40-publish.sh @@ -320,8 +320,11 @@ function publish_release { status_stage "==> Confirming Git Changes" confirm_git_push_changes "$1" || return 1 - status_stage "==> Confirming Git Remote" + status_stage "==> Checking for blacklisted Git Remote" local remote=$(find_git_remote "${sdir}") || return 1 + git_remote_not_blacklisted "${sdir}" "${remote}" || return 1 + + status_stage "==> Confirming Git Remote" confirm_git_remote "${sdir}" "${remote}" || return 1 if is_set "${pub_git}" From 6d55c7a98c4a0882e66b870605fb69f777997bfe Mon Sep 17 00:00:00 2001 From: John Cowen Date: Tue, 19 Jun 2018 15:25:56 +0100 Subject: [PATCH 371/627] Add a failing test to show that lock session TTL's are not shown --- ui-v2/app/templates/dc/nodes/-sessions.hbs | 3 +- .../acceptance/dc/nodes/sessions/list.feature | 28 +++++++++++++++++++ .../steps/dc/nodes/sessions/list-steps.js | 10 +++++++ 
ui-v2/tests/helpers/set-cookies.js | 3 ++ ui-v2/tests/helpers/type-to-url.js | 5 ++++ ui-v2/tests/pages/dc/nodes/show.js | 3 ++ ui-v2/tests/steps.js | 5 ++++ 7 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 ui-v2/tests/acceptance/dc/nodes/sessions/list.feature create mode 100644 ui-v2/tests/acceptance/steps/dc/nodes/sessions/list-steps.js diff --git a/ui-v2/app/templates/dc/nodes/-sessions.hbs b/ui-v2/app/templates/dc/nodes/-sessions.hbs index 322cb9411..8e3d43ddb 100644 --- a/ui-v2/app/templates/dc/nodes/-sessions.hbs +++ b/ui-v2/app/templates/dc/nodes/-sessions.hbs @@ -1,5 +1,6 @@ {{#if (gt sessions.length 0)}} {{#tabular-collection + data-test-sessions class="sessions" items=sessions as |item index| }} @@ -22,7 +23,7 @@ {{item.LockDelay}} - + {{item.TTL}} diff --git a/ui-v2/tests/acceptance/dc/nodes/sessions/list.feature b/ui-v2/tests/acceptance/dc/nodes/sessions/list.feature new file mode 100644 index 000000000..41055dc74 --- /dev/null +++ b/ui-v2/tests/acceptance/dc/nodes/sessions/list.feature @@ -0,0 +1,28 @@ +@setupApplicationTest +Feature: dc / nodes / sessions /list: List Lock Sessions + In order to get information regarding lock sessions + As a user + I should be able to see a listing of lock sessions with necessary information under the lock sessions tab for a node + Scenario: Given 2 session with string TTLs + Given 1 datacenter model with the value "dc1" + And 1 node model from yaml + --- + - ID: node-0 + --- + And 2 session models from yaml + --- + - TTL: 30s + - TTL: 60m + --- + When I visit the node page for yaml + --- + dc: dc1 + node: node-0 + --- + And I click lockSessions on the tabs + Then I see lockSessionsIsSelected on the tabs + Then I see TTL on the sessions like yaml + --- + - 30s + - 60m + --- diff --git a/ui-v2/tests/acceptance/steps/dc/nodes/sessions/list-steps.js b/ui-v2/tests/acceptance/steps/dc/nodes/sessions/list-steps.js new file mode 100644 index 000000000..9bfbe9ac9 --- /dev/null +++ 
b/ui-v2/tests/acceptance/steps/dc/nodes/sessions/list-steps.js @@ -0,0 +1,10 @@ +import steps from '../../../steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/helpers/set-cookies.js b/ui-v2/tests/helpers/set-cookies.js index 04f085763..1e0547357 100644 --- a/ui-v2/tests/helpers/set-cookies.js +++ b/ui-v2/tests/helpers/set-cookies.js @@ -17,6 +17,9 @@ export default function(type, count, obj) { key = 'CONSUL_ACL_COUNT'; obj['CONSUL_ENABLE_ACLS'] = 1; break; + case 'session': + key = 'CONSUL_SESSION_COUNT'; + break; } if (key) { obj[key] = count; diff --git a/ui-v2/tests/helpers/type-to-url.js b/ui-v2/tests/helpers/type-to-url.js index 6fa84bbbb..0b2c8f582 100644 --- a/ui-v2/tests/helpers/type-to-url.js +++ b/ui-v2/tests/helpers/type-to-url.js @@ -16,6 +16,11 @@ export default function(type) { case 'acl': url = ['/v1/acl/list']; break; + case 'session': + url = function(url) { + return url.indexOf('/v1/session/node/') === 0; + }; + break; } return function(actual) { if (url === null) { diff --git a/ui-v2/tests/pages/dc/nodes/show.js b/ui-v2/tests/pages/dc/nodes/show.js index 577a03ae2..d51bda6cf 100644 --- a/ui-v2/tests/pages/dc/nodes/show.js +++ b/ui-v2/tests/pages/dc/nodes/show.js @@ -10,4 +10,7 @@ export default create({ services: collection('#services [data-test-tabular-row]', { port: attribute('data-test-service-port', '.port'), }), + sessions: collection('#lock-sessions [data-test-tabular-row]', { + TTL: attribute('data-test-session-ttl', '[data-test-session-ttl]'), + }), }); diff --git a/ui-v2/tests/steps.js b/ui-v2/tests/steps.js index a75d11499..e6e08e86e 100644 --- a/ui-v2/tests/steps.js +++ b/ui-v2/tests/steps.js @@ -33,6 +33,9 @@ export default function(assert) { case 'acls': model = 'acl'; break; + case 'sessions': 
+ model = 'session'; + break; } cb(null, model); }, yadda) @@ -218,6 +221,8 @@ export default function(assert) { ) { const _component = currentPage[component]; const iterator = new Array(_component.length).fill(true); + // this will catch if we get aren't managing to select a component + assert.ok(iterator.length > 0); iterator.forEach(function(item, i, arr) { const actual = _component.objectAt(i)[property]; // anything coming from the DOM is going to be text/strings From 2c495a93823b846adbbe32f9faeee2f5ad514d8d Mon Sep 17 00:00:00 2001 From: John Cowen Date: Thu, 21 Jun 2018 11:06:45 +0100 Subject: [PATCH 372/627] Fix up following rebase, use array of strings rather than its own func --- ui-v2/tests/helpers/type-to-url.js | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ui-v2/tests/helpers/type-to-url.js b/ui-v2/tests/helpers/type-to-url.js index 0b2c8f582..470a5f291 100644 --- a/ui-v2/tests/helpers/type-to-url.js +++ b/ui-v2/tests/helpers/type-to-url.js @@ -17,9 +17,7 @@ export default function(type) { url = ['/v1/acl/list']; break; case 'session': - url = function(url) { - return url.indexOf('/v1/session/node/') === 0; - }; + url = ['/v1/session/node/']; break; } return function(actual) { From 3325872daf0466f8ddb76dc00bdf15eeb1ee7359 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Thu, 21 Jun 2018 12:44:32 +0100 Subject: [PATCH 373/627] Add a beforeModel hook at dc/index to auto transition to services --- ui-v2/app/routes/dc/index.js | 7 +++++++ ui-v2/tests/acceptance/dc/forwarding.feature | 12 ++++++++++++ ui-v2/tests/acceptance/steps/dc/forwarding-steps.js | 10 ++++++++++ ui-v2/tests/pages/dc.js | 2 +- ui-v2/tests/unit/routes/dc/index-test.js | 11 +++++++++++ 5 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 ui-v2/app/routes/dc/index.js create mode 100644 ui-v2/tests/acceptance/dc/forwarding.feature create mode 100644 ui-v2/tests/acceptance/steps/dc/forwarding-steps.js create mode 100644 
ui-v2/tests/unit/routes/dc/index-test.js diff --git a/ui-v2/app/routes/dc/index.js b/ui-v2/app/routes/dc/index.js new file mode 100644 index 000000000..8ff228e5c --- /dev/null +++ b/ui-v2/app/routes/dc/index.js @@ -0,0 +1,7 @@ +import Route from '@ember/routing/route'; + +export default Route.extend({ + beforeModel: function() { + this.transitionTo('dc.services'); + }, +}); diff --git a/ui-v2/tests/acceptance/dc/forwarding.feature b/ui-v2/tests/acceptance/dc/forwarding.feature new file mode 100644 index 000000000..5ba35d01d --- /dev/null +++ b/ui-v2/tests/acceptance/dc/forwarding.feature @@ -0,0 +1,12 @@ +@setupApplicationTest +Feature: dc forwarding + In order to arrive at a useful page when only specifying a dc in the url + As a user + I should be redirected to the services page for the dc + Scenario: Arriving at the datacenter index page with no other url info + Given 1 datacenter model with the value "datacenter" + When I visit the dcs page for yaml + --- + dc: datacenter + --- + Then the url should be /datacenter/services diff --git a/ui-v2/tests/acceptance/steps/dc/forwarding-steps.js b/ui-v2/tests/acceptance/steps/dc/forwarding-steps.js new file mode 100644 index 000000000..960cdf533 --- /dev/null +++ b/ui-v2/tests/acceptance/steps/dc/forwarding-steps.js @@ -0,0 +1,10 @@ +import steps from '../steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/pages/dc.js b/ui-v2/tests/pages/dc.js index f181c0b4c..ad4eeb9a3 100644 --- a/ui-v2/tests/pages/dc.js +++ b/ui-v2/tests/pages/dc.js @@ -1,7 +1,7 @@ import { create, visitable, attribute, collection, clickable } from 'ember-cli-page-object'; export default create({ - visit: visitable('/:dc/services/'), + visit: visitable('/:dc/'), dcs: collection('[data-test-datacenter-picker]'), 
showDatacenters: clickable('[data-test-datacenter-selected]'), selectedDc: attribute('data-test-datacenter-selected', '[data-test-datacenter-selected]'), diff --git a/ui-v2/tests/unit/routes/dc/index-test.js b/ui-v2/tests/unit/routes/dc/index-test.js new file mode 100644 index 000000000..05fe014c5 --- /dev/null +++ b/ui-v2/tests/unit/routes/dc/index-test.js @@ -0,0 +1,11 @@ +import { moduleFor, test } from 'ember-qunit'; + +moduleFor('route:dc/index', 'Unit | Route | dc/index', { + // Specify the other units that are required for this test. + // needs: ['controller:foo'] +}); + +test('it exists', function(assert) { + let route = this.subject(); + assert.ok(route); +}); From 75da1e18689c653901e79141604827a1c95d9cc2 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Thu, 21 Jun 2018 12:02:16 -0700 Subject: [PATCH 374/627] website: remove backwards compat warning In practice, we strive for compatibility given the usage of the API in the wild, and don't need to make this warning as we once felt we should. --- website/source/api/index.html.md | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/website/source/api/index.html.md b/website/source/api/index.html.md index 83843b1f8..df9622d5d 100644 --- a/website/source/api/index.html.md +++ b/website/source/api/index.html.md @@ -14,14 +14,7 @@ CRUD operations on nodes, services, checks, configuration, and more. ## Version Prefix -All API routes are prefixed with `/v1/`. - -This documentation is only for the v1 API. - -~> **Backwards compatibility:** At the current version, Consul does not yet -promise backwards compatibility even with the v1 prefix. We'll remove this -warning when this policy changes. We expect to reach API stability by Consul -1.0. +All API routes are prefixed with `/v1/`. This documentation is only for the v1 API. 
## ACLs From e91d2d2bcb3105d6beca5cc44d6412c0a431a30e Mon Sep 17 00:00:00 2001 From: petems Date: Thu, 14 Jun 2018 10:13:36 +0100 Subject: [PATCH 375/627] Adds extra thresholds and aggregates to Telemetry * Thresholds and aggregates of metrics for monitoring in Consul --- website/source/docs/agent/telemetry.html.md | 79 ++++++++++++++++++++- 1 file changed, 77 insertions(+), 2 deletions(-) diff --git a/website/source/docs/agent/telemetry.html.md b/website/source/docs/agent/telemetry.html.md index 1f8e49020..0a4d56f35 100644 --- a/website/source/docs/agent/telemetry.html.md +++ b/website/source/docs/agent/telemetry.html.md @@ -48,13 +48,88 @@ Below is sample output of a telemetry dump: # Key Metrics +These are some metrics emitted that can help you understand the health of your cluster at a glance. For a full list of metrics emitted by Consul, see [Metrics Reference](#metrics-reference) + +### Transaction timing + +| Metric Name | Description | +| :----------------------- | :---------- | +| `consul.kvs.apply` | This measures the time it takes to complete an update to the KV store. | +| `consul.txn.apply` | This measures the time spent applying a transaction operation. | +| `consul.raft.apply` | This counts the number of Raft transactions occurring over the interval. | +| `consul.raft.commitTime` | This measures the time it takes to commit a new entry to the Raft log on the leader. | + +**Why they're important:** Taken together, these metrics indicate how long it takes to complete write operations in various parts of the Consul cluster. Generally these should all be fairly consistent and no more than a few milliseconds. Sudden changes in any of the timing values could be due to unexpected load on the Consul servers, or due to problems on the servers themselves. + +**What to look for:** Deviations (in any of these metrics) of more than 50% from baseline over the previous hour. 
+ +### Leadership changes + +| Metric Name | Description | +| :---------- | :---------- | +| `consul.raft.leader.lastContact` | Measures the time since the leader was last able to contact the follower nodes when checking its leader lease. | +| `consul.raft.state.candidate` | This increments whenever a Consul server starts an election. | +| `consul.raft.state.leader` | This increments whenever a Consul server becomes a leader. | + +**Why they're important:** Normally, your Consul cluster should have a stable leader. If there are frequent elections or leadership changes, it would likely indicate network issues between the Consul servers, or that the Consul servers themselves are unable to keep up with the load. + +**What to look for:** For a healthy cluster, you're looking for a `lastContact` lower than 200ms, `leader` > 0 and `candidate` == 0. Deviations from this might indicate flapping leadership. + +### Autopilot + +| Metric Name | Description | +| :---------- | :---------- | +| `consul.autopilot.healthy` | This tracks the overall health of the local server cluster. If all servers are considered healthy by Autopilot, this will be set to 1. If any are unhealthy, this will be 0. | + +**Why it's important:** Obviously, you want your cluster to be healthy. + +**What to look for:** Alert if `healthy` is 0. + +### Memory usage + +| Metric Name | Description | +| :---------- | :---------- | +| `consul.runtime.alloc_bytes` | This measures the number of bytes allocated by the Consul process. | +| `consul.runtime.sys_bytes` | This is the total number of bytes of memory obtained from the OS. | + +**Why they're important:** Consul keeps all of its data in memory. If Consul consumes all available memory, it will crash. + +**What to look for:** If `consul.runtime.sys_bytes` exceeds 90% of total avaliable system memory. 
+ +### Garbage collection + +| Metric Name | Description | +| :---------- | :---------- | +| `consul.runtime.total_gc_pause_ns` | Number of nanoseconds consumed by stop-the-world garbage collection (GC) pauses since Consul started. | + +**Why it's important:** GC pause is a "stop-the-world" event, meaning that all runtime threads are blocked until GC completes. Normally these pauses last only a few nanoseconds. But if memory usage is high, the Go runtime may GC so frequently that it starts to slow down Consul. + +**What to look for:** Warning if `total_gc_pause_ns` exceeds 2 seconds/minute, critical if it exceeds 5 seconds/minute. + +**NOTE:** `total_gc_pause_ns` is a cumulative counter, so in order to calculate rates (such as GC/minute), +you will need to apply a function such as InfluxDB's [`non_negative_difference()`](https://docs.influxdata.com/influxdb/v1.5/query_language/functions/#non-negative-difference). + +### Network activity - RPC Count + +| Metric Name | Description | +| :---------- | :---------- | +| `consul.client.rpc` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server | +| `consul.client.rpc.exceeded` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's [`limits`](/docs/agent/options.html#limits) configuration. | +| `consul.client.rpc.failed` | Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails. | + +**Why they're important:** These measurements indicate the current load created from a Consul agent, including when the load becomes high enough to be rate limited. A high RPC count, especially from `consul.client.rpcexceeded` meaning that the requests are being rate-limited, could imply a misconfigured Consul agent. + +**What to look for:** +Sudden large changes to the `consul.client.rpc` metrics (greater than 50% deviation from baseline). 
+`consul.client.rpc.exceeded` or `consul.client.rpc.failed` count > 0, as it implies that an agent is being rate-limited or fails to make an RPC request to a Consul server + When telemetry is being streamed to an external metrics store, the interval is defined to be that store's flush interval. Otherwise, the interval can be assumed to be 10 seconds when retrieving metrics from the built-in store using the above described signals. -## Agent Health +## Metrics Reference -These metrics are used to monitor the health of specific Consul agents. +This is a full list of metrics emitted by Consul. From 25f90fbcdddac69d7e8ebb005497aafdc9a0e019 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Fri, 22 Jun 2018 13:52:29 +0100 Subject: [PATCH 376/627] Make sure the dc menu is as useful as possible when things go wrong 1. Check the dc's model for both dcs list and the requested dc. 2. If the dc model doesn't exist use and empty array for dcs and a fake dc with the Name 'Error' as we can't even trust what is in the `paramsFor` --- ui-v2/app/routes/application.js | 9 ++++++- ui-v2/app/templates/error.hbs | 2 +- .../acceptance/dc/services/error.feature | 27 +++++++++++++++++++ .../steps/dc/services/error-steps.js | 10 +++++++ ui-v2/tests/steps.js | 4 +++ ui-v2/yarn.lock | 4 +-- 6 files changed, 52 insertions(+), 4 deletions(-) create mode 100644 ui-v2/tests/acceptance/dc/services/error.feature create mode 100644 ui-v2/tests/acceptance/steps/dc/services/error-steps.js diff --git a/ui-v2/app/routes/application.js b/ui-v2/app/routes/application.js index d37941247..87e0ee1ea 100644 --- a/ui-v2/app/routes/application.js +++ b/ui-v2/app/routes/application.js @@ -49,9 +49,16 @@ export default Route.extend({ if (error.status === '') { error.message = 'Error'; } + const model = this.modelFor('dc'); hash({ error: error, - dc: error.status.toString().indexOf('5') !== 0 ? get(this, 'repo').getActive() : null, + dc: + error.status.toString().indexOf('5') !== 0 + ? 
get(this, 'repo').getActive() + : model && model.dc + ? model.dc + : { Name: 'Error' }, + dcs: model && model.dcs ? model.dcs : [], }) .then(model => { removeLoading(); diff --git a/ui-v2/app/templates/error.hbs b/ui-v2/app/templates/error.hbs index 3cd244eeb..d19729f04 100644 --- a/ui-v2/app/templates/error.hbs +++ b/ui-v2/app/templates/error.hbs @@ -1,7 +1,7 @@ {{#hashicorp-consul id="wrapper" dcs=dcs dc=dc}} {{#app-view class="error show"}} {{#block-slot 'header'}} -

+

{{#if error.status }} {{error.status}} ({{error.message}}) {{else}} diff --git a/ui-v2/tests/acceptance/dc/services/error.feature b/ui-v2/tests/acceptance/dc/services/error.feature new file mode 100644 index 000000000..7e613cdfa --- /dev/null +++ b/ui-v2/tests/acceptance/dc/services/error.feature @@ -0,0 +1,27 @@ +@setupApplicationTest +Feature: dc / services / error + Scenario: Arriving at the service page that doesn't exist + Given 2 datacenter models from yaml + --- + - dc-1 + - dc-2 + --- + When I visit the services page for yaml + --- + dc: 404-datacenter + --- + Then I see the text "404 (Page not found)" in "[data-test-error]" + Scenario: Arriving at the service page + Given 2 datacenter models from yaml + --- + - dc-1 + - dc-2 + --- + Given the url "/v1/internal/ui/services" responds with a 500 status + When I visit the services page for yaml + --- + dc: dc-1 + --- + Then I see the text "500 (The backend responded with an error)" in "[data-test-error]" + And I click "[data-test-datacenter-selected]" + And I see 2 datacenter models diff --git a/ui-v2/tests/acceptance/steps/dc/services/error-steps.js b/ui-v2/tests/acceptance/steps/dc/services/error-steps.js new file mode 100644 index 000000000..a7eff3228 --- /dev/null +++ b/ui-v2/tests/acceptance/steps/dc/services/error-steps.js @@ -0,0 +1,10 @@ +import steps from '../../steps'; + +// step definitions that are shared between features should be moved to the +// tests/acceptance/steps/steps.js file + +export default function(assert) { + return steps(assert).then('I should find a file', function() { + assert.ok(true, this.step); + }); +} diff --git a/ui-v2/tests/steps.js b/ui-v2/tests/steps.js index d82f13e2c..48dcd3c05 100644 --- a/ui-v2/tests/steps.js +++ b/ui-v2/tests/steps.js @@ -53,6 +53,10 @@ export default function(assert) { return create(number, model, data); } ) + // TODO: Abstract this away from HTTP + .given(['the url "$url" responds with a $status status'], function(url, status) { + return 
api.server.respondWithStatus(url, parseInt(status)); + }) // interactions .when('I visit the $name page', function(name) { currentPage = pages[name]; diff --git a/ui-v2/yarn.lock b/ui-v2/yarn.lock index 77ef62c14..07447e2ab 100644 --- a/ui-v2/yarn.lock +++ b/ui-v2/yarn.lock @@ -86,8 +86,8 @@ resolved "https://registry.yarnpkg.com/@hashicorp/consul-api-double/-/consul-api-double-1.1.0.tgz#658f9e89208fa23f251ca66c66aeb7241a13f23f" "@hashicorp/ember-cli-api-double@^1.0.2": - version "1.1.1" - resolved "https://registry.yarnpkg.com/@hashicorp/ember-cli-api-double/-/ember-cli-api-double-1.1.1.tgz#9380fdcf404f30f9d2e2a6422bfd83fe0dbe413f" + version "1.2.0" + resolved "https://registry.yarnpkg.com/@hashicorp/ember-cli-api-double/-/ember-cli-api-double-1.2.0.tgz#aed3a9659abb3f3c56d77e400abc7fcbdcf2b78b" dependencies: "@hashicorp/api-double" "^1.1.0" array-range "^1.0.1" From ad1641beba0f86fcbbbcdef6d800cd224cb0fbc8 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Fri, 22 Jun 2018 17:51:43 +0100 Subject: [PATCH 377/627] Remove upgrade banner html from v1 ui --- ui/index.html | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/ui/index.html b/ui/index.html index f477dea5f..3aa423d5c 100644 --- a/ui/index.html +++ b/ui/index.html @@ -140,28 +140,6 @@ +<% end %> \ No newline at end of file diff --git a/website/source/discovery.html.erb b/website/source/discovery.html.erb index 0abd9a53b..ff259a525 100644 --- a/website/source/discovery.html.erb +++ b/website/source/discovery.html.erb @@ -250,3 +250,7 @@ description: |- + +<% content_for :scripts do %> + +<% end %> \ No newline at end of file diff --git a/website/source/index.html.erb b/website/source/index.html.erb index 87d3c63dc..adef8ca9b 100644 --- a/website/source/index.html.erb +++ b/website/source/index.html.erb @@ -267,3 +267,7 @@ description: |- + +<% content_for :scripts do %> + +<% end %> \ No newline at end of file diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb 
index c56a007b5..7bde4b7d5 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -34,7 +34,6 @@ <%= javascript_include_tag "consul-connect/vendor/intersection-observer-polyfill", defer: true %> <%= javascript_include_tag "consul-connect/vendor/siema.min", defer: true %> - <%= javascript_include_tag "consul-connect/application", defer: true %> <%= javascript_include_tag "application", defer: true %> @@ -120,6 +119,8 @@ + <%= yield_content :scripts %> + -<% end %> \ No newline at end of file +<% end %> diff --git a/website/source/discovery.html.erb b/website/source/discovery.html.erb index ff259a525..2d2dbe0ed 100644 --- a/website/source/discovery.html.erb +++ b/website/source/discovery.html.erb @@ -68,7 +68,7 @@ description: |-
- Service Registry + Service Registry
@@ -186,7 +186,7 @@ description: |-
- Service Registry + Service Registry
@@ -253,4 +253,4 @@ description: |- <% content_for :scripts do %> -<% end %> \ No newline at end of file +<% end %> diff --git a/website/source/segmentation.html.erb b/website/source/segmentation.html.erb index 4ffba5f84..d919691f9 100644 --- a/website/source/segmentation.html.erb +++ b/website/source/segmentation.html.erb @@ -69,7 +69,7 @@ description: |-
- Service Access Graph + Service Access Graph
From 3582ed68bf3e995299fe0e5ae7c0d7da478dd1fe Mon Sep 17 00:00:00 2001 From: kfishner Date: Thu, 21 Jun 2018 18:06:26 -0700 Subject: [PATCH 586/627] homepage copy edits --- website/source/index.html.erb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/source/index.html.erb b/website/source/index.html.erb index b9e03845a..08ebd9a64 100644 --- a/website/source/index.html.erb +++ b/website/source/index.html.erb @@ -15,7 +15,7 @@ description: |- New Consul 1.0 release. Get the details

Service Mesh Made Easy

-

Connect, configure, and secure services in dynamic infrastructure.

+

Distributed service networking layer to connect, secure, and configure services across any runtime platform and public or private cloud

@@ -71,7 +71,7 @@ description: |-
-

A paradigm shift

+

Service-based networking for dynamic infrastructure

The shift from static to dynamic infrastructure creates a challenge for connectivity and security. Consul is the service-based networking solution to connect, configure, and secure services in dynamic infrastructure.

@@ -84,13 +84,13 @@ description: |-

Static

- Host-based networking. + Host-based networking static, host-based networking

Dynamic

- Service-based networking. + Service-based networking
<%= inline_svg 'consul-connect/svgs/dynamic.svg' %>
@@ -129,7 +129,7 @@ description: |-

Service Configuration for runtime configuration

-

Feature rich Key/Value store lets you easily configure services.

+

Feature rich Key/Value store to easily configure services.

Learn more @@ -142,7 +142,7 @@ description: |-
-

Principles of Consul

+

Consul Principles

@@ -211,7 +211,7 @@ description: |-
-

Join millions of companies that trust Consul.

+

Companies that trust Consul

From 46571ea903c65a995d70fd42c962b26b301379de Mon Sep 17 00:00:00 2001 From: kfishner Date: Fri, 22 Jun 2018 11:21:15 -0700 Subject: [PATCH 587/627] copy and doc links --- website/source/configuration.html.erb | 19 +++++++++++-------- website/source/discovery.html.erb | 15 ++++++++------- website/source/index.html.erb | 25 +++++++++++++++---------- website/source/segmentation.html.erb | 27 ++++++++++++++++++--------- 4 files changed, 52 insertions(+), 34 deletions(-) diff --git a/website/source/configuration.html.erb b/website/source/configuration.html.erb index cb373bc60..7f8115753 100644 --- a/website/source/configuration.html.erb +++ b/website/source/configuration.html.erb @@ -8,8 +8,8 @@ description: |-
@@ -63,7 +66,7 @@ description: |-

Key/Value Store

Feature rich key/value store for dynamic service configuration data. Use it for feature flagging, maintenance modes, and more.

- Learn more + Learn more

@@ -82,7 +85,7 @@ description: |-

Transaction Support

The key/value store supports both read and write transactions. This allows multiple keys to be updated or read as an atomic transaction. Changes to service configuration can be done atomically to minimize churn and avoid inconsistencies.

- Learn more + Learn more

@@ -115,7 +118,7 @@ description: |-

Blocking Queries / Edge-Triggered Requests

The Consul API supports blocking queries, allowing edge triggered updates. Clients use this to get notified immediately of any changes. Tools like consul-template allow configuration files to be rendered in real-time to third-party sources when any configuration changes are made.

- Learn more + Learn more

@@ -148,7 +151,7 @@ description: |-

Watches

Watches use blocking queries monitor for any configuration or health status updates and invoke user specified scripts to handle changes. This makes it easy to build reactive infrastructure.

- Learn more + Learn more

@@ -181,7 +184,7 @@ description: |-

Distributed Locks and Semaphores

The key/value store supports distributed locks and semaphores. This makes it easier for applications to perform leader election or manage access to shared resources.

- Learn more + Learn more

diff --git a/website/source/discovery.html.erb b/website/source/discovery.html.erb index 2d2dbe0ed..21d020585 100644 --- a/website/source/discovery.html.erb +++ b/website/source/discovery.html.erb @@ -8,8 +8,9 @@ description: |-
-

Service discovery made easy.

-

Service Registry enables services to register and discover each other in dymanic infrastructure.

+

Service discovery made easy

+

Service registry, integrated health checks, and DNS and HTTP interfaces + enable any service to discover and be discovered by other services

@@ -82,7 +83,7 @@ description: |-

DNS Query Interface

Consul enables service discovery using a built-in DNS server. This allows existing applications to easily integrate, as almost all applications support using DNS to resolve IP addresses. Using DNS instead of a static IP address allows services to scale up/down and route around failures easily.

- Learn more + Learn more

@@ -115,7 +116,7 @@ description: |-

HTTP API with Edge Triggers

Consul provides an HTTP API to query the service registry for nodes, services, and health check information. The API also supports blocking queries, or long-polling for any changes. This allows automation tools to react to services being registered or health status changes to change configurations or traffic routing in real time.

- Learn more + Learn more

@@ -148,7 +149,7 @@ description: |-

Multi Datacenter

Consul supports multiple datacenters out of the box with no complicated configuration. Look up services in other datacenters or keep the request local. Advanced features like Prepared Queries enable automatic failover to other datacenters.

- Learn more + Learn more

@@ -181,7 +182,7 @@ description: |-

Health Checks

Pairing service discovery with health checking prevents routing requests to unhealthy hosts and enables services to easily provide circuit breakers.

- Learn more + Learn more

diff --git a/website/source/index.html.erb b/website/source/index.html.erb index 08ebd9a64..8bbed5136 100644 --- a/website/source/index.html.erb +++ b/website/source/index.html.erb @@ -15,7 +15,7 @@ description: |- New Consul 1.0 release. Get the details

Service Mesh Made Easy

-

Distributed service networking layer to connect, secure, and configure services across any runtime platform and public or private cloud

+

Consul is a distributed service mesh to connect, secure, and configure services across any runtime platform and public or private cloud

@@ -72,7 +72,11 @@ description: |-
@@ -132,7 +136,7 @@ description: |-

Feature rich Key/Value store to easily configure services.

@@ -147,8 +151,9 @@ description: |-
-

Automation through Codification

-

Codify and automate service authorization policy.

+

API-Driven

+

Codify and automate service definitions, health checks, service + authorization policies, failover logic, and more.

@@ -177,7 +182,7 @@ description: |-
-

Workflows, not Technologies

+

Run and Connect Anywhere

Connect services across any runtime platform and public or private cloud. Connect services from Kubernetes to VMs, Containers to Serverless functions.

@@ -193,11 +198,11 @@ description: |-
-

Open and Extensible

+

Extend and Integrate

  • Provision clusters on any infrastructure.
  • Connect to services over TLS via proxy integrations.
  • -
  • Serve TLS certificates pluggable Certificate Authorities.
  • +
  • Serve TLS certificates with pluggable Certificate Authorities.
diff --git a/website/source/segmentation.html.erb b/website/source/segmentation.html.erb index d919691f9..fbb55583b 100644 --- a/website/source/segmentation.html.erb +++ b/website/source/segmentation.html.erb @@ -9,8 +9,8 @@ description: |-
New Feature -

Service segmentation made easy.

-

Secure service-to-service communication with automatic TLS encryption and identity-based authorization.

+

Service segmentation made easy

+

Secure service-to-service communication with automatic TLS encryption and identity-based authorization

@@ -38,7 +38,12 @@ description: |-
<%= inline_svg 'consul-connect/svgs/segmentation-challenge.svg' %>
-

East-west firewalls are the main tool for networking security in a static world. They depend on constraining traffic flow and use IP based rules to control ingress and egress traffic. But in a dynamic world where services move across machines and machines are frequently created and destroyed, this perimeter-based approach is difficult to scale as it requires complex network topologies and a large number of short lived firewall rules.

+

East-west firewalls use IP-based rules to secure ingress and + egress traffic. But in a dynamic world where services move across + machines and machines are frequently created and destroyed, this + perimeter-based approach is difficult to scale as it results in + complex network topologies and a sprawl of short-lived + firewall rules.

@@ -47,7 +52,11 @@ description: |-
<%= inline_svg 'consul-connect/svgs/segmentation-solution.svg' %>
-

Service segmentation is a new approach to secure the service itself rather than relying on the network. Consul Connect enables high level rules to codify which services are allowed to communicate directly, without IP based rules or networking middleware.

+

Service segmentation is a new approach to secure the service itself + rather than relying on the network. Consul uses service policies to + codify which services are allowed to communicate. These policies + scale across datacenters and large fleets without IP-based rules or + networking middleware.

@@ -62,9 +71,9 @@ description: |- @@ -83,7 +92,7 @@ description: |-

Secure services across any runtime platform

Secure communication between legacy and modern workloads. Sidecar proxies allow applications to be integrated without code changes and Layer 4 support provides nearly universal protocol compatibility.

- Learn more + Learn more

@@ -116,7 +125,7 @@ description: |-

Certificate-Based Service Identity

TLS certificates are used to identify services and secure communications. Certificates use the SPIFFE format for interoperability with other platforms. Consul can be a certificate authority to simplify deployment, or integrate with external signing authorities like Vault.

- Learn more + Learn more

@@ -138,7 +147,7 @@ description: |-

Encrypted communication

All traffic between services is encrypted and authenticated with mutual TLS. Using TLS provides a strong guarantee of the identity of services communicating, and ensures all data in transit is encrypted.

- Learn more + Learn more

From e0e10c1420da888d2fe5d62435efd30227233578 Mon Sep 17 00:00:00 2001 From: Kevin Fishner Date: Fri, 22 Jun 2018 14:56:36 -0700 Subject: [PATCH 588/627] fix up pearkes feedback --- website/source/index.html.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/index.html.erb b/website/source/index.html.erb index 8bbed5136..b69b41b36 100644 --- a/website/source/index.html.erb +++ b/website/source/index.html.erb @@ -72,7 +72,7 @@ description: |-

Service-based networking for dynamic infrastructure

-

The shift from static, on-prem infrastructure to dynamic, cloud +

The shift from static infrastructure to dynamic infrastructure changes the approach to networking from host-based to service-based. Connectivity moves from the use of static IPs to dynamic service discovery, and security moves from static firewalls to From bce00bf04aed8e18d6bc3015a06fe9b33472b53b Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Fri, 22 Jun 2018 23:32:15 +0100 Subject: [PATCH 589/627] Update CHANGELOG.md --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6c5781a8d..30407fdb5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,9 @@ FEATURES: +* **Connect Feature Beta**: This version includes a major new feature for Consul named Connect. Connect enables secure service-to-service communication with automatic TLS encryption and identity-based authorization. For more details and links to demos and getting started guides, see the [announcement blog post](https://www.hashicorp.com/blog/consul-1-2-service-mesh). + * Connect must be enabled explicitly in configuration so upgrading a cluster will not affect any existing functionality until it's enabled. + * This is a Beta release, we don't recommend enabling this in production yet. Please see the documentation for more information. 
* dns: Enable PTR record lookups for services with IPs that have no registered node [[PR-4083](https://github.com/hashicorp/consul/pull/4083)] IMPROVEMENTS: From 28c31d987baf9f0898a3cf7344d2172d3d0fd2b6 Mon Sep 17 00:00:00 2001 From: Mike Wickett Date: Fri, 22 Jun 2018 15:18:15 -0400 Subject: [PATCH 590/627] Add use case drop down to nav --- .../source/assets/stylesheets/_header.scss | 144 +++++++++++++++--- .../source/assets/stylesheets/_variables.scss | 37 ++--- .../assets/stylesheets/application.scss | 8 +- website/source/layouts/_sidebar.erb | 3 + website/source/layouts/layout.erb | 10 +- 5 files changed, 155 insertions(+), 47 deletions(-) diff --git a/website/source/assets/stylesheets/_header.scss b/website/source/assets/stylesheets/_header.scss index dde70d7d6..dea4e2abc 100755 --- a/website/source/assets/stylesheets/_header.scss +++ b/website/source/assets/stylesheets/_header.scss @@ -1,6 +1,45 @@ #header { background: $header-background-color; + // Kind of gnarly override for bootstrap's nav toggle behavior + @media (max-width: 991px) { + .navbar-header { + float: none; + } + .navbar-left, + .navbar-right { + float: none !important; + } + .navbar-toggle { + display: block; + } + .navbar-collapse { + border-top: 1px solid transparent; + box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1); + } + .navbar-fixed-top { + top: 0; + border-width: 0 0 1px; + } + .navbar-collapse.collapse { + display: none !important; + } + .navbar-nav { + float: none !important; + margin-top: 7.5px; + } + .navbar-nav > li { + float: none; + } + .navbar-nav > li > a { + padding-top: 10px; + padding-bottom: 10px; + } + .collapse.in { + display: block !important; + } + } + .navbar-toggle { height: $header-height; margin: 0; @@ -28,7 +67,9 @@ transition: opacity 0.15s ease-in-out; @extend svg.logo.white; - &:hover, &:focus, &:active { + &:hover, + &:focus, + &:active { opacity: 0.6; outline: 0; text-decoration: none; @@ -39,40 +80,97 @@ ul.nav { li { - a { - color: $header-link-color; - 
font-size: $header-font-size; - font-family: $font-family-open-sans; - font-weight: $font-weight-bold; - height: $header-height; - line-height: $header-height; - padding: 0 10px; - margin: 0; - text-decoration: none; + color: $consul-black; + font-size: $header-font-size; + font-family: $font-family-open-sans; + font-weight: $font-weight-bold; + height: $header-height; + margin: 0; + text-decoration: none; - &:hover, &:focus, &:active { - background-color: transparent; - color: $header-link-color-hover; - outline: 0; + &:hover, + &:focus, + &:active { + background-color: transparent; + color: $header-link-color-hover; + outline: 0; - svg { - fill: $header-link-color-hover; - } + svg { + fill: $header-link-color-hover; } + } + + span { + display: block; + padding: 15px; + line-height: 20px; svg { fill: $header-link-color; position: relative; - top: 2px; - width: 14px; - height: 14px; - margin-right: 3px; + top: -2px; + width: 9px; + height: 5px; + margin-left: 7px; + } + } + + svg { + fill: $header-link-color; + position: relative; + top: 2px; + width: 14px; + height: 14px; + margin-right: 3px; + } + + &:hover { + cursor: pointer; + + & > ul { + visibility: visible; + opacity: 1; + display: block; + transition: all 0.5s ease; + } + } + + ul { + visibility: hidden; + opacity: 0; + transition: all 0.5s ease; + min-width: 22rem; + box-shadow: 0px 4px 12px -2px rgba(63, 68, 85, 0.5); + border-radius: 3px; + padding: 2rem; + position: absolute; + height: 160px; + z-index: 1; + background-color: white; + margin-left: -15px; + + &:hover { + visibility: visible; + opacity: 1; + } + + li { + clear: both; + width: 100%; + display: block; + padding: 1rem; + position: relative; + height: 44px; + + a { + text-decoration: none; + } } } } } .buttons { - margin-top: 2px; + margin-top: 20px; } } diff --git a/website/source/assets/stylesheets/_variables.scss b/website/source/assets/stylesheets/_variables.scss index e90216f95..efdd309a5 100755 --- 
a/website/source/assets/stylesheets/_variables.scss +++ b/website/source/assets/stylesheets/_variables.scss @@ -1,27 +1,28 @@ // Colors -$white: #FFFFFF; +$white: #ffffff; $black: #000000; $gray-darker: #555555; +$consul-black: #252937; -$consul-red: #C62A71; -$consul-red-dark: #8C1C59; -$packer-blue: #1DAEFF; -$packer-blue-dark: #1D94DD; -$terraform-purple: #5C4EE5; -$terraform-purple-dark: #4040B2; -$vagrant-blue: #1563FF; -$vagrant-blue-dark: #104EB2; +$consul-red: #c62a71; +$consul-red-dark: #8c1c59; +$packer-blue: #1daeff; +$packer-blue-dark: #1d94dd; +$terraform-purple: #5c4ee5; +$terraform-purple-dark: #4040b2; +$vagrant-blue: #1563ff; +$vagrant-blue-dark: #104eb2; $vault-black: #000000; -$vault-blue: #00ABE0; -$vault-gray: #919FA8; +$vault-blue: #00abe0; +$vault-gray: #919fa8; // Typography -$font-family-klavika: 'klavika-web', Helvetica, sans-serif; +$font-family-klavika: 'klavika-web', Helvetica, sans-serif; $font-family-open-sans: 'Open Sans', sans-serif; $font-family-monospace: 'Fira Mono', monospace; -$font-size: 15px; -$font-weight-reg: 400; -$font-weight-bold: 600; +$font-size: 15px; +$font-weight-reg: 400; +$font-weight-bold: 600; // Body $body-font-color: $gray-darker; @@ -45,8 +46,8 @@ $sidebar-font-weight: $font-weight-reg; $header-background-color: $consul-red; $header-font-size: $font-size - 2; $header-height: 92px; -$header-link-color: rgba($white, 0.85); -$header-link-color-hover: $white; +$header-link-color: rgba($black, 0.85); +$header-link-color-hover: $black; $header-font-family: $font-family-klavika; // Footer @@ -56,7 +57,7 @@ $footer-link-color-hover: $black; // Button $button-background: $white; -$button-font-color: #7b8A8E; +$button-font-color: #7b8a8e; $button-font-family: $font-family-klavika; $button-font-size: $font-size; $button-font-weight: $font-weight-bold; diff --git a/website/source/assets/stylesheets/application.scss b/website/source/assets/stylesheets/application.scss index ce7ff246e..4b8ff1820 100644 --- 
a/website/source/assets/stylesheets/application.scss +++ b/website/source/assets/stylesheets/application.scss @@ -30,10 +30,10 @@ @import '_logos'; // Pages -@import "_community"; -@import "_docs"; -@import "_downloads"; +@import '_community'; +@import '_docs'; +@import '_downloads'; // @import "_home"; // Consul Connect -@import 'consul-connect/_index'; \ No newline at end of file +@import 'consul-connect/_index'; diff --git a/website/source/layouts/_sidebar.erb b/website/source/layouts/_sidebar.erb index cccf893d3..4613291f3 100644 --- a/website/source/layouts/_sidebar.erb +++ b/website/source/layouts/_sidebar.erb @@ -8,6 +8,9 @@

- @@ -242,13 +248,13 @@ description: |-

Ready to get started?

- + Download - Explore docs + Explore docs
diff --git a/website/source/discovery.html.erb b/website/source/discovery.html.erb index 21d020585..4509f088e 100644 --- a/website/source/discovery.html.erb +++ b/website/source/discovery.html.erb @@ -12,13 +12,13 @@ description: |-

Service registry, integrated health checks, and DNS and HTTP interfaces enable any service to discover and be discovered by other services

@@ -90,17 +90,21 @@ description: |-
-
- $ curl \ - --request POST \ - --data \ -'{ - "Name": "api", - "Service": { - "Service": "api", - "Tags": ["v1.2.3"], - "Failover": { - "Datacenters": ["dc1", "dc2"] +
+$ dig web-frontend.service.consul. ANY + +; <<>> DiG 9.8.3-P1 <<>> web-frontend.service.consul. ANY +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 29981 +;; flags: qr aa rd ra; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 0 + +;; QUESTION SECTION: +;web-frontend.service.consul. IN ANY + +;; ANSWER SECTION: +web-frontend.service.consul. 0 IN A 10.0.3.83 +web-frontend.service.consul. 0 IN A 10.0.1.109
@@ -124,16 +128,20 @@ description: |-
- $ curl \ - --request POST \ - --data \ -'{ - "Name": "api", - "Service": { - "Service": "api", - "Tags": ["v1.2.3"], - "Failover": { - "Datacenters": ["dc1", "dc2"] + $ curl http://localhost:8500/v1/health/service/web?index=11&wait=30s +{ + ... + "Node": "10-0-1-109", + "CheckID": "service:web", + "Name": "Service 'web' check", + "Status": "critical", + "ServiceID": "web", + "ServiceName": "web", + "CreateIndex": 10, + "ModifyIndex": 20 + ... +} +
@@ -157,16 +165,10 @@ description: |-
- $ curl \ - --request POST \ - --data \ -'{ - "Name": "api", - "Service": { - "Service": "api", - "Tags": ["v1.2.3"], - "Failover": { - "Datacenters": ["dc1", "dc2"] + $ curl http://localhost:8500/v1/catalog/datacenters +["dc1", "dc2"] +$ curl http://localhost:8500/v1/catalog/nodes?dc=dc2 +...
diff --git a/website/source/index.html.erb b/website/source/index.html.erb index 827eb0743..50512aa94 100644 --- a/website/source/index.html.erb +++ b/website/source/index.html.erb @@ -11,20 +11,20 @@ description: |-
- - New Consul 1.0 release. Get the details + + New HashiCorp Consul 1.2: Service Mesh. Read the blog post

Service Mesh Made Easy

Consul is a distributed service mesh to connect, secure, and configure services across any runtime platform and public or private cloud

- + Download - Get Started + Get Started
@@ -160,16 +160,18 @@ description: |-
- $ curl \ - --request POST \ - --data \ -'{ - "Name": "api", - "Service": { - "Service": "api", - "Tags": ["v1.2.3"], - "Failover": { - "Datacenters": ["dc1", "dc2"] + $ curl http://localhost:8500/v1/kv/deployment +[ + { + "LockIndex": 1, + "Session": "1c3f5836-4df4-0e26-6697-90dcce78acd9", + "Value": "Zm9v", + "Flags": 0, + "Key": "deployment", + "CreateIndex": 13, + "ModifyIndex": 19 + } +]
@@ -249,7 +251,7 @@ description: |-

Consul Open Source addresses the technical complexity of connecting services across distributed infrastructure.

- + diff --git a/website/source/segmentation.html.erb b/website/source/segmentation.html.erb index fbb55583b..639aa3c4c 100644 --- a/website/source/segmentation.html.erb +++ b/website/source/segmentation.html.erb @@ -12,13 +12,13 @@ description: |-

Service segmentation made easy

Secure service-to-service communication with automatic TLS encryption and identity-based authorization

@@ -99,17 +99,19 @@ description: |-
-
- $ curl \ - --request POST \ - --data \ -'{ - "Name": "api", - "Service": { - "Service": "api", - "Tags": ["v1.2.3"], - "Failover": { - "Datacenters": ["dc1", "dc2"] +
$ consul connect proxy \ + -service web \ + -service-addr 127.0.0.1:80 \ + -listen 10.0.1.109:7200 +==> Consul Connect proxy starting... + Configuration mode: Flags + Service: web + Public listener: 10.0.1.109:7200 => 127.0.0.1:80 + +==> Log data will now stream in as it occurs: + + 2018/06/23 09:33:51 [INFO] public listener starting on 10.0.1.109:7200 + 2018/06/23 09:33:51 [INFO] proxy loaded config and ready to serve
@@ -155,16 +157,7 @@ description: |-
- $ curl \ - --request POST \ - --data \ -'{ - "Name": "api", - "Service": { - "Service": "api", - "Tags": ["v1.2.3"], - "Failover": { - "Datacenters": ["dc1", "dc2"] + TODO
@@ -175,13 +168,13 @@ description: |-

Ready to get started?

- + Download - Explore docs + Explore docs
From e491abb1341c7e2a314bda8a9102dde84e203a40 Mon Sep 17 00:00:00 2001 From: Paul Banks Date: Sun, 24 Jun 2018 13:35:39 +0100 Subject: [PATCH 601/627] Fix some doc typos. --- .../source/docs/guides/connect-production.md | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/website/source/docs/guides/connect-production.md b/website/source/docs/guides/connect-production.md index 1bba85dc0..8bb66539a 100644 --- a/website/source/docs/guides/connect-production.md +++ b/website/source/docs/guides/connect-production.md @@ -49,7 +49,7 @@ Connect relies on to ensure it's security properties. A service's identity, in the form of an x.509 certificate, will only be issued to an API client that has `service:write` permission for that service. In other words, any client that has permission to _register_ an instance of a service -will be able to identify as that service and access all of resources that that +will be able to identify as that service and access all of the resources that that service is allowed to access. A secure ACL setup must meet these criteria: @@ -77,13 +77,13 @@ sufficient for ACL tokens to only be unique per _service_ and shared between instances. It is much better though if ACL tokens are unique per service _instance_ because -it limit the blast radius of a compromise. +it limits the blast radius of a compromise. A future release of Connect will support revoking specific certificates that have been issued. For example if a single node in a datacenter has been compromised, it will be possible to find all certificates issued to the agent on that node and revoke them. This will block all access to the intruder without -taking unaffected instances of the service(s) on that node offline too. +taking instances of the service(s) on other nodes offline too. 
While this will work with service-unique tokens, there is nothing stopping an attacker from obtaining certificates while spoofing the agent ID or other @@ -103,15 +103,19 @@ Vault](https://www.vaultproject.io/docs/secrets/consul/index.html). ## Configure Agent Transport Encryption Consul's gossip (UDP) and RPC (TCP) communications need to be encrypted -otherwise attackers may be able to see tokens and private keys while in flight -between the server and client agents or between client agent and application. +otherwise attackers may be able to see ACL tokens while in flight +between the server and client agents (RPC) or between client agent and +application (HTTP). Certificate private keys never leave the host they +are used on but are delivered to the application or proxy over local +HTTP so local agent traffic should be encrypted where potentially +untrusted parties might be able to observe localhost agent API traffic. Follow the [encryption documentation](/docs/agent/encryption.html) to ensure -both gossip encryption and RPC TLS are configured securely. +both gossip encryption and RPC/HTTP TLS are configured securely. For now client and server TLS certificates are still managed by manual configuration. In the future we plan to automate more of that with the same -mechanisms connect offers to user applications. +mechanisms Connect offers to user applications. ## Bootstrap Certificate Authority @@ -202,4 +206,4 @@ integrate](/docs/connect/native.html) with connect. If using any kind of proxy for connect, the application must ensure no untrusted connections can be made to it's unprotected listening port. This is typically done by binding to `localhost` and only allowing loopback traffic, but may also -be achieved using firewall rules or network namespacing. \ No newline at end of file +be achieved using firewall rules or network namespacing. 
From d9e779aa2e7f92af2b19559131588d466857cbd6 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Sun, 24 Jun 2018 15:11:54 -0700 Subject: [PATCH 602/627] website: fix two links on discovery page --- website/source/discovery.html.erb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/discovery.html.erb b/website/source/discovery.html.erb index 4509f088e..5afc8442a 100644 --- a/website/source/discovery.html.erb +++ b/website/source/discovery.html.erb @@ -242,13 +242,13 @@ $ curl http://localhost:8500/v1/catalog/nodes?dc=dc2

Ready to get started?

- + Download - Explore docs + Explore docs
From 8f890e5d7e69b353d0c4579b8504a44e0034938a Mon Sep 17 00:00:00 2001 From: RJ Spiker Date: Mon, 25 Jun 2018 09:41:18 -0600 Subject: [PATCH 603/627] website - some js fixes to make sure scripts are firing (#108) --- .../source/assets/javascripts/animations.js | 1934 +++++++++-------- .../javascripts/consul-connect/carousel.js | 9 +- 2 files changed, 974 insertions(+), 969 deletions(-) diff --git a/website/source/assets/javascripts/animations.js b/website/source/assets/javascripts/animations.js index 452b44a3f..f21f5dc7d 100644 --- a/website/source/assets/javascripts/animations.js +++ b/website/source/assets/javascripts/animations.js @@ -1,998 +1,1002 @@ -var qs = document.querySelector.bind(document) -var qsa = document.querySelectorAll.bind(document) +document.addEventListener('turbolinks:load', initializeAnimations) -// -// home page -// +function initializeAnimations() { + var qs = document.querySelector.bind(document) + var qsa = document.querySelectorAll.bind(document) -var $indexDynamic = qs('#index-dynamic-animation') -if ($indexDynamic) { - var initiated = false - var observer = new IntersectionObserver( - function(entries) { - if (!initiated && entries[0].isIntersecting) { - $indexDynamic.classList.add('active') - var lines = qsa( - '#lines-origin-aws > *, #lines-origin-azure > *, #lines-origin-gcp > *' - ) - setTimeout(function() { - timer = setInterval(function() { - lines[parseInt(Math.random() * lines.length)].classList.toggle( - 'off' - ) - }, 800) - }, 3000) - initiated = true + // + // home page + // + + var $indexDynamic = qs('#index-dynamic-animation') + if ($indexDynamic) { + var initiated = false + var observer = new IntersectionObserver( + function(entries) { + if (!initiated && entries[0].isIntersecting) { + $indexDynamic.classList.add('active') + var lines = qsa( + '#lines-origin-aws > *, #lines-origin-azure > *, #lines-origin-gcp > *' + ) + setTimeout(function() { + timer = setInterval(function() { + lines[parseInt(Math.random() * 
lines.length)].classList.toggle( + 'off' + ) + }, 800) + }, 3000) + initiated = true + } + }, + { threshold: 0.5 } + ) + observer.observe($indexDynamic) + } + + // + // configuration page + // + + var $configChallenge = qs('#configuration-challenge-animation') + var $configSolution = qs('#configuration-solution-animation') + + if ($configChallenge) { + // challenge animation + + var configChallengeTimeline = new TimelineLite({ + onComplete: function() { + configChallengeTimeline.restart() + configSolutionTimeline.restart() } - }, - { threshold: 0.5 } - ) - observer.observe($indexDynamic) -} - -// -// configuration page -// - -var $configChallenge = qs('#configuration-challenge-animation') -var $configSolution = qs('#configuration-solution-animation') - -if ($configChallenge) { - // challenge animation - - var configChallengeTimeline = new TimelineLite({ - onComplete: function() { - configChallengeTimeline.restart() - configSolutionTimeline.restart() - } - }) - - var line1 = qs('#c-line-1') - var line2 = qs('#c-line-2') - var line3 = qs('#c-line-3') - var line4 = qs('#c-line-4') - var line5 = qs('#c-line-5') - var line6 = qs('#c-line-6') - var line7 = qs('#c-line-7') - var line8 = qs('#c-line-8') - var box1 = qs('#c-box-1') - var box2 = qs('#c-box-2') - var box3 = qs('#c-box-3') - var box4 = qs('#c-box-4') - var box5 = qs('#c-box-5') - var box6 = qs('#c-box-6') - var box7 = qs('#c-box-7') - var box8 = qs('#c-box-8') - var progressBar = qs('#c-loading-bar > rect:last-child') - var cog = qs('#c-configuration-server > g > path') - - configChallengeTimeline - .to(box1, 1, {}) - .staggerTo( - [line1, line2, line3, line4, line5, line6, line7, line8], - 1.5, - { css: { strokeDashoffset: 0 } }, - 0.3, - 'start' - ) - .staggerTo( - [box1, box2, box3, box4, box5, box6, box7, box8], - 0.3, - { opacity: 1 }, - 0.3, - '-=2.5' - ) - .fromTo( - progressBar, - 3.5, - { attr: { width: 0 } }, - { attr: { width: 40 } }, - 'start' - ) - .to( - cog, - 3.5, - { rotation: 360, svgOrigin: 
'136px 127px', ease: Power1.easeOut }, - 'start' - ) - .call(function () { - configSolutionTimeline.resume(configSolutionTimeline.time()) }) - .to(line1, 2, {}) - .to( - [line1, line2, line3, line4, line5, line6, line7, line8, progressBar], - 0.5, - { opacity: 0 }, - 'reset' + + var line1 = qs('#c-line-1') + var line2 = qs('#c-line-2') + var line3 = qs('#c-line-3') + var line4 = qs('#c-line-4') + var line5 = qs('#c-line-5') + var line6 = qs('#c-line-6') + var line7 = qs('#c-line-7') + var line8 = qs('#c-line-8') + var box1 = qs('#c-box-1') + var box2 = qs('#c-box-2') + var box3 = qs('#c-box-3') + var box4 = qs('#c-box-4') + var box5 = qs('#c-box-5') + var box6 = qs('#c-box-6') + var box7 = qs('#c-box-7') + var box8 = qs('#c-box-8') + var progressBar = qs('#c-loading-bar > rect:last-child') + var cog = qs('#c-configuration-server > g > path') + + configChallengeTimeline + .to(box1, 1, {}) + .staggerTo( + [line1, line2, line3, line4, line5, line6, line7, line8], + 1.5, + { css: { strokeDashoffset: 0 } }, + 0.3, + 'start' + ) + .staggerTo( + [box1, box2, box3, box4, box5, box6, box7, box8], + 0.3, + { opacity: 1 }, + 0.3, + '-=2.5' + ) + .fromTo( + progressBar, + 3.5, + { attr: { width: 0 } }, + { attr: { width: 40 } }, + 'start' + ) + .to( + cog, + 3.5, + { rotation: 360, svgOrigin: '136px 127px', ease: Power1.easeOut }, + 'start' + ) + .call(function () { + configSolutionTimeline.resume(configSolutionTimeline.time()) + }) + .to(line1, 2, {}) + .to( + [line1, line2, line3, line4, line5, line6, line7, line8, progressBar], + 0.5, + { opacity: 0 }, + 'reset' + ) + .to( + [box1, box2, box3, box4, box5, box6, box7, box8], + 0.5, + { opacity: 0.5 }, + 'reset' + ) + .pause() + + // solution animation + + var configSolutionTimeline = new TimelineLite() + + var lines = qsa( + '#s-line-1, #s-line-2, #s-line-3, #s-line-4, #s-line-5, #s-line-6, #s-line-7, #s-line-8' ) - .to( - [box1, box2, box3, box4, box5, box6, box7, box8], - 0.5, - { opacity: 0.5 }, - 'reset' + var dots = 
qs('#s-dots') + var boxes = qsa( + '#s-service-box-1, #s-service-box-2, #s-service-box-3, #s-service-box-4, #s-service-box-5, #s-service-box-6, #s-service-box-7, #s-service-box-8' ) - .pause() + var progress = qs('#s-progress-indicator') - // solution animation + configSolutionTimeline + .to(boxes, 1, {}) + .to(lines, 1, { css: { strokeDashoffset: 0 } }, 'start') + .to(boxes, 0.5, { opacity: 1 }, '-=0.4') + .fromTo( + progress, + 1, + { attr: { width: 0 } }, + { attr: { width: 40 } }, + 'start' + ) + .to(dots, 0.25, { opacity: 1 }, '-=0.5') + .addPause() + .to(progress, 2, {}) + .to(lines, 0.5, { opacity: 0 }, 'reset') + .to(boxes, 0.5, { opacity: 0.5 }, 'reset') + .to(progress, 0.5, { opacity: 0 }, 'reset') + .to(dots, 0.5, { opacity: 0 }, 'reset') + .pause() - var configSolutionTimeline = new TimelineLite() + // kick off + $configChallenge.classList.add('active') + $configSolution.classList.add('active') + configChallengeTimeline.play() + configSolutionTimeline.play() + } - var lines = qsa( - '#s-line-1, #s-line-2, #s-line-3, #s-line-4, #s-line-5, #s-line-6, #s-line-7, #s-line-8' - ) - var dots = qs('#s-dots') - var boxes = qsa( - '#s-service-box-1, #s-service-box-2, #s-service-box-3, #s-service-box-4, #s-service-box-5, #s-service-box-6, #s-service-box-7, #s-service-box-8' - ) - var progress = qs('#s-progress-indicator') + // + // discovery page + // - configSolutionTimeline - .to(boxes, 1, {}) - .to(lines, 1, { css: { strokeDashoffset: 0 } }, 'start') - .to(boxes, 0.5, { opacity: 1 }, '-=0.4') - .fromTo( - progress, - 1, - { attr: { width: 0 } }, - { attr: { width: 40 } }, - 'start' + var $discoveryChallenge = qs('#discovery-challenge-animation') + var $discoverySolution = qs('#discovery-solution-animation') + + if ($discoveryChallenge) { + // challenge animation + var discoveryChallengeTimeline = new TimelineLite({ + onComplete: function() { + discoveryChallengeTimeline.restart() + discoverySolutionTimeline.restart() + } + }) + + // First, we get each of the 
elements we need to animate + var box = qs('#c-active-box') + var leftPlacement = qs('#c-box-left-placement') + var rightPlacement = qs('#c-box-right-placement') + var leftConnectionLines = qsa( + '#c-line-top-left > *, #c-line-bottom-left > *, #c-line-horizontal-left > *, #c-line-vertical-down > *' ) - .to(dots, 0.25, { opacity: 1 }, '-=0.5') - .addPause() - .to(progress, 2, {}) - .to(lines, 0.5, { opacity: 0 }, 'reset') - .to(boxes, 0.5, { opacity: 0.5 }, 'reset') - .to(progress, 0.5, { opacity: 0 }, 'reset') - .to(dots, 0.5, { opacity: 0 }, 'reset') - .pause() + var rightConnectionLines = qsa( + '#c-line-top-right > *, #c-line-bottom-right > *, #c-line-horizontal-left > *, #c-line-vertical-down > *, #c-line-horizontal-right > *' + ) + var leftConnectionTop = qs('#c-line-top-left') + var leftConnectionBottom = qs('#c-line-bottom-left') + var rightHorizontalConnection = qs('#c-line-horizontal-right') + var rightConnectionTop = qs('#c-line-top-right') + var rightConnectionBottom = qs('#c-line-bottom-right') + var rightConnectionLinesStroke = qsa( + '#c-line-top-right > *, #c-line-bottom-right > *, #c-line-horizontal-right > *, #c-line-horizontal-left > *, #c-line-vertical-down > *' + ) + var leftConnectionLinesStroke = qsa( + '#c-line-top-left > *, #c-line-bottom-left > *, #c-line-horizontal-left > *, #c-line-vertical-down > *' + ) + var brokenLinkLeft = qs('#c-broken-link-left') + var brokenLinkRight = qs('#c-broken-link-right') + var computer = qs('#c-computer') + var codeLines = qs('#c-computer > g') + var toLoadBalancerDown = qsa( + '#c-computer-to-load-balancers #c-arrow-down, #c-computer-to-load-balancers #c-circle' + ) + var toLoadBalancerRight = qs('#c-computer-to-load-balancers #c-arrow-right') + var toLoadBalancerLeft = qs('#c-computer-to-load-balancers #c-arrow-left') + var toLoadBalancerRest = qs('#c-computer-to-load-balancers #c-edit-box') + var progressBars = qsa( + '#c-load-balancer-left > #c-progress-bar, #c-load-balancer-right > #c-progress-bar-2, 
#c-load-balancer-middle > #c-progress-bar-3' + ) + var progressBarsBars = qsa( + '#c-load-balancer-left > #c-progress-bar > *:last-child, #c-load-balancer-right > #c-progress-bar-2 > *:last-child, #c-load-balancer-middle > #c-progress-bar-3 > *:last-child' + ) + var farLeftBoxBorder = qs('#c-box-far-left > path') - // kick off - $configChallenge.classList.add('active') - $configSolution.classList.add('active') - configChallengeTimeline.play() - configSolutionTimeline.play() -} - -// -// discovery page -// - -var $discoveryChallenge = qs('#discovery-challenge-animation') -var $discoverySolution = qs('#discovery-solution-animation') - -if ($discoveryChallenge) { - // challenge animation - var discoveryChallengeTimeline = new TimelineLite({ - onComplete: function() { - discoveryChallengeTimeline.restart() - discoverySolutionTimeline.restart() - } - }) - - // First, we get each of the elements we need to animate - var box = qs('#c-active-box') - var leftPlacement = qs('#c-box-left-placement') - var rightPlacement = qs('#c-box-right-placement') - var leftConnectionLines = qsa( - '#c-line-top-left > *, #c-line-bottom-left > *, #c-line-horizontal-left > *, #c-line-vertical-down > *' - ) - var rightConnectionLines = qsa( - '#c-line-top-right > *, #c-line-bottom-right > *, #c-line-horizontal-left > *, #c-line-vertical-down > *, #c-line-horizontal-right > *' - ) - var leftConnectionTop = qs('#c-line-top-left') - var leftConnectionBottom = qs('#c-line-bottom-left') - var rightHorizontalConnection = qs('#c-line-horizontal-right') - var rightConnectionTop = qs('#c-line-top-right') - var rightConnectionBottom = qs('#c-line-bottom-right') - var rightConnectionLinesStroke = qsa( - '#c-line-top-right > *, #c-line-bottom-right > *, #c-line-horizontal-right > *, #c-line-horizontal-left > *, #c-line-vertical-down > *' - ) - var leftConnectionLinesStroke = qsa( - '#c-line-top-left > *, #c-line-bottom-left > *, #c-line-horizontal-left > *, #c-line-vertical-down > *' - ) - var 
brokenLinkLeft = qs('#c-broken-link-left') - var brokenLinkRight = qs('#c-broken-link-right') - var computer = qs('#c-computer') - var codeLines = qs('#c-computer > g') - var toLoadBalancerDown = qsa( - '#c-computer-to-load-balancers #c-arrow-down, #c-computer-to-load-balancers #c-circle' - ) - var toLoadBalancerRight = qs('#c-computer-to-load-balancers #c-arrow-right') - var toLoadBalancerLeft = qs('#c-computer-to-load-balancers #c-arrow-left') - var toLoadBalancerRest = qs('#c-computer-to-load-balancers #c-edit-box') - var progressBars = qsa( - '#c-load-balancer-left > #c-progress-bar, #c-load-balancer-right > #c-progress-bar-2, #c-load-balancer-middle > #c-progress-bar-3' - ) - var progressBarsBars = qsa( - '#c-load-balancer-left > #c-progress-bar > *:last-child, #c-load-balancer-right > #c-progress-bar-2 > *:last-child, #c-load-balancer-middle > #c-progress-bar-3 > *:last-child' - ) - var farLeftBoxBorder = qs('#c-box-far-left > path') - - // Then, we run each step of the animation using GSAP's TimelineLine, a - // fantastic way to set up a series of complex movements - discoveryChallengeTimeline - .to(box, 1, {}) - // box moves to new position - .to(box, 1, { css: { transform: 'translate(96px, 48px)' } }) - .to(leftPlacement, 0.5, { css: { opacity: 1 } }, '-=1') - .to(rightPlacement, 0.25, { css: { opacity: 0 } }, '-=0.25') - // connection lines turn black - .to(leftConnectionLines, 0.5, { css: { stroke: '#000' } }) - .to(farLeftBoxBorder, 0.5, { css: { fill: '#000' } }, '-=0.5') - // broken link appears - .to( - leftConnectionTop, - 0.1, - { - css: { strokeDashoffset: 6 }, + // Then, we run each step of the animation using GSAP's TimelineLine, a + // fantastic way to set up a series of complex movements + discoveryChallengeTimeline + .to(box, 1, {}) + // box moves to new position + .to(box, 1, { css: { transform: 'translate(96px, 48px)' } }) + .to(leftPlacement, 0.5, { css: { opacity: 1 } }, '-=1') + .to(rightPlacement, 0.25, { css: { opacity: 0 } }, 
'-=0.25') + // connection lines turn black + .to(leftConnectionLines, 0.5, { css: { stroke: '#000' } }) + .to(farLeftBoxBorder, 0.5, { css: { fill: '#000' } }, '-=0.5') + // broken link appears + .to( + leftConnectionTop, + 0.1, + { + css: { strokeDashoffset: 6 }, + ease: Linear.easeNone + }, + '-=0.3' + ) + .to(brokenLinkLeft, 0.2, { css: { opacity: 1 } }, '-=0.15') + // computer appears and code is written + .to(computer, 0.5, { css: { opacity: 1 } }) + .staggerFrom( + codeLines, + 0.4, + { + css: { transform: 'translate(-64px, 0)', opacity: 0 } + }, + 0.1 + ) + .to(codeLines, 0.3, { + css: { transform: 'translate(0, 0)', opacity: 1 } + }) + // code moves to load balancers + .to(toLoadBalancerRest, 0.4, { css: { opacity: 1 } }) + .to(toLoadBalancerLeft, 0.2, { css: { opacity: 1 } }, 'loadBalancerArrows') + .to(toLoadBalancerRight, 0.2, { css: { opacity: 1 } }, 'loadBalancerArrows') + .to(toLoadBalancerDown, 0.2, { css: { opacity: 1 } }, 'loadBalancerArrows') + // load balancers progress bars, old broken link fades out + .to(progressBars, 0.2, { css: { opacity: 1 } }) + .staggerFromTo( + progressBarsBars, + 1.5, + { attr: { width: 0 } }, + { attr: { width: 40 } }, + 0.3 + ) + .to( + [] + .concat(toLoadBalancerRest) + .concat([].slice.call(toLoadBalancerDown)) + .concat([ + toLoadBalancerRight, + toLoadBalancerLeft, + brokenLinkLeft, + leftConnectionTop, + leftConnectionBottom + ]), + 0.5, + { css: { opacity: 0 } }, + '-=0.75' + ) + .to(computer, 0.5, { css: { opacity: .12 } }, '-=0.75') + .to(progressBars, 0.5, { css: { opacity: 0 } }) + // new connection is drawn + .to(rightHorizontalConnection, 0.3, { css: { strokeDashoffset: 0 } }) + .to(rightConnectionTop, 0.2, { + css: { strokeDashoffset: 0 }, ease: Linear.easeNone - }, - '-=0.3' - ) - .to(brokenLinkLeft, 0.2, { css: { opacity: 1 } }, '-=0.15') - // computer appears and code is written - .to(computer, 0.5, { css: { opacity: 1 } }) - .staggerFrom( - codeLines, - 0.4, - { - css: { transform: 'translate(-64px, 
0)', opacity: 0 } - }, - 0.1 - ) - .to(codeLines, 0.3, { - css: { transform: 'translate(0, 0)', opacity: 1 } - }) - // code moves to load balancers - .to(toLoadBalancerRest, 0.4, { css: { opacity: 1 } }) - .to(toLoadBalancerLeft, 0.2, { css: { opacity: 1 } }, 'loadBalancerArrows') - .to(toLoadBalancerRight, 0.2, { css: { opacity: 1 } }, 'loadBalancerArrows') - .to(toLoadBalancerDown, 0.2, { css: { opacity: 1 } }, 'loadBalancerArrows') - // load balancers progress bars, old broken link fades out - .to(progressBars, 0.2, { css: { opacity: 1 } }) - .staggerFromTo( - progressBarsBars, - 1.5, - { attr: { width: 0 } }, - { attr: { width: 40 } }, - 0.3 - ) - .to( - [] - .concat(toLoadBalancerRest) - .concat([].slice.call(toLoadBalancerDown)) - .concat([ - toLoadBalancerRight, - toLoadBalancerLeft, - brokenLinkLeft, - leftConnectionTop, - leftConnectionBottom - ]), - 0.5, - { css: { opacity: 0 } }, - '-=0.75' - ) - .to(computer, 0.5, { css: { opacity: .12 } }, '-=0.75') - .to(progressBars, 0.5, { css: { opacity: 0 } }) - // new connection is drawn - .to(rightHorizontalConnection, 0.3, { css: { strokeDashoffset: 0 } }) - .to(rightConnectionTop, 0.2, { - css: { strokeDashoffset: 0 }, - ease: Linear.easeNone - }) - .to(rightConnectionBottom, 0.3, { - css: { strokeDashoffset: 0 }, - ease: Linear.easeNone - }) - // connection lines turn blue - .to( - rightConnectionLinesStroke, - 0.5, - { css: { stroke: '#3969ED' } }, - '-=0.3' - ) - .to(farLeftBoxBorder, 0.5, { css: { fill: '#3969ED' } }, '-=0.5') - // wait three seconds - .to(box, 3, {}) - // box moves back to original position - .to(box, 1, { css: { transform: 'translate(0, 0)' } }, 'loop2') - .to(leftPlacement, 0.25, { css: { opacity: 0 } }, '-=0.25') - .to(rightPlacement, 0.5, { css: { opacity: 1 } }, '-=0.5') - // connection lines turn black - .to(rightConnectionLines, 0.5, { css: { stroke: '#000' } }) - .to(farLeftBoxBorder, 0.5, { css: { fill: '#000' } }, '-=0.5') - // broken link appears - .to( - rightConnectionTop, - 
0.1, - { - css: { strokeDashoffset: 6 }, + }) + .to(rightConnectionBottom, 0.3, { + css: { strokeDashoffset: 0 }, ease: Linear.easeNone - }, - '-=0.3' - ) - .to(brokenLinkRight, 0.2, { css: { opacity: 1 } }, '-=0.15') - // computer appears and code is written - .from(codeLines, 0.1, { css: { opacity: 0 } }) - .to(computer, 0.5, { css: { opacity: 1 } }, '-=0.1') - .staggerFromTo( - codeLines, - 0.4, - { css: { transform: 'translate(-64px, 0)', opacity: 0 } }, - { css: { transform: 'translate(0, 0)', opacity: 1 } }, - 0.1 - ) - // code moves to load balancers - .to(toLoadBalancerRest, 0.4, { css: { opacity: 1 } }) - .to(toLoadBalancerLeft, 0.2, { css: { opacity: 1 } }, 'loadBalancerArrows2') - .to( - toLoadBalancerRight, - 0.2, - { css: { opacity: 1 } }, - 'loadBalancerArrows2' - ) - .to(toLoadBalancerDown, 0.2, { css: { opacity: 1 } }, 'loadBalancerArrows2') - // load balancers progress bars, old broken link fades out - .to(progressBarsBars, 0.1, { attr: { width: 0 } }) - .to(progressBars, 0.2, { attr: { opacity: 1 } }) - .staggerFromTo( - progressBarsBars, - 1.5, - { css: { width: 0 } }, - { css: { width: 40 } }, - 0.3 - ) - .to( - [] - .concat(toLoadBalancerRest) - .concat([].slice.call(toLoadBalancerDown)) - .concat([ - toLoadBalancerRight, - toLoadBalancerLeft, - brokenLinkRight, - rightConnectionTop, - rightConnectionBottom, - rightHorizontalConnection - ]), - 0.5, - { css: { opacity: 0 } }, - '-=0.75' - ) - .to(computer, 0.5, { css: { opacity: .12 } }, '-=0.75') - .to(progressBars, 0.5, { css: { opacity: 0 } }) - // new connection is drawn - .to(leftConnectionTop, 0.01, { css: { strokeDashoffset: 17 } }) - .to(leftConnectionBottom, 0.01, { css: { strokeDashoffset: 56 } }) - .to([leftConnectionTop, leftConnectionBottom], 0.01, { - css: { opacity: 1 } + }) + // connection lines turn blue + .to( + rightConnectionLinesStroke, + 0.5, + { css: { stroke: '#3969ED' } }, + '-=0.3' + ) + .to(farLeftBoxBorder, 0.5, { css: { fill: '#3969ED' } }, '-=0.5') + // wait three 
seconds + .to(box, 3, {}) + // box moves back to original position + .to(box, 1, { css: { transform: 'translate(0, 0)' } }, 'loop2') + .to(leftPlacement, 0.25, { css: { opacity: 0 } }, '-=0.25') + .to(rightPlacement, 0.5, { css: { opacity: 1 } }, '-=0.5') + // connection lines turn black + .to(rightConnectionLines, 0.5, { css: { stroke: '#000' } }) + .to(farLeftBoxBorder, 0.5, { css: { fill: '#000' } }, '-=0.5') + // broken link appears + .to( + rightConnectionTop, + 0.1, + { + css: { strokeDashoffset: 6 }, + ease: Linear.easeNone + }, + '-=0.3' + ) + .to(brokenLinkRight, 0.2, { css: { opacity: 1 } }, '-=0.15') + // computer appears and code is written + .from(codeLines, 0.1, { css: { opacity: 0 } }) + .to(computer, 0.5, { css: { opacity: 1 } }, '-=0.1') + .staggerFromTo( + codeLines, + 0.4, + { css: { transform: 'translate(-64px, 0)', opacity: 0 } }, + { css: { transform: 'translate(0, 0)', opacity: 1 } }, + 0.1 + ) + // code moves to load balancers + .to(toLoadBalancerRest, 0.4, { css: { opacity: 1 } }) + .to(toLoadBalancerLeft, 0.2, { css: { opacity: 1 } }, 'loadBalancerArrows2') + .to( + toLoadBalancerRight, + 0.2, + { css: { opacity: 1 } }, + 'loadBalancerArrows2' + ) + .to(toLoadBalancerDown, 0.2, { css: { opacity: 1 } }, 'loadBalancerArrows2') + // load balancers progress bars, old broken link fades out + .to(progressBarsBars, 0.1, { attr: { width: 0 } }) + .to(progressBars, 0.2, { attr: { opacity: 1 } }) + .staggerFromTo( + progressBarsBars, + 1.5, + { css: { width: 0 } }, + { css: { width: 40 } }, + 0.3 + ) + .to( + [] + .concat(toLoadBalancerRest) + .concat([].slice.call(toLoadBalancerDown)) + .concat([ + toLoadBalancerRight, + toLoadBalancerLeft, + brokenLinkRight, + rightConnectionTop, + rightConnectionBottom, + rightHorizontalConnection + ]), + 0.5, + { css: { opacity: 0 } }, + '-=0.75' + ) + .to(computer, 0.5, { css: { opacity: .12 } }, '-=0.75') + .to(progressBars, 0.5, { css: { opacity: 0 } }) + // new connection is drawn + .to(leftConnectionTop, 
0.01, { css: { strokeDashoffset: 17 } }) + .to(leftConnectionBottom, 0.01, { css: { strokeDashoffset: 56 } }) + .to([leftConnectionTop, leftConnectionBottom], 0.01, { + css: { opacity: 1 } + }) + .to(leftConnectionTop, 0.2, { + css: { strokeDashoffset: 0 }, + ease: Linear.easeNone + }) + .to(leftConnectionBottom, 0.3, { + css: { strokeDashoffset: 0 }, + ease: Linear.easeNone + }) + // connection lines turn blue + .to(leftConnectionLinesStroke, 0.5, { css: { stroke: '#3969ED' } }, '-=0.3') + .to(farLeftBoxBorder, 0.5, { css: { fill: '#3969ED' } }, '-=0.5') + .call(function () { + discoverySolutionTimeline.resume(discoverySolutionTimeline.time()) + }) + .to(box, 2, {}) + .pause() + + // solution animation + var discoverySolutionTimeline = new TimelineLite() + + var inactiveBox = qs('#s-active-service-1') + var inactiveBoxStroke = qs('#s-active-service-1 > path') + var activeBox = qs('#s-active-service-2') + var activeBoxStroke = qs('#s-active-service-2 > path') + var leftPlacement = qs('#s-dotted-service-box-2') + var rightPlacement = qs('#s-dotted-service-box-3') + var leftConnectionLine = qs('#s-connected-line-1') + var rightConnectionLine = qs('#s-connected-line-2') + var dottedLineLeft = qs('#s-dotted-line-left') + var dottedLineRight = qs('#s-dotted-lines-right') + var dottedLineRightPrimary = qs('#s-dotted-lines-right > path:nth-child(2)') + var dottedLineRightAlt = qs('#s-dotted-lines-right > path:last-child') + var syncLeft = qs('#s-dynamic-sync-left') + var syncRight = qs('#s-dynamic-sync-right') + var syncSpinnerLeft = qs('#s-dynamic-sync-left > path') + var syncSpinnerRight = qs('#s-dynamic-sync-right > path') + + discoverySolutionTimeline + .to(activeBox, 1, {}) + // box moves + .to(activeBox, 0.5, { x: 96, y: 48 }) + .to(leftPlacement, 0.25, { css: { opacity: 1 } }, '-=0.5') + .to(rightPlacement, 0.25, { css: { opacity: 0 } }, '-=0.1') + // connection is broken + .to(leftConnectionLine, 0.75, { css: { strokeDashoffset: 222 } }, '-=0.5') + // box color 
changes to black + .to(activeBoxStroke, 0.25, { css: { fill: '#000' } }, '-=0.4') + .to(inactiveBoxStroke, 0.25, { css: { fill: '#000' } }, '-=0.4') + // right sync lines appear + .to(dottedLineRight, 0.4, { css: { opacity: 1 } }) + .to(syncRight, 0.2, { css: { opacity: 1 } }, '-=0.2') + .to(syncSpinnerRight, 1, { rotation: 360, svgOrigin: '232px 127px' }) + // left sync lines appear + .to(dottedLineLeft, 0.4, { css: { opacity: 1 } }, '-=0.6') + .to(syncLeft, 0.2, { css: { opacity: 1 } }, '-=0.2') + .to(syncSpinnerLeft, 1, { rotation: 360, svgOrigin: '88px 127px' }) + // connection is redrawn + .to(rightConnectionLine, 0.75, { css: { strokeDashoffset: 0 } }) + // right sync lines disappear + .to(dottedLineRight, 0.4, { css: { opacity: 0 } }, '-=1.2') + .to(syncRight, 0.2, { css: { opacity: 0 } }, '-=1.2') + // left sync lines disappear + .to(dottedLineLeft, 0.4, { css: { opacity: 0 } }, '-=0.5') + .to(syncLeft, 0.2, { css: { opacity: 0 } }, '-=0.5') + // box color changes to pink + .to(activeBoxStroke, 0.25, { css: { fill: '#ca2171' } }, '-=0.2') + .to(inactiveBoxStroke, 0.25, { css: { fill: '#ca2171' } }, '-=0.2') + // wait three seconds + .to(activeBox, 3, {}) + // box moves + .to(activeBox, 0.5, { x: 0, y: 0 }) + .to(leftPlacement, 0.25, { css: { opacity: 0 } }, '-=0.1') + .to(rightPlacement, 0.25, { css: { opacity: 1 } }, '-=0.5') + // connection is broken + .to(rightConnectionLine, 0.75, { css: { strokeDashoffset: 270 } }, '-=0.5') + // box color changes to black + .to(activeBoxStroke, 0.25, { css: { fill: '#000' } }, '-=0.4') + .to(inactiveBoxStroke, 0.25, { css: { fill: '#000' } }, '-=0.4') + // right sync lines appear + .to(dottedLineRightAlt, 0.01, { css: { opacity: 1 } }) + .to(dottedLineRightPrimary, 0.01, { css: { opacity: 0 } }) + .to(dottedLineRight, 0.4, { css: { opacity: 1 } }) + .to(syncRight, 0.2, { css: { opacity: 1 } }, '-=0.2') + .fromTo( + syncSpinnerRight, + 1, + { rotation: 0 }, + { rotation: 360, svgOrigin: '232px 127px' } + ) + // left 
sync lines appear + .to(dottedLineLeft, 0.4, { css: { opacity: 1 } }, '-=0.6') + .to(syncLeft, 0.2, { css: { opacity: 1 } }, '-=0.2') + .fromTo( + syncSpinnerLeft, + 1, + { rotation: 0 }, + { rotation: 360, svgOrigin: '88px 127px' } + ) + // connection is redrawn + .to(leftConnectionLine, 0.75, { css: { strokeDashoffset: 0 } }) + // right sync lines disappear + .to(dottedLineRight, 0.4, { css: { opacity: 0 } }, '-=1.2') + .to(syncRight, 0.2, { css: { opacity: 0 } }, '-=1.2') + // left sync lines disappear + .to(dottedLineLeft, 0.4, { css: { opacity: 0 } }, '-=0.5') + .to(syncLeft, 0.2, { css: { opacity: 0 } }, '-=0.5') + // box color changes to pink + .to(activeBoxStroke, 0.25, { css: { fill: '#ca2171' } }, '-=0.2') + .to(inactiveBoxStroke, 0.25, { css: { fill: '#ca2171' } }, '-=0.2') + .addPause() + // wait three seconds + .to(activeBox, 2, {}) + .pause() + + // kick it off + $discoveryChallenge.classList.add('active') + $discoverySolution.classList.add('active') + discoveryChallengeTimeline.play() + discoverySolutionTimeline.play() + } + + // + // discovery page + // + + var $segmentationChallenge = qs('#segmentation-challenge-animation') + var $segmentationSolution = qs('#segmentation-solution-animation') + + if ($segmentationChallenge) { + // challenge animation + var segmentationChallengeTimeline = new TimelineLite({ + onComplete: function() { + segmentationChallengeTimeline.restart() + segmentationSolutionTimeline.restart() + } }) - .to(leftConnectionTop, 0.2, { - css: { strokeDashoffset: 0 }, - ease: Linear.easeNone - }) - .to(leftConnectionBottom, 0.3, { - css: { strokeDashoffset: 0 }, - ease: Linear.easeNone - }) - // connection lines turn blue - .to(leftConnectionLinesStroke, 0.5, { css: { stroke: '#3969ED' } }, '-=0.3') - .to(farLeftBoxBorder, 0.5, { css: { fill: '#3969ED' } }, '-=0.5') - .call(function () { - discoverySolutionTimeline.resume(discoverySolutionTimeline.time()) - }) - .to(box, 2, {}) - .pause() - // solution animation - var 
discoverySolutionTimeline = new TimelineLite() + var computerUpdatePath = qs('#c-firewall-updates #c-update_path') + var computerUpdateBox = qs('#c-firewall-updates #c-edit') + var computer = qs('#c-computer') + var progressBars = qsa( + '#c-progress-indicator, #c-progress-indicator-2, #c-progress-indicator-3' + ) + var progressBarBars = qsa( + '#c-progress-indicator > rect:last-child, #c-progress-indicator-2 > rect:last-child, #c-progress-indicator-3 > rect:last-child' + ) + var brokenLinks = qsa('#c-broken-link-1, #c-broken-link-2, #c-broken-link-3') + var box2 = qs('#c-box-2') + var box2Border = qs('#c-box-2 > path') + var box4 = qs('#c-box-4') + var box4Border = qs('#c-box-4 > path') + var box6 = qs('#c-box-6') + var box6Border = qs('#c-box-6 > path') + var box7 = qs('#c-box-7') + var box7Border = qs('#c-box-7 > path') + var path1a = qs('#c-path-1 > *:nth-child(2)') + var path1b = qs('#c-path-1 > *:nth-child(3)') + var path1c = qs('#c-path-1 > *:nth-child(1)') + var path2a = qs('#c-path-2 > *:nth-child(1)') + var path2b = qs('#c-path-2 > *:nth-child(3)') + var path2c = qs('#c-path-2 > *:nth-child(2)') + var path3a = qs('#c-path-3 > *:nth-child(2)') + var path3b = qs('#c-path-3 > *:nth-child(3)') + var path3c = qs('#c-path-3 > *:nth-child(1)') - var inactiveBox = qs('#s-active-service-1') - var inactiveBoxStroke = qs('#s-active-service-1 > path') - var activeBox = qs('#s-active-service-2') - var activeBoxStroke = qs('#s-active-service-2 > path') - var leftPlacement = qs('#s-dotted-service-box-2') - var rightPlacement = qs('#s-dotted-service-box-3') - var leftConnectionLine = qs('#s-connected-line-1') - var rightConnectionLine = qs('#s-connected-line-2') - var dottedLineLeft = qs('#s-dotted-line-left') - var dottedLineRight = qs('#s-dotted-lines-right') - var dottedLineRightPrimary = qs('#s-dotted-lines-right > path:nth-child(2)') - var dottedLineRightAlt = qs('#s-dotted-lines-right > path:last-child') - var syncLeft = qs('#s-dynamic-sync-left') - var syncRight = 
qs('#s-dynamic-sync-right') - var syncSpinnerLeft = qs('#s-dynamic-sync-left > path') - var syncSpinnerRight = qs('#s-dynamic-sync-right > path') - - discoverySolutionTimeline - .to(activeBox, 1, {}) - // box moves - .to(activeBox, 0.5, { x: 96, y: 48 }) - .to(leftPlacement, 0.25, { css: { opacity: 1 } }, '-=0.5') - .to(rightPlacement, 0.25, { css: { opacity: 0 } }, '-=0.1') - // connection is broken - .to(leftConnectionLine, 0.75, { css: { strokeDashoffset: 222 } }, '-=0.5') - // box color changes to black - .to(activeBoxStroke, 0.25, { css: { fill: '#000' } }, '-=0.4') - .to(inactiveBoxStroke, 0.25, { css: { fill: '#000' } }, '-=0.4') - // right sync lines appear - .to(dottedLineRight, 0.4, { css: { opacity: 1 } }) - .to(syncRight, 0.2, { css: { opacity: 1 } }, '-=0.2') - .to(syncSpinnerRight, 1, { rotation: 360, svgOrigin: '232px 127px' }) - // left sync lines appear - .to(dottedLineLeft, 0.4, { css: { opacity: 1 } }, '-=0.6') - .to(syncLeft, 0.2, { css: { opacity: 1 } }, '-=0.2') - .to(syncSpinnerLeft, 1, { rotation: 360, svgOrigin: '88px 127px' }) - // connection is redrawn - .to(rightConnectionLine, 0.75, { css: { strokeDashoffset: 0 } }) - // right sync lines disappear - .to(dottedLineRight, 0.4, { css: { opacity: 0 } }, '-=1.2') - .to(syncRight, 0.2, { css: { opacity: 0 } }, '-=1.2') - // left sync lines disappear - .to(dottedLineLeft, 0.4, { css: { opacity: 0 } }, '-=0.5') - .to(syncLeft, 0.2, { css: { opacity: 0 } }, '-=0.5') - // box color changes to pink - .to(activeBoxStroke, 0.25, { css: { fill: '#ca2171' } }, '-=0.2') - .to(inactiveBoxStroke, 0.25, { css: { fill: '#ca2171' } }, '-=0.2') - // wait three seconds - .to(activeBox, 3, {}) - // box moves - .to(activeBox, 0.5, { x: 0, y: 0 }) - .to(leftPlacement, 0.25, { css: { opacity: 0 } }, '-=0.1') - .to(rightPlacement, 0.25, { css: { opacity: 1 } }, '-=0.5') - // connection is broken - .to(rightConnectionLine, 0.75, { css: { strokeDashoffset: 270 } }, '-=0.5') - // box color changes to black - 
.to(activeBoxStroke, 0.25, { css: { fill: '#000' } }, '-=0.4') - .to(inactiveBoxStroke, 0.25, { css: { fill: '#000' } }, '-=0.4') - // right sync lines appear - .to(dottedLineRightAlt, 0.01, { css: { opacity: 1 } }) - .to(dottedLineRightPrimary, 0.01, { css: { opacity: 0 } }) - .to(dottedLineRight, 0.4, { css: { opacity: 1 } }) - .to(syncRight, 0.2, { css: { opacity: 1 } }, '-=0.2') - .fromTo( - syncSpinnerRight, - 1, - { rotation: 0 }, - { rotation: 360, svgOrigin: '232px 127px' } - ) - // left sync lines appear - .to(dottedLineLeft, 0.4, { css: { opacity: 1 } }, '-=0.6') - .to(syncLeft, 0.2, { css: { opacity: 1 } }, '-=0.2') - .fromTo( - syncSpinnerLeft, - 1, - { rotation: 0 }, - { rotation: 360, svgOrigin: '88px 127px' } - ) - // connection is redrawn - .to(leftConnectionLine, 0.75, { css: { strokeDashoffset: 0 } }) - // right sync lines disappear - .to(dottedLineRight, 0.4, { css: { opacity: 0 } }, '-=1.2') - .to(syncRight, 0.2, { css: { opacity: 0 } }, '-=1.2') - // left sync lines disappear - .to(dottedLineLeft, 0.4, { css: { opacity: 0 } }, '-=0.5') - .to(syncLeft, 0.2, { css: { opacity: 0 } }, '-=0.5') - // box color changes to pink - .to(activeBoxStroke, 0.25, { css: { fill: '#ca2171' } }, '-=0.2') - .to(inactiveBoxStroke, 0.25, { css: { fill: '#ca2171' } }, '-=0.2') - .addPause() - // wait three seconds - .to(activeBox, 2, {}) - .pause() - - // kick it off - $discoveryChallenge.classList.add('active') - $discoverySolution.classList.add('active') - discoveryChallengeTimeline.play() - discoverySolutionTimeline.play() -} - -// -// discovery page -// - -var $segmentationChallenge = qs('#segmentation-challenge-animation') -var $segmentationSolution = qs('#segmentation-solution-animation') - -if ($segmentationChallenge) { - // challenge animation - var segmentationChallengeTimeline = new TimelineLite({ - onComplete: function() { - segmentationChallengeTimeline.restart() - segmentationSolutionTimeline.restart() - } - }) - - var computerUpdatePath = 
qs('#c-firewall-updates #c-update_path') - var computerUpdateBox = qs('#c-firewall-updates #c-edit') - var computer = qs('#c-computer') - var progressBars = qsa( - '#c-progress-indicator, #c-progress-indicator-2, #c-progress-indicator-3' - ) - var progressBarBars = qsa( - '#c-progress-indicator > rect:last-child, #c-progress-indicator-2 > rect:last-child, #c-progress-indicator-3 > rect:last-child' - ) - var brokenLinks = qsa('#c-broken-link-1, #c-broken-link-2, #c-broken-link-3') - var box2 = qs('#c-box-2') - var box2Border = qs('#c-box-2 > path') - var box4 = qs('#c-box-4') - var box4Border = qs('#c-box-4 > path') - var box6 = qs('#c-box-6') - var box6Border = qs('#c-box-6 > path') - var box7 = qs('#c-box-7') - var box7Border = qs('#c-box-7 > path') - var path1a = qs('#c-path-1 > *:nth-child(2)') - var path1b = qs('#c-path-1 > *:nth-child(3)') - var path1c = qs('#c-path-1 > *:nth-child(1)') - var path2a = qs('#c-path-2 > *:nth-child(1)') - var path2b = qs('#c-path-2 > *:nth-child(3)') - var path2c = qs('#c-path-2 > *:nth-child(2)') - var path3a = qs('#c-path-3 > *:nth-child(2)') - var path3b = qs('#c-path-3 > *:nth-child(3)') - var path3c = qs('#c-path-3 > *:nth-child(1)') - - segmentationChallengeTimeline - .to(box2, 1, {}) - // box 4 and 6 appear - .to(box4Border, 0.4, { css: { fill: '#000' } }, 'box4-in') - .fromTo( - box4, - 0.3, - { scale: 0, rotation: 200, opacity: 0, svgOrigin: '291px 41px' }, - { scale: 1, rotation: 360, opacity: 1 }, - 'box4-in' - ) - .to(box6Border, 0.4, { css: { fill: '#000' } }, '-=0.2') - .fromTo( - box6, - 0.3, - { scale: 0, rotation: 200, opacity: 0, svgOrigin: '195px 289px' }, - { scale: 1, rotation: 360, opacity: 1 }, - '-=0.4' - ) - // wait for a moment - .to(box2, 1, {}) - // computer appears and sends updates to firewalls - .to(computer, 0.5, { opacity: 1 }) - .to(computerUpdateBox, 0.3, { opacity: 1 }, '-=0.2') - .to(computerUpdatePath, 0.3, { opacity: 1 }, '-=0.2') - // firewall progress bars - .to(progressBarBars, 0.01, { 
attr: { width: 0 } }) - .to(progressBars, 0.2, { opacity: 1 }) - .staggerTo(progressBarBars, 0.6, { attr: { width: 40 } }, 0.2) - // connection 1 made - .to(path1a, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) - .to(path1b, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) - .to(path1c, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) - // progress bars and firewall update lines fade out - .to(progressBars, 0.7, { opacity: 0 }, 'resetComputer1') - .to(computerUpdateBox, 0.7, { opacity: 0 }, 'resetComputer1') - .to(computerUpdatePath, 0.7, { opacity: 0 }, 'resetComputer1') - // connection turns blue - .to( - [path1a, path1b, path1c], - 0.5, - { css: { stroke: '#3969ED' } }, - 'resetComputer1' - ) - .to( - [box4Border, box6Border], - 0.5, - { css: { fill: '#3969ED' } }, - 'resetComputer1' - ) - // second connection draws - .to( - path2a, - 0.3, - { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }, - '-=0.3' - ) - .to(path2b, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) - .to(path2c, 0.2, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) - // second connection turns blue - .to([path2a, path2b, path2c], 0.5, { css: { stroke: '#3969ED' } }, '-=0.1') - .to(box7Border, 0.5, { css: { fill: '#3969ED' } }, '-=0.3') - // wait a moment - .to(box2, 2, {}) - // blue elements fade back to gray - .to( - [path1a, path1b, path1c, path2a, path2b, path2c], - 0.5, - { - css: { stroke: '#b5b8c4' } - }, - 'colorReset1' - ) - .to( - [box7Border, box4Border, box6Border], - 0.5, - { css: { fill: '#b5b8c4' } }, - 'colorReset1' - ) - // box 2 appears - .to(box2Border, 0.4, { css: { fill: '#000' } }, 'colorReset1') - .fromTo( - box2, - 0.3, - { scale: 0, rotation: 200, opacity: 0, svgOrigin: '195px 42px' }, - { scale: 1, rotation: 360, opacity: 1 }, - '-=0.4' - ) - // wait a moment - .to(box2, 1, {}) - // computer updates firewalls - .to(computerUpdateBox, 0.3, { opacity: 1 }, '-=0.2') - .to(computerUpdatePath, 0.3, { 
opacity: 1 }, '-=0.2') - // firewall progress bars - .to(progressBarBars, 0.01, { width: 0 }) - .to(progressBars, 0.2, { opacity: 1 }) - .staggerTo(progressBarBars, 0.6, { width: 40 }, 0.2) - // third connection made - .to(path3a, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) - .to(path3b, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) - .to(path3c, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) - // progress bars & computer arrows fade out - .to(progressBars, 0.5, { opacity: 0 }, 'computerReset2') - .to(computerUpdateBox, 0.5, { opacity: 0 }, 'computerReset2') - .to(computerUpdatePath, 0.5, { opacity: 0 }, 'computerReset2') - // third connection turns blue - .to( - [path3a, path3b, path3c], - 0.5, - { css: { stroke: '#3969ED' } }, - 'computerReset2' - ) - .to( - [box2Border, box7Border], - 0.5, - { css: { fill: '#3969ED' } }, - 'computerReset2' - ) - // wait a bit - .to(box2, 2, {}) - // third connection turns back to gray - .to( - [path3a, path3b, path3c], - 0.5, - { css: { stroke: '#b5b8c4' } }, - 'colorReset2' - ) - .to( - [box2Border, box7Border], - 0.5, - { css: { fill: '#b5b8c4' } }, - 'colorReset2' - ) - // boxes 2, 4, and 6 disappear - .to( - [box2, box4, box6], - 0.6, - { scale: 0, rotation: 200, opacity: 0 }, - '-=0.4' - ) - // lines turn red and broken links appear - .to( - [path1a, path1b, path1c, path2a, path2b, path2c, path3a, path3b, path3c], - 0.3, - { css: { stroke: '#ED4168' } }, - '-=0.2' - ) - .to(brokenLinks, 0.3, { opacity: 1 }, '-=0.3') - // wait a moment - .to(box2, 1, {}) - // code sent to firewalls - .to(computerUpdateBox, 0.3, { opacity: 1 }) - .to(computerUpdatePath, 0.3, { opacity: 1 }) - // firewall progress bars - .to(progressBarBars, 0.01, { width: 0 }) - .to(progressBars, 0.2, { opacity: 1 }) - .staggerTo(progressBarBars, 0.6, { width: 40 }, 0.2) - .to(box2, 0.5, {}) - // faulty connections removed - .to( - [ - path1a, - path1b, - path1c, + segmentationChallengeTimeline + .to(box2, 1, 
{}) + // box 4 and 6 appear + .to(box4Border, 0.4, { css: { fill: '#000' } }, 'box4-in') + .fromTo( + box4, + 0.3, + { scale: 0, rotation: 200, opacity: 0, svgOrigin: '291px 41px' }, + { scale: 1, rotation: 360, opacity: 1 }, + 'box4-in' + ) + .to(box6Border, 0.4, { css: { fill: '#000' } }, '-=0.2') + .fromTo( + box6, + 0.3, + { scale: 0, rotation: 200, opacity: 0, svgOrigin: '195px 289px' }, + { scale: 1, rotation: 360, opacity: 1 }, + '-=0.4' + ) + // wait for a moment + .to(box2, 1, {}) + // computer appears and sends updates to firewalls + .to(computer, 0.5, { opacity: 1 }) + .to(computerUpdateBox, 0.3, { opacity: 1 }, '-=0.2') + .to(computerUpdatePath, 0.3, { opacity: 1 }, '-=0.2') + // firewall progress bars + .to(progressBarBars, 0.01, { attr: { width: 0 } }) + .to(progressBars, 0.2, { opacity: 1 }) + .staggerTo(progressBarBars, 0.6, { attr: { width: 40 } }, 0.2) + // connection 1 made + .to(path1a, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) + .to(path1b, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) + .to(path1c, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) + // progress bars and firewall update lines fade out + .to(progressBars, 0.7, { opacity: 0 }, 'resetComputer1') + .to(computerUpdateBox, 0.7, { opacity: 0 }, 'resetComputer1') + .to(computerUpdatePath, 0.7, { opacity: 0 }, 'resetComputer1') + // connection turns blue + .to( + [path1a, path1b, path1c], + 0.5, + { css: { stroke: '#3969ED' } }, + 'resetComputer1' + ) + .to( + [box4Border, box6Border], + 0.5, + { css: { fill: '#3969ED' } }, + 'resetComputer1' + ) + // second connection draws + .to( path2a, - path2b, - path2c, - path3a, - path3b, - path3c - ].concat(brokenLinks), - 0.7, - { opacity: 0 } - ) - // progress bars and connection arrows fade out - .to(progressBars, 0.5, { opacity: 0 }, 'computerReset3') - .to(computerUpdateBox, 0.5, { opacity: 0 }, 'computerReset3') - .to(computerUpdatePath, 0.5, { opacity: 0 }, 'computerReset3') - .to(computer, 
0.5, { opacity: 0 }, 'computerReset3') - .call(function () { - segmentationSolutionTimeline.resume(segmentationSolutionTimeline.time()) - }) - // wait a moment before the loop - .to(box2, 1, {}) - .pause() - - // solution animation - var segmentationSolutionTimeline = new TimelineLite() - - // service boxes - var box1 = qs('#s-service-2') - var box1Border = qs('#s-service-2 > path') - var box1Lock = qs('#s-service-2 #s-secure-indicator-2') - var box2 = qs('#s-service-4') - var box2Border = qs('#s-service-4 > path') - var box2Lock = qs('#s-service-4 #s-secure-indicator-4') - var box3 = qs('#s-service-6') - var box3Border = qs('#s-service-6 > path') - var box3Lock = qs('#s-service-6 #s-secure-indicator-6') - - // connection paths - var path1a = qs('#s-connection-path-2') - var path1b = qs('#s-connection-path-8') - var path2a = qs('#s-connection-path-9') - var path2b = qs('#s-connection-path-10') - var path3a = qs('#s-connection-path-1') - var path3b = qs('#s-connection-path-4') - var path3c = qs('#s-connection-path-5') - var path3d = qs('#s-connection-path-6') - - // inbound consul updates - var inboundPathLower = qs('#s-consul-inbound-paths-lower') - var inboundUpdateLower = qs('#s-dynamic-update-inbound-lower') - var inboundUpdateLowerSpinner = qs('#s-dynamic-update-inbound-lower > path') - var inboundPathUpper = qs('#s-consul-inbound-paths-upper') - var inboundUpdateUpper = qs('#s-dynamic-update-inbound-upper') - var inboundUpdateUpperSpinner = qs('#s-dynamic-update-inbound-upper > path') - - // outbound consul updates - var outboundPathsLower = qsa( - '#s-consul-server-connection-lower, #s-consul-outbound-5, #s-consul-outbound-6, #s-consul-outbound-7' - ) - var outboundUpdateLower = qsa( - '#s-dynamic-update-outbound-ower, #s-tls-cert-lower' - ) - var outboundUpdateLowerSpinner = qs('#s-dynamic-update-outbound-ower > path') - var outboundPathsUpper1 = qsa( - '#s-consul-server-connection-upper, #s-consul-outbound-3, #s-consul-outbound-4' - ) - var 
outboundPathsUpper2 = qsa( - '#s-consul-server-connection-upper, #s-consul-outbound-1, #s-soncul-outbound-2' - ) - var outboundUpdateUpper = qsa( - '#s-tls-cert-upper, #s-dynamic-update-outbound-upper' - ) - var outboundUpdateUpperSpinner = qs('#s-dynamic-update-outbound-upper > path') - - segmentationSolutionTimeline - .to(box2, 1, {}) - // boxes 2 and 3 appear - .fromTo( - box2, - 0.3, - { scale: 0, rotation: 200, opacity: 0, svgOrigin: '281px 104px' }, - { scale: 1, rotation: 360, opacity: 1 } - ) - .fromTo( - box3, - 0.3, - { scale: 0, rotation: 200, opacity: 0, svgOrigin: '185px 226px' }, - { scale: 1, rotation: 360, opacity: 1 }, - '-=0.1' - ) - // wait a moment - .to(box1, 0.5, {}) - // consul speaks to each box that needs a connection made - .to(outboundPathsUpper1, 0.5, { opacity: 1 }) - .to(outboundPathsLower, 0.5, { opacity: 1 }, '-=0.3') - .to(outboundUpdateUpper, 0.3, { opacity: 1 }, '-=0.3') - .to(outboundUpdateLower, 0.3, { opacity: 1 }, '-=0.1') - .to( - outboundUpdateUpperSpinner, - 0.7, - { - rotation: 360, - svgOrigin: '44px 99px' - }, - '-=0.5' - ) - .to( - outboundUpdateLowerSpinner, - 0.7, - { - rotation: 360, - svgOrigin: '44px 246px' - }, - '-=0.3' - ) - // pink borders, locks, connections drawn, consul talk fades - .to(box2Lock, 0.3, { opacity: 1 }, 'connections-1') - .to(box2Border, 0.3, { fill: '#CA2270' }, 'connections-1') - .to(box3Lock, 0.3, { opacity: 1 }, 'connections-1') - .to(box3Border, 0.3, { fill: '#CA2270' }, 'connections-1') - .to(outboundPathsUpper1, 0.7, { opacity: 0 }, 'connections-1') - .to(outboundPathsLower, 0.7, { opacity: 0 }, 'connections-1') - .to(outboundUpdateUpper, 0.7, { opacity: 0 }, 'connections-1') - .to(outboundUpdateLower, 0.7, { opacity: 0 }, 'connections-1') - .to( - path1a, - 0.5, - { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, - 'connections-1' - ) - .to( - path1b, - 0.5, - { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, - 'connections-1' - ) - .to( - path2a, - 0.5, - { css: { 
strokeDashoffset: 0, stroke: '#CA2270' } }, - 'connections-1' - ) - .to( - path2b, - 0.5, - { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, - 'connections-1' - ) - // wait a moment - .to(box1, 0.5, {}) - // box 1 appears - .fromTo( - box1, - 0.3, - { scale: 0, rotation: 200, opacity: 0, svgOrigin: '185px 104px' }, - { scale: 1, rotation: 360, opacity: 1 }, - '-=0.1' - ) - // wait a moment, previous paths fade ('#EEB9D1') - .to(box1, 0.5, {}, 'stage-1-complete') - .to(box2Border, 0.5, { fill: '#EEB9D1' }, 'stage-1-complete') - .to(box3Border, 0.5, { fill: '#EEB9D1' }, 'stage-1-complete') - .to(path1a, 0.5, { css: { stroke: '#EEB9D1' } }, 'stage-1-complete') - .to(path1b, 0.5, { css: { stroke: '#EEB9D1' } }, 'stage-1-complete') - .to(path2a, 0.5, { css: { stroke: '#EEB9D1' } }, 'stage-1-complete') - .to(path2b, 0.5, { css: { stroke: '#EEB9D1' } }, 'stage-1-complete') - // consul speaks to each box that needs a connection made - .to(outboundPathsUpper2, 0.5, { opacity: 1 }) - .to(outboundPathsLower, 0.5, { opacity: 1 }, '-=0.3') - .to(outboundUpdateUpper, 0.3, { opacity: 1 }, '-=0.3') - .to(outboundUpdateLower, 0.3, { opacity: 1 }, '-=0.1') - .to( - outboundUpdateUpperSpinner, - 0.7, - { - rotation: 720, - svgOrigin: '44px 99px' - }, - '-=0.5' - ) - .to( - outboundUpdateLowerSpinner, - 0.7, - { - rotation: 720, - svgOrigin: '44px 246px' - }, - '-=0.3' - ) - // connections drawn - .to(box1Lock, 0.3, { opacity: 1 }, 'connections-2') - .to(box1Border, 0.3, { fill: '#CA2270' }, 'connections-2') - .to( - path3a, - 0.5, - { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, - 'connections-2' - ) - .to( - path3b, - 0.5, - { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, - 'connections-2' - ) - .to( - path3c, - 0.5, - { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, - 'connections-2' - ) - .to( - path3d, - 0.5, - { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, - 'connections-2' - ) - .to(box1, 0.7, {}, 'stage-2-complete') - .to(outboundPathsUpper2, 0.7, { 
opacity: 0 }, 'stage-2-complete') - .to(outboundPathsLower, 0.7, { opacity: 0 }, 'stage-2-complete') - .to(outboundUpdateUpper, 0.7, { opacity: 0 }, 'stage-2-complete') - .to(outboundUpdateLower, 0.7, { opacity: 0 }, 'stage-2-complete') - .to(box1Border, 0.5, { fill: '#EEB9D1' }, 'path-fade-2') - .to(path3a, 0.5, { css: { stroke: '#EEB9D1' } }, 'path-fade-2') - .to(path3b, 0.5, { css: { stroke: '#EEB9D1' } }, 'path-fade-2') - .to(path3c, 0.5, { css: { stroke: '#EEB9D1' } }, 'path-fade-2') - .to(path3d, 0.5, { css: { stroke: '#EEB9D1' } }, 'path-fade-2') - // wait a moment - .to(box1, 1, {}) - // all new boxes and connections fade - .to( - [ - box1, + 0.3, + { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }, + '-=0.3' + ) + .to(path2b, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) + .to(path2c, 0.2, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) + // second connection turns blue + .to([path2a, path2b, path2c], 0.5, { css: { stroke: '#3969ED' } }, '-=0.1') + .to(box7Border, 0.5, { css: { fill: '#3969ED' } }, '-=0.3') + // wait a moment + .to(box2, 2, {}) + // blue elements fade back to gray + .to( + [path1a, path1b, path1c, path2a, path2b, path2c], + 0.5, + { + css: { stroke: '#b5b8c4' } + }, + 'colorReset1' + ) + .to( + [box7Border, box4Border, box6Border], + 0.5, + { css: { fill: '#b5b8c4' } }, + 'colorReset1' + ) + // box 2 appears + .to(box2Border, 0.4, { css: { fill: '#000' } }, 'colorReset1') + .fromTo( box2, - box3, - path1a, - path1b, - path2a, - path2b, - path3a, - path3b, - path3c, - path3d - ], - 0.5, - { opacity: 0.3 } + 0.3, + { scale: 0, rotation: 200, opacity: 0, svgOrigin: '195px 42px' }, + { scale: 1, rotation: 360, opacity: 1 }, + '-=0.4' + ) + // wait a moment + .to(box2, 1, {}) + // computer updates firewalls + .to(computerUpdateBox, 0.3, { opacity: 1 }, '-=0.2') + .to(computerUpdatePath, 0.3, { opacity: 1 }, '-=0.2') + // firewall progress bars + .to(progressBarBars, 0.01, { width: 0 }) + .to(progressBars, 0.2, { 
opacity: 1 }) + .staggerTo(progressBarBars, 0.6, { width: 40 }, 0.2) + // third connection made + .to(path3a, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) + .to(path3b, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) + .to(path3c, 0.3, { css: { strokeDashoffset: 0 }, ease: Linear.easeNone }) + // progress bars & computer arrows fade out + .to(progressBars, 0.5, { opacity: 0 }, 'computerReset2') + .to(computerUpdateBox, 0.5, { opacity: 0 }, 'computerReset2') + .to(computerUpdatePath, 0.5, { opacity: 0 }, 'computerReset2') + // third connection turns blue + .to( + [path3a, path3b, path3c], + 0.5, + { css: { stroke: '#3969ED' } }, + 'computerReset2' + ) + .to( + [box2Border, box7Border], + 0.5, + { css: { fill: '#3969ED' } }, + 'computerReset2' + ) + // wait a bit + .to(box2, 2, {}) + // third connection turns back to gray + .to( + [path3a, path3b, path3c], + 0.5, + { css: { stroke: '#b5b8c4' } }, + 'colorReset2' + ) + .to( + [box2Border, box7Border], + 0.5, + { css: { fill: '#b5b8c4' } }, + 'colorReset2' + ) + // boxes 2, 4, and 6 disappear + .to( + [box2, box4, box6], + 0.6, + { scale: 0, rotation: 200, opacity: 0 }, + '-=0.4' + ) + // lines turn red and broken links appear + .to( + [path1a, path1b, path1c, path2a, path2b, path2c, path3a, path3b, path3c], + 0.3, + { css: { stroke: '#ED4168' } }, + '-=0.2' + ) + .to(brokenLinks, 0.3, { opacity: 1 }, '-=0.3') + // wait a moment + .to(box2, 1, {}) + // code sent to firewalls + .to(computerUpdateBox, 0.3, { opacity: 1 }) + .to(computerUpdatePath, 0.3, { opacity: 1 }) + // firewall progress bars + .to(progressBarBars, 0.01, { width: 0 }) + .to(progressBars, 0.2, { opacity: 1 }) + .staggerTo(progressBarBars, 0.6, { width: 40 }, 0.2) + .to(box2, 0.5, {}) + // faulty connections removed + .to( + [ + path1a, + path1b, + path1c, + path2a, + path2b, + path2c, + path3a, + path3b, + path3c + ].concat(brokenLinks), + 0.7, + { opacity: 0 } + ) + // progress bars and connection arrows fade out + 
.to(progressBars, 0.5, { opacity: 0 }, 'computerReset3') + .to(computerUpdateBox, 0.5, { opacity: 0 }, 'computerReset3') + .to(computerUpdatePath, 0.5, { opacity: 0 }, 'computerReset3') + .to(computer, 0.5, { opacity: 0 }, 'computerReset3') + .call(function () { + segmentationSolutionTimeline.resume(segmentationSolutionTimeline.time()) + }) + // wait a moment before the loop + .to(box2, 1, {}) + .pause() + + // solution animation + var segmentationSolutionTimeline = new TimelineLite() + + // service boxes + var box1 = qs('#s-service-2') + var box1Border = qs('#s-service-2 > path') + var box1Lock = qs('#s-service-2 #s-secure-indicator-2') + var box2 = qs('#s-service-4') + var box2Border = qs('#s-service-4 > path') + var box2Lock = qs('#s-service-4 #s-secure-indicator-4') + var box3 = qs('#s-service-6') + var box3Border = qs('#s-service-6 > path') + var box3Lock = qs('#s-service-6 #s-secure-indicator-6') + + // connection paths + var path1a = qs('#s-connection-path-2') + var path1b = qs('#s-connection-path-8') + var path2a = qs('#s-connection-path-9') + var path2b = qs('#s-connection-path-10') + var path3a = qs('#s-connection-path-1') + var path3b = qs('#s-connection-path-4') + var path3c = qs('#s-connection-path-5') + var path3d = qs('#s-connection-path-6') + + // inbound consul updates + var inboundPathLower = qs('#s-consul-inbound-paths-lower') + var inboundUpdateLower = qs('#s-dynamic-update-inbound-lower') + var inboundUpdateLowerSpinner = qs('#s-dynamic-update-inbound-lower > path') + var inboundPathUpper = qs('#s-consul-inbound-paths-upper') + var inboundUpdateUpper = qs('#s-dynamic-update-inbound-upper') + var inboundUpdateUpperSpinner = qs('#s-dynamic-update-inbound-upper > path') + + // outbound consul updates + var outboundPathsLower = qsa( + '#s-consul-server-connection-lower, #s-consul-outbound-5, #s-consul-outbound-6, #s-consul-outbound-7' ) - // faded boxes speak to consul - .to(inboundPathLower, 0.5, { opacity: 1 }, 'inbound') - .to(inboundPathUpper, 
0.5, { opacity: 1 }, 'inbound') - .to(inboundUpdateLower, 0.5, { opacity: 1 }, 'inbound') - .to(inboundUpdateUpper, 0.5, { opacity: 1 }, 'inbound') - .to( - inboundUpdateLowerSpinner, - 0.7, - { - rotation: 360, - svgOrigin: '44px 237px' - }, - '-=0.3' + var outboundUpdateLower = qsa( + '#s-dynamic-update-outbound-ower, #s-tls-cert-lower' ) - .to( - inboundUpdateUpperSpinner, - 0.7, - { - rotation: 360, - svgOrigin: '44px 91px' - }, - '-=0.3' + var outboundUpdateLowerSpinner = qs('#s-dynamic-update-outbound-ower > path') + var outboundPathsUpper1 = qsa( + '#s-consul-server-connection-upper, #s-consul-outbound-3, #s-consul-outbound-4' ) - // consul removes faded boxes and connections - .to( - [ - box1, + var outboundPathsUpper2 = qsa( + '#s-consul-server-connection-upper, #s-consul-outbound-1, #s-soncul-outbound-2' + ) + var outboundUpdateUpper = qsa( + '#s-tls-cert-upper, #s-dynamic-update-outbound-upper' + ) + var outboundUpdateUpperSpinner = qs('#s-dynamic-update-outbound-upper > path') + + segmentationSolutionTimeline + .to(box2, 1, {}) + // boxes 2 and 3 appear + .fromTo( box2, + 0.3, + { scale: 0, rotation: 200, opacity: 0, svgOrigin: '281px 104px' }, + { scale: 1, rotation: 360, opacity: 1 } + ) + .fromTo( box3, + 0.3, + { scale: 0, rotation: 200, opacity: 0, svgOrigin: '185px 226px' }, + { scale: 1, rotation: 360, opacity: 1 }, + '-=0.1' + ) + // wait a moment + .to(box1, 0.5, {}) + // consul speaks to each box that needs a connection made + .to(outboundPathsUpper1, 0.5, { opacity: 1 }) + .to(outboundPathsLower, 0.5, { opacity: 1 }, '-=0.3') + .to(outboundUpdateUpper, 0.3, { opacity: 1 }, '-=0.3') + .to(outboundUpdateLower, 0.3, { opacity: 1 }, '-=0.1') + .to( + outboundUpdateUpperSpinner, + 0.7, + { + rotation: 360, + svgOrigin: '44px 99px' + }, + '-=0.5' + ) + .to( + outboundUpdateLowerSpinner, + 0.7, + { + rotation: 360, + svgOrigin: '44px 246px' + }, + '-=0.3' + ) + // pink borders, locks, connections drawn, consul talk fades + .to(box2Lock, 0.3, { 
opacity: 1 }, 'connections-1') + .to(box2Border, 0.3, { fill: '#CA2270' }, 'connections-1') + .to(box3Lock, 0.3, { opacity: 1 }, 'connections-1') + .to(box3Border, 0.3, { fill: '#CA2270' }, 'connections-1') + .to(outboundPathsUpper1, 0.7, { opacity: 0 }, 'connections-1') + .to(outboundPathsLower, 0.7, { opacity: 0 }, 'connections-1') + .to(outboundUpdateUpper, 0.7, { opacity: 0 }, 'connections-1') + .to(outboundUpdateLower, 0.7, { opacity: 0 }, 'connections-1') + .to( path1a, + 0.5, + { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, + 'connections-1' + ) + .to( path1b, + 0.5, + { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, + 'connections-1' + ) + .to( path2a, + 0.5, + { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, + 'connections-1' + ) + .to( path2b, + 0.5, + { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, + 'connections-1' + ) + // wait a moment + .to(box1, 0.5, {}) + // box 1 appears + .fromTo( + box1, + 0.3, + { scale: 0, rotation: 200, opacity: 0, svgOrigin: '185px 104px' }, + { scale: 1, rotation: 360, opacity: 1 }, + '-=0.1' + ) + // wait a moment, previous paths fade ('#EEB9D1') + .to(box1, 0.5, {}, 'stage-1-complete') + .to(box2Border, 0.5, { fill: '#EEB9D1' }, 'stage-1-complete') + .to(box3Border, 0.5, { fill: '#EEB9D1' }, 'stage-1-complete') + .to(path1a, 0.5, { css: { stroke: '#EEB9D1' } }, 'stage-1-complete') + .to(path1b, 0.5, { css: { stroke: '#EEB9D1' } }, 'stage-1-complete') + .to(path2a, 0.5, { css: { stroke: '#EEB9D1' } }, 'stage-1-complete') + .to(path2b, 0.5, { css: { stroke: '#EEB9D1' } }, 'stage-1-complete') + // consul speaks to each box that needs a connection made + .to(outboundPathsUpper2, 0.5, { opacity: 1 }) + .to(outboundPathsLower, 0.5, { opacity: 1 }, '-=0.3') + .to(outboundUpdateUpper, 0.3, { opacity: 1 }, '-=0.3') + .to(outboundUpdateLower, 0.3, { opacity: 1 }, '-=0.1') + .to( + outboundUpdateUpperSpinner, + 0.7, + { + rotation: 720, + svgOrigin: '44px 99px' + }, + '-=0.5' + ) + .to( + 
outboundUpdateLowerSpinner, + 0.7, + { + rotation: 720, + svgOrigin: '44px 246px' + }, + '-=0.3' + ) + // connections drawn + .to(box1Lock, 0.3, { opacity: 1 }, 'connections-2') + .to(box1Border, 0.3, { fill: '#CA2270' }, 'connections-2') + .to( path3a, + 0.5, + { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, + 'connections-2' + ) + .to( path3b, + 0.5, + { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, + 'connections-2' + ) + .to( path3c, + 0.5, + { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, + 'connections-2' + ) + .to( path3d, - inboundPathLower, - inboundPathUpper, - inboundUpdateLower, - inboundUpdateUpper - ], - 0.5, - { opacity: 0.0 } - ) - .addPause() - // wait a moment before the loop - .to(box1, 1, {}) - .pause() + 0.5, + { css: { strokeDashoffset: 0, stroke: '#CA2270' } }, + 'connections-2' + ) + .to(box1, 0.7, {}, 'stage-2-complete') + .to(outboundPathsUpper2, 0.7, { opacity: 0 }, 'stage-2-complete') + .to(outboundPathsLower, 0.7, { opacity: 0 }, 'stage-2-complete') + .to(outboundUpdateUpper, 0.7, { opacity: 0 }, 'stage-2-complete') + .to(outboundUpdateLower, 0.7, { opacity: 0 }, 'stage-2-complete') + .to(box1Border, 0.5, { fill: '#EEB9D1' }, 'path-fade-2') + .to(path3a, 0.5, { css: { stroke: '#EEB9D1' } }, 'path-fade-2') + .to(path3b, 0.5, { css: { stroke: '#EEB9D1' } }, 'path-fade-2') + .to(path3c, 0.5, { css: { stroke: '#EEB9D1' } }, 'path-fade-2') + .to(path3d, 0.5, { css: { stroke: '#EEB9D1' } }, 'path-fade-2') + // wait a moment + .to(box1, 1, {}) + // all new boxes and connections fade + .to( + [ + box1, + box2, + box3, + path1a, + path1b, + path2a, + path2b, + path3a, + path3b, + path3c, + path3d + ], + 0.5, + { opacity: 0.3 } + ) + // faded boxes speak to consul + .to(inboundPathLower, 0.5, { opacity: 1 }, 'inbound') + .to(inboundPathUpper, 0.5, { opacity: 1 }, 'inbound') + .to(inboundUpdateLower, 0.5, { opacity: 1 }, 'inbound') + .to(inboundUpdateUpper, 0.5, { opacity: 1 }, 'inbound') + .to( + inboundUpdateLowerSpinner, + 0.7, + 
{ + rotation: 360, + svgOrigin: '44px 237px' + }, + '-=0.3' + ) + .to( + inboundUpdateUpperSpinner, + 0.7, + { + rotation: 360, + svgOrigin: '44px 91px' + }, + '-=0.3' + ) + // consul removes faded boxes and connections + .to( + [ + box1, + box2, + box3, + path1a, + path1b, + path2a, + path2b, + path3a, + path3b, + path3c, + path3d, + inboundPathLower, + inboundPathUpper, + inboundUpdateLower, + inboundUpdateUpper + ], + 0.5, + { opacity: 0.0 } + ) + .addPause() + // wait a moment before the loop + .to(box1, 1, {}) + .pause() - // kick it off - $segmentationChallenge.classList.add('active') - $segmentationSolution.classList.add('active') - segmentationChallengeTimeline.play() - segmentationSolutionTimeline.play() -} + // kick it off + $segmentationChallenge.classList.add('active') + $segmentationSolution.classList.add('active') + segmentationChallengeTimeline.play() + segmentationSolutionTimeline.play() + } +} \ No newline at end of file diff --git a/website/source/assets/javascripts/consul-connect/carousel.js b/website/source/assets/javascripts/consul-connect/carousel.js index c42e9b255..605417bbf 100644 --- a/website/source/assets/javascripts/consul-connect/carousel.js +++ b/website/source/assets/javascripts/consul-connect/carousel.js @@ -1,3 +1,6 @@ +var qs = document.querySelector.bind(document) +var qsa = document.querySelectorAll.bind(document) + // siema carousels var dots = qsa('.g-carousel .pagination li') var carousel = new Siema({ @@ -20,15 +23,13 @@ var carousel = new Siema({ }) // on previous button click -document - .querySelector('.g-carousel .prev') +qs('.g-carousel .prev') .addEventListener('click', function() { carousel.prev() }) // on next button click -document - .querySelector('.g-carousel .next') +qs('.g-carousel .next') .addEventListener('click', function() { carousel.next() }) From df03db47ceede7ba7a5434cc59e61729a7dd4cd0 Mon Sep 17 00:00:00 2001 From: John Cowen Date: Mon, 25 Jun 2018 11:33:07 +0100 Subject: [PATCH 604/627] tenenacy > 
tenancy --- website/source/docs/connect/security.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/connect/security.html.md b/website/source/docs/connect/security.html.md index 7171173d3..182d875dc 100644 --- a/website/source/docs/connect/security.html.md +++ b/website/source/docs/connect/security.html.md @@ -85,7 +85,7 @@ network namespacing techniques provided by the underlying operating system. For scenarios where multiple services are running on the same machine without isolation, these services must all be trusted. We call this the -**trusted multi-tenenacy** deployment model. Any service could theoretically +**trusted multi-tenancy** deployment model. Any service could theoretically connect to any other service via the loopback listener, bypassing Connect completely. In this scenario, all services must be trusted _or_ isolation mechanisms must be used. From bfc628da0a863ab47631fd822f6cc6525d06472f Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Sun, 24 Jun 2018 14:24:01 -0700 Subject: [PATCH 605/627] website: fix scrolling/loading issue on iOS --- website/source/assets/stylesheets/_global.scss | 4 ---- 1 file changed, 4 deletions(-) diff --git a/website/source/assets/stylesheets/_global.scss b/website/source/assets/stylesheets/_global.scss index 10319049c..3f8bf49df 100755 --- a/website/source/assets/stylesheets/_global.scss +++ b/website/source/assets/stylesheets/_global.scss @@ -1,6 +1,4 @@ html { - height: 100%; - min-height: 100%; text-rendering: optimizeLegibility; -webkit-font-smoothing: antialiased; overflow-x: hidden; @@ -13,8 +11,6 @@ body { font-size: $font-size; font-family: $font-family-open-sans; font-weight: $font-weight-reg; - height: 100%; - min-height: 100%; overflow-x: hidden; } From 0a8a54287760f93d4b7000c95f23060fd942ddc8 Mon Sep 17 00:00:00 2001 From: Mike Wickett Date: Mon, 25 Jun 2018 11:26:35 -0400 Subject: [PATCH 606/627] Adds small polyfill for classlist because IE does not support it on 
SVG elements --- .../javascripts/consul-connect/vendor/classlist-polyfill.min.js | 2 ++ website/source/layouts/layout.erb | 1 + 2 files changed, 3 insertions(+) create mode 100644 website/source/assets/javascripts/consul-connect/vendor/classlist-polyfill.min.js diff --git a/website/source/assets/javascripts/consul-connect/vendor/classlist-polyfill.min.js b/website/source/assets/javascripts/consul-connect/vendor/classlist-polyfill.min.js new file mode 100644 index 000000000..d866a3046 --- /dev/null +++ b/website/source/assets/javascripts/consul-connect/vendor/classlist-polyfill.min.js @@ -0,0 +1,2 @@ +/*! @source http://purl.eligrey.com/github/classList.js/blob/master/classList.js */ +"document" in self && ("classList" in document.createElement("_") && (!document.createElementNS || "classList" in document.createElementNS("http://www.w3.org/2000/svg", "g")) || !function (t) { "use strict"; if ("Element" in t) { var e = "classList", n = "prototype", i = t.Element[n], s = Object, r = String[n].trim || function () { return this.replace(/^\s+|\s+$/g, "") }, o = Array[n].indexOf || function (t) { for (var e = 0, n = this.length; n > e; e++)if (e in this && this[e] === t) return e; return -1 }, c = function (t, e) { this.name = t, this.code = DOMException[t], this.message = e }, a = function (t, e) { if ("" === e) throw new c("SYNTAX_ERR", "The token must not be empty."); if (/\s/.test(e)) throw new c("INVALID_CHARACTER_ERR", "The token must not contain space characters."); return o.call(t, e) }, l = function (t) { for (var e = r.call(t.getAttribute("class") || ""), n = e ? 
e.split(/\s+/) : [], i = 0, s = n.length; s > i; i++)this.push(n[i]); this._updateClassName = function () { t.setAttribute("class", this.toString()) } }, u = l[n] = [], h = function () { return new l(this) }; if (c[n] = Error[n], u.item = function (t) { return this[t] || null }, u.contains = function (t) { return ~a(this, t + "") }, u.add = function () { var t, e = arguments, n = 0, i = e.length, s = !1; do t = e[n] + "", ~a(this, t) || (this.push(t), s = !0); while (++n < i); s && this._updateClassName() }, u.remove = function () { var t, e, n = arguments, i = 0, s = n.length, r = !1; do for (t = n[i] + "", e = a(this, t); ~e;)this.splice(e, 1), r = !0, e = a(this, t); while (++i < s); r && this._updateClassName() }, u.toggle = function (t, e) { var n = this.contains(t), i = n ? e !== !0 && "remove" : e !== !1 && "add"; return i && this[i](t), e === !0 || e === !1 ? e : !n }, u.replace = function (t, e) { var n = a(t + ""); ~n && (this.splice(n, 1, e), this._updateClassName()) }, u.toString = function () { return this.join(" ") }, s.defineProperty) { var f = { get: h, enumerable: !0, configurable: !0 }; try { s.defineProperty(i, e, f) } catch (p) { void 0 !== p.number && -2146823252 !== p.number || (f.enumerable = !1, s.defineProperty(i, e, f)) } } else s[n].__defineGetter__ && i.__defineGetter__(e, h) } }(self), function () { "use strict"; var t = document.createElement("_"); if (t.classList.add("c1", "c2"), !t.classList.contains("c2")) { var e = function (t) { var e = DOMTokenList.prototype[t]; DOMTokenList.prototype[t] = function (t) { var n, i = arguments.length; for (n = 0; i > n; n++)t = arguments[n], e.call(this, t) } }; e("add"), e("remove") } if (t.classList.toggle("c3", !1), t.classList.contains("c3")) { var n = DOMTokenList.prototype.toggle; DOMTokenList.prototype.toggle = function (t, e) { return 1 in arguments && !this.contains(t) == !e ? 
e : n.call(this, t) } } "replace" in document.createElement("_").classList || (DOMTokenList.prototype.replace = function (t, e) { var n = this.toString().split(" "), i = n.indexOf(t + ""); ~i && (n = n.slice(i), this.remove.apply(this, n), this.add(e), this.add.apply(this, n.slice(1))) }), t = null }()); \ No newline at end of file diff --git a/website/source/layouts/layout.erb b/website/source/layouts/layout.erb index c06221c06..f96d9a60f 100644 --- a/website/source/layouts/layout.erb +++ b/website/source/layouts/layout.erb @@ -34,6 +34,7 @@ <%= javascript_include_tag "consul-connect/vendor/intersection-observer-polyfill", defer: true %> <%= javascript_include_tag "consul-connect/vendor/siema.min", defer: true %> + <%= javascript_include_tag "consul-connect/vendor/classlist-polyfill.min", defer: true %> <%= javascript_include_tag "application", defer: true %> From e5d7e2ec47d3b91da4c4e2f123ee7c7e05199801 Mon Sep 17 00:00:00 2001 From: Mike Wickett Date: Mon, 25 Jun 2018 13:41:31 -0400 Subject: [PATCH 607/627] Small tweaks to video playback --- .../assets/javascripts/consul-connect/home-hero.js | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/website/source/assets/javascripts/consul-connect/home-hero.js b/website/source/assets/javascripts/consul-connect/home-hero.js index 92ead8aac..94d9ed9fc 100644 --- a/website/source/assets/javascripts/consul-connect/home-hero.js +++ b/website/source/assets/javascripts/consul-connect/home-hero.js @@ -27,8 +27,10 @@ function initialiateVideoChange(index) { ) loadingBar.style.transitionDuration = '0s' - // reset the current video - $$videos[currentIndex].currentTime = 0 + // reset the current video + if (!isNaN($$videos[currentIndex].duration)) { + $$videos[currentIndex].currentTime = 0 + } $$videoControls[currentIndex].classList.remove('playing') // stop deactivation @@ -59,7 +61,8 @@ function playVideo(index, wrapper) { $$videoControls[index].querySelector( '.progress-bar span' - 
).style.transitionDuration = `${Math.ceil($$videos[index].duration / playbackRate)}s` + ).style.transitionDuration = + Math.ceil($$videos[index].duration / playbackRate).toString() + 's' // set the currentIndex to be that of the current video's index currentIndex = index @@ -88,4 +91,4 @@ for (var i = 0; i < $$videoControls.length; i++) { // go to first video to start this thing if ($$videos.length > 0) { initialiateVideoChange(0) -} \ No newline at end of file +} From 08776304fb93334b0b845d1000c8bcf0c22a5481 Mon Sep 17 00:00:00 2001 From: RJ Spiker Date: Mon, 25 Jun 2018 11:59:58 -0600 Subject: [PATCH 608/627] website - some visual updates including css bug fixes and image updates (#111) --- .../distributed-locks-and-semaphores.png | 3 - .../assets/images/consul-connect/feature.jpg | 3 - .../assets/images/consul-connect/grid_1.png | 3 + .../assets/images/consul-connect/grid_2.png | 3 + .../assets/images/consul-connect/grid_3.png | 3 + .../logos/consul-enterprise-logo.svg | 7 +++ .../consul-connect/logos/consul-logo.svg | 7 +++ .../consul-connect/open-and-extensible.png | 3 - .../images/consul-connect/svgs/semaphores.svg | 55 +++++++++++++++++++ .../workflows-not-technologies.png | 3 - .../consul-connect/components/_logo-grid.scss | 5 -- .../components/_text-asset.scss | 12 ++++ .../consul-connect/pages/_home.scss | 13 +++++ website/source/configuration.html.erb | 2 +- website/source/index.html.erb | 14 ++--- website/source/segmentation.html.erb | 20 +------ 16 files changed, 113 insertions(+), 43 deletions(-) delete mode 100644 website/source/assets/images/consul-connect/distributed-locks-and-semaphores.png delete mode 100644 website/source/assets/images/consul-connect/feature.jpg create mode 100644 website/source/assets/images/consul-connect/grid_1.png create mode 100644 website/source/assets/images/consul-connect/grid_2.png create mode 100644 website/source/assets/images/consul-connect/grid_3.png create mode 100644 
website/source/assets/images/consul-connect/logos/consul-enterprise-logo.svg create mode 100644 website/source/assets/images/consul-connect/logos/consul-logo.svg delete mode 100644 website/source/assets/images/consul-connect/open-and-extensible.png create mode 100644 website/source/assets/images/consul-connect/svgs/semaphores.svg delete mode 100644 website/source/assets/images/consul-connect/workflows-not-technologies.png diff --git a/website/source/assets/images/consul-connect/distributed-locks-and-semaphores.png b/website/source/assets/images/consul-connect/distributed-locks-and-semaphores.png deleted file mode 100644 index 0183b765c..000000000 --- a/website/source/assets/images/consul-connect/distributed-locks-and-semaphores.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:37be667f00897fa8f4e867e646ba94e23a4cfcc9c084ced2763db2628ea7c031 -size 37864 diff --git a/website/source/assets/images/consul-connect/feature.jpg b/website/source/assets/images/consul-connect/feature.jpg deleted file mode 100644 index 83411ef7a..000000000 --- a/website/source/assets/images/consul-connect/feature.jpg +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c971e3e33bbae50b240ff28d42b582b8b43887f171558182caa570e46b120675 -size 84628 diff --git a/website/source/assets/images/consul-connect/grid_1.png b/website/source/assets/images/consul-connect/grid_1.png new file mode 100644 index 000000000..2db1d1e75 --- /dev/null +++ b/website/source/assets/images/consul-connect/grid_1.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a885d9643e2ae7202980d482d6f60ed6a3699d401929618127aea724706297f5 +size 103170 diff --git a/website/source/assets/images/consul-connect/grid_2.png b/website/source/assets/images/consul-connect/grid_2.png new file mode 100644 index 000000000..251c3cc3f --- /dev/null +++ b/website/source/assets/images/consul-connect/grid_2.png @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0d79543fac463cf99e46978226661a2217118cdc3cac408f267c404350dec430 +size 129232 diff --git a/website/source/assets/images/consul-connect/grid_3.png b/website/source/assets/images/consul-connect/grid_3.png new file mode 100644 index 000000000..9de85b9b6 --- /dev/null +++ b/website/source/assets/images/consul-connect/grid_3.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea17c3140f3d67f1c3ac81491481743959141d6ed8957f904ef871fad28824f4 +size 131975 diff --git a/website/source/assets/images/consul-connect/logos/consul-enterprise-logo.svg b/website/source/assets/images/consul-connect/logos/consul-enterprise-logo.svg new file mode 100644 index 000000000..c5bff249d --- /dev/null +++ b/website/source/assets/images/consul-connect/logos/consul-enterprise-logo.svg @@ -0,0 +1,7 @@ + + Consul Enterprise + + + + + diff --git a/website/source/assets/images/consul-connect/logos/consul-logo.svg b/website/source/assets/images/consul-connect/logos/consul-logo.svg new file mode 100644 index 000000000..daef751a6 --- /dev/null +++ b/website/source/assets/images/consul-connect/logos/consul-logo.svg @@ -0,0 +1,7 @@ + + Consul + + + + + diff --git a/website/source/assets/images/consul-connect/open-and-extensible.png b/website/source/assets/images/consul-connect/open-and-extensible.png deleted file mode 100644 index 3de665b2f..000000000 --- a/website/source/assets/images/consul-connect/open-and-extensible.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:3b24b9839f50e786960e841e8b842e692eba80917c0d9407276f8888a571ebfa -size 34458 diff --git a/website/source/assets/images/consul-connect/svgs/semaphores.svg b/website/source/assets/images/consul-connect/svgs/semaphores.svg new file mode 100644 index 000000000..afefb803c --- /dev/null +++ b/website/source/assets/images/consul-connect/svgs/semaphores.svg @@ -0,0 +1,55 @@ + + + Distributed Locks and Semaphores + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + LEADER + + + FOLLOWER + + + FOLLOWER + + + + + diff --git a/website/source/assets/images/consul-connect/workflows-not-technologies.png b/website/source/assets/images/consul-connect/workflows-not-technologies.png deleted file mode 100644 index dfdb2b3b8..000000000 --- a/website/source/assets/images/consul-connect/workflows-not-technologies.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:11f9fb25bd8da00e492893eb751e5f3be3cc4b56eaaabd81e612798d4eaa194a -size 24096 diff --git a/website/source/assets/stylesheets/consul-connect/components/_logo-grid.scss b/website/source/assets/stylesheets/consul-connect/components/_logo-grid.scss index b3f3c98b1..25b48531b 100644 --- a/website/source/assets/stylesheets/consul-connect/components/_logo-grid.scss +++ b/website/source/assets/stylesheets/consul-connect/components/_logo-grid.scss @@ -14,13 +14,8 @@ width: 50%; @media (min-width: 768px) { - padding: 5px $site-gutter-padding; width: 33%; } - - @media (min-width: 992px) { - padding: 25px $site-gutter-padding; - } } img { diff --git a/website/source/assets/stylesheets/consul-connect/components/_text-asset.scss b/website/source/assets/stylesheets/consul-connect/components/_text-asset.scss index 7af8785d6..127df53be 100644 --- a/website/source/assets/stylesheets/consul-connect/components/_text-asset.scss +++ b/website/source/assets/stylesheets/consul-connect/components/_text-asset.scss @@ -36,6 +36,10 @@ margin-bottom: -120px; } + & > div:last-child { + justify-content: unset; + } + img { width: 145%; } @@ -87,6 +91,10 @@ } } + &:last-child { + justify-content: center; + } + & > img { width: 100%; @@ -95,6 +103,10 @@ } } + & > svg { + max-width: 100%; + } + &.code-sample > div { box-shadow: 0 40px 48px -20px rgba(63, 68, 85, 0.4); color: $white; diff --git a/website/source/assets/stylesheets/consul-connect/pages/_home.scss b/website/source/assets/stylesheets/consul-connect/pages/_home.scss 
index c936b661a..5e496cde6 100644 --- a/website/source/assets/stylesheets/consul-connect/pages/_home.scss +++ b/website/source/assets/stylesheets/consul-connect/pages/_home.scss @@ -174,6 +174,10 @@ width: 42vw; } + @media (min-width: 1725px) { + width: 725px; + } + & > div { align-items: center; color: #d2d4dc; @@ -248,6 +252,11 @@ width: 42vw; } + @media (min-width: 1725px) { + padding-top: calc((725px * 0.63569) + 38px); + width: 725px; + } + & > div { background: #0e1016; border-radius: 3px 3px 0 0; @@ -392,6 +401,10 @@ width: 50%; } + & > svg { + width: 135px; + } + &:first-child { background: $consul-red; position: relative; diff --git a/website/source/configuration.html.erb b/website/source/configuration.html.erb index c243843cf..55eeb9e1a 100644 --- a/website/source/configuration.html.erb +++ b/website/source/configuration.html.erb @@ -195,7 +195,7 @@ description: |-
- Service Registry + <%= inline_svg 'consul-connect/svgs/semaphores.svg', height: 383 %>
diff --git a/website/source/index.html.erb b/website/source/index.html.erb index 50512aa94..aed1f2b25 100644 --- a/website/source/index.html.erb +++ b/website/source/index.html.erb @@ -111,7 +111,7 @@ description: |-
- + Service Discovery

Service Discovery for connectivity

Service Registry enables services to register and discover each other.

@@ -121,7 +121,7 @@ description: |-
- + Service Segmentation

Service Segmentation for security

Secure service-to-service communication with automatic TLS encryption and identity-based authorization.

@@ -131,7 +131,7 @@ description: |-
- + Service Configuration

Service Configuration for runtime configuration

Feature rich Key/Value store to easily configure services.

@@ -189,7 +189,7 @@ description: |-
- Workflows, not Technologies + Run and Connect Anywhere
@@ -209,7 +209,7 @@ description: |-
- Open and Extensible + Extend and Integrate
@@ -247,7 +247,7 @@ description: |-
- Consul + <%= inline_svg 'consul-connect/logos/consul-logo.svg' %>

Consul Open Source addresses the technical complexity of connecting services across distributed infrastructure.

@@ -263,7 +263,7 @@ description: |-
- Consul Enterprise + <%= inline_svg 'consul-connect/logos/consul-enterprise-logo.svg' %>

Consul Enterprise addresses the organizational complexity of large user bases and compliance requirements with collaboration and governance features.

diff --git a/website/source/segmentation.html.erb b/website/source/segmentation.html.erb index 639aa3c4c..da7c28bba 100644 --- a/website/source/segmentation.html.erb +++ b/website/source/segmentation.html.erb @@ -96,24 +96,8 @@ description: |-

-
-
- -
$ consul connect proxy \ - -service web \ - -service-addr 127.0.0.1:80 \ - -listen 10.0.1.109:7200 -==> Consul Connect proxy starting... - Configuration mode: Flags - Service: web - Public listener: 10.0.1.109:7200 => 127.0.0.1:80 - -==> Log data will now stream in as it occurs: - - 2018/06/23 09:33:51 [INFO] public listener starting on 10.0.1.109:7200 - 2018/06/23 09:33:51 [INFO] proxy loaded config and ready to serve -
-
+
+ Secure services across any runtime platform
From 3e68e37080f972f266caa3df9d55f1ba1b00aed0 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Mon, 25 Jun 2018 10:55:47 -0700 Subject: [PATCH 609/627] website: add an example of TLS encryption --- website/source/segmentation.html.erb | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/website/source/segmentation.html.erb b/website/source/segmentation.html.erb index da7c28bba..1b54c2af2 100644 --- a/website/source/segmentation.html.erb +++ b/website/source/segmentation.html.erb @@ -140,8 +140,30 @@ description: |-
-
- TODO +
$ consul connect proxy -service web \ + -service-addr 127.0.0.1:8000 + -listen 10.0.1.109:7200 +==> Consul Connect proxy starting... + Configuration mode: Flags + Service: web + Public listener: 10.0.1.109:7200 => 127.0.0.1:8000 +... +$ tshark -V \ + -Y "ssl.handshake.certificate" \ + -O "ssl" \ + -f "dst port 7200" +Frame 39: 899 bytes on wire (7192 bits), 899 bytes captured (7192 bits) on interface 0 +Internet Protocol Version 4, Src: 10.0.1.110, Dst: 10.0.1.109 +Transmission Control Protocol, Src Port: 61918, Dst Port: 7200, Seq: 136, Ack: 916, Len: 843 +Secure Sockets Layer + TLSv1.2 Record Layer: Handshake Protocol: Certificate + Version: TLS 1.2 (0x0303) + Handshake Protocol: Certificate + RDNSequence item: 1 item (id-at-commonName=Consul CA 7) + RelativeDistinguishedName item (id-at-commonName=Consul CA 7) + Id: 2.5.4.3 (id-at-commonName) + DirectoryString: printableString (1) + printableString: Consul CA 7
From 99562a015368fd2618e9778f7bcb8461c0fb83c1 Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Sun, 24 Jun 2018 16:55:26 -0500 Subject: [PATCH 610/627] website: split out CA docs by provider type --- website/source/docs/connect/ca.html.md | 199 ++++++------------ website/source/docs/connect/ca/consul.html.md | 129 ++++++++++++ website/source/docs/connect/ca/vault.html.md | 88 ++++++++ website/source/layouts/docs.erb | 8 + 4 files changed, 286 insertions(+), 138 deletions(-) create mode 100644 website/source/docs/connect/ca/consul.html.md create mode 100644 website/source/docs/connect/ca/vault.html.md diff --git a/website/source/docs/connect/ca.html.md b/website/source/docs/connect/ca.html.md index 06c465b98..3cc1e9bb9 100644 --- a/website/source/docs/connect/ca.html.md +++ b/website/source/docs/connect/ca.html.md @@ -8,30 +8,53 @@ description: |- # Connect Certificate Management -The certificate management in Connect is done centrally through the Consul +Certificate management in Connect is done centrally through the Consul servers using the configured CA (Certificate Authority) provider. A CA provider -controls the active root certificate and performs leaf certificate signing for -proxies to use for mutual TLS. Currently, the only supported provider is the -built-in Consul CA, which generates and stores the root certificate and key on -the Consul servers and can be configured with a custom key/certificate if needed. +manages root and intermediate certificates and performs certificate signing +operations. The Consul leader orchestrates CA provider operations as necessary, +such as when a service needs a new certificate or during CA rotation events. -The CA provider is initialized either on cluster bootstrapping, or (if Connect is -disabled initially) when a leader server is elected that has Connect enabled. 
-During the cluster's initial bootstrapping, the CA provider can be configured -through the [Agent configuration](docs/agent/options.html#connect_ca_config) -and afterward can only be updated through the [Update CA Configuration endpoint] -(/api/connect/ca.html#update-ca-configuration). +The CA provider abstraction enables Consul to support multiple systems for +storing and signing certificates. Consul ships with a +[built-in CA](/docs/connect/ca/consul.html) which generates and stores the +root certificate and private key on the Consul servers. Consul also also +built-in support for +[Vault as a CA](/docs/connect/ca/vault.html). With Vault, the root certificate +and private key material remain with the Vault cluster. A future version of +Consul will support pluggable CA systems using external binaries. -### Consul CA (Certificate Authority) Provider +## CA Bootstrapping -By default, if no provider is configured when Connect is enabled, the Consul -provider will be used and a private key/root certificate will be generated -and used as the active root certificate for the cluster. To see this in action, -start Consul in [dev mode](/docs/agent/options.html#_dev) and query the -[list CA Roots endpoint](/api/connect/ca.html#list-ca-root-certificates): +CA initialization happens automatically when a new Consul leader is elected +as long as +[Connect is enabled](/docs/connect/configuration.html#enable-connect-on-the-cluster) +and the CA system hasn't already been initialized. This initialization process +will generate the initial root certificates and setup the internal Consul server +state. + +For the initial bootstrap, the CA provider can be configured through the +[Agent configuration](docs/agent/options.html#connect_ca_config). After +initialization, the CA can only be updated through the +[Update CA Configuration API endpoint](/api/connect/ca.html#update-ca-configuration). 
+If a CA is already initialized, any changes to the CA configuration in the +agent configuration file (including removing the configuration completely) +will have no effect. + +If no specific provider is configured when Connect is enabled, the built-in +Consul CA provider will be used and a private key and root certificate will +be generated automatically. + +## Viewing Root Certificates + +Root certificates can be queried with the +[list CA Roots endpoint](/api/connect/ca.html#list-ca-root-certificates). +With this endpoint, you can see the list of currently trusted root certificates. +When a cluster first initializes, this will only list one trusted root. Multiple +roots may appear as part of +[rotation](#). ```bash -$ curl localhost:8500/v1/connect/ca/roots +$ curl http://localhost:8500/v1/connect/ca/roots { "ActiveRootID": "31:6c:06:fb:49:94:42:d5:e4:55:cc:2e:27:b3:b2:2e:96:67:3e:7e", "TrustDomain": "36cb52cd-4058-f811-0432-6798a240c5d3.consul", @@ -53,17 +76,15 @@ $ curl localhost:8500/v1/connect/ca/roots } ``` -#### Specifying a Private Key and Root Certificate +## CA Configuration -The above root certificate has been automatically generated during the cluster's -bootstrap, but it is possible to configure the Consul CA provider to use a specific -private key and root certificate. - -To view the current CA configuration, use the [Get CA Configuration endpoint] -(/api/connect/ca.html#get-ca-configuration): +After initialization, the CA provider configuration can be viewed with the +[Get CA Configuration API endpoint](/api/connect/ca.html#get-ca-configuration). +Consul will filter sensitive values from this endpoint depending on the +provider in use, so the configuration may not be complete. 
```bash -$ curl localhost:8500/v1/connect/ca/configuration +$ curl http://localhost:8500/v1/connect/ca/configuration { "Provider": "consul", "Config": { @@ -74,66 +95,23 @@ $ curl localhost:8500/v1/connect/ca/configuration } ``` -This is the default Connect CA configuration if nothing is explicitly set when -Connect is enabled - the PrivateKey and RootCert fields have not been set, so those have -been generated (as seen above in the roots list). +The CA provider can be reconfigured using the +[Update CA Configuration API endpoint](/api/connect/ca.html#update-ca-configuration). +Specific options for reconfiguration can be found in the specific +CA provider documentation in the sidebar to the left. -There are two ways to have the Consul CA use a custom private key and root certificate: -either through the `ca_config` section of the [Agent configuration] -(docs/agent/options.html#connect_ca_config) (which can only be used during the cluster's -initial bootstrap) or through the [Update CA Configuration endpoint] -(/api/connect/ca.html#update-ca-configuration). - -Currently consul requires that root certificates are valid [SPIFFE SVID Signing certificates] -(https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md) and that the URI encoded -in the SAN is the cluster identifier created at bootstrap with the ".consul" TLD. In this -example, we will set the URI SAN to `spiffe://36cb52cd-4058-f811-0432-6798a240c5d3.consul`. 
- -In order to use the Update CA Configuration HTTP endpoint, the private key and certificate -must be passed via JSON: - -```bash -$ jq -n --arg key "$(cat root.key)" --arg cert "$(cat root.crt)" ' -{ - "Provider": "consul", - "Config": { - "PrivateKey": $key, - "RootCert": $cert, - "RotationPeriod": "2160h" - } -}' > ca_config.json -``` - -The resulting `ca_config.json` file can then be used to update the active root certificate: - -```bash -$ cat ca_config.json -{ - "Provider": "consul", - "Config": { - "PrivateKey": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEArqiy1c3pbT3cSkjdEM1APALUareU...", - "RootCert": "-----BEGIN CERTIFICATE-----\nMIIDijCCAnKgAwIBAgIJAOFZ66em1qC7MA0GCSqGSIb3...", - "RotationPeriod": "2160h" - } -} - -$ curl --request PUT --data @ca_config.json localhost:8500/v1/connect/ca/configuration - -... - -[INFO] connect: CA rotated to new root under provider "consul" -``` - -The cluster is now using the new private key and root certificate. Updating the CA config -this way also triggered a certificate rotation, which will be covered in the next section. - -#### Root Certificate Rotation +## Root Certificate Rotation Whenever the CA's configuration is updated in a way that causes the root key to change, a special rotation process will be triggered in order to smoothly transition to -the new certificate. +the new certificate. This rotation is automatically orchestrated by Consul. -First, an intermediate CA certificate is requested from the new root, which is then +This also automatically occurs when a completely different CA provider is +configured (since this changes the root key). Therefore, this automatic rotation +process can also be used to cleanly transition between CA providers. For example, +updating Connect to use Vault instead of the built-in CA. + +During rotation, an intermediate CA certificate is requested from the new root, which is then cross-signed by the old root. 
This cross-signed certificate is then distributed alongside any newly-generated leaf certificates used by the proxies once the new root becomes active, and provides a chain of trust back to the old root certificate in the @@ -145,9 +123,9 @@ certificate or CA provider has been set up, the new root becomes the active one and is immediately used for signing any new incoming certificate requests. If we check the [list CA roots endpoint](/api/connect/ca.html#list-ca-root-certificates) -after the config update in the previous section, we can see both the old and new root +after updating the configuration with a new root certificate, we can see both the old and new root certificates are present, and the currently active root has an intermediate certificate -which has been generated and cross-signed automatically by the old root during the +which has been generated and cross-signed automatically by the old root during the rotation process: ```bash @@ -190,58 +168,3 @@ $ curl localhost:8500/v1/connect/ca/roots The old root certificate will be automatically removed once enough time has elapsed for any leaf certificates signed by it to expire. - -### External CA (Certificate Authority) Providers - -#### Vault - -Currently, the only supported external CA (Certificate Authority) provider is Vault. The -Vault provider can be used by setting the `ca_provider = "vault"` field in the Connect -configuration: - -```hcl -connect { - enabled = true - ca_provider = "vault" - ca_config { - address = "http://localhost:8200" - token = "..." - root_pki_path = "connect-root" - intermediate_pki_path = "connect-intermediate" - } -} -``` - -The `root_pki_path` can be set to either a new or existing PKI backend; if no CA has been -initialized at the path, a new root CA will be generated. From this root PKI, Connect will -generate an intermediate CA at `intermediate_pki_path`. 
This intermediate CA is used so that -Connect can manage its lifecycle/rotation - it will never touch or overwrite any existing data -at `root_pki_path`. The intermediate CA is used for signing leaf certificates used by the -services and proxies in Connect to verify identity. - -To update the configuration for the Vault provider, the process is the same as for the Consul CA -provider above: use the [Update CA Configuration endpoint](/api/connect/ca.html#update-ca-configuration) -or the `consul connect ca set-config` command: - -```bash -$ cat ca_config.json -{ - "Provider": "vault", - "Config": { - "Address": "http://localhost:8200", - "Token": "...", - "RootPKIPath": "connect-root-2", - "IntermediatePKIPath": "connect-intermediate" - } -} - -$ consul connect ca set-config -config-file=ca_config.json - -... - -[INFO] connect: CA rotated to new root under provider "vault" -``` - -If the PKI backend at `connect-root-2` in this case has a different root certificate (or if it's -unmounted and hasn't been initialized), the rotation process will be triggered, as described above -in the [Root Certificate Rotation](#root-certificate-rotation) section. diff --git a/website/source/docs/connect/ca/consul.html.md b/website/source/docs/connect/ca/consul.html.md new file mode 100644 index 000000000..5d7257e1a --- /dev/null +++ b/website/source/docs/connect/ca/consul.html.md @@ -0,0 +1,129 @@ +--- +layout: "docs" +page_title: "Connect - Certificate Management" +sidebar_current: "docs-connect-ca-consul" +description: |- + Consul ships with a built-in CA system so that Connect can be easily enabled out of the box. The built-in CA generates and stores the root certificate and private key on Consul servers. It can also be configured with a custom certificate and private key if needed. +--- + +# Built-In CA + +Consul ships with a built-in CA system so that Connect can be +easily enabled out of the box. 
The built-in CA generates and stores the +root certificate and private key on Consul servers. It can also be +configured with a custom certificate and private key if needed. + +If Connect is enabled and no CA provider is specified, the built-in +CA is the default provider used. The provider can be +[updated and rotated](/docs/connect/ca.html#root-certificate-rotation) +at any point to migrate to a new provider. + +-> This page documents the specifics of the built-in CA provider. +Please read the [certificate management overview](/docs/connect/ca.html) +page first to understand how Consul manages certificates with configurable +CA providers. + +## Configuration + +The built-in CA provider has no required configuration. Enabling Connect +alone will configure the built-in CA provider and will automatically generate +a root certificate and private key: + +```hcl +connect { + enabled = true +} +``` + +A number of optional configuration options are supported. The +first key is the value used in API calls while the second key (after the `/`) +is used if configuring in an agent configuration file. + + * `PrivateKey` / `private_key` (`string: ""`) - A PEM-encoded private key + for signing operations. This must match the private key used for the root + certificate if it is manually specified. If this is blank, a private key + is automatically generated. + + * `RootCert` / `root_cert` (`string: ""`) - A PEM-encoded root certificate + to use. If this is blank, a root certificate is automatically generated + using the private key specified. If this is specified, the certificate + must be a valid + [SPIFFE SVID signing certificate](https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md) + and the URI in the SAN must match the cluster identifier created at + bootstrap with the ".consul" TLD. + +## Specifying a Custom Private Key and Root Certificate + +By default, a root certificate and private key will be automatically +generated during the cluster's bootstrap. 
It is possible to configure +the Consul CA provider to use a specific private key and root certificate. +This is particularly useful if you have an external PKI system that doesn't +currently integrate with Consul directly. + +To view the current CA configuration, use the [Get CA Configuration endpoint] +(/api/connect/ca.html#get-ca-configuration): + +```bash +$ curl localhost:8500/v1/connect/ca/configuration +{ + "Provider": "consul", + "Config": { + "RotationPeriod": "2160h" + }, + "CreateIndex": 5, + "ModifyIndex": 5 +} +``` + +This is the default Connect CA configuration if nothing is explicitly set when +Connect is enabled - the PrivateKey and RootCert fields have not been set, so those have +been generated (as seen above in the roots list). + +There are two ways to have the Consul CA use a custom private key and root certificate: +either through the `ca_config` section of the [Agent configuration] +(docs/agent/options.html#connect_ca_config) (which can only be used during the cluster's +initial bootstrap) or through the [Update CA Configuration endpoint] +(/api/connect/ca.html#update-ca-configuration). + +Currently consul requires that root certificates are valid [SPIFFE SVID Signing certificates] +(https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md) and that the URI encoded +in the SAN is the cluster identifier created at bootstrap with the ".consul" TLD. In this +example, we will set the URI SAN to `spiffe://36cb52cd-4058-f811-0432-6798a240c5d3.consul`. 
+ +In order to use the Update CA Configuration HTTP endpoint, the private key and certificate +must be passed via JSON: + +```bash +$ jq -n --arg key "$(cat root.key)" --arg cert "$(cat root.crt)" ' +{ + "Provider": "consul", + "Config": { + "PrivateKey": $key, + "RootCert": $cert, + "RotationPeriod": "2160h" + } +}' > ca_config.json +``` + +The resulting `ca_config.json` file can then be used to update the active root certificate: + +```bash +$ cat ca_config.json +{ + "Provider": "consul", + "Config": { + "PrivateKey": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEArqiy1c3pbT3cSkjdEM1APALUareU...", + "RootCert": "-----BEGIN CERTIFICATE-----\nMIIDijCCAnKgAwIBAgIJAOFZ66em1qC7MA0GCSqGSIb3...", + "RotationPeriod": "2160h" + } +} + +$ curl --request PUT --data @ca_config.json localhost:8500/v1/connect/ca/configuration + +... + +[INFO] connect: CA rotated to new root under provider "consul" +``` + +The cluster is now using the new private key and root certificate. Updating the CA config +this way also triggered a certificate rotation. diff --git a/website/source/docs/connect/ca/vault.html.md b/website/source/docs/connect/ca/vault.html.md new file mode 100644 index 000000000..43440bc58 --- /dev/null +++ b/website/source/docs/connect/ca/vault.html.md @@ -0,0 +1,88 @@ +--- +layout: "docs" +page_title: "Connect - Certificate Management" +sidebar_current: "docs-connect-ca-vault" +description: |- + Consul can be used with Vault to manage and sign certificates. The Vault CA provider uses the Vault PKI secrets engine to generate and sign certificates. +--- + +# Vault as a Connect CA + +Consul can be used with [Vault](https://www.vaultproject.io) to +manage and sign certificates. +The Vault CA provider uses the +[Vault PKI secrets engine](https://www.vaultproject.io/docs/secrets/pki/index.html) +to generate and sign certificates. + +-> This page documents the specifics of the built-in CA provider. 
+Please read the [certificate management overview](/docs/connect/ca.html) +page first to understand how Consul manages certificates with configurable +CA providers. + +## Requirements + +Prior to using Vault as a CA provider for Consul, the following requirements +must be met: + + * **Vault 0.10.3 or later.** Consul uses URI SANs in the PKI engine which + were introduced in Vault 0.10.3. Prior versions of Vault are not + compatible with Connect. + +## Configuration + +The Vault CA is enabled by setting the `ca_provider` to `"vault"` and +setting the required configuration values. An example configuration +is shown below: + +```hcl +connect { + enabled = true + ca_provider = "vault" + ca_config { + address = "http://localhost:8200" + token = "..." + root_pki_path = "connect-root" + intermediate_pki_path = "connect-intermediate" + } +} +``` + +The set of configuration options is listed below. The +first key is the value used in API calls while the second key (after the `/`) +is used if configuring in an agent configuration file. + + * `Address` / `address` (`string: `) - The address of the Vault + server. + + * `Token` / `token` (`string: `) - A token for accessing Vault. + This is write-only and will not be exposed when reading the CA configuration. + This token must have proper privileges for the PKI paths configured. + + * `RootPKIPath` / `root_pki_path` (`string: `) - The path to + a PKI secrets engine for the root certificate. If the path doesn't + exist, Consul will attempt to mount and configure this automatically. + + * `IntermediatePKIPath` / `intermediate_pki_path` (`string: `) - + The path to a PKI secrets engine for the generated intermediate certificate. + This certificate will be signed by the configured root PKI path. If this + path doesn't exist, Consul will attempt to mount and configure this + automatically. 
+ +## Root and Intermediate PKI Paths + +The Vault CA provider uses two separately configured +[PKI secrets engines](https://www.vaultproject.io/docs/secrets/pki/index.html) +for managing Connect certificates. + +The `RootPKIPath` is the PKI engine for the root certificate. Consul will +use this root certificate to sign the intermediate certificate. Consul will +never attempt to write or modify any data within the root PKI path. + +The `IntermediatePKIPath` is the PKI engine used for storing the intermediate +signed with the root certificate. The intermediate is used to sign all leaf +certificates and Consul may periodically generate new intermediates for +automatic rotation. Therefore, Consul requires write access to this path. + +If either path does not exist, then Consul will attempt to mount and +initialize it. This requires additional privileges by the Vault token in use. +If the paths already exist, Consul will use them as configured. diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 5df44a3ec..c59f79bdc 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -267,6 +267,14 @@ > Certificate Management + > Native App Integration From 6dfc0d848bf51434b81caf5e2e209cef3fdd2c82 Mon Sep 17 00:00:00 2001 From: Kyle Havlovitz Date: Mon, 25 Jun 2018 11:12:53 -0700 Subject: [PATCH 611/627] website: correct a few last things in CA docs --- website/source/docs/connect/ca.html.md | 2 +- website/source/docs/connect/ca/consul.html.md | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/website/source/docs/connect/ca.html.md b/website/source/docs/connect/ca.html.md index 3cc1e9bb9..b255b8ac1 100644 --- a/website/source/docs/connect/ca.html.md +++ b/website/source/docs/connect/ca.html.md @@ -17,7 +17,7 @@ such as when a service needs a new certificate or during CA rotation events. The CA provider abstraction enables Consul to support multiple systems for storing and signing certificates. 
Consul ships with a [built-in CA](/docs/connect/ca/consul.html) which generates and stores the -root certificate and private key on the Consul servers. Consul also also +root certificate and private key on the Consul servers. Consul also has built-in support for [Vault as a CA](/docs/connect/ca/vault.html). With Vault, the root certificate and private key material remain with the Vault cluster. A future version of diff --git a/website/source/docs/connect/ca/consul.html.md b/website/source/docs/connect/ca/consul.html.md index 5d7257e1a..ea48850a9 100644 --- a/website/source/docs/connect/ca/consul.html.md +++ b/website/source/docs/connect/ca/consul.html.md @@ -50,7 +50,8 @@ is used if configuring in an agent configuration file. must be a valid [SPIFFE SVID signing certificate](https://github.com/spiffe/spiffe/blob/master/standards/X509-SVID.md) and the URI in the SAN must match the cluster identifier created at - bootstrap with the ".consul" TLD. + bootstrap with the ".consul" TLD. The cluster identifier can be found + using the [CA List Roots endpoint](/api/connect/ca.html#list-ca-root-certificates). ## Specifying a Custom Private Key and Root Certificate From 934fa52c9862e9638aa8b6c273231632c50d1bb6 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Mon, 25 Jun 2018 11:23:32 -0700 Subject: [PATCH 612/627] website: getting started next/previous step change --- website/source/intro/getting-started/join.html.md | 3 +-- website/source/intro/getting-started/services.html.md | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/website/source/intro/getting-started/join.html.md b/website/source/intro/getting-started/join.html.md index 01ee74e78..73203ba01 100644 --- a/website/source/intro/getting-started/join.html.md +++ b/website/source/intro/getting-started/join.html.md @@ -12,8 +12,7 @@ description: > # Consul Cluster We've started our first agent and registered and queried a service on that -agent. 
This showed how easy it is to use Consul but didn't show how this could -be extended to a scalable, production-grade service discovery infrastructure. +agent. Additionally, we've configured Consul Connect to automatically authorize and encrypt connections between services. This showed how easy it is to use Consul but didn't show how this could be extended to a scalable, production-grade service mesh infrastructure. In this step, we'll create our first real cluster with multiple members. When a Consul agent is started, it begins without knowledge of any other node: diff --git a/website/source/intro/getting-started/services.html.md b/website/source/intro/getting-started/services.html.md index 7046e2575..000ceaa1a 100644 --- a/website/source/intro/getting-started/services.html.md +++ b/website/source/intro/getting-started/services.html.md @@ -163,5 +163,5 @@ dynamically. ## Next Steps We've now configured a single agent and registered a service. This is good -progress, but let's explore the full value of Consul by [setting up our -first cluster](/intro/getting-started/join.html)! +progress, but let's explore the full value of Consul by learning how to +[automatically encrypt and authorize service-to service communication](/intro/getting-started/connect.html) with Consul Connect. From 3db81ca030bf0eb9fbc0ef1f195c30f6d36a1a32 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Mon, 25 Jun 2018 11:34:26 -0700 Subject: [PATCH 613/627] website: minor example and copy fix for multi-dc --- website/source/discovery.html.erb | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/website/source/discovery.html.erb b/website/source/discovery.html.erb index 5afc8442a..0facebd1c 100644 --- a/website/source/discovery.html.erb +++ b/website/source/discovery.html.erb @@ -155,7 +155,7 @@ web-frontend.service.consul. 0 IN A 10.0.1.109

Multi Datacenter

-

Consul supports to multiple datacenters out of the box with no complicated configuration. Look up services in other datacenters or keep the request local. Advanced features like Prepared Queries enable automatic failover to other datacenters.

+

Consul supports multiple datacenters out of the box with no complicated configuration. Look up services in other datacenters or keep the request local. Advanced features like Prepared Queries enable automatic failover to other datacenters.

Learn more

@@ -168,6 +168,19 @@ web-frontend.service.consul. 0 IN A 10.0.1.109$ curl http://localhost:8500/v1/catalog/datacenters ["dc1", "dc2"] $ curl http://localhost:8500/v1/catalog/nodes?dc=dc2 +[ + { + "ID": "7081dcdf-fdc0-0432-f2e8-a357d36084e1", + "Node": "10-0-1-109", + "Address": "10.0.1.109", + "Datacenter": "dc2", + "TaggedAddresses": { + "lan": "10.0.1.109", + "wan": "10.0.1.109" + }, + "CreateIndex": 112, + "ModifyIndex": 125 + }, ...
From 3127fccec4126a2b377e2df7030a0fd4f61211c0 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Mon, 25 Jun 2018 11:36:36 -0700 Subject: [PATCH 614/627] website: whitespace fix --- website/source/discovery.html.erb | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/source/discovery.html.erb b/website/source/discovery.html.erb index 0facebd1c..f8023ac8c 100644 --- a/website/source/discovery.html.erb +++ b/website/source/discovery.html.erb @@ -166,8 +166,7 @@ web-frontend.service.consul. 0 IN A 10.0.1.109
$ curl http://localhost:8500/v1/catalog/datacenters -["dc1", "dc2"] -$ curl http://localhost:8500/v1/catalog/nodes?dc=dc2 +["dc1", "dc2"]$ curl http://localhost:8500/v1/catalog/nodes?dc=dc2 [ { "ID": "7081dcdf-fdc0-0432-f2e8-a357d36084e1", From d3cec142d4d4ec7fb6484b9535a19421081065f3 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Mon, 25 Jun 2018 12:05:29 -0700 Subject: [PATCH 615/627] website: fix an assortment of broken links --- website/source/api/connect/ca.html.md | 4 ++-- website/source/docs/commands/connect/ca.html.md.erb | 4 ++-- website/source/docs/connect/ca.html.md | 2 +- website/source/docs/connect/ca/consul.html.md | 2 +- website/source/docs/connect/intentions.html.md | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/website/source/api/connect/ca.html.md b/website/source/api/connect/ca.html.md index 00fdda13c..d8bf80fed 100644 --- a/website/source/api/connect/ca.html.md +++ b/website/source/api/connect/ca.html.md @@ -14,7 +14,7 @@ Certificate Authority mechanism. ## List CA Root Certificates -This endpoint returns the current list of trusted CA root certificates in +This endpoint returns the current list of trusted CA root certificates in the cluster. | Method | Path | Produces | @@ -104,7 +104,7 @@ $ curl \ This endpoint updates the configuration for the CA. If this results in a new root certificate being used, the [Root Rotation] -(/docs/guides/connect-ca.html#rotation) process will be triggered. +(/docs/connect/ca.html#root-certificate-rotation) process will be triggered. 
| Method | Path | Produces | | ------ | ---------------------------- | -------------------------- | diff --git a/website/source/docs/commands/connect/ca.html.md.erb b/website/source/docs/commands/connect/ca.html.md.erb index e279d74fd..08874de05 100644 --- a/website/source/docs/commands/connect/ca.html.md.erb +++ b/website/source/docs/commands/connect/ca.html.md.erb @@ -13,7 +13,7 @@ Command: `consul connect ca` The CA connect command is used to interact with Consul Connect's Certificate Authority subsystem. The command can be used to view or modify the current CA configuration. See the -[Connect CA Guide](/docs/guides/connect-ca.html) for more information. +[Connect CA documentation](/docs/connect/ca.html) for more information. ```text Usage: consul connect ca [options] [args] @@ -64,7 +64,7 @@ The output looks like this: ## set-config Modifies the current CA configuration. If this results in a new root certificate -being used, the [Root Rotation](/docs/guides/connect-ca.html#rotation) process +being used, the [Root Rotation](/docs/connect/ca.html#root-certificate-rotation) process will be triggered. Usage: `consul connect ca set-config [options]` diff --git a/website/source/docs/connect/ca.html.md b/website/source/docs/connect/ca.html.md index b255b8ac1..74c412434 100644 --- a/website/source/docs/connect/ca.html.md +++ b/website/source/docs/connect/ca.html.md @@ -33,7 +33,7 @@ will generate the initial root certificates and setup the internal Consul server state. For the initial bootstrap, the CA provider can be configured through the -[Agent configuration](docs/agent/options.html#connect_ca_config). After +[Agent configuration](/docs/agent/options.html#connect_ca_config). After initialization, the CA can only be updated through the [Update CA Configuration API endpoint](/api/connect/ca.html#update-ca-configuration). 
If a CA is already initialized, any changes to the CA configuration in the diff --git a/website/source/docs/connect/ca/consul.html.md b/website/source/docs/connect/ca/consul.html.md index ea48850a9..e95677681 100644 --- a/website/source/docs/connect/ca/consul.html.md +++ b/website/source/docs/connect/ca/consul.html.md @@ -82,7 +82,7 @@ been generated (as seen above in the roots list). There are two ways to have the Consul CA use a custom private key and root certificate: either through the `ca_config` section of the [Agent configuration] -(docs/agent/options.html#connect_ca_config) (which can only be used during the cluster's +(/docs/agent/options.html#connect_ca_config) (which can only be used during the cluster's initial bootstrap) or through the [Update CA Configuration endpoint] (/api/connect/ca.html#update-ca-configuration). diff --git a/website/source/docs/connect/intentions.html.md b/website/source/docs/connect/intentions.html.md index 589b3329f..ed950e8f3 100644 --- a/website/source/docs/connect/intentions.html.md +++ b/website/source/docs/connect/intentions.html.md @@ -117,7 +117,7 @@ Consul supporting namespaces. ## Intention Management Permissions -Intention management can be protected by [ACLs](/docs/guides/acls.html). +Intention management can be protected by [ACLs](/docs/guides/acl.html). Permissions for intentions are _destination-oriented_, meaning the ACLs for managing intentions are looked up based on the destination value of the intention, not the source. 
From f8355d608a22f849d18e8397ff54b739ba9e1e2e Mon Sep 17 00:00:00 2001 From: mkeeler Date: Mon, 25 Jun 2018 19:45:20 +0000 Subject: [PATCH 616/627] Release v1.2.0 --- CHANGELOG.md | 2 +- agent/bindata_assetfs.go | 168 +++++++++++++++++++-------------------- version/version.go | 2 +- 3 files changed, 86 insertions(+), 86 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0fcec9cd8..c3cf4e3e8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## UNRELEASED +## 1.2.0 (June 26, 2018) FEATURES: diff --git a/agent/bindata_assetfs.go b/agent/bindata_assetfs.go index 0d765acc3..0d3ac92ff 100644 --- a/agent/bindata_assetfs.go +++ b/agent/bindata_assetfs.go @@ -44,8 +44,8 @@ // pkg/web_ui/v2/assets/apple-touch-icon-76x76-c5fff53d5f3e96dbd2fe49c5cc472022.png // pkg/web_ui/v2/assets/apple-touch-icon-d2b583b1104a1e6810fb3984f8f132ae.png // pkg/web_ui/v2/assets/consul-logo-707625c5eb04f602ade1f89a8868a329.png -// pkg/web_ui/v2/assets/consul-ui-07dda31de740f1a5f8d66c166d785a89.css -// pkg/web_ui/v2/assets/consul-ui-e51248f3d8659994e198565dbadc4fcf.js +// pkg/web_ui/v2/assets/consul-ui-30b6cacf986e547028905bc5b588c278.css +// pkg/web_ui/v2/assets/consul-ui-61975faed99637b13431ce396e4422b4.js // pkg/web_ui/v2/assets/favicon-128-08e1368e84f412f6ad30279d849b1df9.png // pkg/web_ui/v2/assets/favicon-16x16-672c31374646b24b235b9511857cdade.png // pkg/web_ui/v2/assets/favicon-196x196-57be5a82d3da06c261f9e4eb972a8a3a.png @@ -132,7 +132,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _web_uiV1IndexHtml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x5b\x73\xdb\x38\xb2\xff\x7b\x3e\x45\xff\x39\x0f\x7e\xf8\x87\x94\x25\xdb\xb1\xe3\x95\x75\x2a\x93\x64\x76\xb2\xc9\x64\xa6\x12\x67\xf6\xcc\xd3\x29\x88\x6c\x89\x58\x53\x00\x17\x00\x25\x6b\x54\xfa\xee\xa7\x00\x90\x14\x2f\x20\x25\x39\xb1\xcf\xec\xec\xa6\x2a\x16\x2f\xb8\x36\x1a\xdd\xbf\x6e\x34\xc0\xf1\xff\x7b\xf3\xf3\xeb\xdb\xdf\x7e\x79\x0b\xb1\x5a\x24\x93\x67\xe3\xe2\x07\x49\x34\x79\x06\x30\x5e\xa0\x22\x10\xc6\x44\x48\x54\x37\x5e\xa6\x66\xfe\x95\xb7\x7b\x11\x2b\x95\xfa\xf8\xcf\x8c\x2e\x6f\xbc\xff\xf6\xbf\xbc\xf2\x5f\xf3\x45\x4a\x14\x9d\x26\xe8\x41\xc8\x99\x42\xa6\x6e\xbc\x77\x6f\x6f\x30\x9a\x63\x25\x1f\x23\x0b\xbc\xf1\x96\x14\x57\x29\x17\xaa\x92\x74\x45\x23\x15\xdf\x44\xb8\xa4\x21\xfa\xe6\xe6\x39\x50\x46\x15\x25\x89\x2f\x43\x92\xe0\xcd\x69\x70\xf9\x1c\x32\x89\xc2\xdc\x93\x69\x82\x37\x8c\xdb\xa2\x15\x55\x09\x4e\x5e\x73\x26\xb3\x04\xa6\x6b\xf8\x91\xc8\x98\xbe\xe6\x22\x1d\x0f\xec\x2b\x9d\x28\xa1\xec\x0e\x04\x26\x37\x9e\x54\xeb\x04\x65\x8c\xa8\x3c\x88\x05\xce\xf4\x13\xa2\x68\x38\x98\x72\xae\xa4\x12\x24\x0d\x16\x94\x05\xa1\x94\xde\xc1\x39\x89\x44\x57\x06\x1a\x72\xe6\x81\x5a\xa7\x78\xe3\xd1\x05\x99\xe3\x20\x65\xf3\x46\xde\x19\x59\xea\x64\xfe\xd9\xe8\xfe\x6c\x14\x98\xf7\x92\xfe\x8e\xf2\xc6\x33\x4f\xbe\xa6\xc8\xe1\x8b\xfb\xe1\x8b\x5a\x91\xe6\x89\x37\x79\xa6\xcb\x94\xa1\xa0\xa9\xca\x8b\x52\x78\xaf\x06\xff\x20\x4b\x62\x9f\x9a\x5a\x01\x06\x03\x78\x1d\x13\x36\x47\x50\x31\x95\xb0\x24\x49\x86\xa0\x38\xac\x79\x26\xf4\xe0\x69\x82\xc7\x5c\x2a\xa0\x33\xfd\x0c\x88\x40\x60\x5c\x81\xc8\x18\xa3\x6c\x5e\x94\xa1\x62\x84\x2f\xef\x80\x33\x73\x25\xc9\x02\x6d\x2e\x22\x81\x14\xc5\x50\x26\x15\x61\x21\x06\x45\x26\x0c\xe6\x01\x78\x9a\xd3\xae\x07\x83\xc5\x5a\xa2\x58\xa2\x08\x42\xbe\xb8\xbe\xba\x38\x3d\xf5\x4c\xb2\x25\x29\x9a\xf1\xa3\x2e\xef\x06\x4e\x4e\x74\xcf\x06\xb6\x13\x93\x67\xcf\xc6\x03\xcb\xd1\xe3\x29\x8f\xd6\xb6\xdb\x8c\x17\x6f\x75\x09\xe3\x10\x99\x42\x61\x6f\x00\xc6\xf1\x68\xf2\x37\xb2\x24\x9f\x2d\x6d\x3e\x69\x1e\x17\x18\x8d\x07\xf1\xa8\x4c
\x92\x4e\x7e\x49\x90\x48\x04\x64\x9a\x0d\xa1\x92\x9e\x32\x4b\x9a\x15\x4e\x61\x2a\xf8\x4a\xa2\xd0\xe4\xca\x24\x42\xce\x9e\x5f\xde\x05\xe3\x41\x9a\xd7\x3d\xd8\x55\x3e\x1e\xec\xda\xa5\x6f\x23\xba\x84\x30\x21\x52\xde\x78\x2b\x41\xd2\x14\x45\x3e\x24\xd5\x37\x7a\xfa\x10\xca\xca\x77\xcd\xb7\x89\xbf\x88\xfc\xe1\xa8\x7c\x9b\xbf\xa7\xd1\x8d\x47\xd2\xb4\xfa\x78\x10\xd1\x65\x59\xc6\xee\x26\xbf\x6c\x55\x9c\x66\x32\xf6\x26\x65\xc2\xae\x64\x33\xce\x55\xa5\x71\xfb\x1a\x7f\x48\x07\x1a\x6d\xed\x68\xad\x83\xb7\xef\xfd\x98\xb0\x28\xc1\x29\x11\x32\x2f\x6f\xb3\xe1\x99\x4a\x50\x6d\xb7\x75\xa6\xd9\x9f\x1f\x22\xa2\x88\xaf\x70\x91\x26\x44\xa1\x6f\xc5\x1a\x0a\xc1\x5d\xa3\x24\xf8\xaa\x6f\x7c\xae\x20\xbf\xe0\xb3\x99\x44\xe5\x8f\xcc\xbd\x5c\xf8\x43\x7b\x75\x2f\x1d\x43\x98\x17\xa1\x5b\xe6\x5b\x26\x82\x25\x0a\x45\x43\x92\xe4\xf7\x35\x9a\x6d\x36\xdf\xd1\x99\x91\xb6\x82\x27\x09\x8a\x60\xc1\x23\x4c\x02\x2d\x2e\x32\x79\x8b\xf7\x0a\x0c\x0d\x2a\x44\x4e\x8b\x2a\xa6\x3c\x89\xbc\xc9\x8f\xb7\xb7\xbf\x80\xe9\x20\x84\x3c\x42\x98\x09\xbe\xc8\x59\xfa\x1a\xc6\xfa\xd1\x64\xb3\xe9\x28\x7f\xbb\x85\xce\x77\xba\xee\xed\x76\x3c\x30\x25\x94\x13\xa3\x68\xf4\x80\xce\x6a\xed\xea\xe8\x86\x40\x99\x72\x26\xf1\x90\x8e\xbc\x35\x7d\x58\xa0\x94\x64\xde\xd1\x8d\x84\x2e\xa8\xda\x53\xcb\xe8\xe2\xe2\xf0\x66\x8f\xd3\x42\x39\x09\x54\x99\x60\x18\x01\x61\x96\x9a\x01\xfc\xc6\x33\x58\x90\x35\xc4\x64\x89\xb0\xa4\x92\x2a\xfd\x1a\xbe\x7c\xfa\x00\x2a\x26\x0a\xa8\x84\x84\x93\x88\xb2\xb9\xce\x94\xb1\x3b\xc6\x57\x0c\x04\x4a\x9e\x89\x10\x9f\x83\x34\xd2\xb8\xd6\xe7\x90\x30\x50\x62\x0d\x73\xae\x73\x4d\x49\x78\xa7\x85\x90\x16\xbd\x82\x73\x15\xc0\xbb\x99\x95\x52\xaf\x5e\x7f\x00\xc5\xef\x90\xc1\x8a\x48\x23\xb7\x67\x3c\x63\x91\x91\xe4\xba\x0c\x81\x12\x15\x50\xf5\x1c\x08\x8b\x74\x7e\xd6\xaa\x6a\x45\x93\x04\xa6\x08\x02\x23\x2a\x30\xd4\x6d\xcf\xab\x92\xa8\x14\x65\x73\x09\xa9\x26\xb4\xe2\x60\xb9\x94\x00\xc3\xd5\xae\xe6\xa0\x41\xbc\xba\xf4\x10\x0b\x7f\x2e\x78\x96\xd6\x78\x19\x60\x3c\xcd\x94\xe2\x0c\x36\x1b\x20\xa1\xa2\x9c\x81\x67\x9a\x7a\xab\x4b\xf4\xc0\xf0
\x1b\x4c\x29\x8b\x7c\xa2\x94\x28\x8a\xbb\x9e\x2a\x06\xfa\x8f\x1f\x69\x8d\x26\x74\xc2\xc9\x27\xd3\x45\xdd\x1c\x93\x79\x3c\xb0\x45\xef\xad\x4f\x13\xf5\x47\xbe\xc0\x43\x6a\xc3\x19\xc9\x12\x65\xaa\xfb\x2b\x87\xef\xf3\xe1\xf8\xc4\xb9\x72\x55\xd7\x2d\xde\x5c\xb2\xee\x5b\xc8\xad\x28\x1c\x64\x8c\x64\x2a\xe6\x82\xfe\x8e\xd1\x1f\x53\x82\x35\xe7\xf1\xab\x30\x44\x29\xe1\x0d\x32\xaa\xd5\x73\x9d\x89\xd2\xc9\x6f\x75\xfe\x8e\x38\x5a\x06\x37\xb3\x4c\xb3\x27\x49\x53\xc1\x53\x41\x89\x42\x48\x51\x2c\xa8\x94\x94\x33\xa9\x47\x26\x45\xa1\x59\xcf\x24\xc3\xfb\xd4\x72\xb5\x1d\xf8\x16\xbb\xa6\x93\x0f\x48\x04\x83\x05\x17\xa8\xb5\xbf\xce\x33\x26\x39\x1a\xd3\xd8\x45\x5e\x0f\x06\xab\xd5\x2a\xb0\x28\x25\xa0\x7c\x10\xf1\x50\x0e\xe6\x19\x8d\x50\x0e\x48\x98\x04\x1a\x79\x7b\xa0\x88\x98\x6b\xa4\xfd\x3f\xd3\x84\xb0\x3b\x6f\xa2\xdb\x1e\xf1\x30\x5b\x20\xd3\x98\x8e\xb3\xf1\x80\x4c\xea\xd5\x3f\x0d\x6b\x90\x30\x91\x11\x95\x1a\xe9\xfc\xab\xb0\xc6\xeb\x0f\x12\xde\xe4\x4d\x6e\x0f\x98\x79\xad\x81\x6a\xd1\x2b\x3b\x6e\x54\x16\x10\x2d\x4c\x32\xa9\x50\x04\x70\xab\x1f\x52\x69\x06\x35\x9f\xc5\x30\xc5\x98\x2c\x29\x17\xcf\x35\x78\xd5\x82\xd2\x72\x14\xd7\x9c\x92\xd0\x90\xaa\x64\x5d\xe0\x42\x15\xe3\xe2\xdf\x8d\x61\x72\x55\xf5\x07\x65\x14\xba\x98\x83\x14\x61\x69\x27\xe5\xad\xf5\xc3\x75\xc2\x99\x9f\x52\x76\x17\xc8\xe5\xdc\x03\x6b\x8d\x7a\x67\x57\xe7\x1e\xc4\x48\xe7\xb1\xba\xf1\xce\xaf\x1a\x4c\x37\x19\xcb\x05\x49\x92\xc9\x07\x5b\x48\x10\x04\xe3\x81\x7d\xf2\x98\x44\x37\xd8\xdd\xc8\xa2\x29\x39\x06\x6c\xda\x3c\xfe\x2e\x53\x99\xc0\xa5\xbd\x24\x12\x11\xc6\xdf\x13\x71\x5d\x60\xf0\xd3\xe2\xea\xc2\xa8\xbc\xa3\x75\xf6\x66\x03\x94\xa5\x59\xb5\x67\x9e\xb5\x27\x6f\x66\x34\xd1\x23\x57\x2d\x21\x07\x5f\x50\xbd\xf1\x17\x94\x51\x0f\xd2\x84\x84\x18\xf3\x24\x42\x91\xe7\xb4\x18\xf2\x40\x15\x5a\x80\x48\x86\xab\x57\x61\xf2\xbd\x55\xec\xf5\xdc\x6d\xe6\x1c\x35\x01\xc8\x21\x3d\x36\x35\x69\xbb\xdd\x57\x1c\x4e\xb4\x18\x3d\xc9\xf3\x9c\x68\x6c\xa0\xa1\x81\xee\x11\x54\x30\x82\xb9\x66\x5c\x8f\xd5\x12\x21\xcd\x92\xc4\x17\x9a\xfb\x4e\xb6\xdb\x8f\x16\x35\x6d
\x36\x83\xbc\xc8\x26\xce\xad\x77\xd2\xd1\xeb\x3a\x2a\xb5\x54\xb0\x20\x1c\x65\xe5\x85\xa3\xfb\x17\x9d\x16\x5a\xcf\x70\x2f\x29\xae\xe0\xed\x62\x8a\x22\xf8\x8c\x09\x86\xaa\x74\xf6\x14\x95\xe6\xc3\x6f\x6f\x0f\x1c\xfe\xa3\xc6\xd9\xd5\xe3\x98\xc8\xb7\xf7\x29\x61\x11\x46\xd0\xdf\xeb\x11\xc4\x34\x8a\x90\xf9\xf7\xb2\xb8\x92\x8b\xe3\x29\x51\x01\x8f\x3d\x20\xd1\x70\xc2\xf5\x6e\xc4\x35\xb1\x22\x64\x12\xa3\x2a\x86\x34\xd7\xa9\xa0\x0b\x22\xd6\x39\xf2\xcc\x01\xa9\xe2\xf3\x79\x82\xaf\x8b\x4c\x1a\x6b\xda\x6e\x1e\x0b\x31\x9b\x74\x7b\x2c\x5c\x51\x9a\xe0\x7a\x50\xa8\xfc\xc8\x15\x9d\xd1\xd0\x28\xae\x5f\xa9\xa4\xd3\x04\x77\x4d\x28\x7c\x16\xcb\x91\xcf\x2a\xe9\xaa\x82\xac\x2a\x5b\xc2\x18\xc3\xbb\x29\xbf\xf7\x5c\x99\x7c\x4b\x29\x0f\x4c\x2a\x8c\xf2\xe4\x18\x79\x3b\x62\x7a\x31\x8d\xb0\xda\x22\x6f\xbb\x85\xc1\x31\x5e\x8c\x43\xfc\x18\xed\x74\x55\xd9\xdd\x9d\x2a\x2f\xed\x65\xa1\x20\xaf\x0a\xfd\x78\x55\x3c\xc9\x55\xe8\x69\xf1\x22\xbf\x1f\x76\x14\x0e\x56\x9d\x75\xbe\xd3\xff\xfe\x8e\x27\x4b\x63\xe5\xa1\xa4\x73\x6d\xc0\x6a\xdc\xb2\xc2\x29\x50\xad\x65\x67\x24\x44\x3d\x59\x73\x04\x15\xc0\x58\xa6\x84\x4d\x0a\xd3\x96\xa7\xca\xd7\x50\xc7\x1a\x86\xda\xfe\xdb\xe5\x5a\x51\x15\x03\x01\x49\x17\x69\x82\x80\x6c\x49\x05\x67\x1a\xc3\xc0\x92\x08\xaa\x71\x54\x00\xef\x18\x10\x98\x65\x2a\x13\xba\x09\xc6\xf7\xf6\xbc\x2c\xea\xcb\xbb\xc2\x08\x0d\xf9\x02\x6b\x58\xcd\x5a\xad\x54\x56\xd2\x40\x84\xa9\xc0\x90\x28\x8c\xb4\xba\xd6\xad\xec\xa6\xc9\xa0\x83\x28\x2d\x59\xdb\x3b\x50\xc3\x62\x58\xce\x8b\xf1\x78\xd1\x37\x10\xfd\x58\x90\x6a\x59\x38\x98\x5b\xdb\xda\x97\x8a\x08\x85\xd1\x20\xa3\x06\x14\x7e\x17\xf3\x95\xaf\xb8\x9f\x49\xf4\x55\x8c\x3e\xc3\x95\x9f\xd1\x16\x54\xb4\x7e\x64\xc6\x79\x8a\x0c\x05\x30\x2e\x70\x86\x42\x68\x8b\xb8\xc0\xd1\xb9\x7e\x2a\x08\xb9\x6b\x37\xec\xd0\xab\xc6\x97\xdf\x92\x3c\xc3\x06\x02\xec\x65\xd7\x84\x4c\xd1\xe8\x87\xce\x09\x3e\x19\x0f\x4c\xa2\xa3\xda\xe8\x78\x7c\xa0\x3d\x5e\x95\x9a\xbd\x78\xcc\xa9\x07\x4a\x09\x01\xd7\x3b\xd8\x7b\x5d\xe2\x5e\xb8\x56\x3c\xd5\xd0\x4d\xcb\xf5\x67\x7d\x4a\xab\xa4\xe6\xe8\x61\x52
\xa1\x64\xbf\xef\xbc\x49\x0d\x62\xf3\xd4\x9f\x0a\xc2\xa2\xc2\xeb\x5b\x1b\xfd\x26\x8d\x5c\xca\x34\x6f\xc5\xd9\xc3\xda\x55\x85\x52\x12\xc5\x92\x86\xd8\x86\x53\x0d\x76\x1d\x8e\x4e\xb6\xdb\xcf\x79\x62\x27\x70\xaa\xfa\xad\x9f\xa0\xe1\x8c\x47\x07\xb6\xfa\xa3\x4e\xf9\x47\x68\xf2\xdd\xf2\xa0\xf6\xbe\xc7\xf5\xe0\x57\x8d\xe6\xbe\xba\xcd\x0d\xbe\xdd\x89\x83\xd3\x46\x1f\x46\x07\xf7\xc1\x09\xbd\x5d\xbd\xe8\xc2\xd7\x0f\x6a\xff\x8b\x3d\xed\x3f\xdf\x37\x0f\x37\x1b\xb7\x9c\xc8\x25\x82\xee\x48\x4c\xe4\x0f\x84\x26\x94\xcd\x5f\x6b\x10\x23\x0d\x3e\x5c\x11\xc1\x28\x9b\x9b\x6b\x99\x19\x37\x99\x57\x85\x8a\x5e\x2e\x20\xb7\xdb\x89\xd5\xd3\x45\xe9\x98\xd0\xd4\xe7\x4b\x14\xb3\x44\x0b\xad\xcd\xc6\xf8\xbd\xb7\xdb\x7a\xaa\x90\x08\x54\x5a\x0a\x18\xe5\x99\xff\x80\x11\x07\x2d\x4f\x3d\x95\x6f\x04\x4f\x23\xbe\x6a\x61\xba\xbc\x97\xc6\xdb\x62\x8a\x8d\xf2\x84\xfe\x02\x59\x56\xf2\x80\x07\x66\xc9\xf5\xc6\x8b\xa8\x4c\x13\xb2\xbe\x9e\x26\x3c\xbc\xfb\x8b\xcb\xd2\x42\x12\xc6\x10\x85\x40\x19\x44\xa1\x6c\x54\x64\x2a\x4b\x68\x03\xe2\xe9\xc6\x69\x32\xb8\x85\x4b\x14\x6e\xb7\x9b\x8d\xfd\xbb\x63\x8a\xf1\x20\xa1\xed\xda\x07\xba\xf6\x96\x45\x96\x25\xae\x75\x81\x63\xd8\xaa\x2d\xce\x47\x7b\xd8\xaa\x39\xb5\x4f\x7b\xc4\xa8\x75\xd0\xef\x9b\x1a\x2f\x80\x86\x9c\x9d\xd4\x2d\x2f\xb9\x9c\xc3\xfd\x22\x61\xf2\xa6\x58\xa1\xd5\x38\x65\x75\x16\x70\x31\x1f\x8c\x4e\x4f\x4f\x07\xc6\x91\x62\xb0\xbf\xce\xae\x7b\x34\xf7\x40\x1b\x85\xdf\xf3\xfb\x1b\xef\x14\x4e\xe1\x6c\x04\xe7\xa7\x4d\xd8\x9c\x12\x15\x43\x74\xe3\xfd\x34\x3c\x87\xd3\xc4\x1f\x06\x67\xc3\x33\x38\x0f\xfd\x21\x04\x67\xfe\x30\x78\x79\x79\x11\xbc\xb8\xba\xf2\x47\xc1\xd5\xe5\x05\x0c\x83\xe1\xd5\x55\xe2\x9f\x05\x97\x23\x7f\xa8\x9f\xf8\xa3\xe0\xf2\x0a\xcc\x1f\x73\x0f\xfa\x55\xe8\x07\x17\xc1\x4b\x3f\xb8\xba\xca\x9f\xfa\x26\x1f\x98\x32\x3e\x9c\xc2\xf0\x7c\x79\x9e\x9c\x83\xae\xea\x3c\x0c\xce\x60\x08\xc1\x8b\xab\x4b\x30\x95\x99\x2a\x2e\x6d\x52\xdd\x9a\xcb\x2b\x5d\xe4\x30\xaf\x63\x94\xdf\xdb\xda\xcf\xc3\xe0\x65\xa0\x73\xbc\x3c\xbd\x08\xae\x4c\xae\x97\xa7\x45\x23\x87\xc1\xe8\x0a\xce\xe3\xf3
\xc4\x54\xe3\x9f\x87\x43\x3f\x38\x83\x51\x70\x7a\xfa\xc2\xd7\x3d\x32\x89\x5f\xd8\x96\x7d\x18\xbd\x80\xd1\x55\x70\xf1\xf2\x3c\x19\x05\x57\xc3\x33\xd3\x2b\xdd\xf9\xd3\x17\xa6\xaf\x61\x70\xe1\x07\x2f\x75\x15\xe6\x61\x5e\x85\x6f\xaa\xfb\x70\x36\x82\xe1\xd5\xd2\x3f\x4f\xfc\x73\x43\x3d\xdd\xfd\x33\x7f\xa8\x2b\xb9\xb4\x04\x34\x95\x5c\x5a\x12\x26\xba\x4b\x96\x80\xa6\x92\x51\x71\x6b\x29\x15\xfa\x9a\x70\x17\xb6\x1a\x43\x41\x5b\x4b\xde\xce\xe1\x15\x9c\xc6\xfe\xf9\xef\x8b\x11\xbc\x0c\xcf\x82\x97\x70\x0a\x97\x70\x16\x0c\xe1\x12\x2e\xa5\x6f\x2e\xfc\x4b\xfd\x5f\x5f\xfb\xfa\xda\xfc\xea\x27\xbf\x7b\x83\xba\x49\x2a\x97\xf3\xc6\x6c\xe9\x14\xc4\xad\x9b\xea\x4c\xfa\xc6\x8b\xba\x77\xcb\x81\x8c\x4b\x0c\x77\x84\x43\xd3\xca\xff\x64\xee\x5f\x40\x4a\xa2\x08\x23\x6b\xd8\xfb\x0b\x1a\x45\x09\x56\x6d\xd7\xf8\xbc\xc4\xdd\x02\x49\x14\x8a\x6c\x31\x95\xde\xa4\x84\x62\x15\xcb\xf4\x44\xd3\xe4\x96\xbf\xc7\xf5\x09\xcc\x35\x1c\xfb\x85\x08\x64\xea\x3d\xae\xc1\xc8\xb1\xb4\xb8\xd5\x82\x8a\x4c\x60\x5f\x19\x69\x35\xfb\xff\xd7\x59\xc6\x83\xf8\x7c\xcf\xe2\xfa\xb7\x25\xc3\xb3\xb6\x14\xa7\x0a\x17\x5a\x8e\x1b\xfd\xd3\x5c\xd0\xad\x88\x30\x9d\x2e\xb0\xbd\xf9\xc4\x33\x85\xf6\x81\xee\x8d\x22\xf3\x8f\xd6\xd1\x40\x97\x79\x6c\xcc\x8c\x24\x12\x8b\xc6\x25\x54\x2a\xeb\xad\xf1\x4d\x65\xe6\xbe\x74\xb9\xf8\xba\x4c\xaf\x59\x71\x89\xdf\x5b\x6a\xd9\x54\x4b\xe5\x0f\xc6\x29\x79\x3d\x9d\xfb\x73\xa1\x55\xd5\xdc\x4f\x4c\x57\xf5\x1d\x5c\x9b\x2a\xa6\x44\xf8\x66\x99\x8d\x33\x45\x12\xad\x7b\x9c\x86\x48\x95\x8e\x9a\x0d\x1d\x96\xd0\x66\x63\x6a\xbd\xc3\xf5\xdf\xa9\x8a\x79\xa6\x2c\x27\xb4\xdb\xdc\x2e\xbf\x36\xb5\xea\x53\xae\xa6\xc6\xea\xfa\xa9\xda\xa6\x29\x17\x11\x0a\x3f\xc1\x99\xea\x75\x92\x75\x97\xb0\xb4\x78\x40\x67\x2b\x2e\x6b\xce\xb5\x71\x2c\xf6\x17\xd2\x64\xb1\x4b\xa8\x34\xac\x63\xad\x20\x67\x42\x9b\xb0\xd3\x9b\x97\x12\x86\x89\xcb\xa3\xe3\xb4\xde\x4c\x6a\x3d\xb6\x40\x65\xbe\x1a\xa0\x47\x9f\x0b\xc2\xe6\x58\xe7\x03\x63\xc1\xed\x1d\x73\x5b\x60\x8c\xd5\xb5\x94\x5a\xda\x9d\xc8\xb0\x49\x4d\x98\x9b\xd3\x5e\x7e\x2d\x90\x28\x84\xf7\xb8\x76\x18\xbb
\xd5\xa9\xee\x22\x76\x77\xdb\xa6\x3c\x5a\x83\xbd\x9c\x71\xb1\x70\xb5\xd1\xac\xa1\x56\xbc\xa3\x5e\xab\xd8\x66\xd1\x3d\xae\xf4\x3c\x71\x3a\xd9\x6c\x4c\xdc\xc4\x4f\x36\x80\x43\x8b\x39\xa7\x93\xc6\xdd\x0f\xe8\x1d\xc6\x5d\xf5\xc0\x70\xf5\x1e\xd7\x7a\x76\xfd\x4a\x12\x1a\x5d\x2f\xf5\x5f\x6b\x7d\x3b\xdb\x55\xe9\x84\xf1\x44\xf6\xf6\x02\xea\x80\xba\x92\xc1\x27\x51\xc4\x99\xd7\x14\xe4\x7d\x6e\xaa\x72\x5d\xc5\x3a\xd3\xf3\x86\x6b\x19\xe8\x70\xa8\x7b\x20\xf2\x80\xb6\x1b\x25\x32\x6c\x4b\xb8\x1a\xf1\xf6\xb6\x3c\xc6\x24\xf5\x0d\x22\xf7\x26\xb7\x1c\x42\xcb\x6a\x04\x66\x46\x10\x3e\x07\xb4\x51\x23\x70\x87\x6b\xeb\xe7\xb3\x11\x36\x83\x32\x6e\xa6\xab\x63\xdd\xa3\x57\x2e\xde\xe8\x5e\x16\x22\xd7\xdd\x8f\x71\x3a\xf9\xc8\xf3\x90\x45\x86\x18\x61\x64\x5c\x94\x0c\xa5\xc2\x48\xb7\x49\x36\x17\x67\x77\x95\x60\x22\x9b\xb6\x4a\x5e\xe8\x11\xdc\x63\x98\x86\x28\xfc\x9b\xe4\xec\xba\xb8\xd9\xb5\xdd\x70\x96\x79\x97\x5b\x69\xd7\x36\x80\xac\x93\xcb\x36\x1b\xd0\xa0\x85\x08\x24\xf5\xd1\x36\x96\xb7\x73\xbc\xbb\x06\xb8\x6b\x14\xf3\x92\x08\x83\x29\x02\x61\x6b\xb3\x02\x43\xac\x27\x35\x41\x36\x57\xf1\xde\x41\x73\xb5\xbb\x61\xf8\x54\xf2\xb4\x23\x6b\x2c\x17\xbd\xc7\xb5\x35\x5a\x77\x84\x2e\x96\xeb\x6f\x4a\x0a\xbe\x63\x86\xaa\xbd\x21\x38\x75\x72\x57\x8d\xe2\x56\x70\x8e\x15\x95\xee\x10\xa0\xa2\x27\xdf\x65\x2c\x41\x29\x0f\xe4\x40\xeb\xa5\xac\x0d\x4c\xb1\x36\x31\xa9\x2f\x88\x16\xcf\xcb\x65\x09\x07\x0f\xc1\x76\xfb\x6b\xc1\x45\x7f\xfb\xfc\xf3\xc7\x1e\x07\xe7\x66\x33\xb0\x0d\x75\xb7\xab\x4d\xf5\x08\x13\x54\x68\x3b\xe3\xf5\x86\x34\x55\xd6\xa8\x2a\xda\xae\xe1\x77\xb0\x11\x56\x40\xe5\x27\xce\xd5\xb5\x05\x06\x86\xc6\x6f\x4c\x3d\xb9\x84\xe8\x26\xf5\x78\xa0\xa9\xd5\xa1\x9c\x7a\x1e\x3d\x41\x84\xc3\xdd\x72\x80\x11\x55\xff\x96\x06\xc1\x13\x98\x03\xb5\xc9\x56\xb3\x06\x34\x4a\x34\x01\x85\xff\x31\x08\x1e\xd3\x20\xa8\x3d\xfe\x13\xd9\x03\x20\x15\x0d\xef\xd6\xbe\x0c\x05\x4f\x92\x03\xad\x83\xa7\x31\x0f\xe6\x02\x91\x99\x97\x5a\xf4\x1f\x66\x30\x1c\x6e\x2e\x68\x71\xe2\xf2\x25\x57\x2c\x87\x5d\xdd\x89\xf9\xf1\x5c\x10\x24\xf7\x07\x07\x46\x98\x38
\xde\x6a\x60\x66\x53\x14\xa5\xb9\x9c\xb0\x26\x42\x6a\xb7\x8d\xa0\xd0\x25\x0e\x86\x7e\xff\xf6\x37\xf8\xf0\xf3\xeb\xf7\x6f\xdf\x38\xd4\x83\x0d\xb4\x72\xb4\xa2\x19\x7b\x0d\x4e\x43\xc7\x05\x30\x8f\x37\x72\x0e\xb6\x5d\x9a\x46\xcb\xfe\x29\xe9\xb6\x9f\x5a\x74\x38\x04\x8d\xda\x31\x71\x83\xd1\x62\xbc\x8e\xc4\xa2\x6d\x24\x5a\x56\x92\xe1\x1b\xd4\xe8\x3e\x72\xdb\x1f\x25\x88\xdb\xcf\x29\x1d\x2b\xb4\x25\x76\x29\xa0\x4b\x96\x46\x7b\x00\x63\x39\xed\x1a\x09\xaa\xb8\xa6\x1b\xcb\xd4\xd6\x50\x3a\x4a\xb7\x7d\x98\x7c\x31\x4d\xe9\x81\x35\xad\xc6\x87\x84\x85\x98\xbc\xd5\x80\xe2\x71\x5a\x5f\x00\x5c\x8d\x6f\x4d\x5d\x3d\xad\xfb\x0a\xb8\xda\x66\xb2\x83\xd1\x6a\x9b\x28\x16\x8c\x7e\xed\x88\x1e\x83\x54\x0f\x19\xdc\x1c\xba\xde\xe1\xba\x8b\x84\x2e\xd4\xda\x15\x97\x57\x07\x3a\xfb\x44\xe7\x38\xbe\x98\xe8\x17\xf0\x19\x4d\x9c\xfa\x78\x10\x5f\x34\xc4\x57\x6b\xa1\x39\x90\x31\x5f\x9d\xe4\xe5\x4a\x9b\x2f\xf8\xc8\x23\x7c\x4c\x30\x54\x00\x82\x1a\xd4\x71\x21\x9d\xaf\xc0\x39\x90\x77\x46\xf7\xa1\xb8\x76\x19\x60\x47\xe8\x1a\x5d\x6c\x95\x44\x4e\x79\xe4\xd4\x39\x47\xc1\x29\xe7\xfa\x5f\xed\xe6\x5b\x5b\x2b\x7a\xfc\x06\xff\x09\xca\x7e\x82\xa0\xec\x62\xe1\xf8\x10\x2a\xf7\xc5\x00\xbd\xb0\x71\x3f\xc6\x44\xba\x76\xd8\x48\x40\xe5\xe7\x98\xaf\x28\x9b\xbf\x53\xb8\xb8\xde\xc1\x6d\xe7\x73\xb9\xa8\xeb\xf0\x3c\x2e\xf7\x55\x9a\x06\xaf\x8c\xb4\xfd\x9e\x88\x5f\xf5\x93\x2a\x63\x5a\x81\x64\x83\xab\x31\x7a\x6d\xc3\x76\xdb\x0e\x9c\x72\x37\x5e\x19\x73\xda\x76\xf1\x6c\x36\xdf\x85\x3c\x49\xd0\x4a\x76\x1b\x0e\xfc\x81\x4a\x65\xea\xcc\x03\x82\xbf\xa7\x4c\x8f\xd5\x8d\xd7\xa8\xb1\x1c\xe6\xab\xd3\x53\x10\x7c\xf5\xa3\xbd\x3b\x3f\x77\x7b\x5b\x5c\xab\xf8\xb9\x0c\x34\x92\xe2\x71\xa4\x5e\x6d\x54\x5b\x83\xda\x0e\xd0\x68\xe3\xfe\xa3\x0c\xc1\xf6\x3c\xeb\x10\x93\x05\x4d\x74\x97\x3b\x1a\x7d\xa4\x84\x2c\x4a\xb4\x3a\x3f\xc7\xb2\x5d\x04\xe9\xc1\xe8\xb0\xc7\x57\xe8\x8e\x6f\xcf\x5f\xee\x98\xc9\xc5\x8f\xb9\xdb\xf6\x29\xb8\x70\x38\x3a\x7d\x42\x36\x7c\x7c\xe6\xdb\xc7\x71\x3b\x9f\x54\xa3\x81\x3d\xcb\x54\x87\x11\x24\x8f\x3e\x91\xd9\x54\x25\x78\xb2\xdd\x16\x3c\xdb
\xcb\x0a\x95\x9e\x97\x6e\x6c\xab\x36\x62\x4c\xd2\x56\x34\x76\x2d\x13\x29\x77\xb9\x98\x2a\xbd\x5d\xc4\xe3\x66\x53\xe5\x6d\xeb\x32\xeb\x64\xef\x6e\x5a\x39\xd6\xd5\xf2\x37\xbb\x78\x27\x43\x45\xca\x12\xca\xfa\x66\xaf\xf5\x80\x69\x38\x07\x94\x99\xdf\x96\xfb\xab\x52\x7a\x42\xeb\x1b\xe1\x36\x1b\x66\xb0\x8c\x2b\x66\x69\x57\x87\x2b\x76\x69\xd7\x95\xac\xcb\xbf\xfc\x15\xd3\xb4\x81\x83\x5a\xcb\x2d\xbb\x0d\x7d\x89\x95\x46\xb7\x31\x0a\xcc\x0f\x97\x80\x82\x87\x40\x71\xd0\x6c\x54\x5f\xc5\x69\x94\xfe\xaf\xee\xb3\x32\xce\x2a\x6d\x8a\x74\xa0\x2c\xc1\x57\xd0\xbd\xa4\x5d\x8b\x85\xa9\xb5\xa4\x75\xf3\xf5\xc8\xa7\x13\x48\xda\x80\xc5\x1d\x72\xdc\x47\xb7\x8a\xc0\xd8\xa1\xaa\xae\xd0\x74\x7d\x6d\x17\xaf\x60\xbb\x2d\xf6\x56\x6b\xa5\x26\x7b\x03\x7f\x3b\x87\x26\x3e\x2b\x15\x2b\xf7\x17\x44\xcc\xa9\x59\x8f\xcd\xcd\xa8\xd3\x20\x8f\x27\x2e\x7e\x41\xcf\xae\xf8\xac\x2c\x4c\x97\x5b\x5e\x5f\x4c\x6e\xc9\x5c\x5a\x5b\xad\xec\x1a\x9d\x69\x15\x50\xdb\x71\x95\x4e\x36\x1b\xfb\xac\xc2\xca\x8e\x59\x31\xf9\xc8\x4d\xde\x5a\xaa\x3a\xbb\xc7\x17\x13\x13\x3b\x5c\x33\x10\x9b\x72\xa4\x8c\xab\x71\x91\xbc\x66\x3c\xea\x6b\x63\x10\x1d\x66\x38\x1a\x5f\x59\xee\x31\x33\x27\xd6\x14\x97\x52\x79\x8e\x4d\x76\x0e\x9d\x65\x2a\x3c\x48\x71\x95\x5e\x55\x37\x6c\xea\xf3\xdc\x76\xa8\xac\xca\xd8\xf7\x45\x55\x58\xa9\xba\xa3\x4a\xdb\x10\xb6\xc0\xa7\x9a\xee\x55\x14\x09\xb3\x1a\xd7\x65\x45\x56\x17\x64\x6d\xe5\x8c\x2b\x2c\x44\x78\xd0\x54\x4b\xed\x35\xd8\x2a\x17\xb6\xd9\x1a\x1c\xca\x27\x63\x26\xb6\x36\x6a\xed\xdc\xb3\xcc\x62\xaa\x2c\xb4\x4e\x60\x07\xc2\x61\xf4\xef\x74\xce\x21\xf8\xd9\x65\xce\xeb\x74\xdf\x13\x61\x2b\x0c\x3e\xe7\x47\x86\xb8\x7d\x9d\xfb\x30\x6f\xae\xc0\x83\x4e\xd4\x5b\x8e\x4d\x5e\x9d\xe9\xd7\xbb\x37\xd0\x39\x32\x70\xbc\x23\x61\x4f\x47\x7a\xa0\xb1\xdb\x27\xd2\xd2\xde\x2e\x9d\x6d\x35\x75\x55\x0d\xb6\x62\xbc\xaa\xd9\x8e\x97\xf2\x46\x2c\xfc\x09\x8c\xdb\x83\xcd\x5b\xa7\x81\xdb\x1e\xcc\x27\xb5\x71\xa1\x47\x56\x3f\xa2\x6b\xaf\xc2\x66\x4f\x6b\xe8\x36\x99\xad\xd7\xd8\xb5\xc4\xe9\x70\xdf\x55\xca\x3b\xde\xe8\xb5\x25\x03\xcb\x16\xc5\x76\x22\xd8
\x6e\x4b\x84\xd1\x57\x57\x9f\x11\x0c\xfd\x66\xc4\x1e\x94\x0d\xfb\x91\x76\x9f\x49\xfc\xb4\x46\xf1\x63\x70\xed\x1e\x5e\x7d\x0a\xab\xf8\xab\xec\xe2\x03\x98\xb5\xd4\x56\xfb\xf0\x43\xa3\xdb\xc7\x9a\xc4\xdf\xca\x28\xde\xc3\xcf\xdd\x86\xf1\xd1\xa6\xf1\x0e\xa7\xe4\xb3\xd0\x44\x88\xe4\x13\xb2\x7f\xfa\xb7\xad\x64\x59\xc7\xf4\xfd\x06\xf3\x3e\x93\xb9\xdb\x68\xde\x3b\xa1\x0f\x98\xce\xad\x48\x3a\x47\xac\x62\xbf\xf9\x6c\x9d\x08\x4e\xdb\xb9\x63\xef\xff\xa3\x18\xcf\x7f\x02\xbb\x57\x53\xf2\x51\x8c\x5e\x0b\xb5\x8e\xb6\x78\x59\xf7\xa6\xd1\xe3\xcc\xdd\x4a\xa3\x72\xbb\xd7\x48\x69\xb3\x13\xd0\x48\x9f\xdd\x8b\x5c\x30\xb5\xf1\x73\x9f\x55\x5c\x28\x51\xa7\x91\x5a\x99\xcf\xb6\x8a\x8a\xca\x75\xda\xab\x0d\x9f\x62\x63\x32\x7f\x0b\x44\x04\x11\xcf\xf4\x80\x19\x89\xd4\x7d\x58\x8c\x03\xde\x40\x3d\x96\x67\x5f\x20\x8f\x13\xde\x38\xe4\x93\xd3\xe0\x3c\x08\xd1\xec\x0a\x2b\x55\xca\xf5\xee\xd9\x2f\x5c\xb8\x02\xc6\x1c\x2a\xc7\xd5\x11\xa7\x04\x07\x99\x4d\x3b\xb7\x6b\x2a\x32\xaf\xc8\xed\xe0\xb6\xee\x17\x29\x0b\x4e\xa8\x75\x90\x1c\xb3\xfb\xb2\xd2\xd5\x5b\x32\x2f\x56\x2b\x7a\x6a\xaa\x4b\xed\x7e\xfb\xa9\xc2\xcb\x16\x44\x38\x39\xb9\xb4\xa0\x2d\x1f\xef\x4c\x68\xa7\x44\x6b\xc7\x94\x6d\x36\xd6\x7f\xd2\x34\x8e\xbb\x39\xf0\x9b\x78\x37\x7a\x2c\xe8\x12\x8f\xd4\x8c\xe7\x87\x78\x35\xea\x36\xf2\x03\x1d\x1a\xce\x80\xad\x56\xb7\x2f\x26\x1f\xb9\xc2\x9a\x1b\x2e\x7f\x95\xee\xfc\x00\x26\x09\x38\xb6\x93\xe8\xfc\x3f\x67\x2a\xcd\x94\xab\x00\x81\x65\x5f\x6c\x22\x53\x82\x40\x47\xc3\xdd\xfd\x70\xf2\x54\x35\xd2\xa3\xc5\x5a\x74\x56\xc4\x3c\x34\x24\x62\x23\xc4\xac\x2a\x2b\x73\xc1\x6a\x23\x25\xcc\x84\x6b\x17\xe0\x90\x65\x9d\x52\xb1\x26\x10\xbb\xc5\xd9\xc3\xa2\x40\x0e\x10\x88\x87\x45\x80\x38\xc2\xdc\x29\x2b\x42\x94\x72\xf2\x7a\x45\xf6\xc0\x38\x81\x0e\x3a\x38\xb4\x16\x59\x64\x9e\xeb\xae\xb9\x63\x8c\x8c\x83\xe2\x5d\x59\x6b\xc7\xf9\xa2\xee\x4d\x57\x87\xcb\x53\x87\xcc\x29\xba\x65\xa5\x4e\xc7\xf6\x84\x16\x12\x36\xd9\xbb\xf0\x6f\x97\xa0\x6d\x6f\x74\x2f\x6a\xff\x09\x15\x71\x8f\x4f\xfb\xcc\xa9\xfa\x34\xd8\xb7\x26\x04\x46\xe0\x78\x93\x8f\xbc\x64\xe6
\x3d\x5e\x71\x54\x2b\x2e\xee\xe0\x96\x2f\xf8\x5c\x90\x34\x5e\x37\x26\x16\xa8\xf2\xcd\x5f\xf5\xdf\xca\x3d\x9c\x9d\xbd\xa8\xc8\xed\xce\xa6\x44\x78\x9d\xcb\x3c\x0d\x0f\x77\xf9\x7d\x8d\xcc\x7c\xca\x66\xbc\x72\x6e\x42\xd9\xd8\x8e\xe2\x7e\xa2\x8c\x2e\xb2\xc5\x75\xbd\x69\xc1\x82\x6a\x4a\x2e\xe4\x01\x05\x60\x44\x09\x6b\xe5\x37\x4f\x0f\x2d\x82\xdc\x3b\xdb\x40\xee\xab\x05\x3c\xe4\x00\xc3\xe4\xcf\xed\x4a\x6c\x4d\xce\xa7\x71\x10\x36\x4f\x3d\xc9\x21\xf1\xbb\x37\x8f\xe8\x17\xfc\x16\x61\x7f\xcd\x72\x7a\xdc\x7c\x46\x98\x27\x46\xf0\x9b\x3f\x46\x6c\xbb\x8a\x73\xd7\xd3\x63\x81\x37\xad\xef\xea\x1b\xc7\x7e\xc0\x7e\x2b\xdb\x1c\x32\xfb\x7f\x62\x64\x17\x27\xaf\x54\x98\xbc\x7e\x92\xde\x1f\xcc\x00\x77\xc9\xfa\x07\xec\xf1\x3e\xa6\x75\xfb\x37\x73\x3c\xfa\x6e\xef\xe3\xf6\x7b\x1f\xb5\xe3\x1b\x20\x3f\x2b\xd4\x39\x2b\x5c\x1e\xb8\xae\xed\xb6\x0f\xd9\xf6\xdd\xbd\x2b\xa2\x67\x4b\xf6\x01\x5b\x1d\xfa\x4a\xee\xf0\xc3\xb5\xb7\x46\xbf\x0a\x93\xa0\x12\x73\xd4\xd8\x9b\xd0\x13\x26\xd7\xb1\x5d\xf6\x33\x2a\xb3\xc1\x99\xa7\x5a\x66\x90\xc4\x7c\xdf\xc6\xec\x32\xd6\x4f\x5f\xbd\xfe\xd0\x7b\xc6\x60\x37\x41\x1e\xd0\xd5\xee\xe3\x56\xb5\x26\x96\x75\x1a\xdc\xae\x53\x27\x0d\xf6\x1d\xb8\x7a\x10\x51\x6e\x63\x34\xda\x1f\xf8\xcc\x9e\xc0\x6e\x4f\xd1\x7e\x2a\x52\xd8\x5d\x0c\x93\x4f\x59\xa2\x6d\xbe\x9e\x23\x00\x3b\x36\x54\x6b\xfa\x98\xcc\xdf\x8a\x49\x7e\xe0\xa2\x38\xed\xdb\xee\xa6\xd6\x20\x80\x33\x10\xba\x92\xe7\xf6\x93\x0f\x8f\x78\x0c\x78\x60\x37\x71\x3e\x90\xf6\x5d\xfb\xb3\x5f\x85\x49\xff\x36\xe1\xea\xce\x99\xc3\x36\x57\xbb\xf7\xfc\x76\x2e\x8a\xf7\x1f\x77\xdc\x50\x30\x95\xb3\x19\xbf\xe6\xd8\xed\xe4\xc8\x10\xfa\xaf\x71\x02\x1b\x8c\x7c\xb4\x0f\x58\xe3\x8e\x03\x5d\xc0\xae\xb1\xef\xd7\x8b\x8f\xaa\x13\x8f\xdb\xd0\x78\x90\x2e\xac\x60\x45\xcf\x6e\xcd\xd2\xf4\xf1\x72\x7f\x9c\x0b\x39\x3e\xf5\x4e\xc1\x0e\xcb\xff\xd0\xad\x7f\x87\x49\xc5\x86\x1e\xcc\x3d\xfa\xc7\xa9\xc1\x47\x50\x81\x87\xa0\x8e\xbd\x3d\x3b\x4c\xed\xd9\x2e\x7f\xad\xd6\xfb\x66\x1a\xef\x1b\xf4\xfc\x40\x2d\xd7\xb5\x51\xf3\x58\x05\xf7\x2f\xa9\xdc\x3a\xc9\xdc\x56\x6a\x76\x0f\xe9
\xd1\x4a\xed\xf0\xfd\x9e\x3b\xf5\x99\x70\x76\xec\x57\x83\x5e\xeb\x3c\x47\xd5\x22\x51\x1d\x59\xc7\x17\x89\x7d\x9f\x40\xb2\xcc\xf4\x1d\x54\x36\x2a\x7e\xe4\xea\x15\xeb\xd8\x79\xd7\x75\xc0\x48\x2f\x79\xf7\x1e\x27\x52\xf1\xbb\x56\x0e\x13\xe9\x6b\xb0\xf3\xe0\x99\x63\xb6\x69\xe6\x37\xb1\x78\x18\x64\xa0\x2c\xc2\x7b\x07\x68\xe8\xdd\x5f\x57\x39\xbc\xb3\xeb\x70\xcf\x3a\xaa\xe8\xd8\x5d\x67\x97\x5a\x8d\x50\x24\x66\x73\xa0\x7d\xdf\x88\x47\x6e\x9f\xb8\xb7\x67\x9d\xf5\xc4\xa6\x6e\xba\x43\x9a\x3a\xb1\x1d\x06\x2c\x63\x2e\xda\x6b\x92\x0e\x65\x4a\xc4\x01\xeb\xa6\xc7\x9e\x98\x76\xd0\xea\x17\x94\x07\x69\xec\x17\x83\xd5\x05\xad\x6e\x11\x54\x5f\xc4\x72\xb2\x9a\xcb\x1b\xde\x04\x70\x75\xdf\xfb\x57\x05\xeb\xdb\x53\x5f\x1f\x99\x29\x2b\x4c\x78\xa6\x31\x82\xa9\xb3\x16\x13\x90\x6a\xb5\x29\x2b\xdf\x89\x23\x49\xc2\x57\xe6\x93\x4a\x8a\x6b\x2d\x3e\xa3\xf3\x4c\xa0\xfd\x52\x5d\xf1\x2d\xcd\x02\x57\xe4\x5f\x6a\xfa\x3b\x4e\xe1\xcb\xbb\x00\xde\x2e\x51\xac\x55\x4c\xd9\x1c\xa8\x04\x49\x96\xf6\x33\x74\x09\x0f\x49\x22\x15\x17\x64\x8e\xcf\xf3\x7a\x09\x8b\x40\xc6\x3c\x4b\x22\x48\x51\x48\x2a\xb5\x7a\x12\x3c\x9b\xc7\x56\x59\x49\x93\xa2\xa8\x2f\x93\x64\x8e\x41\xcd\x51\x5e\xf6\xc6\xb8\xfa\x48\xa6\xb8\xd6\x7a\x21\x49\x92\x75\x51\x22\x46\x90\xa5\xdc\xcc\xa6\xf2\x58\x7a\xf3\xa1\x3e\xc6\x61\x41\x58\x46\x12\xd3\x46\xdd\xd6\xe2\x3c\xb6\x5a\x1d\xf1\xc5\xa4\xf2\x45\xba\xdd\x6c\xdd\x8b\x0f\x7a\x3e\xb2\x63\xa5\xb6\xfd\x1a\x5a\x27\x06\x5a\x70\xc6\xeb\x5f\xd8\xf1\x4c\x8e\x2a\x1e\xea\x47\x41\xa6\x7c\x3d\x06\xc8\x94\x3d\xf3\x4d\xf7\x10\xa5\xa6\xab\xfd\x90\x96\x3d\x03\xee\xbf\x94\xed\x9c\xb9\x81\x94\x08\xb2\xc0\xda\x37\xb7\x32\x69\xc7\xb0\x68\x5f\x8e\x26\x0b\x0e\x28\x9b\xb3\xb2\x2c\x50\x9f\x80\x75\xe1\xbd\x8f\x6a\x5d\x9f\x13\x7c\xc8\x97\x04\xdf\x58\x95\x2a\xdb\x4a\xc9\x8d\x05\xe4\x03\xb0\x80\x6c\xa9\xbc\x3d\xf1\x4c\x3b\xd9\xa0\x5f\x0c\x06\xf0\xd6\x7e\xa0\xec\x9f\x19\x8a\xb5\x25\xbe\x7c\x0e\x8b\x4c\x2a\x98\xa2\xf9\xda\x24\x46\x30\xc5\x99\xc6\x75\xa8\xb1\x35\x50\xf9\x0c\x2c\xcc\x7e\xfb\xf1\x57\xb8\x81\xcd\x0f\x6f\x5f\xdd\x7e\xf9\xf4
\xf6\xf3\x35\x6c\x4e\x4c\x31\xbe\x2d\xc6\x67\xb8\x3a\xb9\x06\x25\x32\xdc\x6e\xff\xd2\x68\x46\x21\xa0\xaa\xbb\xbd\x49\x9a\x26\xf9\x14\x31\x5f\x5d\xfe\x87\x34\xd2\xd4\xe6\x19\x0f\xec\x67\x7b\xc7\x03\xfb\x79\xea\xff\x0d\x00\x00\xff\xff\x32\x69\x2c\xe9\xb6\x7a\x00\x00") +var _web_uiV1IndexHtml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\xcf\x76\xdb\x38\xb2\xf7\x3e\x4f\x51\x1f\x7b\xe1\xc5\x17\x52\xb2\x6c\xc7\x8e\x47\xd6\x3d\x89\xe3\x9e\xce\x24\x9d\xee\x93\x38\x99\xdb\xab\x7b\x20\x12\x12\x31\x06\x01\x0e\x00\x4a\x56\xeb\xe8\xdd\xef\x01\x40\x52\xfc\x03\x52\x92\x1d\xfb\xf6\xf4\xcc\x22\xb1\x48\x02\x28\x54\xa1\x50\xf5\xab\x02\x40\x8e\xff\xdf\xbb\x5f\xae\x6f\x7f\xfb\xf5\x06\x62\x95\xd0\xc9\x8b\x71\xf1\x07\xa3\x68\xf2\x02\x60\x9c\x60\x85\x20\x8c\x91\x90\x58\x5d\x79\x99\x9a\xf9\x17\xde\xf6\x41\xac\x54\xea\xe3\x7f\x66\x64\x71\xe5\xfd\xb7\xff\xf5\x8d\x7f\xcd\x93\x14\x29\x32\xa5\xd8\x83\x90\x33\x85\x99\xba\xf2\xde\xdf\x5c\xe1\x68\x8e\x2b\xf5\x18\x4a\xf0\x95\xb7\x20\x78\x99\x72\xa1\x2a\x45\x97\x24\x52\xf1\x55\x84\x17\x24\xc4\xbe\xb9\x78\x09\x84\x11\x45\x10\xf5\x65\x88\x28\xbe\x1a\x06\xe7\x2f\x21\x93\x58\x98\x6b\x34\xa5\xf8\x8a\x71\xdb\xb4\x22\x8a\xe2\xc9\x35\x67\x32\xa3\x30\x5d\xc1\x4f\x48\xc6\xe4\x9a\x8b\x74\x3c\xb0\x8f\x74\x21\x4a\xd8\x1d\x08\x4c\xaf\x3c\xa9\x56\x14\xcb\x18\x63\xe5\x41\x2c\xf0\x4c\xdf\x41\x8a\x84\x83\x29\xe7\x4a\x2a\x81\xd2\x20\x21\x2c\x08\xa5\xf4\xf6\xae\x89\x24\x76\x55\x20\x21\x67\x1e\xa8\x55\x8a\xaf\x3c\x92\xa0\x39\x1e\xa4\x6c\xde\xa8\x3b\x43\x0b\x5d\xcc\x3f\x19\xdd\x9f\x8c\x02\xf3\x5c\x92\xdf\xb1\xbc\xf2\xcc\x9d\xc7\x34\x79\xfc\xea\xfe\xf8\x55\xad\x49\x73\xc7\x9b\xbc\xd0\x6d\xca\x50\x90\x54\xe5\x4d\x29\x7c\xaf\x06\xff\x40\x0b\x64\xef\x1a\xaa\x00\x83\x01\x5c\xc7\x88\xcd\x31\xa8\x98\x48\x58\x20\x9a\x61\x50\x1c\x56\x3c\x13\x7a\xf0\xb4\xc0\x63\x2e\x15\x90\x99\xbe\x07\x48\x60\x60\x5c\x81\xc8\x18\x23\x6c\x5e\xb4\xa1\x62\x0c\x5f\xdf\x03\x67\xe6\x97\x44\x09\xb6\xb5\x90\x04\x54\x34\x43\x98\x54\x88\x85\x38\x28\x2a\xe1\x60\x1e\x80\xa7\x35\xed\x72\x30\x48\x5
6\x12\x8b\x05\x16\x41\xc8\x93\xcb\x8b\xb3\xe1\xd0\x33\xc5\x16\xa8\xe8\xc6\x4f\xba\xbd\x2b\x38\x3a\xd2\x9c\x0d\x2c\x13\x93\x17\x2f\xc6\x03\xab\xd1\xe3\x29\x8f\x56\x96\x6d\xc6\x8b\xa7\xba\x85\x71\x88\x99\xc2\xc2\x5e\x00\x8c\xe3\xd1\xe4\x6f\x68\x81\xbe\x58\xd9\x7c\xd6\x3a\x2e\x70\x34\x1e\xc4\xa3\xb2\x48\x3a\xf9\x95\x62\x24\x31\x60\xa6\xd5\x10\x2a\xe5\x09\xb3\xa2\x59\xe2\x29\x4c\x05\x5f\x4a\x2c\xb4\xb8\x32\x89\x21\x57\xcf\xaf\xef\x83\xf1\x20\xcd\x69\x0f\xb6\xc4\xc7\x83\x6d\xbf\xf4\x65\x44\x16\x10\x52\x24\xe5\x95\xb7\x14\x28\x4d\xb1\xc8\x87\xa4\xfa\x44\x4f\x1f\x44\x58\xf9\xac\xf9\x94\xfa\x49\xe4\x1f\x8f\xca\xa7\xf9\x73\x12\x5d\x79\x28\x4d\xab\xb7\x07\x11\x59\x94\x6d\x6c\x2f\xf2\x9f\x2d\xc2\x69\x26\x63\x6f\x52\x16\xec\x2a\x36\xe3\x5c\x55\x3a\xb7\xab\xf3\xfb\x30\xd0\xe8\x6b\x47\x6f\x1d\xba\x7d\xef\xc7\x88\x45\x14\x4f\x91\x90\x79\x7b\xeb\x35\xcf\x14\xc5\x6a\xb3\xa9\x2b\xcd\xee\xfa\x10\x21\x85\x7c\x85\x93\x94\x22\x85\x7d\x6b\xd6\xb0\x10\xdc\x35\x4a\x82\x2f\xfb\xc6\xe7\x02\xf2\x1f\x7c\x36\x93\x58\xf9\x23\x73\x2d\x13\xff\xd8\xfe\xba\x97\x8e\x21\xcc\x9b\xd0\x3d\xf3\xad\x12\xc1\x02\x0b\x45\x42\x44\xf3\xeb\x9a\xcc\xd6\xeb\x1f\xc8\xcc\x58\x5b\xc1\x29\xc5\x22\x48\x78\x84\x69\xa0\xcd\x45\x26\x6f\xf1\xbd\x02\x23\x83\x8a\x90\xd3\x82\xc4\x94\xd3\xc8\x9b\xfc\x74\x7b\xfb\x2b\x18\x06\x21\xe4\x11\x86\x99\xe0\x49\xae\xd2\x97\x30\xd6\xb7\x26\xeb\x75\x47\xfb\x9b\x0d\x74\x3e\xd3\xb4\x37\x9b\xf1\xc0\xb4\x50\x4e\x8c\xa2\xd3\x03\x32\xab\xf5\xab\x83\x0d\x81\x65\xca\x99\xc4\xfb\x30\x72\x63\x78\x48\xb0\x94\x68\xde\xc1\x06\x25\x09\x51\x3b\xa8\x8c\xce\xce\xf6\xef\xf6\x38\x2d\x9c\x93\xc0\x2a\x13\x0c\x47\x80\x98\x95\x66\x00\xbf\xf1\x0c\x12\xb4\x82\x18\x2d\x30\x2c\x88\x24\x4a\x3f\x86\xaf\x9f\x3f\x82\x8a\x91\x02\x22\x81\x72\x14\x11\x36\xd7\x95\x32\x76\xc7\xf8\x92\x81\xc0\x92\x67\x22\xc4\x2f\x41\x1a\x6b\x5c\xe3\x39\x44\x0c\x94\x58\xc1\x9c\xeb\x5a\x53\x14\xde\x69\x23\xa4\x4d\xaf\xe0\x5c\x05\xf0\x7e\x66\xad\xd4\x9b\xeb\x8f\xa0\xf8\x1d\x66\xb0\x44\xd2\xd8\xed\x19\xcf\x58\x64\x2c\xb9\x6e\x43\x60\x89\x15\x10\xf
5\x12\x10\x8b\x74\x7d\xd6\x22\xb5\x24\x94\xc2\x14\x83\xc0\x11\x11\x38\xd4\x7d\xcf\x49\x49\xac\x14\x61\x73\x09\xa9\x16\xb4\xe2\x60\xb5\x14\x01\xc3\xcb\x2d\xe5\xa0\x21\xbc\xba\xf5\x10\x89\x3f\x17\x3c\x4b\x6b\xba\x0c\x30\x9e\x66\x4a\x71\x06\xeb\x35\xa0\x50\x11\xce\xc0\x33\x5d\xbd\xd5\x2d\x7a\x60\xf4\x0d\xa6\x84\x45\x3e\x52\x4a\x14\xcd\x5d\x4e\x15\x03\xfd\x9f\x1f\x69\x8f\x26\x74\xc1\xc9\x67\xc3\xa2\xee\x8e\xa9\x3c\x1e\xd8\xa6\x77\xd2\xd3\x42\xfd\x89\x27\x78\x1f\x6a\x78\x86\x32\xaa\x0c\xb9\xbf\x72\x78\x9b\x0f\xc7\x67\xce\x95\x8b\x5c\xb7\x79\x73\xd9\xba\xef\x61\xb7\xa2\x70\x90\x31\x94\xa9\x98\x0b\xf2\x3b\x8e\xfe\x98\x16\xac\x39\x8f\xdf\x84\x21\x96\x12\xde\x61\x46\xb4\x7b\xae\x2b\x51\x3a\xf9\xad\xae\xdf\x11\xc7\x56\xc1\xcd\x2c\xd3\xea\x89\xd2\x54\xf0\x54\x10\xa4\x30\xa4\x58\x24\x44\x4a\xc2\x99\xd4\x23\x93\x62\xa1\x55\xcf\x14\xc3\xf7\xa9\xd5\x6a\x3b\xf0\x2d\x75\x4d\x27\x1f\x31\x12\x0c\x12\x2e\xb0\xf6\xfe\xba\xce\x18\xe5\x68\x4c\x63\x17\x79\x39\x18\x2c\x97\xcb\xc0\xa2\x94\x80\xf0\x41\xc4\x43\x39\x98\x67\x24\xc2\x72\x80\x42\x1a\x68\xe4\xed\x81\x42\x62\xae\x91\xf6\xff\x4c\x29\x62\x77\xde\x44\xf7\x3d\xe2\x61\x96\x60\xa6\x31\x1d\x67\xe3\x01\x9a\xd4\xc9\x3f\x8f\x6a\xa0\x90\xca\x88\x48\x8d\x74\xfe\x55\x54\xe3\xfa\xa3\x84\x77\x79\x97\xdb\x03\x66\x1e\x6b\xa0\x5a\x70\x65\xc7\x8d\xc8\x02\xa2\x85\x34\x93\x0a\x8b\x00\x6e\xf5\x4d\x22\xcd\xa0\xe6\xb3\x18\xa6\x38\x46\x0b\xc2\xc5\x4b\x0d\x5e\xb5\xa1\xb4\x1a\xc5\xb5\xa6\x50\x12\x12\x45\x57\x05\x2e\x54\x31\x4e\xfe\xdd\x14\x26\x77\x55\x7f\x50\x45\x21\xc9\x1c\xa4\x08\xcb\x38\x29\xef\xad\x1f\xae\x28\x67\x7e\x4a\xd8\x5d\x20\x17\x73\x0f\x6c\x34\xea\x9d\x5c\x9c\x7a\x10\x63\x32\x8f\xd5\x95\x77\x7a\xd1\x50\xba\xc9\x58\x26\x88\xd2\xc9\x47\xdb\x48\x10\x04\xe3\x81\xbd\xf3\x94\x42\x37\xd8\xdd\xd8\xa2\x29\x3a\x04\x6c\xda\x3a\xfe\xb6\x52\x59\xc0\xe5\xbd\x24\x46\x22\x8c\xdf\x22\x71\x59\x60\xf0\x61\xf1\xeb\xcc\xb8\xbc\x83\x7d\xf6\x7a\x0d\x84\xa5\x59\x95\x33\xcf\xc6\x93\x57\x33\x42\xf5\xc8\x55\x5b\xc8\xc1\x17\x54\x2f\xfc\x84\x30\xe2\x41\x4a\x51\x88\x63\x4e\x2
3\x2c\xf2\x9a\x16\x43\xee\xe9\x42\x0b\x10\xc9\xf0\xf2\x4d\x48\xdf\x5a\xc7\x5e\xaf\xdd\x56\xce\x51\x13\x80\xec\xc3\xb1\xa1\xa4\xe3\x76\x5f\x71\x38\xd2\x66\xf4\x28\xaf\x73\xa4\xb1\x81\x86\x06\x9a\x23\xa8\x60\x04\xf3\x9b\x71\x3d\x56\x0b\x0c\x69\x46\xa9\x2f\xb4\xf6\x1d\x6d\x36\x9f\x2c\x6a\x5a\xaf\x07\x79\x93\x4d\x9c\x5b\x67\xd2\xc1\x75\x1d\x95\x5a\x29\x58\x10\x8e\x65\xe5\x81\x83\xfd\xb3\xce\x08\xad\x67\xb8\x17\x04\x2f\xe1\x26\x99\x62\x11\x7c\xc1\x14\x87\xaa\x4c\xf6\x14\x44\xf3\xe1\xb7\x97\x7b\x0e\xff\x41\xe3\xec\xe2\x38\x46\xf2\xe6\x3e\x45\x2c\xc2\x11\xf4\x73\x3d\x82\x98\x44\x11\x66\xfe\xbd\x2c\x7e\xc9\xe4\x70\x49\x54\xc0\x63\x0f\x48\x34\x9a\x70\xb9\x1d\x71\x2d\xac\x08\x33\x89\xa3\x2a\x86\x34\xbf\x53\x41\x12\x24\x56\x39\xf2\xcc\x01\xa9\xe2\xf3\x39\xc5\xd7\x45\x25\x8d\x35\x2d\x9b\x87\x42\xcc\xa6\xdc\x9e\x0a\x57\xec\x63\xba\x9c\x22\x2b\x93\x02\x70\xb9\xf5\x10\x97\xa5\x8b\x80\x4b\xc5\x53\x6d\xe5\xb4\x08\x5e\xf4\x8d\xef\x71\xe1\x61\x4a\x07\x73\x51\xdc\xc9\x7d\xd0\xb0\x78\x90\x5f\x1f\xd7\x47\xbf\xf0\xda\x3f\x78\x93\x9a\x37\xe2\xa9\x3f\x15\x88\x45\x45\x82\x44\x3b\xe2\x2e\x61\x3b\xf5\x2e\xef\xc5\xc9\xc3\xfa\x55\xb5\x3a\x12\x8b\x05\x09\x71\xdb\xf2\x14\x06\xa7\x94\xdb\xd1\x66\xf3\x25\x2f\xec\xb4\x31\xd5\x14\xcf\x33\x74\x9c\xf1\x68\xcf\x5e\x7f\xd2\x25\xff\x08\x5d\xbe\x5b\xec\xd5\xdf\x0f\x78\x35\xf8\xa6\x0d\xdf\xa3\xfb\xdc\xd0\xdb\x2d\x76\x1a\x36\x78\x18\xed\xcd\x83\xd3\x4b\xb9\xb8\xe8\x72\x45\x0f\xea\xff\xab\x1d\xfd\x3f\xdd\x35\x0f\xd7\x6b\xb7\x9d\xc8\x2d\x82\x66\x24\x46\xf2\x47\x44\x28\x61\xf3\xeb\x18\x87\x77\xd2\x98\xd2\x25\x12\x8c\xb0\xb9\xf9\x2d\x33\x13\x51\x7a\x55\xab\xea\x59\xb3\xea\x6d\x36\x13\x18\xcb\x14\xb1\xa2\x75\x4c\x49\xea\xf3\x05\x16\x33\xaa\x8d\xd6\x7a\x6d\x52\x44\x9b\x4d\xbd\x54\x88\x04\x56\xda\x0a\xe8\x9b\xc5\x1f\x30\xe6\xa0\x95\xd4\x22\xf2\x9d\xe0\x69\xc4\x97\xec\x1b\x91\x64\x4a\x71\xd3\xbf\x9b\xc0\xc4\x34\x1b\xe5\x05\xfd\x04\xb3\xac\xd4\x01\x0f\xcc\xea\xc4\x95\x17\x11\x99\x52\xb4\xba\x9c\x52\x1e\xde\xfd\xc5\x05\x4a\x30\x0a\x63\x88\x42\x1d\x7e\x44\xa1\x6c\x10\x3
2\xc4\x28\xa9\x08\x21\x26\x11\xd6\x9d\xd3\x62\x70\x1b\x97\x28\xdc\x6c\xd6\x6b\xfb\xff\x56\x29\xc6\x03\x4a\xda\xd4\x07\x9a\x7a\x0b\xbc\x64\xd4\x95\x42\x3b\x44\xad\xda\xe6\x7c\xb4\x43\xad\x9a\x53\x7b\xd8\x63\x46\x6d\x2e\x6b\xd7\xd4\x78\x05\x24\xe4\xec\xa8\x0e\x52\xe4\x62\x0e\xf7\x09\x65\xf2\xaa\x58\xcc\xd0\xe1\xdd\xf2\x24\xe0\x62\x3e\x18\x0d\x87\xc3\x81\x89\x39\x8c\x9b\xd4\xd5\x35\x47\x73\x0f\x34\x7e\x7a\xcb\xef\xaf\xbc\x21\x0c\xe1\x64\x04\xa7\xc3\x26\xb0\x48\x91\x8a\x21\xba\xf2\x7e\x3e\x3e\x85\x21\xf5\x8f\x83\x93\xe3\x13\x38\x0d\xfd\x63\x08\x4e\xfc\xe3\xe0\xf5\xf9\x59\xf0\xea\xe2\xc2\x1f\x05\x17\xe7\x67\x70\x1c\x1c\x5f\x5c\x50\xff\x24\x38\x1f\xf9\xc7\xfa\x8e\x3f\x0a\xce\x2f\xc0\xfc\x67\xae\x41\x3f\x0a\xfd\xe0\x2c\x78\xed\x07\x17\x17\xf9\x5d\xdf\xd4\x03\xd3\xc6\xc7\x21\x1c\x9f\x2e\x4e\xe9\x29\x68\x52\xa7\x61\x70\x02\xc7\x10\xbc\xba\x38\x07\x43\xcc\x90\x38\xb7\x45\x75\x6f\xce\x2f\x74\x93\xc7\x39\x8d\x51\x7e\x6d\xa9\x9f\x86\xc1\xeb\x40\xd7\x78\x3d\x3c\x0b\x2e\x4c\xad\xd7\xc3\xa2\x93\xc7\xc1\xe8\x02\x4e\xe3\x53\x6a\xc8\xf8\xa7\xe1\xb1\x1f\x9c\xc0\x28\x18\x0e\x5f\xf9\x9a\x23\x53\xf8\x95\xed\xd9\xc7\xd1\x2b\x18\x5d\x04\x67\xaf\x4f\xe9\x28\xb8\x38\x3e\x31\x5c\x69\xe6\x87\xaf\x0c\xaf\x61\x70\xe6\x07\xaf\x35\x09\x73\x33\x27\xe1\x1b\x72\x1f\x4f\x46\x70\x7c\xb1\xf0\x4f\xa9\x7f\x6a\xa4\xa7\xd9\x3f\xf1\x8f\x35\x91\x73\x2b\x40\x43\xe4\xdc\x8a\x90\x6a\x96\xac\x00\x0d\x91\x51\x71\x69\x25\x15\xfa\x5a\x70\x67\x96\x8c\x91\xa0\xa5\x92\xf7\xf3\xf8\x02\x86\xb1\x7f\xfa\x7b\x32\x82\xd7\xe1\x49\xf0\x1a\x86\x70\x0e\x27\xc1\x31\x9c\xc3\xb9\xf4\xcd\x0f\xff\x5c\xff\xd3\xbf\x7d\xfd\xdb\xfc\xd5\x77\x7e\xf7\x06\x75\xf4\x26\x17\xf3\xc6\x6c\xe9\x34\xc4\xad\x8b\xea\x4c\xfa\xce\xeb\x1f\x77\x8b\x81\x8c\x4b\x0c\x77\x40\xec\x6f\xed\x3f\x9d\xfb\x67\x90\xa2\x28\xc2\x91\xc5\xc0\x7e\x42\xa2\x88\xe2\x6a\xbc\x1a\x9f\x96\xa9\x1e\x81\x51\x14\x8a\x2c\x99\x4a\x6f\x52\x42\x31\x6f\x6b\xb6\x8e\xb4\x4c\x6e\xf9\x07\xbc\x3a\x82\xb9\x86\x63\xbf\x22\x81\x99\xfa\x80\x57\x60\xec\x58\x5a\x5c\x6a\x43\x85\x26\xb0\xab\x8d\xb4\x5
a\xfd\xff\xeb\x2a\xe3\x41\x7c\xba\x63\x1d\xea\xfb\x8a\xe1\x45\xdb\x8a\x13\x85\x13\x6d\xc7\x8d\xff\x69\xae\x7d\x54\x4c\x98\x2e\x17\x58\x6e\x3e\xf3\x4c\x61\x7b\x43\x73\xa3\xd0\xfc\x93\xc5\xe4\x64\x91\x2f\x23\xcf\x10\x95\xb8\xe8\x1c\x25\x52\xd9\xc0\xc6\x37\xc4\xcc\x75\x19\x9d\xf8\xba\x4d\xaf\x49\xb8\xc4\xef\x2d\xb7\x6c\xc8\x12\xf9\xa3\x89\xdf\x2f\xa7\x73\x7f\x2e\xb4\xab\x9a\xfb\xd4\xb0\xaa\xaf\xe0\xd2\x90\x98\x22\xe1\x9b\x8c\x34\x67\x0a\x51\xed\x7b\x5a\x21\x6e\x53\x8e\x5a\x0d\x5b\xfe\x4e\x8b\xc1\x50\xbd\xc3\xab\xbf\x13\x15\xf3\x4c\x59\x4d\x68\xf7\xb9\xdd\x7e\x6d\x6a\xd5\xa7\x5c\xcd\x8d\xd5\xfd\x53\xb5\x4f\x53\x2e\x22\x2c\x7c\x8a\x67\xaa\x37\x9e\xec\x6e\x61\x61\xf1\x80\xae\x56\xfc\xac\xc5\xa1\xe3\x58\xec\x6e\xa4\xa9\x62\xe7\x50\xe9\x58\x47\x5a\x2d\x57\x42\x5b\xb0\x33\xf0\x4d\x11\xc3\xd4\x95\x1f\x71\x46\x6f\xa6\xb4\x1e\x5b\x20\x32\x4f\x9c\xe9\xd1\xe7\x02\xb1\x39\xae\xeb\x81\x89\xe0\x76\x8e\xb9\x6d\x30\xc6\xd5\xb4\x63\xad\xec\xd6\x64\xd8\xa2\x66\x47\x88\xa3\x20\xc0\xb5\xc0\x48\x61\xf8\x80\x57\xed\x56\x6a\x53\xdd\x25\xec\xee\xbe\x4d\x79\xb4\x02\xfb\x73\xc6\x45\xe2\xea\xa3\x59\x6e\xa8\x24\x12\xbc\x56\xb3\xcd\xa6\x7b\xb2\x4e\x79\xe1\x74\xb2\x5e\x9b\x25\xc6\x9f\xed\x5a\xa7\x36\x73\xa9\xab\xac\x9b\x0f\xe8\x1d\xc6\x2d\x79\x60\x78\xf9\x01\xaf\xf4\xec\xfa\x86\x28\x89\x2e\x17\xfa\x7f\x1b\x7d\x3b\xfb\x55\x61\xc2\x24\x04\x7b\xb9\x80\x3a\xa0\xae\x54\xf0\x51\x14\x71\xe6\x35\x0d\xb9\x81\xd8\x1d\x4d\x95\x29\x48\x9b\x77\xca\x3b\xae\x6d\xa0\x23\xf7\xe4\x81\xc8\xf7\x7e\x5c\x29\x91\xe1\xb6\x85\xab\x09\x6f\x67\xcf\x63\x4c\x53\xdf\x20\x72\x6f\x72\xcb\x21\xb4\xaa\x86\x60\x66\x0c\xe1\x4b\xc0\x76\x81\x15\xee\xf0\x0a\x96\x44\xc5\xf9\x62\xf4\xa0\x5c\x62\xee\x62\xac\x7b\xf4\xca\x3c\xa7\xe6\xb2\x30\xb9\x6e\x3e\xc6\xe9\xe4\x13\xcf\x77\xf7\x30\x8c\x23\x1c\xc1\x8c\x0b\x60\x58\x2a\x1c\xe9\x3e\xc9\xe6\x3a\xc6\x96\x08\xa6\xb2\x19\xab\xe4\x8d\x1e\xa0\x3d\x46\x69\x90\xc2\x7f\x93\x9c\x5d\x16\x17\xdb\xbe\x1b\xcd\x32\xcf\xf2\x28\xed\xd2\xee\xb5\xe8\xd4\xb2\xf5\x1a\x34\x68\x41\x02\xa3\xfa\x68\x9b\xc8\xdb\x39\xd
e\x5d\x03\xdc\x35\x8a\x79\x4b\x88\xc1\x14\x03\x62\x2b\x93\xac\x44\xca\x2c\x95\x53\xcc\xe6\x2a\xde\x39\x68\xae\x7e\x37\x02\x9f\x4a\x9d\xf6\x22\xb4\xd5\xa2\x0f\x78\x65\x83\xd6\xad\xa0\x8b\x95\xad\xab\x52\x82\xef\x99\x91\x6a\xef\x6a\x75\x5d\xdc\xd5\xa0\xb8\xb5\x8e\x6d\x4d\xa5\x7b\xb5\xbc\xe0\xe4\x87\x8c\x51\x2c\xe5\x9e\x1a\x48\xd1\x14\xd3\xfa\xc0\xe8\x38\x7d\xca\xef\xf5\x1c\xaf\xae\x1d\x14\xf7\xc1\xfc\xd8\x32\x59\xd5\x21\xd8\x6c\xbe\x15\x5a\xf4\xb7\x2f\xbf\x7c\x1a\x0f\x4c\xfb\x1d\x22\xb7\x1d\x75\xf7\xab\x2d\xf5\x08\x53\xac\xb0\x65\xc6\xeb\x5d\xfd\xaf\xa4\x73\x2b\xde\xae\x91\x77\xb0\x9b\x11\x80\xc8\xcf\x9c\xab\x4b\x0b\x0c\x8c\x8c\xdf\x19\x3a\xb9\x85\xe8\x16\xf5\x78\xa0\xa5\xd5\xe1\x9c\x7a\x6e\x3d\xc3\x62\xe0\xdd\x62\x80\x23\xa2\xfe\x2d\x03\x82\x67\x08\x07\x6a\x93\xad\x16\x0d\x68\x94\x68\xf6\xde\xfc\x27\x20\x78\xca\x80\xa0\x76\xfb\x4f\x14\x0f\x80\x54\x24\xbc\x5b\xf9\x32\x14\x9c\xd2\x3d\xa3\x83\xe7\x09\x0f\xe6\x02\x63\x66\x1e\x6a\xd3\xbf\x5f\xc0\xb0\x7f\xb8\xa0\xcd\x89\x2b\x97\x5c\x89\x1c\xb6\xb4\xa9\xf9\xe3\xb9\x20\x48\x9e\x0f\x0e\x8c\x31\x71\x3c\xd5\xc0\xcc\x96\x28\x5a\x73\x25\x61\xcd\x66\x82\xed\x8e\xdb\xc2\x97\x38\x14\xfa\xc3\xcd\x6f\xf0\xf1\x97\xeb\x0f\x37\xef\x1c\xee\xc1\xee\x49\x70\xf4\xa2\xb9\x4d\x11\x9c\x81\x8e\x0b\x60\x1e\x1e\xe4\xec\x1d\xbb\x34\x83\x96\xdd\x53\xd2\x1d\x3f\xb5\xe4\xb0\x0f\x1a\xb5\x63\xe2\x06\xa3\xc5\x78\x1d\x88\x45\xdb\x48\xb4\x24\x92\xe1\x77\x58\xa3\xfb\xc8\x1d\x7f\x94\x20\x6e\xb7\xa6\xb8\xe1\xe4\x16\xbb\x14\xd0\x25\x4b\xa3\x1d\x80\xb1\x9c\x76\x8d\x02\x55\x5c\xd3\x8d\x65\x6a\x6b\x28\x1d\xad\x5b\x1e\x26\x5f\x4d\x57\x7a\x60\x4d\xab\xf3\x21\x62\x21\xa6\x37\x1a\x50\x3c\x4d\xef\x0b\x80\xab\xf1\xad\xa1\xd5\xd3\xbb\x47\xc0\xd5\xb6\x92\xed\x8d\x56\xdb\x42\xb1\x60\xf4\xb1\x23\x7a\x08\x52\xdd\x67\x70\x73\xe8\x7a\x87\x57\x5d\x22\x74\xa1\xd6\xae\x2d\x2c\x75\xa0\xb3\xcb\x74\x8e\xe3\xb3\x89\x7e\x00\x5f\xb0\xd9\xd2\x39\x1e\xc4\x67\x0d\xf3\xd5\x5a\x68\x0e\x64\xcc\x97\x47\x79\xbb\xd2\xd6\x0b\x3e\xf1\x08\x3f\x25\x18\x2a\x00\x41\x0d\xea\xb8\x90\xce\x23\x70\x0e\xe
4\xcc\x68\x1e\x8a\xdf\xae\x00\xec\x00\x5f\xa3\x9b\xad\x8a\xc8\x69\x8f\x9c\x3e\xe7\x20\x38\xe5\x5c\xff\xab\x5d\x7c\xef\x68\x45\x8f\xdf\xe0\x3f\xfb\x17\x9f\x61\xff\x62\xb1\x70\xfc\xd8\x3d\x40\xaf\xec\xbe\x1f\x13\x22\x5d\x3a\x62\x24\x20\xf2\x4b\xcc\x97\x84\xcd\xdf\x2b\x9c\x5c\x6e\xe1\xb6\xf3\xbe\x4c\xea\x3e\x3c\xdf\xc2\xf6\x26\x4d\x83\x37\xc6\xda\xbe\x45\xe2\x9b\xbe\x53\x55\x4c\x6b\x90\xec\x3e\x44\x1c\x5d\xdb\x1d\x6e\xed\x04\x4e\x79\x70\xa5\xdc\x9e\xd5\x4e\xf1\xac\xd7\x3f\x84\x9c\x52\x6c\x2d\xbb\xdd\x39\xf7\x91\x48\x65\x68\xe6\x7b\xe7\xde\x12\xa6\xc7\xea\xca\x6b\x50\x2c\x87\xf9\x62\x38\x04\xc1\x97\x3f\xd9\xab\xd3\x53\x77\xb6\xc5\xb5\x8a\x9f\xdb\x40\x63\x29\x9e\xc6\xea\xd5\x46\xb5\x35\xa8\xed\x0d\x1a\x6d\xdc\x7f\x50\x20\xd8\x9e\x67\x1d\x66\xb2\x90\x89\x66\xb9\xa3\xd3\x07\x5a\xc8\xa2\x45\xeb\xf3\x73\x2c\xdb\x25\x90\x1e\x8c\x0e\x3b\x72\x85\xee\xad\xa0\xf9\xc3\xad\x32\xb9\xf4\x31\x4f\xdb\x3e\x87\x16\x1e\x8f\x86\xcf\xa8\x86\x4f\xaf\x7c\xbb\x34\x6e\x9b\x93\x6a\x74\xb0\x67\x99\x6a\x3f\x81\xe4\xbb\x4f\x64\x36\x55\x14\x1f\x6d\x36\x85\xce\xf6\xaa\x42\x85\xf3\x32\x8d\x6d\xdd\x46\x8c\x69\xda\x70\x31\x8d\x4a\xa8\xdc\x10\x6e\x48\x7a\xdb\x1d\x8f\xeb\x75\x55\xb7\x6d\xca\xac\x53\xbd\xbb\x65\xe5\x58\x57\xcb\x9f\x6c\xf7\x3b\x19\x29\x12\x46\x09\xeb\x9b\xbd\x36\x03\xa6\xe1\x1c\x10\x66\xfe\xb6\xd2\x5f\x95\xd6\x29\xa9\x9f\x19\x59\xaf\x99\xc1\x32\xae\x3d\x4b\x5b\x1a\xae\xbd\x4b\x5b\x56\xb2\xae\xfc\xf2\x23\xa6\x69\x03\x07\xb5\x96\x5b\xb6\x67\x5f\xa8\xb5\x46\xb7\x31\x16\x38\x3f\x87\x0d\x85\x0e\x81\xe2\xa0\xd5\xa8\xbe\x8a\xd3\x68\xfd\x5f\x3d\x67\x65\x92\x55\x3a\x14\xe9\x40\x59\x82\x2f\xa1\x7b\x49\xbb\xb6\x17\xa6\xd6\x93\xd6\xc5\xe3\x91\x4f\x27\x90\xb4\x1b\x16\xb7\xc8\x71\x97\xdc\x2a\x06\x63\x8b\xaa\xca\x61\x6b\x6c\x52\xd3\xbf\xed\xe2\x15\x6c\x36\xc5\x31\x44\xed\xd4\x64\xef\xc6\xdf\xce\xa1\x89\x4f\x4a\xc7\xca\xfd\x04\x89\x39\x31\xeb\xb1\x79\x18\x35\x0c\xf2\xfd\xc4\xc5\x5f\xd0\xb3\x2b\x3e\x29\x1b\xd3\xed\x96\xbf\xcf\x26\xb7\x68\x2e\x6d\xac\x56\xb2\x46\x66\xda\x05\xd4\x0e\x27\xa4\x93\xf5\xda\xde\xa
b\xa8\xb2\x63\x56\x4c\x3e\x71\x53\xb7\x56\xaa\xae\xee\xf1\xd9\xc4\xec\x1d\xae\x05\x88\x4d\x3b\x52\xee\xab\x71\x89\xbc\x16\x3c\xea\xdf\x26\x20\xda\x2f\x70\x34\xb9\xb2\x3c\x63\x66\x5e\xee\x50\xfc\x94\xca\x73\x9c\x47\x71\xf8\x2c\x43\x70\x2f\xc7\x55\x66\x55\xdd\xb0\xa9\x2f\x73\xdb\xe1\xb2\x2a\x63\xdf\xb7\xab\xc2\x5a\xd5\xad\x54\xda\x81\xb0\x05\x3e\xd5\x72\x6f\xa2\x48\x98\xd5\xb8\xae\x28\xb2\xba\x20\x6b\x89\x33\xae\x70\x61\xc2\x83\xa6\x5b\x6a\xaf\xc1\x56\xb5\xb0\xad\xd6\xe0\x70\x3e\x19\x33\x7b\x6b\xa3\xd6\x21\x17\xab\x2c\x86\x64\xe1\x75\x02\x3b\x10\x8e\xa0\x7f\xeb\x73\xf6\xc1\xcf\xae\x70\x5e\x97\x7b\x8b\x84\x25\x18\x7c\xc9\x4f\xd7\xbb\x73\x9d\xbb\x30\x6f\xee\xc0\x83\x4e\xd4\x5b\x8e\x4d\x4e\xce\xf0\xf5\xfe\x1d\x74\x8e\x0c\x1c\x9e\x48\xd8\xc1\x48\x0f\x34\x76\xe7\x44\x5a\xde\xdb\xe5\xb3\xad\xa7\xae\xba\xc1\xd6\x1e\xaf\x6a\xb5\xc3\xad\xbc\x31\x0b\x7f\x82\xe0\x76\xef\xf0\xd6\x19\xe0\xb6\x07\xf3\x59\x63\x5c\xe8\xb1\xd5\x4f\x98\xda\xab\xa8\xd9\xf3\x06\xba\x4d\x65\xeb\x0d\x76\xad\x70\x3a\xd2\x77\x95\xf6\x0e\x0f\x7a\x6d\xcb\xc0\xb2\xa4\x38\x4e\x04\x9b\x4d\x89\x30\xfa\x68\xf5\x05\xc1\xd0\x1f\x46\xec\x40\xd9\xb0\x1b\x69\xf7\x85\xc4\xcf\x1b\x14\x3f\x85\xd6\xee\xd0\xd5\xe7\x88\x8a\x1f\x15\x17\xef\xa1\xac\xa5\xb7\xda\x85\x1f\x1a\x6c\x1f\x1a\x12\x7f\xaf\xa0\x78\x87\x3e\x77\x07\xc6\x07\x87\xc6\x5b\x9c\x92\xcf\x42\xb3\x43\x24\x9f\x90\xfd\xd3\xbf\x1d\x25\xcb\x3a\xa6\xef\x0f\x98\x77\x85\xcc\xdd\x41\xf3\xce\x09\xbd\xc7\x74\x6e\xed\xa4\x73\xec\x55\xec\x0f\x9f\x6d\x12\xc1\x19\x3b\x77\x1c\x93\x7d\x92\xe0\xf9\x4f\x10\xf7\x6a\x49\x3e\x49\xd0\x6b\xa1\xd6\xc1\x11\x2f\xeb\x3e\x34\x7a\x58\xb8\x5b\xe9\x54\x1e\xf7\x1a\x2b\x6d\x4e\x02\x1a\xeb\xb3\x7d\x90\x1b\xa6\x36\x7e\xee\x8b\x8a\x0b\x27\xea\x0c\x52\x2b\xf3\xd9\x92\xa8\xb8\x5c\x67\xbc\xda\xc8\x29\x36\x26\xf3\xf7\x40\x44\x10\xf1\x4c\x0f\x98\xb1\x48\xdd\xef\x55\x70\xc0\x1b\xa8\xef\xe5\xd9\xb5\x91\xc7\x09\x6f\x1c\xf6\xc9\x19\x70\xee\x85\x68\xb6\x8d\x95\x2e\xe5\x72\x7b\xef\x57\x2e\x5c\x1b\xc6\x1c\x2e\xc7\xc5\x88\xd3\x82\x83\xcc\xa6\x9d\xc7\x35\x15\x9a\x5
7\xec\x76\x70\x5b\xcf\x8b\x94\x0d\x53\x62\x13\x24\x87\x9c\xbe\xac\xb0\x7a\x8b\xe6\xc5\x6a\x45\x0f\xa5\xba\xd5\xee\x8f\x9f\x2a\xba\x6c\x41\x84\x53\x93\xcb\x08\xda\xea\xf1\x36\x84\x76\x5a\xb4\xf6\x9e\xb2\xf5\xda\xe6\x4f\x9a\xc1\x71\xb7\x06\x7e\x97\xec\x46\x4f\x04\x5d\xe2\x91\x5a\xf0\xfc\x90\xac\x46\x3d\x46\x7e\x60\x42\xc3\xb9\x61\xab\xc5\xf6\xd9\xe4\x13\x57\xb8\x96\x86\xcb\x1f\xa5\xdb\x3c\x80\x29\x02\x8e\xe3\x24\xba\xfe\x2f\x99\x4a\x33\xe5\x6a\x40\xe0\x92\x17\x5b\xc8\xb4\x20\xb0\xa3\xe3\x6e\x3e\x9c\x3a\x55\xdd\xe9\xd1\x52\x2d\x32\x2b\xf6\x3c\x34\x2c\x62\x63\x8b\x59\xd5\x56\xe6\x86\xd5\xee\x94\x30\x13\xae\xdd\x80\xc3\x96\x75\x5a\xc5\x9a\x41\xec\x36\x67\x0f\xdb\x05\xb2\x87\x41\xdc\x6f\x07\x88\x63\x9b\x3b\x61\xc5\x16\xa5\x5c\xbc\x5e\x51\x3d\x30\x49\xa0\xbd\xde\xb1\x57\xdb\x59\x64\xee\x6b\xd6\xdc\x7b\x8c\x4c\x82\xe2\x7d\x49\xb5\xe3\x55\x7c\xee\x43\x57\xfb\xdb\x53\x87\xcd\x29\xd8\xb2\x56\xa7\xe3\x78\x42\x0b\x09\x9b\xea\x5d\xf8\xb7\xcb\xd0\xb6\x0f\xba\x17\xd4\x7f\xc6\x0a\xb9\xc7\xa7\xfd\x7a\x96\xfa\x34\xd8\xb5\x26\x04\xc6\xe0\x78\x93\x4f\xbc\x54\xe6\x1d\x59\x71\xac\x96\x5c\xdc\xc1\x2d\x4f\xf8\x5c\xa0\x34\x5e\x35\x26\x16\xa8\xf2\xc9\x5f\xf5\xff\x95\x6b\x38\x39\x79\x55\xb1\xdb\x9d\x5d\x89\xf0\x65\x6e\xf3\x34\x3c\xdc\xd6\xf7\x35\x32\xf3\x09\x9b\xf1\xca\x7b\x13\xca\xce\x76\x34\xf7\x33\x61\x24\xc9\x92\xcb\x7a\xd7\x82\x84\x68\x49\x26\x72\x8f\x06\x70\x44\x10\x6b\xd5\x37\x77\xf7\x6d\x02\xdd\x3b\xfb\x80\xee\xab\x0d\x3c\xe4\x5d\x5f\xf4\xcf\x9d\x4a\x6c\x4d\xce\xe7\x49\x10\x36\xdf\x7a\x92\x43\xe2\xf7\xef\x9e\x30\x2f\xf8\x3d\xb6\xfd\x35\xdb\xe9\x49\xf3\x19\x63\x4e\x8d\xe1\x37\xff\x19\xb3\xed\x6a\xce\x4d\xa7\x27\x02\x6f\x46\xdf\xd5\x27\x8e\xf3\x80\xfd\x51\xb6\x79\x1f\xe3\xff\x49\x90\x5d\xbc\x79\xa5\xa2\xe4\x5b\x5a\x7f\xc0\x00\xdc\x65\xeb\x1f\x70\xc6\xfb\x90\xde\xed\x3e\xcc\xf1\xe4\xa7\xbd\x0f\x3b\xef\x7d\xd0\x89\x6f\x80\xfc\xb5\x7a\xce\x59\xe1\xca\xc0\x75\x1d\xb7\x7d\xc8\xb1\xef\xee\x53\x11\x3d\x47\xb2\xf7\x38\xea\xd0\xd7\x72\x47\x1e\xae\x7d\x34\xfa\x4d\x48\x83\xca\x9e\xa3\xc6\xd9\x84\x9
e\x6d\x72\x1d\xc7\x65\xbf\x60\x65\x0e\x38\xf3\x54\xdb\x0c\x44\xcd\xa7\x20\xcc\x29\x63\x7d\xf7\xcd\xf5\xc7\xa0\xef\xf0\x76\xb7\x40\x1e\xc0\x6a\xf7\x9b\x09\xb5\x27\x96\x75\x19\xdc\xae\x52\xa7\x0c\x76\xbd\x9b\x70\x2f\xa1\xdc\xc6\xd8\x78\x7f\xe0\x33\xfb\xb2\x62\xfb\xc2\xd9\xe7\x12\x85\x3d\xc5\x30\xf9\x9c\x51\x1d\xf3\x75\x9e\x90\x85\xae\x03\xd5\x5a\x3e\xa6\xf2\xf7\x52\x92\x1f\xb9\x28\x5e\x8c\x6b\x4f\x53\x6b\x10\xc0\x19\x08\x4d\xe4\xa5\x7d\x3b\xfa\x13\xbe\x31\x37\xb0\x87\x38\x1f\x28\xfb\xae\xf3\xd9\x6f\x42\xda\x7f\x4c\xb8\x7a\x72\x66\xbf\xc3\xd5\xee\x33\xbf\x9d\x8b\xe2\xfd\x6f\x06\x6d\x38\x98\xad\xc3\x7d\xd4\x1b\x6a\xe9\x81\x5b\xe8\x1f\x93\x04\x36\x18\xf9\xe0\x1c\xb0\xc6\x1d\x7b\xa6\x80\x5d\x63\xdf\xef\x17\x9f\xd4\x27\x1e\x76\xa0\x71\x2f\x5f\x58\xc1\x8a\x9e\x3d\x9a\xa5\xe5\xe3\xe5\xf9\x38\x17\x72\x7c\xee\x93\x82\x1d\x91\xff\xbe\x47\xff\xf6\xb3\x8a\x0d\x3f\x98\x67\xf4\x0f\x73\x83\x4f\xe0\x02\xf7\x41\x1d\x3b\x39\xdb\xcf\xed\x59\x96\x1f\xeb\xf5\xbe\x9b\xc7\xfb\x0e\x9c\xef\xe9\xe5\xba\x0e\x6a\x1e\xea\xe0\xfe\x25\x9d\x5b\xa7\x98\xdb\x4e\xcd\x9e\x21\x3d\xd8\xa9\xed\x7f\xde\x73\xeb\x3e\x29\x67\x87\x7e\x60\xe3\x5a\xd7\x39\x88\x8a\xc4\xea\x40\x1a\x5f\x25\xee\xfb\x5a\x88\x55\xa6\x1f\xa0\x72\x50\xf1\x13\x57\x6f\x58\xc7\xc9\xbb\xae\x17\x8c\xf4\x8a\x77\xe7\xeb\x44\x2a\x79\xd7\xca\xcb\x44\xfa\x3a\xec\x7c\xf1\xcc\x21\xc7\x34\xf3\x8b\x58\x3c\x0c\x32\x10\x16\xe1\x7b\x07\x68\xe8\x3d\x5f\x57\x79\x79\x67\xd7\xcb\x3d\xeb\xa8\xa2\xe3\x74\x9d\x5d\x6a\x35\x46\x11\x99\xc3\x81\xf6\x79\x63\x3f\x72\xfb\x8d\x7b\x3b\xd6\x59\x8f\x6c\xe9\x66\x3a\xa4\xe9\x13\xdb\xdb\x80\x65\xcc\x45\x7b\x4d\xd2\xe1\x4c\x91\xd8\x63\xdd\xf4\xd0\x37\xa6\xed\xb5\xfa\x05\xe5\x8b\x34\x76\x9b\xc1\xea\x82\x56\xb7\x09\xaa\x2f\x62\x39\x55\xcd\x95\x0d\x6f\x02\xb8\x7a\xee\xfd\x51\x9b\xf5\xed\x5b\x5f\x9f\x58\x29\x2b\x4a\x78\xa2\x31\x82\xa1\x59\xdb\x13\x90\x6a\xb7\x29\x2b\x9f\x54\x42\x94\xf2\xa5\xf9\xfa\x88\xe2\xda\x8b\xcf\xc8\x3c\x13\xd8\x7e\xd4\xa9\xf8\xec\x5c\x81\x2b\xf2\x8f\x9a\xfc\x1d\x4f\xe1\xeb\xfb\x00\x6e\x16\x58\xac\x54\x4c\xd
8\x1c\x88\x04\x89\x16\xf6\x8b\x4d\x94\x87\x88\x4a\xc5\x05\x9a\xe3\x97\x39\x5d\xc4\x22\x90\x31\xcf\x68\x04\x29\x16\x92\x48\xed\x9e\x04\xcf\xe6\xb1\x75\x56\xd2\x94\x28\xe8\x65\x12\xcd\x71\x50\x4b\x94\x97\xdc\x98\x54\x1f\xca\x14\xd7\x5e\x2f\x44\x94\xae\x8a\x16\x71\x04\x59\xca\xcd\x6c\x22\x33\x12\x1a\x4f\x65\xbe\x69\xc5\x38\x24\x88\x65\x88\x9a\x3e\xea\xbe\x16\xef\x63\xab\xd1\x88\xcf\x26\x95\x8f\x37\x6d\x67\xeb\x4e\x7c\xd0\xf3\x3d\x0a\x6b\xb5\xed\x87\x83\x3a\x31\x50\xc2\x19\xaf\x7f\x8c\xc2\x33\x35\xaa\x78\xa8\x1f\x05\x99\xf6\xf5\x18\x60\xa6\xec\x3b\xdf\x34\x87\x58\x6a\xb9\xda\x6f\xce\xd8\x77\xc0\xfd\x97\xb2\xcc\x99\x0b\x48\x91\x40\x09\xae\x7d\x9e\x26\x93\x76\x0c\x8b\xfe\xe5\x68\xb2\xd0\x80\xb2\x3b\x4b\xab\x02\xf5\x09\x58\x37\xde\xbb\xa4\xd6\xf5\xe5\xad\x87\x7c\x74\xeb\x9d\x75\xa9\xb2\xed\x94\xdc\x58\x40\x3e\x00\x0b\xc8\x96\xcb\xdb\xb1\x9f\x69\x6b\x1b\xf4\x83\xc1\x00\x6e\xec\xb7\x7c\xfe\x99\x61\xb1\xb2\xc2\x97\x2f\x21\xc9\xa4\x82\x29\x36\x1f\x66\xc3\x11\x4c\xf1\x4c\xe3\x3a\xac\xb1\x35\x10\xf9\x02\x2c\xcc\xbe\xf9\xf4\x0d\xae\x60\xfd\xe3\xcd\x9b\xdb\xaf\x9f\x6f\xbe\x5c\xc2\xfa\xc8\x34\xe3\xdb\x66\x7c\x86\x97\x47\x97\xa0\x44\x86\x37\x9b\xbf\x34\xba\x51\x18\xa8\xea\x69\x6f\x94\xa6\x34\x9f\x22\xe6\x03\xa5\xff\x90\xc6\x9a\xda\x3a\xe3\x81\xfd\xc2\xe5\x78\x60\xbf\xe4\xfa\xbf\x01\x00\x00\xff\xff\xeb\x13\x0c\x4a\xe1\x75\x00\x00") func web_uiV1IndexHtmlBytes() ([]byte, error) { return bindataRead( @@ -147,7 +147,7 @@ func web_uiV1IndexHtml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/index.html", size: 31414, mode: os.FileMode(420), modTime: time.Unix(1529532756, 0)} + info := bindataFileInfo{name: "web_ui/v1/index.html", size: 30177, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -167,7 +167,7 @@ func web_uiV1StaticAndroidChrome192x192Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/android-chrome-192x192.png", size: 18250, mode: 
os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/android-chrome-192x192.png", size: 18250, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -187,7 +187,7 @@ func web_uiV1StaticAndroidChrome512x512Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/android-chrome-512x512.png", size: 58433, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/android-chrome-512x512.png", size: 58433, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -207,7 +207,7 @@ func web_uiV1StaticAppleTouchIcon114x114Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-114x114.png", size: 15576, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-114x114.png", size: 15576, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -227,7 +227,7 @@ func web_uiV1StaticAppleTouchIcon120x120Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-120x120.png", size: 16251, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-120x120.png", size: 16251, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -247,7 +247,7 @@ func web_uiV1StaticAppleTouchIcon144x144Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-144x144.png", size: 20027, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-144x144.png", size: 20027, mode: 
os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -267,7 +267,7 @@ func web_uiV1StaticAppleTouchIcon152x152Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-152x152.png", size: 23769, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-152x152.png", size: 23769, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -287,7 +287,7 @@ func web_uiV1StaticAppleTouchIcon57x57Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-57x57.png", size: 5158, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-57x57.png", size: 5158, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -307,7 +307,7 @@ func web_uiV1StaticAppleTouchIcon60x60Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-60x60.png", size: 5522, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-60x60.png", size: 5522, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -327,7 +327,7 @@ func web_uiV1StaticAppleTouchIcon72x72Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-72x72.png", size: 7289, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-72x72.png", size: 7289, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -347,7 +347,7 @@ func web_uiV1StaticAppleTouchIcon76x76Png() 
(*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-76x76.png", size: 8031, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon-76x76.png", size: 8031, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -367,7 +367,7 @@ func web_uiV1StaticAppleTouchIconPng() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon.png", size: 8285, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/apple-touch-icon.png", size: 8285, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -387,7 +387,7 @@ func web_uiV1StaticApplicationMinJs() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/application.min.js", size: 771162, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/application.min.js", size: 771162, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -407,7 +407,7 @@ func web_uiV1StaticBaseCss() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/base.css", size: 42550, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/base.css", size: 42550, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -427,7 +427,7 @@ func web_uiV1StaticBaseCssMap() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/base.css.map", size: 20314, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/base.css.map", size: 20314, mode: os.FileMode(420), 
modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -447,7 +447,7 @@ func web_uiV1StaticBootstrapMinCss() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/bootstrap.min.css", size: 90287, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/bootstrap.min.css", size: 90287, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -467,7 +467,7 @@ func web_uiV1StaticConsulLogoPng() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/consul-logo.png", size: 58433, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/consul-logo.png", size: 58433, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -487,7 +487,7 @@ func web_uiV1StaticFavicon128Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/favicon-128.png", size: 11154, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/favicon-128.png", size: 11154, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -507,7 +507,7 @@ func web_uiV1StaticFavicon16x16Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/favicon-16x16.png", size: 821, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/favicon-16x16.png", size: 821, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -527,7 +527,7 @@ func web_uiV1StaticFavicon196x196Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/favicon-196x196.png", size: 37174, mode: 
os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/favicon-196x196.png", size: 37174, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -547,7 +547,7 @@ func web_uiV1StaticFavicon32x32Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/favicon-32x32.png", size: 2075, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/favicon-32x32.png", size: 2075, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -567,7 +567,7 @@ func web_uiV1StaticFavicon96x96Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/favicon-96x96.png", size: 10171, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/favicon-96x96.png", size: 10171, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -587,7 +587,7 @@ func web_uiV1StaticFaviconIco() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/favicon.ico", size: 34494, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/favicon.ico", size: 34494, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -607,7 +607,7 @@ func web_uiV1StaticFaviconPng() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/favicon.png", size: 821, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/favicon.png", size: 821, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -627,7 +627,7 @@ func web_uiV1StaticLoadingCylonPinkSvg() 
(*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/loading-cylon-pink.svg", size: 983, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/loading-cylon-pink.svg", size: 983, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -647,7 +647,7 @@ func web_uiV1StaticMstile144x144Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/mstile-144x144.png", size: 20027, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/mstile-144x144.png", size: 20027, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -667,7 +667,7 @@ func web_uiV1StaticMstile150x150Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/mstile-150x150.png", size: 64646, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/mstile-150x150.png", size: 64646, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -687,7 +687,7 @@ func web_uiV1StaticMstile310x150Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/mstile-310x150.png", size: 112362, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/mstile-310x150.png", size: 112362, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -707,7 +707,7 @@ func web_uiV1StaticMstile310x310Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/mstile-310x310.png", size: 201893, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/mstile-310x310.png", size: 
201893, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -727,7 +727,7 @@ func web_uiV1StaticMstile70x70Png() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/mstile-70x70.png", size: 11154, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/mstile-70x70.png", size: 11154, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -747,7 +747,7 @@ func web_uiV1StaticSafariPinnedTabSvg() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/safari-pinned-tab.svg", size: 3798, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/safari-pinned-tab.svg", size: 3798, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -767,7 +767,7 @@ func web_uiV1StaticTadaPng() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v1/static/tada.png", size: 1767, mode: os.FileMode(420), modTime: time.Unix(1529532755, 0)} + info := bindataFileInfo{name: "web_ui/v1/static/tada.png", size: 1767, mode: os.FileMode(420), modTime: time.Unix(1529955838, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -787,7 +787,7 @@ func web_uiV2AssetsAndroidChrome192x192501b0811835ea92d42937aaf9edfbe08Png() (*a return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/android-chrome-192x192-501b0811835ea92d42937aaf9edfbe08.png", size: 18250, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/android-chrome-192x192-501b0811835ea92d42937aaf9edfbe08.png", size: 18250, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -807,7 +807,7 @@ func 
web_uiV2AssetsAndroidChrome512x512707625c5eb04f602ade1f89a8868a329Png() (*a return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/android-chrome-512x512-707625c5eb04f602ade1f89a8868a329.png", size: 58433, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/android-chrome-512x512-707625c5eb04f602ade1f89a8868a329.png", size: 58433, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -827,7 +827,7 @@ func web_uiV2AssetsAppleTouchIcon114x11449e20f98710f64b0cae7545628a94496Png() (* return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-114x114-49e20f98710f64b0cae7545628a94496.png", size: 15576, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-114x114-49e20f98710f64b0cae7545628a94496.png", size: 15576, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -847,7 +847,7 @@ func web_uiV2AssetsAppleTouchIcon120x120C9cc4fc809a6cbff9b9c261c70309819Png() (* return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-120x120-c9cc4fc809a6cbff9b9c261c70309819.png", size: 16251, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-120x120-c9cc4fc809a6cbff9b9c261c70309819.png", size: 16251, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -867,7 +867,7 @@ func web_uiV2AssetsAppleTouchIcon144x144Ac561ffa84c7e8ce1fe68d70f1c16d1dPng() (* return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-144x144-ac561ffa84c7e8ce1fe68d70f1c16d1d.png", size: 20027, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: 
"web_ui/v2/assets/apple-touch-icon-144x144-ac561ffa84c7e8ce1fe68d70f1c16d1d.png", size: 20027, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -887,7 +887,7 @@ func web_uiV2AssetsAppleTouchIcon152x15208c9aa1c11a83650b824e3549b33a832Png() (* return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-152x152-08c9aa1c11a83650b824e3549b33a832.png", size: 23769, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-152x152-08c9aa1c11a83650b824e3549b33a832.png", size: 23769, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -907,7 +907,7 @@ func web_uiV2AssetsAppleTouchIcon57x57Ae96d6d27e61e25514af459bc8b20960Png() (*as return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-57x57-ae96d6d27e61e25514af459bc8b20960.png", size: 5158, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-57x57-ae96d6d27e61e25514af459bc8b20960.png", size: 5158, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -927,7 +927,7 @@ func web_uiV2AssetsAppleTouchIcon60x60522fca33a44f77c679561313def843b9Png() (*as return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-60x60-522fca33a44f77c679561313def843b9.png", size: 5522, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-60x60-522fca33a44f77c679561313def843b9.png", size: 5522, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -947,7 +947,7 @@ func web_uiV2AssetsAppleTouchIcon72x72Da5dd17cb4f094262b19223464fc9541Png() (*as return nil, err } - info := bindataFileInfo{name: 
"web_ui/v2/assets/apple-touch-icon-72x72-da5dd17cb4f094262b19223464fc9541.png", size: 7289, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-72x72-da5dd17cb4f094262b19223464fc9541.png", size: 7289, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -967,7 +967,7 @@ func web_uiV2AssetsAppleTouchIcon76x76C5fff53d5f3e96dbd2fe49c5cc472022Png() (*as return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-76x76-c5fff53d5f3e96dbd2fe49c5cc472022.png", size: 8031, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-76x76-c5fff53d5f3e96dbd2fe49c5cc472022.png", size: 8031, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -987,7 +987,7 @@ func web_uiV2AssetsAppleTouchIconD2b583b1104a1e6810fb3984f8f132aePng() (*asset, return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-d2b583b1104a1e6810fb3984f8f132ae.png", size: 8285, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/apple-touch-icon-d2b583b1104a1e6810fb3984f8f132ae.png", size: 8285, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1007,47 +1007,47 @@ func web_uiV2AssetsConsulLogo707625c5eb04f602ade1f89a8868a329Png() (*asset, erro return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/consul-logo-707625c5eb04f602ade1f89a8868a329.png", size: 58433, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/consul-logo-707625c5eb04f602ade1f89a8868a329.png", size: 58433, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var 
_web_uiV2AssetsConsulUi07dda31de740f1a5f8d66c166d785a89Css = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x6b\x93\xdb\xba\x91\xe8\x5f\xc1\x8e\xcb\x75\x3c\x3e\x24\xcc\xf7\x43\x8a\xa7\x92\x38\x7b\xca\x5b\xe5\xdc\x5b\x75\xb3\xf1\x97\x53\xfe\x00\x91\x90\xc4\x98\x22\x19\x92\xd2\x68\xac\xd5\xfe\xf6\x5b\x00\x08\x12\x20\xc1\x87\xc6\xe3\x9c\xa4\x76\x6b\xce\x91\x25\x3c\x1a\x8d\x46\x37\xd0\x68\x34\x1a\x9b\x3c\x7e\xd2\xa2\x3c\xc6\x97\x6d\x9e\xd5\xfa\x23\x4e\x76\xfb\x7a\xe5\x18\xc6\x15\x95\x75\x12\xa5\x58\x43\x55\x12\x63\x6d\x9b\xec\x8e\x25\xd6\xb6\x79\x5e\xe3\x52\xdb\x63\x14\x93\x7f\x76\x65\x7e\x2c\xb4\x7d\xa9\x55\x38\xaa\x93\x3c\xbb\xc4\x49\x55\xa4\xe8\x69\xb5\x49\xf3\xe8\xeb\x15\x1d\xe3\x24\xd7\xf0\x61\x83\x63\x2d\x39\xec\xb4\x7c\xf3\x37\x1c\xd5\xda\x29\x89\x71\x7e\x39\xa0\xb3\xfe\x98\xc4\xf5\x7e\x65\x1a\xc6\xeb\x2b\x5a\xed\xf3\x13\x2e\xb5\xaa\x2e\xf3\x6c\xa7\xd5\x68\x93\x62\x50\xef\x2f\x51\x9e\xe6\xe5\xea\x95\xed\x91\xbf\x2b\xc1\x54\x2b\x4a\x7c\xd9\xa0\xe8\x2b\x69\x3d\x8b\xf5\xa6\xc4\xd6\x25\x7f\x57\xda\x21\x52\xa2\x49\xb6\x02\xf2\x77\xdd\xd7\x87\x94\x24\x03\xda\xd7\x41\xed\xba\x44\x59\x55\xa0\x12\x67\xf5\x15\x81\x06\x87\xb6\x38\x2b\x13\x1d\x4b\x92\xff\x81\xfc\xb8\x6e\x13\x9c\xc6\x15\xae\xb5\x64\x5b\xa2\x03\xbe\x6c\xf2\x32\xc6\xe5\xca\xb8\xc2\xa4\xd2\xab\xe4\x1b\xd6\x03\x46\x51\xf2\x7d\x05\x7d\xb7\xc4\x87\xeb\xbb\xb7\xff\x06\x0e\x49\x96\x94\xb8\xc2\x35\x8c\xaa\x0a\x9c\x0c\x68\x40\x0b\xfc\x17\xf8\xf3\x7f\xfc\x27\xf8\x94\x44\x38\xab\x30\xf8\x2f\xb0\x4b\xea\xfd\x71\x03\xa3\xfc\xf0\xee\x6f\xbb\x7a\x7f\xa8\xde\xc9\xd5\xde\xbe\xa3\x04\xfe\xfb\x31\xaf\xb1\x46\x7b\x1c\xc7\x5a\x9c\x6a\x71\xad\xb5\x88\x35\x23\xb6\x37\xb5\xbd\xa5\xed\x6d\x6d\xef\x68\x7b\x57\xdb\x7b\x64\xb8\x28\x35\x18\xe6\x5a\x8a\x77\x38\x8b\xb5\x34\xd1\xf2\x54\x2b\x48\xa7\xb5\x1a\x9f\x6b\x54\x62\xa4\x1d\xd3\xcb\x01\x95\xbb\x24\x5b\x19\xeb\x02\xc5\x71\x92\xed\x56\xc6\x75\x00\x52\xe8\x29\x19\xcb\x75\x9f\x95\x8e\xe9\x25\x4d\xaa\x5a\xaf\xea\xa7\x14\xaf\xb2\x3c\xc3\xd7\xcd\xb1\xae\xf3\x4c\x4b\xb2\xe2\x58\x6b\x1
5\x4e\x09\x5f\xf0\x66\xdb\x36\xe9\xa8\x5d\xf4\x47\xbc\xf9\x9a\xd4\xfa\x26\x3f\x93\x36\x08\x0e\x8c\xda\x24\x65\xad\x4e\x15\x10\xf2\x8a\xf3\x5a\x3f\xe4\xdf\xf4\xbc\x3a\xeb\x2c\xfd\x90\xe7\xf5\x9e\xd4\xd8\x95\xe8\xa9\x8a\x50\x8a\xd7\xbc\x91\x5e\x01\x94\xd5\x09\x4a\x13\x54\xe1\x78\x7d\x48\xb2\x86\x61\x6d\xc3\x28\xce\x6b\xc2\xae\xdb\x34\x7f\xd4\xcf\xab\x7d\x12\xc7\x38\xeb\x52\x9e\x56\x55\x54\xe6\x69\xba\x26\x5d\xd2\x4b\x9c\xc5\xb8\x24\xe0\xf2\xa2\x4e\x0e\xc9\x37\xfc\x09\xef\x92\x4d\x92\x26\xf5\x53\xdb\x30\x2d\x49\x19\x07\xc5\x7f\x3b\x56\x35\xa3\x24\x45\x7c\x2c\xab\x1a\xc9\x51\xa6\xb6\xa2\xa3\x55\x07\x94\xa6\x22\x73\x06\xbe\x8b\x0f\xd7\xb7\xda\x0a\x6d\x89\x70\xaf\x36\x78\x9b\x97\x58\x45\xf7\x24\xdb\xe3\x32\xa9\xd7\xc3\xa4\x2b\x15\xd8\x46\x0c\x88\x50\xa5\xa8\xa8\xf0\x8a\x7f\x59\x37\x19\x55\x81\x22\xc6\x44\x75\xac\xd5\xfb\x4b\xcb\x54\x0c\x69\x94\x26\xbb\x6c\x95\xe2\x6d\xcd\xe4\x78\x92\x4b\x68\x0f\xb6\xe8\x90\xa4\x4f\xab\x3f\xa6\x49\xf6\xf5\xcf\x28\xfa\xcb\x53\x55\xe3\xc3\x2f\x79\x56\x6b\x3a\x2a\x8a\x14\xeb\x15\x4d\xd1\xee\xfe\x82\x77\x39\x06\x7f\xfd\x8f\x3b\xed\xff\xe5\x9b\xbc\xce\xb5\xff\x7b\x7e\xda\xe1\x4c\xfb\xeb\xe6\x98\xd5\x47\xed\x03\xca\x08\xd4\x34\xd5\xee\x7e\x49\x4a\x04\xfe\x82\xb2\xea\x4e\xbb\xfb\x53\x99\x27\x31\xff\xf1\x11\xa7\x27\x5c\x27\x11\x02\xff\x07\x1f\xf1\x9d\xd6\xfe\xd6\xfe\x50\x26\x28\xd5\x2a\x94\x55\x7a\x85\xcb\x64\xdb\xcd\x53\x63\x9c\x87\x8e\x75\x3e\xca\x74\x24\x4f\xec\xdd\x21\xcf\x72\x42\x39\x4c\xa9\x72\x49\x93\x0c\xeb\x7b\x26\x5e\x26\x74\xaf\x88\xcf\x75\x86\xe1\x05\xdb\xed\x3a\x3a\x96\x55\x5e\xae\x8a\x3c\xc9\x6a\x5c\x32\xca\xc6\x38\xca\x4b\x44\xe6\x68\x26\x7f\xc2\xbc\xf6\x2a\xf6\x6d\xc7\x71\x5b\x01\x87\x96\x8b\x0f\x00\x12\x9e\xd8\x97\x8a\x89\x36\xde\x90\xbf\x66\x44\x29\xb4\x35\x47\xa6\x38\xaf\x1b\xd9\x35\x21\x99\xf1\x80\x71\x4d\x0e\xbb\x4b\x93\x4d\xfa\x75\xa5\x63\xf9\x6b\xfd\x54\xe0\xf7\xd1\x1e\x47\x5f\x37\xf9\xf9\x8b\x26\x24\x96\x28\x4e\xf2\x2f\x97\x13\x26\xcb\x0f\x4a\x1b\x96\xd8\xa0\x0a\x93\x6e\x5f\xab\x02\x65\x0d\xef\xd2\xc9\x84\x73\xa4\x38\xe5\x7
0\x96\xfc\xb5\xcc\x53\xfc\x7e\x83\xb2\x0c\x97\x5f\x40\x86\x4e\xab\x6d\x52\x56\xb5\x9e\x6f\x75\xd2\x14\x38\xa6\xe4\x3f\x32\x61\xa3\xa8\x4e\x4e\x74\xae\x64\x53\xbf\xb4\x18\xfa\x86\x71\x2d\x04\x79\x68\xa5\x9c\xc9\x38\x21\x59\x9d\x1f\xa3\xbd\x38\x21\xd0\x21\xe4\x04\x35\xa1\x45\x89\xc1\x68\xb2\x7e\xdc\x27\x35\xa6\x92\x80\x57\x45\x89\xd7\x8f\x79\x19\xeb\x8f\x25\x2a\x56\x59\x5e\x1e\x50\x7a\x6d\xd7\x9d\x6e\xca\x6d\x96\xc3\xb8\x5b\x17\x7b\xf2\xb2\xee\x11\xac\xce\x8b\x2b\xfa\xb5\xc4\xe9\x7b\x32\x80\x5f\xb8\x24\x91\x09\x15\xd6\xf8\x50\xa4\xa8\xc6\x3a\x2e\xcb\xbc\x04\x71\x72\x7a\x40\x5a\x8a\x36\x38\x05\x84\xbc\xda\x01\x25\x19\x60\x4b\x3c\x80\x88\x2e\xec\x15\x40\xea\x64\x06\xb7\xa5\x0d\x25\x84\x1e\xa1\x34\xcd\x8f\x35\xe3\x0d\x9e\x75\xac\x88\xfc\x53\x19\x6e\x32\x88\x68\x28\x52\xab\x61\x62\x3f\xa1\x5d\x7e\x2f\xa2\x06\x71\x4b\x77\x9f\xd1\xc7\x7f\xe2\xf9\x66\xdd\x9f\x12\x48\x3f\x1e\x48\x4f\xe3\xe4\x04\x98\x1c\x50\xc1\xa2\x0a\xdb\x17\x40\x07\xbb\x37\x8f\x58\xee\x75\x6f\x5e\x52\x5c\xd7\xc2\x3c\x4d\x24\x5a\x58\x4b\x29\x03\xcf\xc9\x95\x36\xcc\x4f\x91\x90\x2d\xd2\xb8\xcd\x8c\xf6\x49\x1a\xf7\x5b\x87\x86\x8d\x0f\x57\xb8\xc7\x28\xad\xf7\x74\xb6\xd0\xab\x1a\xd5\xc7\x0a\xc4\xb5\x26\x26\xe3\x58\x2f\x71\x95\x1f\xcb\x08\x73\xc8\x94\x93\xbf\x13\x53\x36\xf0\x30\xca\x8b\x27\x7d\x53\x67\x44\xeb\x91\x08\x0b\x49\x29\xba\x08\x3f\xb4\x82\xd3\xcf\xcc\x77\xbb\x14\x2b\xb2\x47\x06\x45\x51\x92\xfe\x9b\x3e\xc4\xb5\x56\xc7\x00\x49\x53\x93\x47\xf4\xf4\x7f\x04\xdb\x77\xed\xfd\x5b\x72\x28\xf2\xb2\x46\x59\x7d\x6d\x4a\xea\x14\xff\x1e\x94\x8e\xd6\x74\x64\x15\xb9\xdd\xb0\x13\x45\x40\x6c\xc5\x35\x8c\xeb\xd8\xe8\x36\xaa\x79\xd3\x51\x0a\x93\xcd\x5c\x94\x0c\x6f\xdf\xef\x71\x5a\x7c\x21\x84\xe2\xfb\x88\xfd\x60\x63\xf3\x0f\x19\x42\x36\x3f\xd3\x6d\xc5\x36\x2f\x0f\xab\x63\x51\xe0\x32\x42\x15\x9e\x6c\xfe\xe1\xed\x0c\x02\xfd\x02\x53\x28\x3c\xbc\xed\x23\x41\xa7\xce\xdb\x57\x45\x4b\xd4\xef\xa1\x49\x17\x33\xa6\x9f\x31\xc5\x8c\xaf\x57\x1a\x59\x9f\x24\xed\xac\xd5\x2f\x49\x8d\x57\x64\x8d\x2b\x70\xf9\xd0\x6c\x22\x67\x86\x98\x8f\x5c\x0
3\xa3\x69\x56\xa4\x08\x10\x94\x01\xb1\x90\xc8\xa2\x8f\x49\xbd\xd7\xa3\x3c\xdb\x26\xe5\x81\xaa\x3f\xa0\xd0\x6e\x97\x18\x06\x66\x8b\x71\x4c\x6a\x81\xe2\x19\x72\x34\x3a\x73\xe0\xc3\x73\x27\x95\x91\x9a\xb7\xb2\x6b\x1f\xcc\x24\x5b\x0f\xf6\xb4\xc3\xa9\x8a\xcc\x53\xa2\xec\x0f\x94\x29\x99\xd8\x94\xb4\xad\xf6\x72\xc0\xd9\x51\x9b\x2b\x00\xa8\x1a\xcc\xd5\xad\x66\xf7\xd5\xf2\x57\xbb\x7d\x04\x4c\x0b\xcb\xab\x84\xea\xbd\x64\xe9\x48\xa2\xb5\x60\x77\xb0\x3c\xb2\x8f\x6b\xb5\x2c\x60\xc2\xd0\x70\x7c\xcf\x6c\x3f\xc3\xd3\xe3\x9a\x95\xa5\xa0\xb8\x75\x43\xd8\x19\x75\x69\x87\x4a\xdf\xa6\xf8\x2c\xa6\x91\xdf\x74\xdb\xc8\x17\x59\xc3\x38\xed\xd7\xe2\xbe\x2a\x2f\x13\x9c\xd5\x2b\xde\x3b\x29\x2f\x4e\x4a\x66\x55\x69\xf4\xc2\x35\x6f\x42\xc8\x89\xf2\xf4\x78\xc8\xd6\xea\x54\xc5\x06\x2e\xca\xb3\x1a\x67\x75\x7f\xe7\x2c\x24\x5f\x7f\x7f\xc0\x71\x82\xc0\x9b\x8e\x4e\x1e\xd9\xee\xde\x5f\x5a\xfa\x0a\x04\x33\x8a\xf3\x40\xae\x09\x47\x08\x65\x6c\xbb\x38\x5f\x3b\xa8\xed\x26\x9a\x51\x5f\x09\xd6\x72\x96\x82\xa5\x89\xc2\x88\x9a\x2e\x19\x51\xb1\xe3\x84\x34\x2b\xb3\xa5\xdd\xca\x5c\xb3\x7f\xe6\xe0\xab\x78\x81\x31\xfb\xdb\xe7\x58\x24\x68\x55\x50\x34\xbc\xa9\x6f\xf2\xba\xce\x0f\x2b\x93\xec\xb1\xea\x43\x4a\x19\xfa\xa1\x3a\xed\x5a\x03\x1a\x55\x7b\x5b\xd6\x45\x9b\x2a\x4f\x8f\x35\x5e\xd7\x79\xb1\x72\x8d\xd7\xcd\x2e\x4b\x27\x3f\x75\xcb\x2b\xce\x6b\xb2\x07\x10\x73\xe8\x6f\x3d\x20\x84\xa4\xd2\x84\x0f\x1b\x5c\xea\x69\x8e\x48\xf7\x86\xcd\x31\x7b\x9d\x2c\x77\xbc\x30\x97\xf0\xcb\xb3\xd8\x5f\xa4\x15\xdb\x9a\x44\x98\x6e\x4b\x5b\x6e\x96\x52\xe9\x0f\x3d\xa9\xf1\xa1\x6a\x0b\x0a\x10\x0a\x14\x7d\x1d\x00\x10\x13\xff\x76\xac\xea\x64\xfb\xa4\x37\x1c\xcd\x93\x1b\x01\x8c\x50\x1a\xbd\xa1\x52\x08\x74\x10\x1a\xc5\x19\xe8\xc0\x09\xe8\x3f\x84\x71\xee\x67\x28\x00\xa2\xa4\x8c\xd2\x6e\x27\x88\xb2\x84\x2d\x2a\x2b\x52\x12\x97\x5d\x02\xd9\xeb\x55\x20\xc9\xb6\x49\x96\xd4\x18\x60\x54\x61\x3d\xc9\xf4\xfc\x58\xaf\x9f\x55\xa9\xdd\x5f\xf1\xb5\x9c\xcc\x1c\x64\x92\x73\x8d\xd7\x80\x0c\xfb\x68\xc6\x36\x49\xd3\xd5\x2b\xbc\x8d\x9c\x38\x98\xeb\xde\x6e\x95\xd5\x7b\xb
d\x53\xcc\xde\x58\xf7\xa3\x5d\xd6\x63\x4c\xc6\x18\x5a\xd5\x5a\x91\x76\x73\x4b\xf6\x7c\x4b\xb6\xa2\x25\xfb\xf6\x96\x9c\xf9\x96\x1c\x45\x4b\xce\xed\x2d\xb9\xf3\x2d\xb9\x8a\x96\xdc\x8a\xdb\x56\xcc\xe2\x0c\xaa\x3c\x4d\x62\x50\xe2\xf8\xfa\x7b\x0e\xe5\x2b\x7e\xa2\xa6\xdb\x0a\xf4\x39\xe8\x62\xbc\xd6\xc8\x5e\xf8\x32\xe0\x97\x15\x35\x73\xda\x7f\x7a\x63\x6a\xa6\x66\xde\xaf\xc7\x32\xae\xb6\x3d\x55\xdb\xd0\x0c\x75\x6d\x96\x71\xbd\xfe\xfe\x9f\x19\x39\x59\xf5\xa5\x5a\xcd\xaf\xdb\xbc\x7c\x4f\x86\x50\xcf\xd0\xa9\xd1\x9d\xbe\x30\xc5\xf2\x94\x54\x8d\x91\x96\x6b\x18\x37\xd5\x5f\x35\x76\x54\x6d\x69\xad\x55\x63\x7f\x5d\x5e\xbe\x31\xd4\x96\x8d\x72\xc1\x97\x01\xa2\xab\x74\x3a\x30\x5d\x33\x2c\xbb\x38\x0b\x6a\xf1\xcd\x4d\x88\xeb\x8d\x57\x9c\x6f\x00\x40\xfb\x24\xd6\x1f\x54\x7f\xa0\xbb\x08\x69\xe5\x5b\x0c\xff\x42\x20\x1a\xc3\x85\x52\xa1\xc5\x04\x4e\x48\xd4\x0d\xb9\x69\xb2\xac\x0d\x2d\x9c\x21\xb6\x4c\x37\xbc\x0e\x8a\x82\xce\x48\x26\x72\x07\xfd\x9a\x62\x81\xe4\xbc\x2f\x49\x46\x6d\x2c\x74\x75\xe5\x7a\xa6\xce\x86\xcb\xf1\x3b\xd5\x93\xd2\xc5\xb4\x6f\xa0\xeb\x65\xa8\x1a\x30\xb0\xc6\xba\xb5\x88\x15\x67\xbe\xec\x39\xe4\xfb\x37\x3d\xc9\x62\x7c\x5e\x59\x3d\xfb\xf0\xb0\x9b\xb2\x88\xd1\x66\xf4\x3a\x39\x10\x44\xb7\xc7\xac\x51\x31\x8f\x9b\x24\xd2\x37\xf8\x5b\x82\xcb\x37\xd0\xd4\xc8\x7f\x96\xab\xc1\xb0\x91\xbf\x9b\x6b\x29\xda\x8c\x8f\x8d\xc5\x9a\x2c\x32\x63\xe9\x8a\x7a\x45\x99\x17\xb8\xac\x9f\x56\x94\x14\x80\x12\x66\x3d\x97\xff\x9c\x53\x26\xc1\x06\xcb\x80\x30\xda\x1b\xaf\x5b\x53\x30\xdb\x37\x74\xea\xff\xeb\x11\x9d\xce\x68\x87\xc7\xee\xcc\xc6\x6e\x71\x06\x06\x30\x9a\x91\xd5\xa9\x91\x53\x21\x39\xab\x66\x07\xfd\xdf\x64\xe8\x18\x06\x16\x55\x83\x39\x47\x48\x00\x6d\x57\x2d\x7f\x1c\xca\xcf\xcc\x36\x38\x10\x8a\x72\xb7\x41\x74\x22\x35\x34\xe8\xdc\x77\x5c\x76\x7a\x94\xba\x47\x27\x20\x83\xf5\x69\xce\xd4\x20\xce\x0a\x8e\xab\xde\x25\x04\x54\x2f\x53\x48\xed\x77\x2b\xa3\x54\x6b\x6c\xd4\xc5\x9e\x2e\xc9\x53\xfb\xca\x24\xb5\xd9\xeb\x1b\x5c\x3f\x62\x9c\x4d\xed\x32\x74\x36\xca\x27\xdc\x6c\x37\xf4\x5d\x99\x3f\xae\xcc\xe5\xe2\x2
d\x4d\x86\x0b\x4d\x36\x08\x32\x0b\x39\x8e\xe7\xec\x9e\xbc\xc2\xaa\xb1\xf0\x2c\x2d\xbe\xcd\xa3\x63\xb5\xb8\x34\x3d\xcf\x57\x1d\xd4\x87\xe4\x6f\x31\x2d\x66\xdb\x43\x33\x66\x5c\xd0\x9e\x8f\x6d\x43\xbc\xd9\x5a\xb3\xe4\x5c\x00\x50\xda\x37\xb5\x02\x66\x17\x67\x60\x5a\xc5\x99\x9f\x76\x96\x28\x4e\x8e\xd5\xca\x29\xce\xd2\xa1\x4f\x96\x93\xad\xe7\xc2\x25\x6a\xd8\x7a\x9a\x48\xd6\x95\x89\x75\x73\x64\x6c\xf8\xa1\x49\x60\x0c\x56\x9c\x41\x8d\x87\x63\xfa\x90\x26\x0f\x73\x24\xe1\xc5\x2e\xd2\x8a\xe6\x0a\x4b\x1c\xdf\xf1\x92\x34\x79\xd7\xb9\x08\x83\xce\x1e\xf9\x80\x06\x06\xa5\xdb\x41\xcc\x31\x15\xef\xcf\x42\x09\x69\x8b\x2f\x92\x90\xb6\x34\xf3\x78\x59\x42\xd9\x19\xdc\x55\x43\x31\x8e\xba\xb2\xf4\x18\xe6\xca\xc2\x4c\xb4\xb9\x54\x6d\xb7\x0b\x27\xee\x31\x52\xac\xb2\xbc\x7e\x23\x32\xf5\xfd\xcd\xfd\x9d\x06\xa1\x98\x85\x5c\x64\x3a\xb6\xb3\x40\x60\xe6\x26\x83\x63\xfa\xac\xb5\x68\xa1\xa8\x76\x04\xb5\x3c\xa2\x39\x2f\xac\x36\xd8\x40\xbe\xda\x6c\x22\x27\x36\xd7\x8a\xe9\x78\xbb\x95\x95\x9f\x3d\x8a\xf3\xc7\x95\x01\xac\xe2\x0c\x7c\xaa\x82\x88\x6b\xbf\x65\xde\xaf\x17\x16\xeb\x4d\x92\x03\xc5\x87\xab\x3c\xa6\x31\xab\x2a\xf0\x45\x45\xf6\xea\x12\x37\x39\xbd\xe3\xf0\xb6\xb5\x12\xa7\x88\x30\x41\xa7\x05\x11\x84\xad\x81\x16\xb4\xe0\x24\x04\x20\xbe\x9d\xe9\x76\x5c\x0b\x81\xa4\xc9\xa5\xd5\x1b\xd9\x3c\xc8\x47\x66\xdd\xb9\xcc\x90\x8e\xb1\x61\x92\xa1\xb6\xbb\xb0\xb1\x9d\xca\x9a\x2b\x28\x3f\xfd\x34\x41\x66\xdd\xe4\xda\x99\x42\x79\x73\x82\x7e\x5f\x7e\x2b\x73\xdf\x28\x42\x0f\xfd\x65\xb7\x51\x1f\x9b\xc1\x20\xeb\xae\x78\xa2\x6d\x08\x07\xd7\x46\xdf\xda\xdb\x3f\x69\xd6\xa1\xe1\xe2\x03\x1f\x0b\xba\x78\x8d\x0d\x50\x18\x99\x91\x3f\x22\x47\x03\x4b\xf4\xcb\x68\xa9\xcf\x30\x78\x0e\xf9\xbf\x95\xb6\x56\x12\xa8\x10\xf4\x08\xf3\xf0\x56\x38\xe2\xe1\xce\x35\x9c\xd9\xac\x30\x46\x9b\xbe\x58\x73\xb1\x32\x8b\xc6\xc8\x1d\xb7\xa7\xbd\xe4\x1b\x5f\x86\xba\x04\xb6\xd2\x74\xbf\xd9\x2a\xb8\xe0\xf0\xb8\x10\xbe\x48\x60\x0b\x19\x6a\x01\x7a\x2b\x14\xf3\x8b\xba\x4e\x34\xa1\xd8\x4f\x53\x5b\x07\xdd\x2d\x4d\xa1\x96\x8b\xae\x8d\x54\xe2\x99\xe9\xfe\x59\x4
b\xc2\x54\x43\x69\x72\x91\xf8\xdd\x15\xc9\x3d\x45\xd3\x51\xe2\xf4\xfd\xc2\x8e\x59\x8c\x4b\xea\x64\xf5\xa3\x5d\x0a\x86\xca\xf0\xc0\xe2\xd9\x53\xa1\xad\xde\xa9\x4e\xbb\xfa\x50\x95\x7b\xb0\xfa\x98\x56\x6f\x91\x1a\x2f\xd6\x33\x86\x20\x76\x88\x1a\x95\x18\xd5\x58\x20\x03\x62\xde\x69\xd5\x71\x73\x48\xea\x2f\x03\xfa\x88\xb5\x9a\x24\xa9\xc2\x38\xfd\xa4\x9a\xa3\xa5\x64\x60\x6a\x92\x4b\x90\xd4\x45\x16\x80\x51\xf4\x67\xa2\x9c\x04\xb0\x73\x82\xee\x39\xce\xb4\x19\xc2\x82\xad\x18\x4d\x22\x38\x42\x89\xd1\x9c\x2b\xa2\xda\x9e\xd4\xf6\xbd\x34\x56\xfd\xe1\x51\x94\x1f\xa7\xb5\xaa\xb0\x9a\x9c\x8b\x4b\x8e\x62\x71\x91\x16\x17\xc9\xc3\xfc\x47\x0b\x21\x9f\xd4\xcd\x70\x8b\x82\xc1\x7c\xc5\x2d\x97\xbd\x69\xab\x49\xee\xcf\x5e\x42\xb2\xa8\x89\xbd\xe0\x01\xde\xf7\xeb\x15\x7c\xa1\xa2\x67\x80\xd0\xf6\x5d\x7c\x00\x3a\x99\x14\xee\x01\x4d\xb2\xa0\xd5\xa6\x70\x25\xc4\xa2\x5e\xb0\x7c\xb1\x11\x59\x8c\x4d\xa9\x52\x0a\x9d\x55\x87\xac\xd7\x2c\x54\xcd\x2f\x36\x13\x37\x3f\xd8\x2c\x3d\xce\x88\xdc\x01\x67\xb4\x00\x83\x36\x9e\x3f\x5c\x5a\x05\xe6\x15\x57\xd0\x61\xae\xb0\x64\x0c\x33\x27\xe0\xca\xbd\x9e\x2a\x32\xd1\x82\x48\x1f\xce\xa8\x6e\xec\x79\xde\x66\x30\xb5\x48\x4b\xbc\x38\x7b\x4c\x5d\xf1\xe8\x01\x91\x46\xa5\x4b\x1d\x35\x60\xb9\x5b\x6f\xeb\x0f\x80\xb0\x2e\x2b\xca\x63\x13\x3b\xd8\xff\xdf\xa5\xe5\xbb\x96\x96\x01\x55\x4d\xd7\xb3\xb7\xdb\x75\xa7\x7d\x8b\x0b\x84\xd8\xc8\x50\x54\xa5\xdc\xbe\xd8\x72\xd1\x1e\x19\x14\x2e\xe7\x8b\xc7\x4c\x66\xae\x41\xcb\xd2\xc4\x20\x13\x51\x94\x82\x67\x0c\xf9\xdc\xec\xa0\xc0\x65\x66\xa6\x59\x88\x84\x0a\xe3\x79\xbe\x9a\x9c\x71\x14\xb8\x4e\xcd\x5b\x4b\xda\x5e\x8c\xe5\xe8\x90\x2e\x2d\x3d\x3f\x5b\x2e\x45\x7a\x94\x23\x14\xf2\x61\x60\xc7\x40\x76\x4f\x14\xe2\xa4\x42\x9b\x14\xc7\x12\x43\xb7\x89\xcb\x79\x9a\x57\xe1\x3c\xaa\x04\xb1\x90\xf5\x5a\x58\xe3\x0c\xa5\x06\xbf\x80\x55\x5a\xd8\x23\x3c\x70\x03\xe0\x29\x2a\x4c\x0d\x96\xb2\x89\x8b\x70\xa8\x66\xb9\xae\xc6\xff\x87\xee\xfd\x70\xe5\xa2\xa5\x4c\x47\xf3\x1c\xcd\xf4\x6c\x5a\x66\x44\x7b\xee\xeb\xcc\xbd\xd1\x57\xa8\x31\x4d\xd2\xf2\x91\x97\xd5\x19\x45\xf
5\x85\xa3\x3e\xab\xdb\xa8\x40\x2f\x18\xf1\x69\xad\x66\x31\xd0\xf1\x9e\xcf\x6b\x3d\xb3\x92\xe9\x99\x1b\xc7\xe3\x63\x13\xe3\x14\x4b\xfa\x80\xd8\x34\xcb\x9c\x23\x69\x53\x6a\x8a\x3a\x53\x45\x86\x0d\x5e\x5a\x63\x97\x83\xec\x48\xc6\x74\x6c\x39\x6d\x72\xd5\x5a\xf0\x10\x80\x2a\x63\x66\x11\x52\x00\x59\x56\x76\x6a\xc1\x50\x00\x5d\x50\x70\x76\x4a\x5f\x0a\x76\x8c\x0a\x0a\xb6\x89\xd8\x35\x3d\xc1\xdc\x28\x5b\x26\x59\x7e\x6f\xb0\x14\x13\xbe\xa2\xc9\xf9\x29\x58\x5d\x7c\x01\xa5\x96\xcc\x91\xca\x0a\xe2\x14\x69\x86\xa1\xe6\x5a\x9a\x17\xb2\x09\x52\xec\xb6\x2a\x7f\xe9\x19\xc7\xd4\x94\xd9\x60\x33\x98\x32\x15\x18\xcf\x4d\x65\xaa\xc2\x0b\x28\x37\x3f\xdf\x28\x8a\x2b\xf8\xc6\xdf\x5a\x96\x15\x8d\xf3\x0d\xcb\x6f\x8c\x2a\xc2\xb5\x9c\x7b\xb5\x19\x45\x2c\x31\x63\x44\x91\x8a\x4e\x98\x50\xe6\xcb\x8d\xb4\x2f\x9b\x44\x7d\x85\xad\xb6\x2b\xdc\x5c\x7a\xeb\xd9\xfb\x26\xca\x77\xc7\x68\xc6\x54\x31\xd4\x3b\xd9\xb1\xd5\x07\x07\xc2\xf6\x93\x0f\x85\xef\x07\x76\x80\x14\xae\x01\x6a\x13\x3b\x3b\x96\xb0\x14\x67\xea\xd6\x74\xc7\x5b\xbf\x90\xc9\x5b\x58\x6a\x8b\xc0\xa0\xcc\x70\x2e\x1b\x14\x69\x26\x30\x89\xc9\xda\x83\x2a\xd1\xc1\x6e\x78\x6a\xec\x99\x6e\x71\x26\xc3\x3a\x35\x2a\xbf\xcd\xc1\xd4\x34\xab\x00\xd5\x11\xf4\x45\x74\xc6\xb7\x67\xd8\xb3\xf3\x9d\x60\xbe\xfc\xc2\x38\x77\x16\x7f\xd5\xed\x0c\xd3\x99\x21\x59\x9a\x88\xd7\x64\xaf\xb0\x46\x1b\xbd\x09\xd8\xf1\x30\xb8\x79\xad\x4d\x67\xff\xfc\x76\x6a\xec\x87\xf7\xb8\x25\xef\x94\x69\xd0\xad\x07\xda\xdb\xde\xb5\x04\xc5\x05\xd0\xc1\x01\x77\x77\xa8\x65\x14\x67\x7a\x81\x04\x58\x5e\x71\x06\xae\xdf\xde\x49\xe7\xe2\x42\x32\x87\xe7\x63\xaa\x56\x34\xd5\xd5\x53\x1a\x7d\xa4\x7f\x38\xa1\xbe\xa4\x1a\xf7\xfc\xdd\xa4\x73\x34\x65\x15\x75\x68\x13\xc3\x30\xc4\xe9\x5b\x3c\xc9\x56\x41\x51\x9d\x17\xd3\xf3\x3c\xd7\x7d\xbd\xe6\x0e\x82\xa2\x3f\x70\x6b\xee\x94\x53\x29\x1f\x9a\x84\x80\xd4\x85\xd8\x90\x32\x05\xdf\x3d\xe9\x4e\x68\x81\xaa\x2a\xc9\x76\xe3\x63\xd6\x3b\xe9\x9e\xa8\x30\x71\xee\x6d\xe1\x8d\x61\x87\x62\xbb\x51\x99\xd0\x2b\x54\xf3\x0d\x37\xe1\x0e\xd4\xdb\x2c\xcb\x74\xb9\x26\x61\xde\x4f\x42\x9
d\xc0\x8e\xb5\x20\x5d\x43\x44\x65\xb6\x88\x2a\x5b\x14\x6c\x6d\xc5\xf1\x72\xb3\x53\x34\x34\xd3\xb1\x35\xb2\x51\x24\xd8\x8d\x43\x1d\x65\x01\x3a\x05\xa8\x38\x4f\xbe\xe2\x78\xd9\xa6\x39\xaa\x99\x57\xeb\xc8\x05\xec\xde\x55\x26\x68\x8d\x5d\xd5\xee\x4b\x41\xaf\x9e\xd7\xab\x27\x5c\x05\x1d\x4a\xb9\xf2\x0c\x90\x08\x7b\x30\x38\xdc\x33\xdc\xde\x19\xe0\x78\xb1\xe1\x61\xf9\xb4\x80\x0b\x28\x36\xeb\xe1\x58\x2e\x5b\x0a\x95\x58\x13\x54\x4c\x63\x78\x26\xd9\xc3\x7a\xb4\xd8\xe8\x15\xe9\xce\xd7\x44\xf2\x63\x18\x2d\x2f\x5e\x98\x1f\x87\x39\xd0\xbd\x98\x1b\x7c\xef\x12\xa6\xda\xf3\x5d\xa1\xe1\xd0\x53\xa2\xb6\x32\x4e\xd3\xa4\xa8\x92\x6a\xdd\x0b\x0a\x35\x81\x4e\x87\x09\xc3\x2c\xcf\xd2\x27\xbe\xec\xf6\x91\x52\x38\x5f\x8e\x40\x9e\x20\xc0\xc8\xf4\x35\x49\xd4\x1e\xaf\x5b\xf8\x30\xb3\xf0\xa8\xc6\x85\x45\x24\x19\x3a\x28\x34\x40\x75\x22\x41\xcd\x5c\xed\x8e\x33\x2a\x40\x73\x11\x13\xd0\xc3\xd4\xc2\x3b\xcb\x0d\xec\xd6\x76\x53\xe5\x09\x8c\xd7\x95\x38\x6d\xaa\xa0\x30\xa6\x9c\x0a\xb2\x3a\x31\x3e\x56\xcb\xa9\x9c\x26\xed\x5c\xc9\x68\x68\x36\xeb\x5d\xff\x9a\x25\x74\x42\xf9\x6a\xf9\x28\x40\xbe\xa4\x5d\xa4\xc5\x6a\xaa\x42\x33\x91\x5f\xa4\x45\x60\xaa\x02\x5f\x97\xe4\x10\x3e\xd7\x59\xa2\x0a\xb2\x7e\x91\xb5\x4b\xb2\xbc\xcf\xd7\x17\x49\x2b\x79\xca\x04\xed\x7d\x03\x1a\x70\x4b\x08\x0e\xb4\x04\xa8\x3c\x04\xc6\x92\x2a\x7d\x7d\x79\x3c\x64\x04\x67\xef\x8b\xe4\x01\x38\x29\x2b\x22\xf8\x95\xd3\x94\xa6\x4e\x24\x76\xd8\x5d\xab\xb1\x4d\x02\x62\x9b\xa4\x35\xd9\xfd\xa1\x52\x71\x73\x7e\x42\xc9\xd5\x6e\xad\x49\x57\x9a\x67\xd4\xa3\x6b\x50\x4f\x6f\xdf\xe6\xe5\xe1\x46\x74\x9f\x55\x9d\xe3\xfc\xac\xca\x0d\xe2\xaa\x13\xd8\x31\x9f\xd5\xef\x55\x05\xa6\x87\x73\x86\xf4\xd4\xc0\xb0\xa0\xab\x4b\xa8\xc1\x8c\x15\xc2\xcd\x9a\xf1\xcd\xf2\x34\xca\xf3\x6d\x3d\xcf\x7d\xf9\x65\x48\xd1\xf4\xf3\x19\x57\x69\xc4\xf6\x29\x90\x69\x4b\x12\x6d\x5e\x5d\xae\xdd\xcd\xf1\x18\x66\x8d\xbe\xf4\xb2\x5d\x94\x4d\x3a\xb2\x47\x65\x2f\x60\x9b\x1c\x97\xc9\x57\xaa\x2f\xf3\xa8\x0d\x63\x79\x4d\xe1\xc7\x96\x04\x19\x49\xe9\xda\x5a\x33\x19\x92\x79\xd0\x74\x6e\x9f\xf7\x9e\x35\x0
3\xf4\x96\xfb\xae\xc5\x87\xe1\x8c\xf2\xf0\x76\xb8\x5b\x18\x6a\xf1\xa3\xe6\x93\x97\x67\xa7\xbe\xa3\xb7\xc4\x4f\xda\x74\x67\x94\xbd\x1b\xbb\x48\xb5\x00\xf5\xe9\xd6\x96\x76\xb8\x25\xb1\xac\x03\x8b\x3a\x91\xd0\xc3\x7e\xed\x6e\xf9\x75\x06\x57\x80\x46\xaf\x8f\x4c\x02\xfc\x6e\x3b\x60\x13\x7d\x65\x9f\x97\xc9\xb7\x3c\xab\x47\xe3\xaf\x94\xf8\x84\xcb\x0a\xab\x02\xb0\x94\xf9\xa3\xce\xb3\x27\xb2\x5e\xf2\xe2\xa1\x24\x08\xa2\xbd\x71\x38\x60\x92\x8b\xb3\x68\x8c\x64\xa6\x23\x81\x71\xda\xb0\xb1\x03\xe6\x68\x43\xed\x09\x57\x51\xe8\x5d\xf3\x2e\x30\x8e\xca\x2c\xd9\xdc\x65\xfb\x3e\x5c\x45\x03\xf7\xe2\x39\xaf\xbd\x78\x7f\xd3\xdc\xb7\x74\x17\xe0\xe2\x83\x84\x09\x3c\xa1\xf4\x88\x75\x94\xa6\xf9\x63\xb3\xb3\x19\xe6\x72\x75\x7d\xb4\x40\x8c\xb3\xa7\xd1\xcc\x66\x3b\x31\x9a\xdf\xec\x1e\x46\x26\xfc\x21\x82\x63\x45\x64\x2c\xc7\x4a\x75\xa8\x8e\x95\x90\xf0\x1d\x2b\x24\x22\x3d\xdc\xae\x31\xfb\x40\x92\xc5\x84\xfb\x2d\xbb\xc7\xac\x0a\x10\x3c\x78\xc2\x92\xe6\xf8\x2d\x1e\x3a\xd2\xd0\xc7\x07\xe5\x48\x07\xdd\x3d\xce\xf1\xf9\xb6\x07\x5d\xbe\x49\x7d\x0b\x5e\xa3\x35\xfb\xf6\xc1\xed\x56\x8a\x81\x26\x1a\xea\x7a\x76\x6e\xd1\xdb\x7a\xb8\x1f\x3e\xfc\x43\xa2\xe0\x29\xb5\x1e\x76\xee\x75\x95\x56\x7a\xc5\xb1\x9c\x02\x3b\xd0\x46\xa6\x1e\xc5\x70\xa4\xc8\x98\xe8\xb7\xf1\xea\xfa\x57\x8b\x44\x3b\x94\x10\xfa\xb9\xfd\xb9\xe7\xd7\x4f\x85\x08\x62\xd0\xa3\xa1\x73\xe9\x1d\xd7\x12\x53\x7b\x6b\x1b\xd9\x6b\x32\x42\xa7\xd0\x43\x46\x15\xf2\xf5\xcb\x58\x5c\xb8\xa6\x0c\x11\xb3\xc7\xbc\x8c\xd5\xe5\x18\x31\xe6\xa0\x89\xa5\x46\xe0\x8d\x51\x6e\x14\xf4\x74\x85\xb6\x95\x5e\x00\xf8\xb5\xf0\xf5\xe5\x9c\xd1\x07\xeb\x6e\x55\xa3\xb2\xee\xad\xba\x2c\xad\xbf\xe6\xd2\x7c\x96\xd5\x7a\x83\x93\xe1\xed\x8f\xe4\xf0\xea\x0a\x0d\x6c\x8b\x8a\x02\xa3\x12\x65\x11\x96\xc3\xe0\x8e\xa5\xcf\xb8\x25\x28\x2d\xd2\xa8\x62\x47\xee\xec\x2c\x4d\x13\x12\x5a\xb6\xee\x23\xf7\xc3\xf9\x6e\xa1\x94\xbe\x00\x63\x7e\x87\xd4\x7f\x2f\xef\xde\x38\xb7\x34\xf3\x9e\xe7\xfb\x81\xaf\xbc\x3a\x48\x1d\xca\x87\x17\x0d\x9d\xd8\x5a\x32\x5e\x82\x37\xd2\x82\x51\x9b\x29\xcd\x91\x9e\x28\x36\x18\xc
1\x85\x65\x97\xa0\x20\x8f\xa6\xaa\xe0\x82\x31\xbd\xbd\xda\x24\x6a\x73\xe3\xab\x74\x78\x10\xee\x1c\xce\x0d\x5f\xe7\x52\xb1\x64\xf8\xa6\x4b\xb7\x38\x8d\x17\x1b\x0e\xdf\xb2\xb2\x4b\x50\xe8\x0d\x9f\xa2\xe0\x92\xe1\xbb\xb9\xda\x24\x6a\xb3\xc3\x47\xeb\x5c\xf2\x63\x4d\x66\xf6\x95\x71\xeb\x94\xdc\x0c\x78\x10\x87\x5b\x14\x0f\x66\xe5\xfe\x96\x99\x7a\x54\x8a\x5b\x66\x91\x70\x2a\x2d\x70\xe6\x0c\x5c\x11\x11\x57\x7e\xf9\x64\x4c\xd1\x9b\x8c\x1e\xbb\x34\x26\x2c\x3e\x88\xc1\xf8\x9b\x38\xa5\xb2\x71\x4b\xdc\x76\x06\x9d\x33\x06\x3d\x9b\xa4\x67\x0e\x7d\xfc\x7b\x8a\x6f\xb3\xe2\x51\xc5\x40\x69\x09\x1e\xeb\xe1\x98\x8c\x4c\xf6\xfc\xfb\x69\xb2\x98\x70\xb2\x09\xe4\x96\x4e\x28\x30\x55\x1b\x7e\xe7\xd0\xe8\xef\xb3\xa1\xeb\xf6\x43\xfa\xfe\xb0\xad\x41\xcf\x0d\xc0\x91\x0e\xd7\xa6\xc1\x0c\x0e\x78\xa5\x93\x20\x7e\x11\x7d\xb6\xf3\x6c\xeb\x44\x5f\x1b\x59\x52\x7e\xc6\xfb\xd0\x72\x17\xb6\x2b\x75\x9f\x61\x8c\x0f\xa2\x58\x40\x0b\x1f\xd6\xa2\x03\xc6\xcb\xb1\x9d\x82\xf2\xc6\x18\xd9\xff\x47\x6e\x4a\x5a\x23\xa5\xdf\x9e\x11\x0e\x26\xe8\xbe\x53\x5b\x7f\xfe\x9d\x8c\xfe\x47\x07\xdc\x1a\x00\x16\x6d\x05\x8a\x8c\x66\x11\x18\x9e\xbd\xf6\x37\xd8\x6d\x1c\x10\x6e\xd2\x50\x6c\xac\x65\xca\xb0\xe7\x05\x54\x27\xe6\xa3\x68\x0c\xa7\x61\x1e\x5d\x46\x9a\xb2\x7d\x1e\x90\x92\x47\x1c\xb1\x84\x98\x84\xd4\x91\xb5\x67\x6f\x19\x5f\x0b\x18\xd5\x7a\x35\xbc\xd6\x08\x19\x74\x70\x07\x60\xc6\x2d\x2b\x4c\xf6\x8c\xd6\x2d\xd3\x12\xad\x34\x84\x36\x52\xa0\x9f\x65\x01\x5c\x98\x3b\x7a\x10\x6a\x66\xe8\x69\x96\x11\x68\xd0\xbd\x57\xc4\x48\x4f\x1f\x62\xee\xe0\xfe\x2a\xb0\x82\x4d\x88\x78\x28\xeb\xb6\x7b\xae\x21\xfb\x8b\xfe\x4c\x2a\xaa\x8c\xa4\x74\x25\xa5\x5b\x53\xde\x8f\x4b\xcf\x41\x98\x4e\x28\xfd\xa9\xde\x6c\xcb\xab\xfb\x4b\x13\xc4\xe6\x9e\x71\x48\xa7\xf2\xe5\x96\x2c\x56\x2f\xe4\xca\xdb\xdb\xd5\x8b\x89\xa2\x23\x6f\xb7\xa5\x57\x62\xf6\x70\xa3\x7f\xf9\xc2\x8b\xf6\xcf\xbb\x78\x4e\xc7\xb3\xe7\x5a\x4f\x86\xa4\x09\x47\x55\xa3\x4d\x81\x32\x9c\x7e\x79\x28\x04\x37\x99\x56\xb2\x21\xbf\x45\x41\xab\x88\xfc\x27\x95\x1f\x3d\x54\x5a\xcb\xd1\x01\xfa\x51\xff\x25\xef\xc
f\xee\x6c\x90\x88\xf7\xe0\xfa\xff\xb5\x8f\xf0\x0c\x0f\x4b\xa7\x64\x82\xc3\xb0\xfa\x90\x6c\xd0\xd9\xcb\x88\x63\xaa\x24\x83\xcd\x6b\x18\xea\xc1\xe6\xce\x5a\xd4\xa3\x51\x29\xba\xed\xb3\x43\x74\x42\x1b\x06\x9f\x85\x34\x24\x4c\x94\xa7\x69\x83\x3a\x7d\xb5\x49\xab\xe3\x96\x25\xa6\xcd\xb1\xdd\x14\x21\x2b\x36\x96\xd1\x57\x31\x9a\x92\x6f\x2f\xe2\x2b\x03\x03\x61\x87\x36\x3e\xb0\x27\xa4\x68\xa7\x61\x85\xab\x8a\x70\xe8\x3d\xa8\xcb\xbe\xe5\xb5\x79\x60\x8a\xbe\xb0\x20\x98\xe4\xc4\xb3\x60\xea\x0d\x1c\x4a\x01\x85\x9b\x5a\xe5\xf3\x7c\x09\x78\xed\x5e\x68\xfd\x41\xe8\x7e\xd0\x3c\xe9\x20\xfd\x52\x1c\x1e\x86\xec\xc1\x02\xf9\x72\x4a\x96\xc7\xb8\xfb\x55\xed\xf3\x47\x1a\xba\x1b\xd0\xc6\x5b\x92\x30\x2c\x18\x18\x16\x2d\xde\x81\x56\xe0\xfa\xaf\x9b\x10\xf1\x40\x07\x96\x22\x52\xfc\x72\xe0\x34\x48\x38\x8f\xae\x7e\x11\xde\x13\xfc\x3e\x88\x42\xd8\x71\xf3\x5e\x64\x4f\xe5\xd9\x5c\x18\xbe\x18\x7d\x5c\x83\x90\xc6\x6c\x68\xe3\xfd\x6b\xd1\xe6\x05\x10\xb5\xfb\x97\xa0\x9e\x05\xc5\x79\x11\x28\xee\x8b\x40\xf1\xee\x7b\xe1\x67\xc9\x90\x83\x58\xda\x2f\xc9\xcd\x24\x54\xeb\x4c\xf2\x4c\x88\x8e\x9f\x54\xb5\xd0\xd6\x80\x6d\x2c\xf7\xf5\x08\xbb\x2c\x03\x26\xbe\x1a\xc6\x5f\x29\x19\x0c\xe8\xd7\xd3\x34\x0c\x71\x21\x12\x45\xde\x30\xc6\x90\x9b\x83\xb8\x04\xab\xc9\x41\x11\x09\x65\xdb\xaf\x7b\x55\x51\x94\x2e\x27\x31\x93\x4c\x5b\xd1\x8b\x39\x30\x8b\xba\x51\xe1\xf2\x94\x44\x78\x11\x3e\x74\x9e\xee\xaf\x8a\xdc\xbb\xcd\x35\x44\xd7\xd3\x87\x2e\x8a\x75\x37\x18\x3f\x03\xcb\x26\xdd\x10\x0b\x69\xf0\x98\x89\x75\x7a\xab\x77\x1f\xe6\x43\x9e\x6a\xd2\xef\x63\xda\x83\x40\x4b\xc8\x29\xc7\xb9\xe8\x6e\x87\x4a\xdf\x95\x49\xdc\x26\xd0\x1f\xe4\x43\xdf\xa1\xa2\xb9\x8b\xc4\x9e\x6a\xa5\x89\xec\x61\x9e\x6a\x45\xd6\xaa\x03\x3a\xbf\xb1\x48\x09\xcd\xdc\x96\xf7\xf7\xbf\x12\x1a\xe9\xdb\x24\x4d\xbf\x30\x00\x2d\x5d\x79\xa5\x12\x17\x18\xd5\x6f\xda\x72\xda\x10\x08\xab\x49\x4b\x94\xf9\x63\x25\xdd\x10\xe2\x3d\x7a\x48\x93\xbe\xeb\xaf\x3d\xbc\x53\x48\xe9\xc7\xde\x68\x1d\xf3\x67\xea\x1e\x67\x66\x3b\x18\x29\x74\x97\x68\xf3\x44\xee\xc6\xd8\xf8\xd7\x3a\xee\xd9\x42\xa
3\x38\xb6\x63\xbf\x79\x39\x56\xf0\xfa\xef\xf4\xa4\xf6\xbd\x81\x7a\xdf\x6a\xd5\x8a\x2d\x78\x97\x7b\xe9\x87\x64\xbf\xd6\x71\x5b\x73\xf8\x18\x18\x6c\x7e\x24\x4a\x07\xfb\x26\x7e\xfa\xb5\x7d\x73\xb3\xbf\xad\xf2\xb8\x5a\x25\xbc\xcc\x29\x5c\xe4\x5b\xc1\x90\x3e\x81\xda\xe5\xb4\x85\x98\x1e\xd6\x20\x76\xaf\x0d\x5a\x60\x4d\x93\xfa\x23\x7a\x9b\xfc\xc2\xd6\xf0\xa1\xd0\x7e\x8c\xea\x6b\x2d\xbd\x99\xa5\x9f\x87\xaf\x16\x5c\xeb\x81\x9b\xbd\x64\xc1\x50\xdc\x48\xa7\x03\xd8\xdd\x47\x2b\xce\xc0\x1d\x44\xbf\xb6\x28\x23\xf5\x3b\xdc\x4b\x20\xa4\xd9\xab\xe2\x02\x8e\xdc\x13\xe9\xbf\xfd\x55\xc7\x64\x81\x12\x95\xd4\xe7\x29\xa2\x04\xcc\xe0\x1a\x04\x49\xad\xe1\x37\x5c\xe6\x3d\xb6\x8b\x41\x1c\xb3\xf4\xe1\x5b\x21\xb4\xd2\x45\x74\x63\xd1\x43\xc3\x60\x52\x45\xc0\x35\x4e\x32\x9a\xf4\xeb\xe7\xce\x2a\xd0\x58\xf7\x59\x76\xe3\x25\xa2\x49\xbf\x84\xc2\xcd\x85\x01\x96\xcd\xdd\x78\x34\xf9\xa7\x50\xbc\xb9\x2d\x20\x41\xeb\x6c\xaa\x0d\x63\xc8\xd9\x92\xcd\x44\xb7\x1a\xef\x28\xdd\x6c\xfa\x13\xab\xde\xc4\x9a\x7f\x6c\x4c\x3a\x1c\xb0\xac\xc1\x95\x57\x76\x40\xc0\xde\xae\xda\xa0\x2a\x89\xf4\xb8\xcc\x8b\x38\x7f\xcc\x34\x65\x2a\x3f\xbc\x9f\xce\x05\x6f\x79\x7e\x91\x3f\xb6\x0f\xcb\xb6\xc5\xc0\xf3\x1e\xf8\xea\x07\x52\x45\xa2\x84\xb6\x3e\x69\x4b\x6e\x14\xaa\x7c\xbc\x3a\xf7\xad\xa9\x52\x92\x07\xd7\x54\xc1\xd6\x89\x6b\xaa\x90\xe8\xc7\x35\x55\x4e\xf6\xad\x6a\xcb\xa5\xa8\xda\xeb\x07\x5c\x55\x68\x87\x41\x01\x59\x70\x00\x36\xc1\x8c\x97\xaa\x8e\x51\x84\xab\x6a\x58\xae\xc4\x98\x0a\x13\x6b\x5e\xb6\x97\xaa\x33\x79\xd5\xf9\x5b\x34\x4b\xca\x36\xc4\x58\x52\xb4\x15\x19\x5e\x34\xcb\xeb\x24\xc2\xdd\xef\xf9\x2b\xaf\xda\x82\xbb\x9d\x0b\x1f\x62\x50\xc4\xaf\x1e\x44\xba\x93\x3c\x26\xc5\x97\x54\xe7\xdc\xd7\x14\x6c\xb9\xc8\xbb\x70\xb6\xf4\x90\x41\x97\x78\x1b\xce\x16\x56\xb2\xea\x01\x4d\x84\x1e\x6e\x4b\xc9\xb3\x69\x2f\xb9\xcf\x1d\xbd\xb9\xb3\x4d\x85\xdb\x3c\x8d\x71\x29\xa6\xb0\xfd\x0e\xa7\xa4\xcc\xf6\x5d\x36\xa3\x87\x94\xcb\x55\x26\x7c\x58\x0b\x96\x9f\x65\x6f\x06\xd2\x3b\x83\xa3\x67\x07\xc2\x4a\xcf\x94\xce\x55\x96\x37\xdf\xc4\xbc\xb6\x29\x66\xe8\x0
3\x4d\xe8\x84\x81\x9e\x20\x86\x35\x5d\x0f\xdf\x4d\xea\xbd\x23\xcc\xce\xda\xb8\x68\xab\xb2\xb8\x88\x88\x79\x37\xbf\xb6\xf5\x0f\x7e\xa3\x4b\xa0\xee\x5c\x58\x7c\xc5\xb9\x77\xb3\x1f\x14\xae\x9e\x11\x35\xeb\x46\x3a\xb7\x8c\xd0\xbe\x15\xd9\xd8\xfc\x74\x7c\xc2\x59\x5d\x71\xa5\xa6\xe1\x51\x49\x77\xb1\x15\x0f\x7d\xca\x2b\xdc\xe0\xb1\xec\x49\xa9\xea\x44\x61\xc4\x16\x3a\x5a\xb3\x67\x12\x15\x83\xac\x8b\xda\xea\xb2\xe5\x58\xb4\x7f\x27\x07\xb4\xc3\xab\x63\x99\xbe\xf9\x29\x46\x35\x5a\xd1\xdf\xef\xaa\xd3\xee\xe7\xf3\x21\x5d\x47\x7b\x54\x56\xb8\x7e\xff\xd7\xff\xfc\x45\x0f\xb4\xdf\x55\xa7\x1d\xa0\x83\xf2\xfe\xce\xb2\xef\x00\x1b\x95\xf7\x77\x96\x75\x07\xce\x87\x34\xab\xde\xdf\xed\xeb\xba\x58\xbd\x7b\xf7\xf8\xf8\x08\x1f\x6d\x98\x97\xbb\x77\x96\x61\x18\x04\xde\xdd\xc3\xef\x76\x80\x6c\xe0\xde\xdf\xbd\xb6\x6c\xb6\x39\xba\x7b\xf8\x5d\x81\xea\x3d\x88\xdf\xdf\xfd\x39\x84\xbe\x6b\x03\xf3\x93\x09\x5c\xe8\x3a\xe6\xc9\x34\x60\x68\x9a\xa9\x0d\xad\x20\x00\x26\xf4\x0d\xff\xb3\x0f\x2d\x37\x75\xa1\xe3\xb9\xba\x05\x03\xdb\xff\x76\xf7\x4e\x00\x60\xda\xd0\x72\x7c\x60\x9e\x02\xe8\x05\xc1\x47\x0a\xef\xb3\x07\x1d\xc7\x4b\x75\x1b\x5a\x21\x05\x12\x9c\x4c\x13\x9a\x36\x85\x4b\x53\x4c\xf3\xa4\x07\xd0\xf3\xec\xbd\x0d\x9d\xd0\x39\xd9\xd0\xb2\x58\xa3\x3a\x6b\xd4\x22\xb5\x94\x2d\x59\xe6\x27\xcb\x02\xa6\x07\x1d\x37\xfc\x4c\x90\x0e\x58\x43\xac\x22\xeb\x40\xaa\x13\x7c\x6d\xd0\xe1\xfb\x6e\xf7\xf0\x3b\x42\x8f\x87\x9f\xf8\x73\x52\x96\xc0\xe1\x54\x1b\x6c\xb4\x4d\xb7\x77\xf0\x47\xbd\xf3\x6f\x09\xf0\x2a\x33\x8d\x1d\x48\xb6\xeb\x91\xc5\xf0\x25\x78\xc3\xf4\x3a\xde\x30\xfd\x9b\x79\xc3\x74\x3d\xfb\x97\x5f\x44\xde\x30\xa1\x0b\x4c\x03\x7a\x27\x68\xba\x5e\x64\xe8\xd0\x34\x7d\x1d\x1a\x8e\xad\x93\x04\x1d\x1a\x36\xfb\xf6\xd1\x84\xee\xb7\x83\x01\x8c\xcf\x26\x74\xf7\xc1\xc9\x82\xa1\xff\xd1\x34\x3f\x9b\xd0\x76\x9c\x0f\xa6\x09\xa0\x67\x58\x04\x92\xed\xd8\xc0\x00\x21\x74\x6d\x1b\x18\x1f\x4d\xe8\x78\xfe\x07\xe8\xb9\x3e\x30\x80\xc1\x0a\x19\x80\x56\x3a\x85\xd0\x31\xad\xc8\x00\xd0\x77\x2c\x5a\x82\x26\x03\x5a\x85\x7d\xdf\x5b\x30\x0c\xdd\x93\x6
e\x42\x97\xb5\xcf\xf0\x09\xa1\x19\xe9\xd0\x30\x09\x40\x8a\x9f\xe1\xb4\x78\x52\xf4\x68\xb1\x6f\x87\x80\xfc\x03\x8c\x93\xce\xfa\x06\x48\xdf\x48\xd7\xc8\x4f\x92\x45\xfb\x15\x36\xfd\x5a\x58\xea\xb7\xe8\x37\x23\xf9\xb7\x83\x1e\x00\x83\xf5\x6d\xb2\xfb\x04\xd1\x10\x5a\xb3\xc3\x49\x87\xfc\xdb\x1d\xe5\x0e\xbd\x3c\xa6\xf8\xfd\x5d\x96\x67\x64\xf7\x29\x8b\xa4\x03\x1d\xe0\x40\xf7\xa3\x0b\x3d\xd2\x6e\x60\x03\x02\x17\x1a\xa6\xa7\x43\x93\x82\x24\x02\x69\x79\xb4\x3d\xc3\x09\xa1\x41\xdb\x24\x94\xa3\xff\xec\x03\x18\x44\xac\x1e\xa0\x49\xa6\x47\xaa\xdb\xf6\x67\x87\x74\x99\x50\xc2\x70\x42\x92\xee\x37\xf8\xb1\xe1\x33\x48\xff\x23\x18\x04\x0e\x25\x9d\xd7\xd0\xca\x63\x94\x6a\xdb\x04\x30\x30\x75\xe8\x9b\x4d\xba\xde\x96\x68\x10\xa6\xf5\x49\xaa\x4e\x00\xd0\x2f\x34\x9b\xb5\xfe\xc1\x01\x36\x05\xec\x50\x10\x36\x70\xa1\x07\x28\xca\xf3\x94\xb1\xa0\x0b\x02\x68\xb9\x1f\x4d\xe3\x64\x42\xd3\x75\x3e\x5a\xd0\xed\xcd\x67\x06\x0c\x1d\x0b\x04\xd0\x37\xdc\x14\x1a\x86\x49\xfe\x27\x53\xac\xe5\x93\x89\xcb\x0a\x74\x18\x18\x3e\xf9\x08\x80\x05\x0d\x4b\x97\x3f\x4c\x31\x97\xd7\xf1\x4f\x04\xc8\xc8\x8c\x47\x2f\xcb\x73\xed\xad\xf5\x97\x50\xf9\x46\x4c\xcf\x77\x8b\x54\xd8\x97\x98\xd1\x84\x09\x2d\x5c\x36\x9f\x51\xe2\xb6\x53\x9a\x61\x78\xc1\x2f\xbf\xdc\x51\x6a\xbb\xd0\xf7\x4d\x10\x7e\xb4\xa1\x6b\xf9\x9f\xa0\x6d\x13\xc6\x0d\x6c\x32\xc4\xae\xe5\x43\xcf\x27\x92\x65\x39\x0e\x59\x4c\xcc\xd0\x06\x0e\x34\x89\x00\xe4\x05\x8a\x92\xfa\xe9\xfd\x1d\xb4\x6d\x4a\x55\x89\xa2\x02\x41\xc3\xce\xff\x44\x24\xa6\x33\x47\xcc\x81\x9a\xde\x9e\x19\xf5\xdc\x57\x54\x9a\x75\xfb\x18\xb7\x25\x37\xf3\xec\x25\xc4\x11\x96\x10\xfb\x06\x92\x13\x0a\x3b\xd0\xf7\x43\x60\x7e\x34\x61\x10\xe9\xd0\xb1\x43\x32\x17\x04\xd0\x26\x4c\x0a\x03\xdb\x3e\x85\x84\xea\x44\x28\x1d\xcf\x86\xb6\x67\x92\x44\x96\xb5\x37\x0d\xe8\x44\xac\x0e\x80\x81\x0e\x6d\x9f\x7c\x06\xb6\xfd\xd9\x26\x9f\x1f\x4c\x1b\xd8\xd0\xf6\x81\x69\x41\xcf\x0e\x81\x4d\xbe\x58\xc0\xfe\xe8\x41\xdb\x45\xd0\x85\x2e\x9d\x4a\x4d\x1d\x3a\x96\x0e\x2d\x2b\xf8\xe4\x40\x3f\x00\xe6\xb7\x3b\xba\x8d\xf9\x8a\x29\x3b\xfc\xf1\x8
f\x1f\x9c\x3f\x59\x77\x0d\x7f\x10\x05\x54\x1c\xd1\xc9\x51\x52\xee\xfa\x15\x37\xc9\x06\xa6\xb9\x75\x2f\x82\xcb\x94\xf5\xa0\x77\x3a\x3e\x54\xa1\xa5\xe7\xd6\x45\x3f\x1b\xb6\xcd\xa2\x9e\x7c\xad\xf3\x84\x8d\x0f\x0d\x97\x42\xdb\xed\xb6\x6d\xec\x47\x73\x53\x83\x3e\x44\xd4\x3c\xda\x25\x29\xf4\xf2\xb6\xa0\xa9\x73\x33\x81\xb8\x1d\x4f\x46\x6a\xac\x5b\xd4\x1b\xd1\xb4\xbb\x5e\xb1\x2b\x73\x86\xdf\xbd\x95\xd4\x3c\x79\xca\xfb\x65\x06\x42\xbf\xe8\x9b\x4a\xc3\xf7\x9a\xcb\xbc\x46\x35\x7e\xe3\xb8\x31\xde\x89\xcf\x35\x4b\xe9\xd7\x81\x2d\x42\x31\xb6\xc3\x73\x8c\x61\x35\xf9\xcd\x2e\xe3\x45\x64\x52\x54\xeb\xbc\x65\x32\xc9\x5e\x02\x1f\xf0\xbe\x79\xd7\xcd\x8d\xbf\x90\x89\x31\x3a\xbf\xbf\xf3\xef\x40\xf4\x44\xff\x29\xc9\xe7\xbb\xfe\x24\xea\xfd\xc1\xf7\x03\x8f\x4d\xa2\x1e\x34\x5d\xe0\x40\xcf\xf7\x3f\xdb\xd0\x22\x72\x0b\x5d\xef\x64\x42\xc7\x71\xbe\xb1\x4c\xd3\x84\xb6\xef\x7c\x26\x72\xc9\x32\x5d\x68\x58\xf6\xb7\xa5\x82\xa6\x5a\xa9\x3c\xf5\x85\xcc\x11\x83\xe1\x42\x13\xdf\x0d\xb6\xb7\x05\x06\xb4\x9b\xcc\x45\x4a\x23\xce\x8b\x6c\x00\x8c\x8e\x53\x82\x1b\x27\xef\x00\x86\x2e\x30\x3e\x99\x06\x80\x61\xe0\x02\x1b\xfa\xb6\x03\x02\x60\x10\xdd\xc7\xf6\x53\x18\x5a\x8e\x6e\x42\xd3\x04\x16\xd9\xe6\x01\x0b\xda\x4e\xf8\xad\xc7\x4e\xe2\x18\x0f\xcc\x0a\xdf\xf1\x00\xde\x24\xc3\xfc\x4b\xd3\x0d\x58\x36\x7b\x1a\x52\xa4\x1d\xdf\x93\xca\x21\x10\x3b\xc1\x58\x18\x88\xf0\x39\xa6\xfb\x45\x56\xf4\x1b\x4c\xdc\xb7\x99\x67\xd5\x46\xcf\x97\x90\x8c\xe0\xf9\x03\xec\x00\x17\x1a\x9e\xf3\xc9\x24\x9f\x74\x64\x3d\x18\xda\x1e\xb0\xe8\x27\xdb\x7e\x90\x1c\xf6\x69\x00\xa7\xc9\x61\xa5\x0c\x10\x34\x39\x14\x0a\x20\x10\x3c\x21\x3f\x00\x4d\xce\x62\x59\xb2\x8c\xd7\xf3\xd7\xd3\x5f\xe0\xd8\x60\xc1\x91\xc0\x4d\x76\x6f\xa5\x91\xfa\x45\x66\x3d\x51\x67\x75\x6e\x1c\x5d\xd3\x86\x9e\x43\xad\x1c\x46\x68\x45\xd0\x72\xa0\x63\x84\xd0\xf6\x5c\x18\x04\xe4\x1f\xb2\xe7\xf6\xd9\xee\x3c\xb4\x74\x13\x1a\x74\x9f\xe5\x5a\xbe\x6e\x41\x2b\x24\x63\xed\xda\xc1\x47\x0b\xda\x96\x45\xb6\x93\x16\xdb\x4e\xba\xa1\xa5\x43\x27\x20\x85\x0c\x83\xc8\xbf\xe5\x23\x0b\xfa\x1e\x99\x05\xc
8\x27\xd5\x55\x81\xa1\xd3\xda\xa9\x03\xbd\xc0\xd3\x03\x18\x1a\xce\x07\x17\x3a\xa6\x07\x5d\xc3\x05\x1e\x34\x03\x07\x1a\x66\x00\x7c\x68\x18\x01\xf9\x16\xb1\x06\x00\x6d\x80\xc0\x07\x14\x3e\x20\xf0\x09\x14\xdb\x06\x01\x0c\x0c\xe7\xdb\x41\x77\x61\x18\x84\x24\xc3\x73\x3e\x87\xd0\x33\x7c\xa2\x29\x3b\xce\xc9\x84\xbe\x13\xee\x4d\x68\x9b\x16\xd9\x44\xdb\xd0\x70\x82\x93\xee\x40\x9b\x17\x20\x5f\x9b\x7c\x81\x23\xd9\x91\xef\x0d\xda\xf2\x32\xfe\xbc\x85\x81\xfe\x97\x55\xfe\x35\x58\x25\x74\xc2\x18\x21\x81\x55\xa6\x03\x98\xcc\xf2\x82\xa2\xe8\xf4\x39\xd6\x8b\x9b\x52\x6f\x65\x94\x10\x5a\x9e\x0d\xdd\x4f\x01\x34\x02\x42\x71\xcf\x26\x24\xf7\x4d\x0f\x38\xd0\x75\x3f\x1a\x27\x13\x7a\x96\xbb\x37\x2d\x18\xa4\x7a\x97\x01\x4c\x68\x92\xc5\xc2\xb4\x83\x4f\xa6\x07\x7c\x91\xa8\xd6\xbf\xff\xd1\xb0\xc3\xa1\xfd\x41\xd2\x93\x67\x4d\x0d\x2f\x3d\x10\x2f\x42\x69\xe1\x40\xc3\x34\x97\x50\x9a\x15\x59\x9d\xd3\x24\xfb\xaa\x2a\x68\x86\x61\xf8\x8e\xe6\x0e\xec\x41\xa1\x19\xfe\xf2\x87\x80\x6d\x65\x7c\xe8\x5a\x1e\xb4\xcc\x30\xd5\x61\xe8\x06\x44\x51\xfb\x64\x1a\xd4\x9c\x19\x38\x74\x90\x6c\x8b\x9a\x25\x3e\x79\xd0\xf5\x02\x10\xc2\xc0\x25\xfa\x1c\x2f\x6a\x03\xb7\x31\xe8\x4d\xf0\xf9\xf2\x83\xe5\x61\xc9\xe9\xe3\xd8\xdf\x68\x63\xb9\x13\x8d\x27\xa2\x0d\x14\x9f\x70\x96\xc7\xb1\x4c\xf2\x57\x56\x60\x7d\xb0\xfe\xfd\xae\xd1\x94\xfd\x10\x38\xa9\x4e\xf4\x62\xe8\x9b\x9f\x4c\x13\xf8\xd0\x75\xbd\x8f\xf6\xe7\x00\xba\xfe\x3e\x48\x75\x0b\x86\x0e\xb5\x63\x3a\x44\x7b\xf6\xa0\x6f\x9a\x84\xd0\x01\x34\x3c\x8b\x92\xba\xc4\x51\xdd\xee\x6a\x5f\x7d\xa0\x9e\x45\xdc\xc4\xa3\xf3\x6e\x41\xf7\x0e\x9c\xdf\xdf\x41\xdf\xbd\x03\x4f\xcd\xbf\xed\x02\x40\x32\x3b\xc9\x26\xbf\x4a\xb2\x07\x86\x96\x2b\xd9\x66\x6d\x48\x74\x79\x37\x0d\x41\x78\xb7\xa8\xc1\x26\x29\x4d\x32\x1c\xa1\xe2\xfd\x5d\xf5\xf7\x23\x2a\xf1\x02\x3b\xac\xf7\x0c\xa9\xfd\x0e\xb6\xfa\xad\x96\xd1\xdb\x18\x67\x20\xab\x7e\x48\xd4\xea\xd0\x6a\xd9\xc7\x24\x5b\x2e\x0f\xba\x8e\xff\xd1\x3a\xd1\x03\x07\x99\x81\xec\x8e\x81\x2c\xb2\x2c\xba\xae\x82\x81\x78\x1b\x37\x30\x90\x25\x31\x90\xc5\x19\xc8\x1b\x32\x90\xe
1\xd9\x80\x7e\xa6\x3e\x0c\x7c\x07\xd0\x4f\xc9\x1e\x39\xd1\xfc\x22\x76\x1a\x18\xdd\xfa\x06\x44\xf5\x0d\xf6\x81\xa9\x8e\x39\xb9\x8e\xc5\x8c\x11\xa2\xeb\xae\x47\x82\x57\xaa\x21\x0e\x5d\xa5\xd8\xed\x48\x21\xd2\x4e\x71\xac\xf5\x22\x45\x11\xde\xb3\xe3\xf9\xde\x9b\x05\x37\x81\x65\x41\x7a\x5e\x12\xe2\x8f\x00\x79\x03\x20\x55\x28\x4f\xee\x91\xc1\x82\x94\x29\xc7\x51\x72\x7b\x26\x63\xc4\xef\xcb\x0e\x2f\x1d\x58\xd0\xc7\x87\x7b\x65\xe8\x4d\x1e\x09\x7e\xc4\xf2\xdd\x1c\xa1\xd3\x78\x66\x62\x75\x29\x92\x7f\x9d\x1f\xf2\x5d\x89\x8a\xfd\x13\x80\x1d\x2b\x5e\x88\x88\x77\xf1\xcd\xc4\x42\xe8\x9c\x54\x2c\x9b\x72\x1b\x13\x82\xf6\xbe\x6f\x23\x13\x31\xaa\xf6\xa8\x2c\xd1\xd3\xca\x01\x4e\xaf\x11\xda\xf5\x51\x08\x72\x61\x2a\x24\x17\x5e\xa2\x89\x1f\xc1\x70\x6b\x5e\x8c\x91\x8a\x13\x51\xac\x00\xf9\xec\xea\x4c\x16\x6b\x02\xb5\xa8\x3b\xd1\xd0\x8f\x5e\x1f\x16\x6a\xd7\x49\xf4\x55\x6e\x43\x85\x39\x2d\x45\x86\x44\x30\xac\xc3\x80\xdd\x95\x64\x2e\xe9\x59\xb4\xcf\xcb\xe6\x4e\xaa\x1c\xa0\x43\xe1\xe3\x34\x7c\x60\xa4\x7f\x8f\x5b\xae\x34\x1e\xb7\x99\xb9\x4c\x49\x66\x3d\xa5\x53\x95\x54\x62\xfc\x09\x32\xe6\xb1\xaf\x00\x20\xdf\x7c\x67\x17\xe3\x55\xfe\x59\x52\xdc\x03\xea\x3e\x3c\xee\xfd\x25\x95\x75\xc7\x8a\xaa\xe0\xfa\x83\xc2\xdd\x63\x4e\x37\x3f\x31\x6f\x15\x67\xe0\x17\x67\x29\x54\xb4\xd5\x7f\xa6\x61\xa4\xd0\x00\x8b\x6e\xd1\x57\x3c\xd1\xc0\x31\xe2\x61\x2c\xf8\x5c\x2e\x77\x3b\xe9\x9e\x58\x52\xbc\x14\xe3\x05\xd2\xa3\xb8\x03\x04\xb4\x39\x8c\x64\x48\x23\xeb\x95\x08\xa3\xbd\xa8\x27\xb8\xa7\x29\xef\xc5\xf6\x03\xc5\x8f\x0c\xe9\x78\x14\xe2\x51\x6a\x32\x65\x4b\x7e\xc2\x04\x14\x9c\x8d\x04\xdf\xbb\x16\xd8\x08\x37\x49\x17\xfe\x87\x1d\x10\x2e\xc4\x7e\xa3\xae\x71\xe7\x95\x6e\xd2\x6e\x19\x43\x7e\x1b\xbb\x50\xa2\x9b\x62\xcc\xc0\xa1\x08\x08\x43\x32\x06\xc2\x0c\xfa\x21\x74\x5b\xd4\x54\xe0\xd8\x55\x8b\x4d\x7e\xc2\xf7\xf4\x4a\x8d\x3d\xa4\xbc\x5c\xaa\x45\xa0\x3d\xd2\x7e\xee\x29\x5e\xbf\x15\xd6\xc0\x85\xbf\x95\xa6\x42\x84\x15\x11\xd8\x92\x3d\x73\x31\x89\x85\x65\x8d\xa0\xd1\x64\x0c\x64\x48\x71\x98\xc8\x47\xd4\x54\x08\x1c\xf7\x00\x90\xc2\x3
d\x2a\x88\xad\x78\x4e\x6b\x3a\xff\xbf\x87\x97\x92\x1e\x06\x42\x3a\xac\x25\xbc\x82\x36\x9c\x6b\xc7\x97\x83\xe7\xb5\xd6\xd6\x3e\xa6\xfd\x07\x42\x16\x56\x54\x08\x97\x40\xed\x91\x03\xc2\xde\x84\xc4\x2e\xc4\x28\x26\x24\x1c\x6d\x7d\x1c\x0f\xc0\xf4\x5f\xdf\x50\x9e\xa4\x2c\x7c\x1a\xea\xd5\x36\xc4\x11\xc6\xeb\xde\x6b\x1b\x12\xc8\xe7\x85\xa3\x1e\x67\x43\x6b\x18\xbc\xb2\xff\x62\x9d\x98\xd8\x0f\x5f\xd9\x24\xb3\x65\x71\x65\x00\xd3\x7d\x3d\x24\xd1\x70\x05\x1c\x2c\x92\xcc\x65\xc6\x53\xce\xe7\x5c\x10\xc2\x36\x56\x4e\xaf\x01\x7e\x9f\x8c\xd1\x2d\xc9\xf6\xb8\x4c\x14\xc1\x68\xd5\xaf\x06\xc9\x11\x73\x8c\x71\xf0\xbd\xd7\x43\x06\xf1\x9a\xc5\xd3\xb9\x0f\x79\x8c\xff\x9c\x90\xa1\x27\x3b\xb9\x5a\xaf\xf3\x3c\xad\x93\x62\x2c\x10\x06\x1a\x10\x04\xbc\x42\x0e\xf9\xeb\x69\x0f\xed\x03\x6d\x96\x49\xfe\x98\xd3\xc7\x16\x1d\x92\xf4\x69\x75\xc8\xb3\x9c\x5e\x6a\x13\x5c\x41\x4c\xe1\xce\xe5\xca\x6f\x9e\xa3\x08\x49\x1f\xa3\x83\x5e\xe9\x7b\x54\xed\x13\x01\x59\x31\x14\x83\xf2\x7d\x38\xe1\x58\x92\xeb\x6a\xdb\xd8\x12\x83\xb3\x4b\xbb\x46\x35\x7a\x9c\xe1\x18\x9a\x87\x3c\xaf\xf7\x04\x3d\x1a\x73\x41\x8e\x7a\xea\x88\x78\x02\x91\xaa\xbb\x63\x5d\xe3\xb2\xe2\xd2\xe7\x07\x81\x15\x1a\x0a\x9c\x2d\x64\x6d\x6d\x4f\x7e\x28\x66\x04\x24\x53\x7d\x2f\xa2\x72\xc4\x86\x82\xa0\x07\x5e\x6d\x83\x6d\xb0\x35\x46\x6b\x13\xc4\xb3\xe3\x61\xd3\xed\xee\xbc\x38\x40\x41\x30\x42\x69\x9d\x46\x03\xc4\x31\x88\x93\x93\x98\xcc\xdf\xd0\x14\x38\x45\xf1\x7c\x37\x7d\x33\x6e\x1c\x0f\xb2\x7f\xcd\xbf\x35\xb0\x92\x3c\xd3\xa6\x0a\x3f\x30\x23\xd1\xad\x35\x94\xd5\xbe\x13\xeb\x9b\x10\xbe\x1d\xd7\x97\x42\xf3\x5f\x81\xb8\x04\x00\xf9\x1d\xe5\x87\x03\xce\xea\xf6\x4a\x66\x8c\x6d\xec\x2b\x4b\x56\x75\x99\x64\x3b\x6d\x3c\x4b\xb7\x38\x14\x33\x8e\x37\xc8\x56\x42\x91\x45\x20\x8e\x1d\xec\x06\xca\x82\x27\x54\x26\x34\x5c\xcd\x54\x66\xd7\x64\x88\x03\x27\x72\x95\x90\x62\xbc\x5d\x80\x58\x5e\xe0\x12\xd5\x79\x8b\x1a\xb6\xc9\x9f\xb2\xe8\x57\xfc\xf4\x98\x97\xdd\xad\xd7\x6d\x1c\xbb\x6a\x9a\xa1\x3a\x3f\x2c\xe8\xeb\x01\xd7\x48\xd9\xcf\x1a\xed\x16\xa0\x8e\xea\xba\x4c\x36\xc7\x5a\x4d\xa
b\xbf\x1f\x51\x9a\x6c\x13\xc1\xaa\xb4\x8d\x90\xeb\x29\x21\x15\x25\x21\x43\xfd\xb4\x80\xaa\x9b\x63\x92\xd6\x49\x36\x3d\x3c\xf6\x44\x9b\x22\x63\x33\xbf\x1e\xb6\xe8\x76\xe6\x20\x81\x9f\x5f\x99\x86\x69\x99\xf6\x28\x80\x03\xaa\x23\xb2\x48\x6c\x4a\x14\x7d\xc5\xcd\x85\xe7\x18\x47\x79\x49\xb5\xcb\xd5\x31\x8b\x71\x49\xe0\x0b\x7b\x52\x71\x3d\x2e\x31\x8a\x59\x3c\xaa\x3c\xc6\x07\x0a\x53\x31\xf7\x57\x3d\x6d\x57\x59\xab\x47\x8f\xbe\x64\x2d\xa9\x23\x8a\xdb\x6d\xe5\x3b\x81\xc0\x88\xfc\x2d\x6f\x4f\x16\x4c\x64\xa3\x68\x13\x2d\xaf\xdd\x67\x1c\xba\x3b\x5f\x5a\x79\x28\xcd\x5e\xe4\x6f\x02\xb3\x89\x22\x40\x56\xc0\xc9\x90\x09\xea\x87\xb9\xc6\x03\x29\x28\x01\x76\x9a\xba\x1c\x30\xb6\x1c\x09\x66\x35\x02\xe6\x47\x84\x95\xbb\xf1\x85\x68\x59\x47\xba\x2a\x6d\x12\x73\x96\x8a\xee\x8d\xf6\x01\x6d\x1d\xcb\x09\x9d\x58\x05\x56\xb1\x99\xfd\x6e\x0a\x3c\x73\xc3\xa1\xee\x35\xef\xdf\xdc\xd5\x47\xb5\x5b\x35\x8b\x29\xc1\xac\x00\x6e\x1b\xf3\x91\x5a\x51\xbe\xcb\xb9\xb8\x8f\xe5\x45\xdc\xd8\x2b\x2f\x66\xb2\x08\x28\x34\x06\x5a\x3f\xcc\x9d\xf2\x95\x2e\x75\xe4\x02\xe6\xd7\xa5\x8a\xb8\x29\x7b\x7c\x5d\xda\x9d\x0f\xdd\x85\x99\xbd\xe8\xc9\x06\x2f\xbf\xc4\xb2\xd9\x76\x0d\x1f\x00\xff\xdf\xa1\x5b\x36\xee\x55\xc6\x61\x0d\x65\x1a\x6f\x43\x1c\xf4\xdf\x42\x69\x9e\xa8\x54\xc4\x51\xa8\xcb\x64\xb7\xc3\xa5\x32\xc6\x42\x93\xa7\x37\x0b\xcf\x54\x99\x26\x2c\xb7\xca\x4e\x0a\x5e\x21\xc4\x77\x66\x83\xb8\x34\x52\x66\xef\x58\x4c\xca\x93\xad\xac\x34\x6b\x2e\xc4\x51\x17\xac\x29\x7e\xe6\x33\xf4\xfd\x28\x9c\x0e\x3e\xdc\xd0\x68\x3d\x30\x69\xae\x7e\x52\x6f\x9e\xa5\x49\x94\x46\x32\x54\xbb\xe8\xa8\x7d\x1a\x14\xf1\x4c\xc6\xe3\x60\xe1\x38\xa9\x69\x20\xc2\xd1\x49\x99\x6e\x39\xa5\x48\x8f\x3e\x0f\x3e\x8a\x14\xd1\xca\x9b\x13\x05\xc9\xf7\x5f\x3e\x98\x9b\x8a\x81\x09\x14\xbe\xff\x9c\x5c\x4c\x7b\x98\x8a\x0d\x32\x6a\xf4\xa5\x9b\xe0\xd6\x68\x65\x18\xaa\x3d\x2d\x5d\x7c\xa7\xa0\xeb\x94\xe9\xda\x57\x4e\xa7\x8b\xd2\xb1\xe3\xd7\x16\x46\x0a\xe7\x27\x5c\xa6\xe8\xa9\xc3\x7a\x9b\x9c\x71\xbc\xee\x6f\x48\x9a\x03\x09\xf7\x7e\x3d\x12\xd2\xb1\xeb\x18\xb3\x66\xf3\x7b\x53\xa
a\x1b\xe2\xd3\x68\x3f\xe6\xe5\x61\x9f\xa7\x58\xcf\xcb\x64\x97\xf4\x83\x21\x4f\x4d\x18\x8a\x55\x4c\x9e\xc7\x94\xde\x54\x74\xb1\xec\xbd\x98\xe8\xae\x85\x38\x47\xcd\xa5\x9f\xb1\x07\xa7\x85\xb8\x99\xd0\xa7\xf7\x55\xc8\xde\xee\x58\xb5\xe8\x35\xcf\xb4\x1c\xaa\x61\xe2\xb0\x54\x33\x15\x0c\x32\x24\x03\xd8\x14\x11\x7a\x0c\x7b\x77\xd7\x8a\x37\xd5\xc9\xd6\x51\x8a\x51\xb9\xda\xe4\xf5\x7e\x0a\xca\x0d\xf3\xeb\xc2\xc7\x0e\x46\x46\xbd\x6d\x6f\x83\xd3\xfc\x71\xa2\xb5\x5f\x89\xaa\xa9\xe3\x73\x81\xb2\x18\xc7\xef\xeb\xf2\x88\xbf\x8c\x44\xe9\x69\x61\x26\x19\x3b\xe4\xbf\x11\xac\x1c\xb0\x8c\x4a\x5c\x67\xa5\x93\xf3\xa8\x70\xb5\x99\x73\x7d\xa4\xe7\x12\xcf\x44\xa6\xce\x0b\x35\x26\x24\x43\x8d\x86\xd4\x84\xca\x49\x21\x0c\x7b\x67\x40\x43\xae\x5f\x1c\x51\x4b\xd9\x28\x73\x2e\xd7\x93\x48\x0c\x59\x3b\x08\x3d\x2e\x3f\xf5\xde\x5c\x07\x6b\x64\x8a\x4f\x27\x5c\x77\xe7\xf6\x68\x3a\x9b\x36\x14\x60\x6f\x47\x48\x8a\x0b\x83\xe2\x17\x67\xfa\x52\xaf\xd1\x8f\x27\x87\x10\x10\xdc\xb1\xc4\xef\xcd\x56\xc3\x1d\x0b\x1b\x35\x35\x54\x60\x8e\x04\xa3\xca\xa6\x19\x18\x6a\x6d\xb3\xc9\x50\x12\x97\x4a\x32\x0b\x4e\x30\x20\x6d\xcf\x69\x48\x78\x60\x60\x4a\xe8\x0f\xc7\xb4\x4e\x8a\x14\x33\x07\x99\x8b\x68\xe3\xe5\xa6\xf7\xce\x00\xcd\x53\x44\xab\xb0\x72\x44\xc5\xb9\x95\x57\x1a\x73\x4c\xea\xde\x50\x69\x23\xd2\xad\xbb\xa8\xa1\xd3\xd7\xcc\xa4\x17\xff\x6e\xea\xe9\x8a\xc7\x7f\x56\x28\xaf\x18\x4f\xae\x39\x7d\x48\x13\x8e\x50\xcd\xb5\xe3\x95\xb9\xee\xe4\xef\x16\xd0\x6a\xcf\xa5\xef\x85\xfa\xa3\xc0\xbe\x2c\x8e\x3f\x8c\xa8\xcc\x12\xfb\xcf\x44\xd0\x16\x5c\x5e\xb0\x88\x94\x9d\x24\xf0\xb3\xb8\x25\xf5\x06\x7b\x3a\xb0\x2b\xd1\x93\x42\x21\x6a\xd0\xb1\x6d\x5b\x75\x34\xea\x90\xbf\x4e\x18\x81\x33\x12\x2e\xa7\x7f\xbc\xe3\x8a\x62\xdb\xe0\x6d\xd1\x07\xd3\x2d\xf6\x10\xfe\x74\x1f\x4a\x7c\xc8\x4f\x98\x4e\x6e\x7d\xbf\xa5\x65\xd5\x58\x70\x76\xea\xe5\x72\xdf\xd2\x1e\xba\xea\x55\x0a\xa3\x32\xda\x8b\xcf\xf5\x4f\x14\xd3\x45\x8f\xbc\xb1\x4d\x23\x5f\xa2\x05\x65\x79\x38\x71\xaa\xe6\xc5\x8e\xd0\x63\x73\xb5\x88\x85\xb4\xc3\xed\xe3\x72\xa3\x5e\xa6\x8c\x64\x78\x19\xdf\xe
9\x4e\x6e\x90\x07\xea\xf4\x90\xed\x96\xbe\x91\xd5\x7f\xa9\x67\x5e\x11\xe6\xc8\xcf\xec\x8e\xb8\x1f\xc9\x32\xeb\x80\xe8\x2a\xfb\x6c\xbd\xf0\x39\x68\x52\xbd\x78\x32\xdc\xe4\x0c\x00\xae\x04\x8b\x5d\x1d\xf6\x66\xa4\xf7\xdf\xab\x75\xde\x8a\xa3\x10\xc4\x57\x05\x8e\x4f\x89\x5d\x4c\x65\xd1\x34\x20\x69\x0d\x3f\x62\x17\x36\x85\x13\xb3\x28\x10\xc4\x36\xf9\xb9\x7b\x60\x4a\x7f\x62\xaa\x2a\x07\xd9\xa6\x57\x51\x99\xa7\x29\xc1\xb5\xce\x8f\xd1\x9e\x3e\x18\xdb\x86\x06\x69\x1e\xb2\x18\x6d\xad\xef\x14\xde\x4d\x1b\xc1\xc8\xb4\x41\x1d\x6c\x98\xd2\xca\x95\x9d\x66\x7f\x21\x6c\x06\x1a\xa8\x59\xde\x98\x78\x70\x7c\x1b\x2c\xa5\x02\xcc\x30\x56\x72\x30\xcb\x9a\xc3\x4a\x61\x42\x58\x8a\xa9\xd8\x00\x3f\x7f\xe7\xdb\xaa\xc1\x5a\x17\xc7\xf3\x40\x9a\x80\x1a\xa3\x30\xdc\x20\xf4\xb7\x9b\xf5\xd0\x9c\x33\x24\xa0\x9e\xa1\x43\x1b\x1e\x24\xc6\x5b\x74\x4c\x1b\xf5\xfa\x91\xbf\x0c\xac\x96\x29\x69\x07\xd2\x23\xdb\xcd\xfa\x6b\xa7\x5d\x00\xd3\xa3\xeb\xf3\xec\xf6\x51\x39\x94\x9c\xba\xf4\x3c\xe3\xd2\x7b\xb2\x6f\x9c\x08\x4a\x8e\x99\xc9\x61\x84\x93\x9d\x7b\xc6\xd6\xec\xe7\x34\xd2\x88\x98\xd4\x80\x63\xdc\xdc\x80\x0a\xcc\x73\xf1\x54\x74\x79\x8c\xac\x9c\x3b\xe2\xa4\x7c\x5f\xd6\xe9\x17\x61\x80\xe9\xbd\x53\x6f\x61\x3d\x25\x32\x73\x5c\xb0\x08\x88\x92\x55\xca\xee\xed\xad\xe7\xc2\xed\x29\xc0\xcf\x47\x70\x6c\x3b\x2c\xde\x83\x78\x76\xe7\x05\x73\x00\x77\x4b\x6f\xb6\xe7\x49\x96\xd4\x09\x4a\x9f\x0f\xbb\x33\x08\xb4\x6f\xc4\x2d\x00\xcd\x57\xe3\x69\xd8\x2f\x24\xad\x8d\x21\x62\x4c\x0c\x7e\x1c\x2e\x3d\x69\x64\x78\x8c\x4a\xf5\xf7\xe2\xa1\x6c\xed\xc7\xf6\x5a\x45\x67\x22\x4e\xff\x3f\x00\x00\xff\xff\xb4\x0d\x74\xa5\xa0\xdb\x00\x00") +var _web_uiV2AssetsConsulUi30b6cacf986e547028905bc5b588c278Css = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\xfb\x93\xdb\xb8\x91\xf0\xbf\x82\x1b\x97\x2b\x3b\x5e\x12\xe6\xfb\x21\xc5\x53\x49\x36\xb7\xe5\xab\x72\xbe\xaf\xea\xcb\x65\x7f\xd9\xf2\x0f\x10\x09\x8d\x18\x53\x24\x43\x52\x1a\x8d\x75\xba\xbf\xfd\x2b\x00\x04\x09\x80\x20\x45\x8d\xc7\x79\xd4\x5d\xcd\xae\x2c\xe1\xd1\x68\x34\xba\x81\x46\xa3\xd1\xd8\x94\xe9\xb3\x91\x94\x29\x3e\x6f\xcb\xa2\x35\x9f\x70\xf6\xb8\x6b\x57\x9e\x65\x5d\x50\xdd\x66\x49\x8e\x0d\xd4\x64\x29\x36\xb6\xd9\xe3\xa1\xc6\xc6\xb6\x2c\x5b\x5c\x1b\x3b\x8c\x52\xf2\xcf\x63\x5d\x1e\x2a\x63\x57\x1b\x0d\x4e\xda\xac\x2c\xce\x69\xd6\x54\x39\x7a\x5e\x6d\xf2\x32\xf9\x72\x41\x87\x34\x2b\x0d\xbc\xdf\xe0\xd4\xc8\xf6\x8f\x46\xb9\xf9\x2b\x4e\x5a\xe3\x98\xa5\xb8\x3c\xef\xd1\xc9\x7c\xca\xd2\x76\xb7\xb2\x2d\xeb\xed\x05\xad\x76\xe5\x11\xd7\x46\xd3\xd6\x65\xf1\x68\xb4\x68\x93\x63\xd0\xee\xce\x49\x99\x97\xf5\xea\x8d\x1b\x90\xbf\x0b\xc1\xd4\xa8\x6a\x7c\xde\xa0\xe4\x0b\x69\xbd\x48\xcd\xae\xc4\xd6\x27\x7f\x17\xda\x21\x52\xa2\x4b\x76\x22\xf2\x77\xd9\xb5\xfb\x9c\x24\x03\xda\xd7\x51\xed\xb6\x46\x45\x53\xa1\x1a\x17\xed\x05\x81\x0e\x87\xbe\x38\x2b\x93\x1c\x6a\x92\xff\x13\xf9\x71\xd9\x66\x38\x4f\x1b\xdc\x1a\xd9\xb6\x46\x7b\x7c\xde\x94\x75\x8a\xeb\x95\x75\x81\x59\x63\x36\xd9\x57\x6c\x46\x8c\xa2\xe4\xfb\x0a\x86\x7e\x8d\xf7\x97\xf7\xef\xfe\x0d\xec\xb3\x22\xab\x71\x83\x5b\x98\x34\x0d\x38\x5a\xd0\x82\x0e\xf8\x2f\xf0\xa7\xff\xf8\x4f\xf0\x29\x4b\x70\xd1\x60\xf0\x5f\xe0\x31\x6b\x77\x87\x0d\x4c\xca\xfd\xfb\xbf\x3e\xb6\xbb\x7d\xf3\x5e\xae\xf6\xee\x3d\x25\xf0\xdf\x0e\x65\x8b\x0d\xda\xe3\x34\x35\xd2\xdc\x48\x5b\xa3\x47\xac\x1b\xb1\x9d\x6d\xec\x1c\x63\xe7\x1a\x3b\xcf\xd8\xf9\xc6\x2e\x20\xc3\x45\xa9\xc1\x30\x37\x72\xfc\x88\x8b\xd4\xc8\x33\xa3\xcc\x8d\x8a\x74\xda\x68\xf1\xa9\x45\x35\x46\xc6\x21\x3f\xef\x51\xfd\x98\x15\x2b\x6b\x5d\xa1\x34\xcd\x8a\xc7\x95\x75\x19\x81\x14\x7a\x4a\xc6\x72\xad\xb2\xd2\x21\x3f\xe7\x59\xd3\x9a\x4d\xfb\x9c\xe3\x55\x51\x16\xf8\xb2\x39\xb4\x6d\x59\x18\x59\x51\x1d\x5a\xa3\xc1\x39\xe1\x0b\xde\x6c\xdf\x26\x1d\xb5\xb3\xf9\x84\x37
\x5f\xb2\xd6\xdc\x94\x27\xd2\x06\xc1\x81\x51\x9b\xa4\xac\xf5\xa9\x02\x42\x41\x75\x5a\x9b\xfb\xf2\xab\x59\x36\x27\x93\xa5\xef\xcb\xb2\xdd\x91\x1a\x8f\x35\x7a\x6e\x12\x94\xe3\x35\x6f\x44\x29\x80\x8a\x36\x43\x79\x86\x1a\x9c\xae\xf7\x59\xd1\x31\xac\x6b\x59\xd5\x69\x4d\xd8\x75\x9b\x97\x4f\xe6\x69\xb5\xcb\xd2\x14\x17\x43\xca\xf3\xaa\x49\xea\x32\xcf\xd7\xa4\x4b\x66\x8d\x8b\x14\xd7\x04\x5c\x59\xb5\xd9\x3e\xfb\x8a\x3f\xe1\xc7\x6c\x93\xe5\x59\xfb\xdc\x37\x4c\x4b\x52\xc6\x41\xe9\x5f\x0f\x4d\xcb\x28\x49\x11\x9f\xca\x6a\x26\x72\xb4\xa9\xbd\xe8\x18\xcd\x1e\xe5\xb9\xc8\x9c\x51\xe8\xe3\xfd\xe5\x9d\xb1\x42\x5b\x22\xdc\xab\x0d\xde\x96\x35\xd6\xd1\x3d\x2b\x76\xb8\xce\xda\xf5\x38\xe9\x42\x05\xb6\x13\x03\x22\x54\x39\xaa\x1a\xbc\xe2\x5f\xd6\x5d\x46\x53\xa1\x84\x31\x51\x9b\x1a\xed\xee\xdc\x33\x15\x43\x1a\xe5\xd9\x63\xb1\xca\xf1\xb6\x65\x72\x3c\xcb\x25\xb4\x07\x5b\xb4\xcf\xf2\xe7\xd5\x1f\xf2\xac\xf8\xf2\x27\x94\xfc\xf9\xb9\x69\xf1\xfe\xe7\xb2\x68\x0d\x13\x55\x55\x8e\xcd\x86\xa6\x18\x77\x7f\xc6\x8f\x25\x06\x7f\xf9\x8f\x3b\xe3\xff\x95\x9b\xb2\x2d\x8d\xff\x7b\x7a\x7e\xc4\x85\xf1\x97\xcd\xa1\x68\x0f\xc6\x4f\xa8\x20\x50\xf3\xdc\xb8\xfb\x39\xab\x11\xf8\x33\x2a\x9a\x3b\xe3\xee\x8f\x75\x99\xa5\xfc\xc7\x47\x9c\x1f\x71\x9b\x25\x08\xfc\x1f\x7c\xc0\x77\x46\xff\xdb\xf8\x7d\x9d\xa1\xdc\x68\x50\xd1\x98\x0d\xae\xb3\xed\x30\x4f\x4d\x71\x1e\x3a\xb4\xe5\x24\xd3\x91\x3c\xb1\x77\xfb\xb2\x28\x09\xe5\x30\xa5\xca\x39\xcf\x0a\x6c\xee\x98\x78\xd9\xd0\x5f\x6b\xe6\xc2\xed\xf6\x82\xf8\x0c\x68\x59\x41\xb4\xdd\xae\x93\x43\xdd\x94\xf5\xaa\x2a\xb3\xa2\xc5\x35\xa3\x77\x8a\x93\xb2\x46\x64\xe6\x66\x52\x29\xcc\x76\x6f\xd2\xd0\xf5\x3c\xbf\x17\x7b\xe8\xf8\x78\x0f\x20\xe1\x94\x5d\xad\x99\x7e\xd3\x0d\xf9\xeb\xc6\x99\x42\x5b\x73\x14\xab\xd3\xba\x93\x68\x1b\x92\x79\x10\x58\x97\x6c\xff\x78\xee\xb2\x49\x6f\x2f\x74\x84\x7f\x6d\x9f\x2b\xfc\x21\xd9\xe1\xe4\xcb\xa6\x3c\x7d\x36\x84\xc4\x1a\xa5\x59\xf9\xf9\x7c\xc4\x64\x51\x42\x79\xc7\x28\x1b\xd4\x60\x42\x8c\x4b\x53\xa1\xa2\xe3\x68\x3a\xc5\x70\x3e\x15\x27\x22\xce\xa8\xbf\xd6\x65\x8e\x3f\x6c
\x50\x51\xe0\xfa\x33\x28\xd0\x71\xb5\xcd\xea\xa6\x35\xcb\xad\x49\x9a\x02\x87\x9c\xfc\x47\xa6\x71\x94\xb4\xd9\x91\xce\xa0\x6c\x41\x90\x96\xc8\xd0\xb2\x2e\x95\x20\x25\xbd\xec\x33\xc9\x27\x24\x6b\xcb\x43\xb2\x13\xa7\x09\x3a\xb0\x9c\xa0\x36\x74\x28\x31\x18\x4d\xd6\x4f\xbb\xac\xc5\x54\x3e\xf0\xaa\xaa\xf1\xfa\xa9\xac\x53\xf3\xa9\x46\xd5\xaa\x28\xeb\x3d\xca\x2f\xfd\x6a\x34\x4c\xc4\xdd\x22\x99\x0e\xab\xa5\x22\x45\x6b\x85\x60\x6d\x59\x5d\xd0\xaf\x35\xce\x3f\x90\x01\xfc\xcc\xe5\x8b\x4c\xb3\xb0\xc5\xfb\x2a\x47\x2d\x36\x71\x5d\x97\x35\x48\xb3\xe3\x03\x32\x72\xb4\xc1\x39\x20\xe4\x35\xf6\x28\x2b\x00\x5b\xf8\x01\x44\x74\xb9\x6f\x00\xd2\x27\x33\xb8\x3d\x6d\x28\x21\xcc\x04\xe5\x79\x79\x68\x19\x6f\xf0\xac\x43\x43\x66\x05\x2a\xd9\x5d\x06\x11\x18\x4d\x6a\x33\x4e\x54\x13\xfa\x45\xf9\x2c\xea\x15\xb7\x74\xf7\x05\x7d\xfc\x27\x9e\x85\xd6\xca\x44\x71\x21\xfd\x78\x20\x3d\x4d\xb3\x23\x60\x72\x40\x05\x8b\xaa\x71\x9f\x01\x1d\x6c\x65\x76\x71\xfc\xcb\xce\x3e\xe7\xb8\x6d\x85\xd9\x9b\x48\xb4\xb0\xc2\x52\x06\xbe\x26\x57\xc6\x38\x3f\x47\x42\xb6\x48\xe3\x3e\x33\xd9\x65\x79\xaa\xb6\x0e\x2d\x17\xef\x2f\x70\x87\x51\xde\xee\xe8\x6c\x61\x36\x2d\x6a\x0f\x0d\x48\x5b\x43\x4c\xc6\xa9\x59\xe3\xa6\x3c\xd4\x09\xe6\x90\x29\x27\x7f\x23\xa6\x6c\xe0\x61\x52\x56\xcf\xe6\xa6\x2d\x88\x2e\x24\x11\x16\x92\x52\x74\x69\x7e\xe8\x05\x47\xcd\x2c\x1f\x1f\x73\xac\xc9\x9e\x18\x14\x4d\x49\xfa\x6f\xfe\x90\xb6\x46\x9b\x02\x24\x4d\x4d\x01\xd1\xde\xff\x1e\x6c\x3f\xb4\xf7\x6f\xd9\xbe\x2a\xeb\x16\x15\xed\xa5\x2b\x69\x52\xfc\x15\x28\x03\xad\xe9\xc8\x6a\x72\x87\x61\x27\xea\x81\xd8\x8a\x6f\x59\x97\xa9\xd1\xed\x14\xf6\xae\xa3\x14\x26\x9b\xb9\x28\x19\xde\x7d\xd8\xe1\xbc\xfa\x4c\x08\xc5\x77\x17\xbb\xd1\x76\xe7\xef\x32\x84\x6c\x7e\xa6\x9b\x8d\x6d\x59\xef\x57\x87\xaa\xc2\x75\x82\x1a\x3c\xdb\xfc\xc3\xbb\x2b\x08\xa8\x05\xe6\x50\x78\x78\xa7\x22\x41\xa7\xce\xdb\x57\x45\x47\xd4\xfa\xa1\x4d\x17\x33\xa6\xb5\x31\x75\x8d\xaf\x57\x06\x59\x9f\x24\x9d\xad\xd7\x3a\x49\x8d\x37\x64\x8d\xab\x70\xfd\xd0\x6d\x2d\xaf\x0c\x31\x1f\xb9\x0e\x46\xd7\xac\x48\x11\x20\x28
\x03\x62\x21\x91\x45\x9f\xb2\x76\x67\x26\x65\xb1\xcd\xea\x3d\x55\x7f\x40\x65\xdc\x2e\x31\x0c\xcc\x16\xe3\x94\xd4\x02\xd5\x0b\xe4\x68\x72\xe6\xc0\xfb\x97\x4e\x2a\x13\x35\x6f\x65\x57\x15\xcc\x2c\x5b\x8f\x76\xba\xe3\xa9\x8a\xcc\x53\xa2\xec\x8f\x94\x29\x99\xd8\x94\xb4\xbd\xf6\xb2\xc7\xc5\xc1\xb8\x56\x00\x50\xe5\x98\xab\x5b\xdd\x9e\xac\xe7\xaf\x7e\x53\x09\x98\x16\x56\x36\x19\xd5\x7b\xc9\xd2\x91\x25\x6b\xc1\x1a\xe1\x04\x64\x77\xd7\x6b\x59\xc0\x86\xb1\xe5\x85\x81\xdd\x7f\xc6\xc7\xa7\x35\x2b\x4b\x41\x71\x9b\x87\xb0\x5f\x1a\xd2\xf6\x8d\xb9\xcd\xf1\x49\x4c\x23\xbf\xe9\x66\x92\x2f\xb2\x96\x75\xdc\xad\xc5\xdd\x56\x59\x67\xb8\x68\x57\xbc\x77\x52\x5e\x9a\xd5\xcc\xd6\xd2\xe9\x85\x6b\xde\x84\x90\x93\x94\xf9\x61\x5f\xac\xf5\xa9\x9a\x6d\x5d\x52\x16\x2d\x2e\x5a\x75\x3f\x2d\x24\x5f\x7e\xb7\xc7\x69\x86\xc0\x0f\x03\x9d\x02\xb2\x09\xbe\x3f\xf7\xf4\x15\x08\x66\x55\xa7\x91\x5c\x13\x8e\x10\xca\xb8\x6e\x75\xba\x0c\x50\xfb\xad\x35\xa3\xbe\x16\xac\xe3\x2d\x05\x4b\x13\x85\x11\xb5\x7d\x32\xa2\x62\xc7\x09\x69\x56\x76\x4f\xbb\x95\xbd\x66\xff\x5c\x83\xaf\xe3\x05\xc6\xec\xef\x5e\x62\xa7\xa0\x55\x41\xd5\xf1\xa6\xb9\x29\xdb\xb6\xdc\xaf\x6c\xb2\xc7\x6a\xf7\x39\x65\xe8\x87\xe6\xf8\xd8\x9b\xd5\xa8\xda\xdb\xb3\x2e\xda\x34\x65\x7e\x68\xf1\xba\x2d\xab\x95\x6f\xbd\xed\x76\x59\x26\xf9\x69\x3a\x41\x75\x5a\x93\x3d\x80\x98\x43\x7f\x9b\x11\x21\x24\x95\x26\xbc\xdf\xe0\xda\xcc\x4b\x44\xba\x37\x6e\x8e\x59\xf1\x64\xb9\xe3\x85\xb9\x84\x9f\x5f\xc4\xfe\x22\xad\xd8\xd6\x24\xc1\x74\x5b\xda\x73\xb3\x94\x4a\x7f\x98\x59\x8b\xf7\x4d\x5f\x50\x80\x50\xa1\xe4\xcb\x08\x80\x98\xf8\xd7\x43\xd3\x66\xdb\x67\xb3\xe3\x68\x9e\xdc\x09\x60\x82\xf2\xe4\x07\x2a\x85\xc0\x04\xb1\x55\x9d\x80\x09\xbc\x88\xfe\x43\x18\xe7\xfe\x0a\x05\x40\x92\xd5\x49\x3e\xec\x04\x51\x91\xb1\x45\x65\x45\x4a\xe2\x7a\x48\x20\x7b\xbd\x06\x64\xc5\x36\x2b\xb2\x16\x03\x8c\x1a\x6c\x66\x85\x59\x1e\xda\xf5\x8b\x2a\xf5\xfb\x2b\xbe\x96\x93\x99\x83\x4c\x72\xbe\xf5\x16\x90\x61\x9f\xcc\xd8\x66\x79\xbe\x7a\x83\xb7\x89\x97\x46\xd7\xba\xf7\xb8\x2a\xda\x9d\x39\x28\x66\x3f\x38\xf7\x93
\x5d\x36\x53\x4c\xc6\x18\x3a\xcd\x5a\x93\x76\x73\x4b\xee\xf5\x96\x5c\x4d\x4b\xee\xed\x2d\x79\xd7\x5b\xf2\x34\x2d\x79\xb7\xb7\xe4\x5f\x6f\xc9\xd7\xb4\xe4\x37\xdc\xb6\x62\x57\x27\xd0\x94\x79\x96\x82\x1a\xa7\x97\xdf\x71\x28\x5f\xf0\x33\x35\xe8\x36\x40\xe5\xa0\xb3\xf5\xd6\x20\x7b\xe1\xf3\x88\x5f\x56\xd4\xf8\xe9\xfe\xf1\x07\xdb\xb0\x0d\xfb\x7e\x3d\x95\x71\x71\xdd\xb9\xda\x96\x61\xe9\x6b\xb3\x8c\xcb\xe5\x77\xff\xcc\xc8\xc9\xaa\x2f\xd5\x6a\x7e\xdd\x96\xf5\x07\x32\x84\x66\x81\x8e\x9d\xee\xf4\x99\x29\x96\xc7\xac\xe9\x4c\xb7\x5c\xc3\xb8\xa9\xfe\xaa\xb3\xae\x1a\x4b\x6b\xad\x3a\xab\xec\xf2\xf2\x9d\xf9\xb6\xee\x94\x0b\xbe\x0c\x10\x5d\x65\xd0\x81\xe9\x9a\xe1\xb8\xd5\x49\x50\x8b\x6f\x6e\x42\x5c\x6f\x82\xea\x74\x03\x00\xda\x27\xb1\xfe\xa8\xfa\x03\xdd\x45\x48\x2b\xdf\x62\xf8\x67\x02\xd1\x1a\x2f\x94\x1a\x2d\x26\xf2\x62\xa2\x6e\xc8\x4d\x93\x65\x6d\x6c\xe1\x8c\xb1\x63\xfb\xf1\x65\x54\x14\x0c\x46\x32\x91\x3b\xe8\xd7\x1c\x0b\x24\xe7\x7d\xc9\x0a\x6a\x63\xa1\xab\x2b\xd7\x33\x4d\x36\x5c\x5e\x38\xa8\x9e\x94\x2e\xb6\x7b\x03\x5d\xcf\x63\xd5\x80\x81\xb5\xd6\xbd\x45\xac\x3a\xf1\x65\xcf\x23\xdf\xbf\x9a\x59\x91\xe2\xd3\xca\x51\xec\xc3\xe3\x6e\xca\x22\x46\x9b\x31\xdb\x6c\x4f\x10\xdd\x1e\x8a\x4e\xc5\x3c\x6c\xb2\xc4\xdc\xe0\xaf\x19\xae\x7f\x80\xb6\x41\xfe\x73\x7c\x03\xc6\x9d\xfc\xdd\x5c\x4b\xd3\x66\x7a\xe8\x2c\xd6\x64\x91\x99\x4a\xd7\xd4\xab\xea\xb2\xc2\x75\xfb\xbc\xa2\xa4\x00\x94\x30\xeb\x6b\xf9\x2f\x39\x7b\x12\x6c\xb0\x0c\x08\xa3\xbd\xf5\xb6\x37\x05\xb3\x7d\xc3\xa0\xfe\xbf\x9d\xd0\xe9\xac\x7e\x78\xdc\xc1\x6c\xec\x57\x27\x60\x01\xab\x1b\x59\x93\x1a\x39\x35\x92\xb3\xea\x76\xd0\xff\x4d\x86\x8e\x61\xe0\x50\x35\x98\x73\x84\x04\xd0\xf5\xf5\xf2\xc7\xa1\xfc\xc8\x6c\x83\x23\xa1\xa8\x1f\x37\x88\x4e\xa4\x96\x01\xbd\xfb\x81\xcb\x8e\x4f\x52\xf7\xe8\x04\x64\xb1\x3e\x5d\x33\x35\x88\xb3\x82\xe7\xeb\x77\x09\x11\xd5\xcb\x34\x52\xfb\xcd\xca\x28\xd5\x1a\x3b\x75\x51\xd1\x25\x79\xaa\xaa\x4c\x52\x9b\xbd\xb9\xc1\xed\x13\xc6\xc5\xdc\x2e\xc3\x64\xa3\x7c\xc4\xdd\x76\xc3\x7c\xac\xcb\xa7\x95\xbd\x5c\xbc\xa5\xc9\x70\xa1\xc9\x06
\x41\x66\x21\xc7\xe9\x35\xbb\x27\xaf\xb0\xea\x2c\x3c\x4b\x8b\x6f\xcb\xe4\xd0\x2c\x2e\x4d\x4f\xf9\x75\xc7\xf7\x31\xf9\x5b\x4c\x8b\xab\xed\xa1\x2b\x66\x5c\xd0\x9f\x8f\x6d\x63\xbc\xd9\x3a\x57\xc9\xb9\x00\xa0\xb4\x6f\xea\x05\xcc\xad\x4e\xc0\x76\xaa\x13\x3f\x03\xad\x51\x9a\x1d\x9a\x95\x57\x9d\xa4\x43\x9f\xa2\x24\x5b\xcf\x85\x4b\xd4\xb8\xf5\x3c\x93\xac\x2b\x33\xeb\xe6\xc4\xd8\xf0\x43\x93\xc8\x1a\xad\x38\xa3\x1a\x0f\x87\xfc\x21\xcf\x1e\xae\x91\x84\x17\x3b\x4b\x2b\x9a\x2f\x2c\x71\x7c\xc7\x4b\xd2\xe4\x5d\xe7\x22\x0c\x06\x7b\xe4\x03\x1a\x19\x94\x6e\x07\x71\x8d\xa9\x78\x7f\x16\x4a\x48\x5f\x7c\x91\x84\xf4\xa5\x99\x1f\xcc\x12\xca\x5e\xc1\x5d\x37\x14\xd3\xa8\x6b\x4b\x4f\x61\xae\x2d\xcc\x44\x5b\x38\x82\x5e\x36\x71\x4f\x91\x62\x55\x94\xed\x0f\x22\x53\xdf\xdf\xdc\xdf\x79\x10\x9a\x59\xc8\x47\xb6\xe7\x7a\x0b\x04\xe6\xda\x64\x70\xc8\x5f\xb4\x16\x2d\x14\xd5\x81\xa0\x4e\x40\x34\xe7\x85\xd5\x46\x1b\xc8\x37\x9b\x4d\xe2\xa5\xb6\xde\x83\x40\x56\x7e\x76\x28\x2d\x9f\x56\x16\x70\xaa\x13\x08\xa9\x0a\x22\xae\xfd\x8e\x7d\xbf\x5e\x58\x4c\x99\x24\x47\x8a\x0f\x57\x79\x6c\xeb\xaa\xaa\xc0\x17\x15\xd9\xd7\x4b\xdc\xe4\x28\xc7\xe1\x7d\x6b\x35\xce\x11\x61\x82\x41\x0b\x22\x08\x3b\x23\x2d\x68\xc1\x49\x08\x40\x7c\x3b\x33\xec\xb8\x16\x02\xc9\xb3\x73\xaf\x37\xb2\x79\x90\x8f\xcc\x7a\x70\xa4\x21\x1d\x63\xc3\x24\x43\xed\x77\x61\x53\x3b\x95\x35\x57\x50\x7e\xf3\x9b\x19\x32\x9b\x36\xd7\xce\x34\xca\x9b\x17\xa9\x7d\xf9\x47\x99\xfb\x26\x11\x7a\x50\x97\xdd\x4e\x7d\xec\x06\x83\xac\xbb\xe2\x89\xb6\x25\x1c\x5c\x5b\xaa\xb5\x57\x3d\x69\x36\xa1\xe5\xe3\x3d\x1f\x0b\xba\x78\x4d\x0d\x50\x9c\xd8\x49\x38\x21\x47\x23\x4b\xf4\xeb\x68\xa9\x2f\x30\x78\x8e\xf9\xbf\x97\xb6\x5e\x12\xa8\x10\x28\x84\x79\x78\x27\x1c\xf1\x70\xe7\x1a\xce\x6c\x4e\x9c\xa2\x8d\x2a\xd6\x5c\xac\xec\xaa\x33\x72\xa7\xfd\x69\x2f\xf9\xc6\x97\xa1\x21\x81\xad\x34\xc3\x6f\xb6\x0a\x2e\x38\x3c\xae\x84\x2f\x12\xd8\x4a\x86\x5a\x01\x65\x85\x62\x7e\x51\x97\x99\x26\x34\xfb\x69\x6a\xeb\xa0\xbb\xa5\x39\xd4\x4a\xd1\xe1\x91\x4a\x3c\x33\xdd\xbf\x68\x49\x98\x6b\x28\xcf\xce\x12\xbf\xfb
\x22\xb9\xe7\x68\x3a\x49\x1c\xd5\x2f\xec\x50\xa4\xb8\xa6\x4e\x56\xdf\xdb\xa5\x60\xac\x0c\x8f\x2c\x9e\x8a\x0a\xed\x28\xa7\x3a\xfd\xea\x43\x55\xee\xd1\xea\x63\x3b\xca\x22\x35\x5d\x4c\x31\x86\x20\x76\x88\x9a\xd4\x18\xb5\x58\x20\x03\x62\xde\x69\xcd\x61\xb3\xcf\xda\xcf\x23\xfa\x88\xb5\xba\x24\xa9\xc2\x34\xfd\xa4\x9a\x93\xa5\x64\x60\x7a\x92\x4b\x90\xf4\x45\x16\x80\xd1\xf4\x67\xa6\x9c\x04\x70\x70\x8d\x56\x1c\x67\xfa\x0c\x61\xc1\xd6\x8c\x26\x11\x1c\xa1\xc4\x64\xce\x05\x51\x6d\x4f\x6a\xfb\x5e\x1a\x2b\x75\x78\x34\xe5\xa7\x69\xad\x2b\xac\x27\xe7\xe2\x92\x93\x58\x9c\xa5\xc5\x45\xf2\x3b\xff\xde\x42\xc8\x27\x75\x3b\xde\xa2\x68\x34\x5f\x71\xcb\xa5\x32\x6d\x75\xc9\xea\xec\x25\x24\x8b\x9a\xd8\x2b\x1e\xe0\x7d\xbb\x5e\xc1\x17\x2a\x7a\x06\x08\xdd\xd0\xc7\x7b\x60\x92\x49\xe1\x1e\xd0\x24\x07\x3a\x7d\x0a\x57\x42\x1c\xea\x05\xcb\x17\x1b\x91\xc5\xd8\x94\x2a\xa5\xd0\x59\x75\xcc\x7a\xdd\x42\xd5\xfd\x62\x33\x71\xf7\x83\xcd\xd2\xd3\x8c\xc8\x1d\x70\x26\x0b\x30\x68\xd3\xf9\xe3\xa5\x55\x60\x5e\x71\x05\x1d\xe7\x0a\x4b\xc6\x38\x73\x06\xae\xdc\xeb\xb9\x22\x33\x2d\x88\xf4\xe1\x8c\xea\xa7\x41\x10\x6c\x46\x53\x8b\xb4\xc4\x8b\xb3\xc7\xdc\xc5\x0f\x05\x88\x34\x2a\x43\xea\xa4\x01\xcb\xdf\x06\xdb\x70\x04\x84\x75\x59\x53\x1e\xdb\xd8\xc3\xe1\xff\x2e\x2d\xdf\xb4\xb4\x8c\xa8\x6a\xfb\x81\xbb\xdd\xae\x07\xed\x5b\x5c\x20\xc4\x46\xc6\xa2\x2a\xe5\xaa\x62\xcb\x45\x7b\x62\x50\xb8\x9c\x2f\x1e\x33\x99\xb9\x46\x2d\x4b\x13\x83\x4c\x44\x51\x0a\x5e\x30\xe4\xd7\x66\x07\x0d\x2e\x57\x66\x9a\x85\x48\xe8\x30\xbe\xce\x57\xb3\x33\x8e\x06\xd7\xb9\x79\x6b\x49\xdb\x8b\xb1\x9c\x1c\xd2\xa5\xa5\xaf\xcf\x96\x4b\x91\x9e\xe4\x08\x8d\x7c\x58\xd8\xb3\x90\xab\x88\x42\x9a\x35\x68\x93\xe3\x54\x62\xe8\x3e\x71\x39\x4f\xf3\x2a\x9c\x47\xb5\x20\x16\xb2\x5e\x0f\x6b\x9a\xa1\xf4\xe0\x17\xb0\x4a\x0f\x7b\x82\x07\x6e\x00\x3c\x47\x85\xb9\xc1\xd2\x36\x71\x16\x0e\xd5\x1c\xdf\x37\xf8\xff\xd0\xbf\x1f\xaf\x5c\xb4\x94\xed\x19\x81\x67\xd8\x81\x4b\xcb\x4c\x68\xcf\xaa\xce\xac\x8c\xbe\x46\x8d\xe9\x92\x96\x8f\xbc\xac\xce\x68\xaa\x2f\x1c\xf5\xab\xba\x8d\x0e\xf4\x82\x11
\x9f\xd7\x6a\x16\x03\x9d\xee\xf9\x75\xad\xe7\xaa\x64\x06\xf6\xc6\x0b\xf8\xd8\xa4\x38\xc7\x92\x3e\x20\x36\xcd\x32\xaf\x91\xb4\x2b\x35\x47\x9d\xb9\x22\xe3\x06\xcf\xbd\xb1\xcb\x43\x6e\x22\x63\x3a\xb5\x9c\x76\xb9\x7a\x2d\x78\x0c\x40\x97\x71\x65\x11\xd2\x00\x59\x56\x76\x6e\xc1\xd0\x00\x5d\x50\xf0\xea\x94\xbe\x14\xec\x14\x15\x34\x6c\x93\xb0\x6b\x7a\x82\xb9\x51\xb6\x4c\xb2\x7c\x65\xb0\x34\x13\xbe\xa6\xc9\xeb\x53\xb0\xbe\xf8\x02\x4a\x2d\x99\x23\xb5\x15\xc4\x29\xd2\x8e\x63\xc3\x77\x8c\x20\x66\x13\xa4\xd8\x6d\x5d\xfe\xd2\x33\x8e\xb9\x29\xb3\xc3\x66\x34\x65\x6a\x30\xbe\x36\x95\xe9\x0a\x2f\xa0\xdc\xf5\xf9\x46\x53\x5c\xc3\x37\xe1\xd6\x71\x9c\x64\x9a\x6f\x58\x7e\x67\x54\x11\xae\xe5\xdc\xeb\xcd\x28\x62\x89\x2b\x46\x14\xa9\xe8\x8c\x09\xe5\x7a\xb9\x89\xf6\x65\x93\x68\xa8\xb1\xd5\x0e\x85\xbb\x4b\x6f\x8a\xbd\x6f\xa6\xfc\x70\x8c\x66\xcd\x15\x43\xca\xc9\x8e\xab\x3f\x38\x10\xb6\x9f\x7c\x28\xc2\x30\x72\x23\xa4\x71\x0d\xd0\x9b\xd8\xd9\xb1\x84\xa3\x39\x53\x77\xe6\x3b\xde\xfb\x85\xcc\xde\xc2\xd2\x5b\x04\x46\x65\xc6\x73\xd9\xa8\x48\x37\x81\x49\x4c\xd6\x1f\x54\x89\x0e\x76\xe3\x53\xe3\xc0\xf6\xab\x13\x19\xd6\xb9\x51\xf9\xc7\x1c\x4c\xcd\xb3\x0a\xd0\x1d\x41\x9f\x45\x67\x7c\xf7\x0a\x7b\x0e\xbe\x13\xcc\x97\x5f\x18\xe7\xc1\xe2\xaf\xbb\x9d\x61\x7b\x57\x48\x96\x67\xe2\x35\xd9\x0b\x6c\xd1\xc6\xec\xc2\x78\x3c\x8c\x6e\x5e\x1b\xf3\xd9\x3f\xbe\x9b\x1b\xfb\xf1\x3d\x6e\xc9\x3b\x65\x1e\x74\xef\x81\xf6\x4e\xb9\x96\xa0\xb9\x00\x3a\x3a\xe0\x1e\x0e\xb5\xac\xea\x44\x2f\x90\x00\x27\xa8\x4e\xc0\x0f\xfb\x3b\xe9\x5c\x5c\x48\xe6\xf8\x7c\x4c\xd7\x8a\xa1\xbb\x7a\x4a\x63\x92\xa8\x87\x13\xfa\x4b\xaa\xa9\xe2\xef\x26\x9d\xa3\x69\xab\xe8\x03\x9e\x58\x96\x25\x4e\xdf\xe2\x49\xb6\x0e\x8a\xee\xbc\x98\x9e\xe7\xf9\xfe\xdb\x35\x77\x10\x14\xfd\x81\x7b\x73\xa7\x9c\x4a\xf9\xd0\x26\x04\xa4\x2e\xc4\x96\x94\x29\xf8\xee\x49\x77\x42\x2b\xd4\x34\x59\xf1\x38\x3d\x66\xca\x49\xf7\x4c\x85\x99\x73\x6f\x07\x6f\x2c\x37\x16\xdb\x4d\xea\x8c\x5e\xa1\xba\xde\x70\x17\xee\x40\xbf\xcd\x72\x6c\x9f\x6b\x12\xf6\xfd\x2c\xd4\x19\xec\x58\x0b\xd2\x35\x44\x54\x17
\x8b\xa8\xb2\x45\xd1\xd6\xd5\x1c\x2f\x77\x3b\x45\xcb\xb0\x3d\xd7\x20\x1b\x45\x82\xdd\x34\xd4\x49\x16\xa0\x53\x80\x8e\xf3\xe4\x2b\x8e\xe7\x6d\x5e\xa2\x96\x79\xb5\x4e\x5c\xc0\x56\xae\x32\x41\x67\xea\xaa\xb6\x2a\x05\x4a\xbd\x40\xa9\x27\x5c\x05\x1d\x4b\xb9\xf6\x0c\x90\x08\x7b\x34\x3a\xdc\xb3\x7c\xe5\x0c\x70\xba\xd8\xf8\xb0\x7c\x5e\xc0\x05\x14\xbb\xf5\x70\x2a\x97\x2d\x85\x5a\xac\x09\x2a\xb6\x35\x3e\x93\x54\xb0\x9e\x2c\x36\x79\x45\x7a\xf0\x35\x91\xfc\x18\x26\xcb\x8b\x17\xe6\xa7\x61\x8e\x74\x2f\xe6\x06\xaf\x5c\xc2\xd4\x7b\xbe\x6b\x34\x1c\x7a\x4a\xd4\x57\xc6\x79\x9e\x55\x4d\xd6\xac\x95\x50\x51\x33\xe8\x0c\x98\x30\xcc\xca\x22\x7f\xe6\xcb\xae\x8a\x94\xc6\xf9\x72\x02\xf2\x0c\x01\x26\xa6\xaf\x59\xa2\x2a\xbc\xee\xe0\xfd\x95\x85\x47\x37\x2e\x2c\x22\xc9\xd8\x41\xa1\x03\x6a\x12\x09\xea\xe6\x6a\x7f\x9a\x51\x01\xba\x16\x31\x01\x3d\xcc\x2d\xbc\x57\xb9\x81\xdd\xda\xee\xaa\x3c\x83\xe9\xba\x12\xa7\xcd\x15\x14\xc6\x94\x53\x41\x56\x27\xa6\xc7\x6a\x39\x95\xf3\xac\x9f\x2b\x19\x0d\xed\x6e\xbd\x53\xaf\x59\x42\x2f\x96\xaf\x96\x4f\x02\xe4\x4b\xda\x59\x5a\xac\xe6\x2a\x74\x13\xf9\x59\x5a\x04\xe6\x2a\xf0\x75\x49\x0e\xe1\x73\xb9\x4a\x54\x41\xd6\xcf\xb2\x76\x49\x96\xf7\xeb\xf5\x45\xd2\x4a\x9e\x32\x51\x7f\xdf\x80\x86\xe1\x12\x82\x03\x2d\x01\x2a\x0f\x81\xb5\xa4\x8a\xaa\x2f\x4f\x87\x8c\xe0\xec\x7d\x96\x3c\x00\x67\x65\x45\x04\x4f\xf4\x6e\xc0\xae\x4e\x90\xff\xe3\xe1\x5a\x8d\x6b\x13\x10\xdb\x2c\x6f\xc9\xee\x0f\xd5\x9a\x9b\xf3\x33\x4a\xae\x71\x6b\x4d\xba\xd2\xbc\xa0\x1e\x5d\x83\x14\xbd\x7d\x5b\xd6\xfb\x1b\xd1\x7d\x51\x75\x8e\xf3\x8b\x2a\x77\x88\xeb\x4e\x60\xa7\x7c\x56\xbf\x55\x15\x98\x1f\xce\x2b\xa4\xa7\x06\x86\x05\x5d\x5d\x42\x0d\x66\xac\x10\x6e\xd6\x4c\x6f\x96\xe7\x51\xbe\xde\xd6\xcb\xdc\x97\x5f\x87\x14\x5d\x3f\x5f\x70\x95\x46\x6c\x9f\x02\x99\xb7\x24\xd1\xe6\xf5\xe5\xfa\xdd\x1c\x8f\x61\xd6\xe9\x4b\xaf\xdb\x45\xd9\xa4\x23\x7b\x54\x2a\x01\xdb\xe4\xb8\x4c\xa1\x56\x7d\xb9\x8e\xda\x38\x96\xd7\x1c\x7e\x6c\x49\x90\x91\x94\xae\xad\x89\x93\xa1\x77\xfb\xbc\xf7\xa2\x19\x40\x59\xee\x87\x16\x1f\xc6\x33\xca\xc3\xbb\xf1
\x6e\x61\xac\xc5\x4f\x9a\x4f\x5e\x9f\x9d\x54\x47\x6f\x89\x9f\x8c\xf9\xce\x68\x7b\x37\x75\x91\x6a\x01\xea\xf3\xad\x2d\xed\x70\x4f\x62\x59\x07\x16\x75\x22\xa1\x87\x6a\xed\x61\xf9\xf5\x46\x57\x80\x26\xaf\x8f\xcc\x02\xfc\x66\x3b\x60\x17\x7d\x65\x57\xd6\xd9\xd7\xb2\x68\x27\xe3\xaf\xd4\xf8\x88\xeb\x06\xeb\x02\xb0\xd4\xe5\x93\xc9\xb3\x67\xb2\x5e\xf3\xe2\xa1\x24\x08\xa2\xbd\x71\x3c\x60\x92\x8b\xb3\x68\x8c\x64\xa6\x23\x81\x71\xfa\x60\xb2\x23\xe6\xe8\x43\xed\x09\x57\x51\xe8\x5d\xf3\x21\x30\x8e\xce\x2c\xd9\xdd\x65\xfb\x36\x5c\x45\x03\xf7\xe2\x39\xaf\xbf\x78\x7f\xd3\xdc\xb7\x74\x17\xe0\xe3\xbd\x84\x09\x3c\xa2\xfc\x80\x4d\x94\xe7\xe5\x53\xb7\xb3\x19\xe7\x72\x75\x7d\xb2\x40\x8a\x8b\xe7\xc9\xcc\x6e\x3b\x31\x99\xdf\xed\x1e\x26\x26\xfc\x31\x82\x53\x45\x64\x2c\xa7\x4a\x0d\xa8\x4e\x95\x90\xf0\x9d\x2a\x24\x22\x3d\xde\xae\x31\xfb\x40\x56\xa4\x84\xfb\x1d\x57\x61\x56\x0d\x08\x1e\x3c\x61\x49\x73\xfc\x16\x0f\x1d\x69\x18\xe2\xbd\x76\xa4\xa3\xe1\x1e\xe7\xf4\x7c\xab\x40\x97\x6f\x52\xdf\x82\xd7\x64\x4d\xd5\x3e\xb8\xdd\x4a\x31\xd0\x44\x43\x9d\x62\xe7\x16\xbd\xad\xc7\xfb\xe1\xfd\xdf\x25\x0a\x9e\x56\xeb\x61\xe7\x5e\x17\x69\xa5\xd7\x1c\xcb\x69\xb0\x03\x7d\xbc\xea\x49\x0c\x27\x8a\x4c\x89\x7e\x1f\xaf\x4e\xbd\x5a\x24\xda\xa1\x84\x80\xd0\xfd\xcf\x1d\xbf\x7e\x2a\x44\x10\x83\x01\x0d\x9d\x4b\xef\xb8\xd6\x98\xda\x5b\xfb\xc8\x5e\xb3\x11\x3a\x85\x1e\x32\xaa\x90\xaf\x9f\xa7\xe2\xc2\x75\x65\x88\x98\x3d\x95\x75\xaa\x2f\xc7\x88\x71\x0d\x9a\x58\x6a\x02\xde\x14\xe5\x26\x41\xcf\x57\xe8\x5b\x51\xc2\xc2\xaf\x85\xaf\xaf\xe7\x8c\x3e\x5a\x77\x9b\x16\xd5\xad\xb2\xea\xb2\x34\x75\xcd\xa5\xf9\x2c\xab\xf7\x06\x27\xc3\xab\x8e\xe4\xf8\xea\x0a\x0d\x6c\x8b\xaa\x0a\xa3\x1a\x15\x09\x96\xc3\xe0\x4e\xa5\x5f\x71\x4b\xd0\x5a\xa4\x51\xc3\x8e\xdc\xd9\x59\x9a\x21\x24\xf4\x6c\xad\x22\xf7\xdd\xf9\x6e\xa1\x94\xbe\x02\x63\x7e\x83\xd4\x7f\x2b\xef\xde\x38\xb7\x74\xf3\x5e\x10\x86\x51\xa8\xbd\x3a\x48\x1d\xca\xc7\x17\x0d\xbd\xd4\x59\x32\x5e\x82\x37\xd2\x82\x51\xbb\x52\x9a\x23\x3d\x53\x6c\x34\x82\x0b\xcb\x2e\x41\x41\x1e\x4d\x5d\xc1\x05\x63\x7a\x7b
\xb5\x59\xd4\xae\x8d\xaf\xd6\xe1\x41\xb8\x73\x78\x6d\xf8\x06\x97\x8a\x25\xc3\x37\x5f\xba\xc7\x69\xba\xd8\x78\xf8\x96\x95\x5d\x82\x82\x32\x7c\x9a\x82\x4b\x86\xef\xe6\x6a\xb3\xa8\x5d\x1d\x3e\x5a\xe7\x5c\x1e\x5a\x32\xb3\xaf\xac\x5b\xa7\xe4\x6e\xc0\xa3\x34\xde\xa2\x74\x34\x2b\xab\x5b\x66\xea\x51\x29\x6e\x99\x45\xc2\xe9\xb4\xc0\x2b\x67\xe0\x9a\x88\xb8\xf2\x7b\x28\x53\x8a\xde\x6c\xf4\xd8\xa5\x31\x61\xf1\x5e\x0c\xc6\xdf\xc5\x29\x95\x8d\x5b\xe2\xb6\x33\x1a\x9c\x31\xe8\xd9\x24\x3d\x73\x50\xf1\x57\x14\xdf\x6e\xc5\xa3\x8a\x81\xfe\xfd\x83\x89\x1e\x4e\xc9\xc8\x6c\xcf\xbf\x9d\x26\x8b\x09\x27\x9b\x40\x6e\xe9\x84\x06\x53\xbd\xe1\xf7\x1a\x1a\xea\x3e\x1b\xfa\xbe\x1a\xd2\xf7\xbb\x6d\x0d\x14\x37\x00\x4f\x3a\x5c\x9b\x07\x33\x3a\xe0\x95\x4e\x82\xf8\x45\xf4\xab\x9d\x67\x5b\x27\xfa\x06\xc9\x92\xf2\x57\xbc\x0f\x1d\x7f\x61\xbb\x52\xf7\x19\xc6\x78\x2f\x8a\x05\x74\xf0\x7e\x2d\x3a\x60\xbc\x1e\xdb\x69\x28\x6f\x4d\x91\xfd\x7f\xe4\xa6\xa4\x37\x52\x86\xfd\x19\xe1\x68\x82\x56\x9d\xda\xd4\xf9\x77\x36\xfa\x1f\x1d\x70\x67\x04\x58\xb4\x15\x68\x32\xba\x45\x60\x7c\xf6\xaa\x6e\xb0\xfb\x38\x20\xdc\xa4\xa1\xd9\x58\xcb\x94\x61\xcf\x0b\xe8\x4e\xcc\x27\xd1\x18\x4f\xc3\x3c\xba\x8c\x34\x65\x87\x3c\x20\x25\x8f\x38\xe2\x08\x31\x09\xa9\x23\xab\x62\x6f\x99\x5e\x0b\x18\xd5\x94\x1a\x41\x6f\x84\x8c\x06\xb8\x23\x30\xd3\x96\x15\x26\x7b\x56\xef\x96\xe9\x88\x56\x1a\x42\x1b\x29\xd0\xcf\xb2\x00\x2e\xcc\x1d\x3d\x8a\x0d\x3b\x0e\x0c\xc7\x8a\x0c\xe8\xdf\x6b\x62\xa4\xe7\x0f\x29\x77\x70\x7f\x13\x39\xd1\x26\x46\x3c\x94\x75\xdf\x3d\xdf\x92\xfd\x45\x7f\x24\x15\x75\x46\x52\xba\x92\xd2\xad\x29\xef\xc7\x59\x71\x10\xa6\x13\x8a\x3a\xd5\xdb\x7d\x79\x7d\x7f\x69\x82\xd8\xdc\x0b\x0e\xe9\x74\xbe\xdc\x92\xc5\xea\x95\x5c\x79\x95\x5d\xbd\x98\x28\x3a\xf2\x0e\x5b\x7a\x2d\x66\x0f\x37\xfa\x97\x2f\xbc\x68\xff\xb2\x8b\xe7\x74\x3c\x15\xd7\x7a\x32\x24\x5d\x38\xaa\x16\x6d\x2a\x54\xe0\xfc\xf3\x43\x25\xb8\xc9\xf4\x92\x0d\xf9\x2d\x0a\x5a\x45\xe4\x3f\xa9\xfc\xe4\xa1\xd2\x5a\x8e\x0e\xa0\x46\xfd\x97\xbc\x3f\x87\xb3\x41\x22\xde\xa3\xeb\xff\x17\x15\xe1\x2b\x3c\x2c
\x9d\x92\x09\x0e\xc3\xfa\x43\xb2\x51\x67\xcf\x13\x8e\xa9\x92\x0c\x76\xaf\x61\xe8\x07\x9b\x3b\x6b\x51\x8f\x46\xad\xe8\xf6\xcf\x0e\xd1\x09\x6d\x1c\x7c\x16\xd2\x90\x30\x49\x99\xe7\x1d\xea\xf4\xd5\x26\xa3\x4d\x7b\x96\x98\x37\xc7\x0e\x53\x84\xac\xd8\x38\x96\xaa\x62\x74\x25\xdf\x9d\xc5\x57\x06\x46\xc2\x0e\x5d\xbc\x67\x4f\x48\xd1\x4e\xc3\x06\x37\x0d\xe1\xd0\x7b\xd0\xd6\xaa\xe5\xb5\x7b\x60\x8a\xbe\xb0\x20\x98\xe4\xc4\xb3\x60\xea\x0d\x1c\x4b\x01\x85\xbb\x5a\xf5\xcb\x7c\x09\x78\x6d\x25\xb4\xfe\x28\x74\x3f\xe8\x9e\x74\x90\x7e\x69\x0e\x0f\x63\xf6\x60\x81\x7c\x39\xa5\x28\x53\x3c\xfc\x6a\x76\xe5\x13\x0d\xdd\x0d\x68\xe3\x3d\x49\x18\x16\x0c\x0c\x8b\x16\xef\x41\x27\xf2\xc3\xb7\x5d\x88\x78\x60\x02\x47\x13\x29\x7e\x39\x70\x1a\x24\x9c\x47\x57\x3f\x0b\xaf\x0c\x7e\x1b\x44\x21\xec\xb8\x7d\x2f\xb2\xa7\xf6\x6c\x2e\x8e\x5f\x8d\x3e\xbe\x45\x48\x63\x77\xb4\x09\xfe\xb5\x68\xf3\x0a\x88\xba\xea\x25\xa8\x17\x41\xf1\x5e\x05\x8a\xff\x2a\x50\x82\x7b\x25\xfc\x2c\x19\x72\x90\x4a\xfb\x25\xb9\x99\x8c\x6a\x9d\x59\x59\x08\xd1\xf1\xb3\xa6\x15\xda\x1a\xb1\x8d\xe3\xbf\x9d\x60\x97\x65\xc0\xc4\x57\xc3\xf8\x2b\x25\xa3\x01\xfd\x72\x9c\x87\x21\x2e\x44\xa2\xc8\x5b\xd6\x14\x72\xd7\x20\x2e\xc1\x6a\x76\x50\x44\x42\xb9\xee\x5b\xa5\x2a\x4a\xf2\xe5\x24\x66\x92\xe9\x6a\x7a\x71\x0d\xcc\xa2\x6e\x34\xb8\x3e\x66\x09\x5e\x84\x0f\x9d\xa7\xd5\x55\x91\x7b\xb7\xf9\x96\xe8\x7a\xfa\x30\x44\xb1\x1e\x06\xe3\x47\xe0\xb8\xa4\x1b\x62\x21\x03\x1e\x0a\xb1\x8e\xb2\x7a\xab\x30\x1f\xca\xdc\x90\x7e\x1f\x72\x05\x02\x2d\x21\xa7\x1c\xae\x45\x77\xdb\x37\xe6\x63\x9d\xa5\x7d\x02\xfd\x41\x3e\xcc\x47\x54\x75\x77\x91\xd8\x03\xae\x34\x91\x3d\xcc\xd3\xac\xc8\x5a\xb5\x47\xa7\x1f\x1c\x52\xc2\xb0\xb7\xf5\xfd\xfd\xaf\x84\x46\xe6\x36\xcb\xf3\xcf\x0c\x40\x4f\x57\x5e\xa9\xc6\x15\x46\xed\x0f\x7d\x39\x63\x0c\x84\xd5\xa4\x25\xea\xf2\xa9\x91\x6e\x08\xf1\x1e\x3d\xe4\x99\xea\xfa\xeb\x8e\xef\x14\x52\xfa\xf5\xcf\xa8\xe9\x6e\x23\xa7\xa9\x9b\x86\xdd\xe3\xae\x53\x2e\x4f\xc3\xab\xce\x6c\x93\x23\x45\xf7\x12\xcd\xa2\xc8\xdf\x58\x1b\x02\x4d\x31\x97\xf2\x56\xe8\x93\xb3\xc2\xc5
\x80\x41\x95\xea\x9f\x24\x68\x77\xbd\xe2\xad\xd9\xa5\x0f\xb9\x67\x35\x6a\xfb\xa5\x4d\xfb\x9a\xe3\xf7\xc2\x60\xf7\x23\xd3\xfa\xe0\x77\x21\xd6\x2f\xfd\xb3\x9c\xea\xce\x2b\xe0\x9a\x97\xf0\x78\xa7\x70\xd7\x6f\x05\x63\xfa\x4a\xea\x90\xd3\x17\x62\xaa\x5a\x87\xd8\xbd\x31\x6a\x81\x35\x4d\xea\x4f\xa8\x76\xf2\x23\x5c\xe3\xb7\x44\xd5\x30\xd6\x97\x56\x7a\x56\xcb\x3c\x8d\x1f\x36\xb8\x8c\xd1\x52\x12\x48\x07\x76\xba\x00\x7f\x13\x17\x3e\xd4\x47\xbc\xda\x94\xac\x34\xa2\xb6\xf9\x32\x8d\x92\x80\x19\xdd\x67\x20\xa9\x2d\xfc\x8a\xeb\x52\x61\x8e\x14\xa4\x29\x4b\x1f\x3f\xfa\x41\x2b\x9d\x45\x7f\x14\x33\xb6\xac\x4e\x3c\xd2\x96\x3b\xfb\x1b\xd2\xaf\x1f\x87\xed\x7d\x67\xa6\x67\xd9\x9d\xbb\x87\x21\xfd\x12\x0a\x77\x9e\xff\x2c\x9b\xfb\xe3\x18\xf2\x4f\xa1\x78\xe7\xf6\x2f\x41\x1b\x8c\xa3\xdd\xf0\xc9\xd9\x92\xf1\xc3\x74\x3a\x37\x27\xd3\xee\xfa\x93\xea\x1e\xb7\xba\xfe\x6a\x98\x64\xe5\x77\x9c\xd1\xdd\x55\x66\xe9\x67\x8f\x50\x6d\x50\x93\x25\x66\x5a\x97\x55\x5a\x3e\x15\x86\x36\x95\x9f\xc2\xcf\xe7\x82\x77\x3c\xbf\x2a\x9f\xfa\x17\x62\xfb\x62\xe0\x65\x2f\x75\xa9\x11\x51\x91\x28\x47\xbd\x73\xd9\x92\xab\x81\x3a\x67\xad\xc1\x0f\x6b\xae\x94\xe4\x8a\x35\x57\xb0\xf7\xc6\x9a\x2b\x24\x3a\x64\xcd\x95\x93\x9d\xa4\xfa\x72\x39\x6a\x76\xe6\x1e\x37\x0d\x7a\xc4\xa0\x82\xec\x96\x3f\x5b\x18\xa6\x4b\x35\x87\x24\xc1\x4d\x33\x2e\x57\x63\x4c\x85\x89\x35\x2f\x1b\x3e\xf5\x99\xbc\xea\xf5\xeb\x30\x4b\xca\x76\xc4\x58\x52\xb4\x17\x19\x5e\xb4\x28\xdb\x2c\xc1\xc3\xef\xeb\x77\x57\x8d\x05\x97\x34\x17\xbe\xa8\xa0\x09\x44\x3d\x0a\x59\xc7\x21\x2e\x53\xa9\xdb\x14\xb2\x2c\xce\x95\xf2\x58\xdd\x0a\x84\xf1\xa2\x0c\x83\xea\x83\xe2\xdb\xac\xd7\x1c\xe2\x34\xf2\xb1\xc8\x5f\xf1\x6a\xe9\xb1\xa4\x2c\xf1\x5f\xbc\x5a\x58\x2b\x33\x7b\x34\x13\xcc\xb8\x2f\x25\x4f\xeb\x4a\xb2\xca\xa6\xca\x24\xde\xa7\xc2\x6d\x99\xa7\xb8\xee\x6d\xf3\x9d\x92\x84\xf7\x6b\xc1\x1c\xb4\xec\x21\x41\x7a\x91\x70\xf2\x40\x41\x50\x01\x99\x26\xba\x2a\xca\xee\x9b\x98\xd7\x37\xc5\xac\x7f\xa0\x8b\xa7\x30\x52\x20\xc5\x58\xa7\xeb\xf1\x63\x4a\xca\xe3\xc2\xec\x00\x8e\x4f\x13\xba\x2c\x2e\x6e\x62
\xde\xcd\x4f\x70\xfd\x9d\x1f\xee\x12\xa8\x7b\x2d\x56\xbe\xe6\x30\xbc\xdb\x24\x0a\xf7\xd1\xaa\xd3\xcd\x74\xee\x19\xa1\x7f\x40\xb2\x33\x04\x9a\xf8\x88\x8b\xb6\xe1\x0a\x52\xc7\x66\x92\x1e\xe4\x6a\x5e\xff\x94\x57\xcb\xd1\x0b\xda\xb3\x82\x31\x70\xf3\x84\x81\x74\xb2\xa6\x62\x27\x15\x23\xaf\x8b\x87\x70\xcb\x96\x76\x71\xb3\x93\xed\xd1\x23\x5e\x1d\xea\xfc\x87\xdf\xa4\xa8\x45\x2b\xfa\xfb\x7d\x73\x7c\xfc\xf1\xb4\xcf\xd7\xc9\x0e\xd5\x0d\x6e\x3f\xfc\xe5\x3f\x7f\x36\x23\xe3\xb7\xcd\xf1\x11\xd0\x41\xf9\x70\xe7\xb8\x77\x80\x8d\xca\x87\x3b\xc7\xb9\x03\xa7\x7d\x5e\x34\x1f\xee\x76\x6d\x5b\xad\xde\xbf\x7f\x7a\x7a\x82\x4f\x2e\x2c\xeb\xc7\xf7\x8e\x65\x59\x04\xde\xdd\xc3\x6f\x1f\x01\xd9\xd5\x7d\xb8\x7b\xeb\xb8\x6c\x3b\x74\xf7\xf0\xdb\x0a\xb5\x3b\x90\x7e\xb8\xfb\x53\x0c\x43\xdf\x05\xf6\x27\x1b\xf8\xd0\xf7\xec\xa3\x6d\xc1\xd8\xb6\x73\x17\x3a\x51\x04\x6c\x18\x5a\xe1\x2f\x21\x74\xfc\xdc\x87\x5e\xe0\x9b\x0e\x8c\xdc\xf0\xeb\xdd\x7b\x01\x80\xed\x42\xc7\x0b\x81\x7d\x8c\x60\x10\x45\x1f\x29\xbc\x5f\x02\xe8\x79\x41\x6e\xba\xd0\x89\x29\x90\xe8\x68\xdb\xd0\x76\x29\x5c\x9a\x62\xdb\x47\x33\x82\x41\xe0\xee\x5c\xe8\xc5\xde\xd1\x85\x8e\xc3\x1a\x35\x59\xa3\x0e\xa9\xa5\x6d\xc9\xb1\x3f\x39\x0e\xb0\x03\xe8\xf9\xf1\x2f\x04\xe9\x88\x35\xc4\x2a\xb2\x0e\xe4\x26\xc1\xd7\x05\x03\xbe\xef\x1f\x1f\x7e\x4b\xe8\xf1\xf0\x1b\xfe\xc6\x94\x23\x70\x38\xd5\x2c\x3b\xcd\xd5\x57\x4e\x03\xa9\xcb\xfe\x2d\x51\x5f\x65\xa6\x71\x23\xc9\xa0\x3d\xb1\xb0\xbe\x06\x6f\xd8\xc1\xc0\x1b\x76\x78\x33\x6f\xd8\x7e\xe0\xfe\xfc\xb3\xc8\x1b\x36\xf4\x81\x6d\xc1\xe0\x08\x6d\x3f\x48\x2c\x13\xda\x76\x68\x42\xcb\x73\x4d\x92\x60\x42\xcb\x65\xdf\x3e\xda\xd0\xff\xba\xb7\x80\xf5\x8b\x0d\xfd\x5d\x74\x74\x60\x1c\x7e\xb4\xed\x5f\x6c\xe8\x7a\xde\x4f\xb6\x0d\x60\x60\x39\x04\x92\xeb\xb9\xc0\x02\x31\xf4\x5d\x17\x58\x1f\x6d\xe8\x05\xe1\x4f\x30\xf0\x43\x60\x01\x8b\x15\xb2\x00\xad\x74\x8c\xa1\x67\x3b\x89\x05\x60\xe8\x39\xb4\x04\x4d\x06\xb4\x0a\xfb\xbe\x73\x60\x1c\xfb\x47\xd3\x86\x3e\x6b\x9f\xe1\x13\x43\x3b\x31\xa1\x65\x13\x80\x14\x3f\xcb\xeb\xf1\xa4\xe8\xd1\x62\x5f\xf7
\x11\xf9\x07\x58\x47\x93\xf5\x0d\x90\xbe\x91\xae\x91\x9f\x24\x8b\xf6\x2b\xee\xfa\xb5\xb0\xd4\x3f\xa2\xdf\x8c\xe4\x5f\xf7\x66\x04\x2c\xd6\xb7\xd9\xee\x13\x44\x63\xe8\x5c\x1d\x4e\x3a\xe4\x5f\xef\x28\x77\x98\xf5\x21\xc7\x1f\xee\x8a\xb2\x20\x3b\x59\x59\x24\x3d\xe8\x01\x0f\xfa\x1f\x7d\x18\x90\x76\x23\x17\x10\xb8\xd0\xb2\x03\x13\xda\x14\x24\x11\x48\x27\xa0\xed\x59\x5e\x0c\x2d\xda\x26\xa1\x1c\xfd\x67\x17\xc1\x28\x61\xf5\x00\x4d\xb2\x03\x52\xdd\x75\x7f\xf1\x48\x97\x09\x25\x2c\x2f\x26\xe9\x61\x87\x1f\x1b\x3e\x8b\xf4\x3f\x81\x51\xe4\x51\xd2\x05\x1d\xad\x02\x46\xa9\xbe\x4d\x00\x23\xdb\x84\xa1\xdd\xa5\x9b\x7d\x89\x0e\x61\x5a\x9f\xa4\x9a\x04\x00\xfd\x42\xb3\x59\xeb\x3f\x79\xc0\xa5\x80\x3d\x0a\xc2\x05\x3e\x0c\x00\x45\xf9\x3a\x65\x1c\xe8\x83\x08\x3a\xfe\x47\xdb\x3a\xda\xd0\xf6\xbd\x8f\x0e\xf4\x95\xf9\xcc\x82\xb1\xe7\x80\x08\x86\x96\x9f\x43\xcb\xb2\xc9\xff\x64\x8a\x75\x42\x32\x71\x39\x91\x09\x23\x2b\x24\x1f\x11\x70\xa0\xe5\x98\xf2\x87\x2d\xe6\xf2\x3a\xe1\x91\x00\x99\x98\xf1\xe8\x0d\x7a\xae\xbd\xf5\x4e\x14\x3a\x87\x89\xf9\xf9\x6e\x91\x16\xfa\x1a\x33\x9a\x30\xa1\xc5\xcb\xe6\x33\x4a\xdc\x7e\x4a\xb3\xac\x20\xfa\xf9\xe7\x3b\x4a\x6d\x1f\x86\xa1\x0d\xe2\x8f\x2e\xf4\x9d\xf0\x13\x74\x5d\xc2\xb8\x91\x4b\x86\xd8\x77\x42\x18\x84\x44\xb2\x1c\xcf\x23\x8b\x89\x1d\xbb\xc0\x83\x36\x11\x80\xb2\x42\x49\xd6\x3e\x7f\xb8\x83\xae\x4b\xa9\x2a\x51\x54\x20\x68\x3c\x38\xa5\x88\xc4\xf4\xae\x11\x73\x52\xd3\x76\x14\x9f\x16\x9d\x66\xdd\xbf\xd0\xed\xc8\xcd\xbc\x78\x09\xf1\x84\x25\xc4\xbd\x81\xe4\x84\xc2\x1e\x0c\xc3\x18\xd8\x1f\x6d\x18\x25\x26\xf4\xdc\x98\xcc\x05\x11\x74\x09\x93\xc2\xc8\x75\x8f\x31\xa1\x3a\x11\x4a\x2f\x70\xa1\x1b\xd8\x24\x91\x65\xed\x6c\x0b\x7a\x09\xab\x03\x60\x64\x42\x37\x24\x9f\x91\xeb\xfe\xe2\x92\xcf\x9f\x6c\x17\xb8\xd0\x0d\x81\xed\xc0\xc0\x8d\x81\x4b\xbe\x38\xc0\xfd\x18\x40\xd7\x47\xd0\x87\x3e\x9d\x4a\x6d\x13\x7a\x8e\x09\x1d\x27\xfa\xe4\xc1\x30\x02\xf6\xd7\x3b\xba\x6f\xfc\x82\x29\x3b\xfc\xe1\x0f\x3f\x79\x7f\x74\xee\x3a\xfe\x20\x0a\xa8\x38\xa2\xb3\xa3\xa4\xb5\x20\x68\xae\x97\x8d\xcc\x7c\x6b\x25
\xac\xcb\x9c\x25\x42\x39\x32\x1f\xab\xd0\xd2\x1b\xec\xa2\xf3\x0d\xdb\x66\x51\xf7\xbe\xde\xa3\xc2\xc5\xfb\x8e\x4b\xa1\xeb\x0f\xdb\x36\xf6\xa3\xbb\xbe\x41\x5f\x27\xea\x5e\xf2\x92\x14\x7a\x79\x5b\xd0\xd5\xb9\x99\x40\xdc\x26\x28\x23\x35\xd5\x2d\xea\xa2\x68\xbb\x43\xaf\xd8\x3d\x3a\x2b\x1c\x1e\x50\xea\xde\x41\xe5\xfd\xb2\x23\xa1\x5f\xf4\xa1\xa5\xf1\x23\xce\x75\xd9\xa2\x16\xff\xe0\xf9\x29\x7e\x14\xdf\x70\x96\xd2\x2f\x23\x73\x82\x66\x6c\xc7\x87\x1b\xe3\x6a\xf2\x43\x5e\xd6\xab\xc8\xa4\xa8\xd6\x05\xcb\x64\x92\x3d\x0f\x3e\xe2\x7d\xfb\x6e\x98\x1b\x7f\x26\x13\x63\x72\xfa\x70\x17\xde\x81\xe4\x99\xfe\x53\x93\xcf\xf7\xea\x24\x1a\xfc\x3e\x0c\xa3\x80\x4d\xa2\x01\xb4\x7d\xe0\xc1\x20\x0c\x7f\x71\xa1\x43\xe4\x16\xfa\xc1\xd1\x86\x9e\xe7\x7d\x65\x99\xb6\x0d\xdd\xd0\xfb\x85\xc8\x25\xcb\xf4\xa1\xe5\xb8\x5f\x97\x0a\x9a\x6e\xa5\x0a\xf4\xb7\x34\x27\x8c\x8f\x0b\xcd\x85\x37\xd8\xf1\x16\x18\xe3\x6e\xb2\xf8\x68\xed\x30\xaf\xb2\x01\xb0\x06\x4e\x89\x6e\x9c\xbc\x23\x18\xfb\xc0\xfa\x64\x5b\x00\xc6\x91\x0f\x5c\x18\xba\x1e\x88\x80\x45\x74\x1f\x37\xcc\x61\xec\x78\xa6\x0d\x6d\x1b\x38\x64\x9b\x07\x1c\xe8\x7a\xf1\x57\x85\x9d\xc4\x31\x1e\x99\x15\xbe\xe1\x55\xbc\x59\x86\xf9\x97\xa6\x1b\x70\x5c\xf6\x5e\xa4\x48\x3b\xbe\x27\x95\xe3\x22\x0e\x82\xb1\x30\x3a\xe1\x4b\x8e\x01\x16\x59\xe4\x6f\x30\x97\xdf\x66\x61\xd5\xdb\x2d\x5f\x43\x32\xa2\x97\x0f\xb0\x07\x7c\x68\x05\xde\x27\x9b\x7c\xd2\x91\x0d\x60\xec\x06\xc0\xa1\x9f\x6c\xfb\x41\x72\xd8\xa7\x05\xbc\x2e\x87\x95\xb2\x40\xd4\xe5\x50\x28\x80\x40\x08\x84\xfc\x08\x74\x39\x8b\x65\xc9\xb1\xde\x5e\xbf\xb3\xfe\x0a\x47\x10\x0b\x8e\x17\x6e\x32\x5d\x6b\xed\xcc\xaf\x32\xeb\x89\x3a\xab\x77\xe3\xe8\xda\x2e\x0c\x3c\x6a\xe5\xb0\x62\x27\x81\x8e\x07\x3d\x2b\x86\x6e\xe0\xc3\x28\x22\xff\x90\x3d\x77\xc8\x76\xe7\xb1\x63\xda\xd0\xa2\xfb\x2c\xdf\x09\x4d\x07\x3a\x31\x19\x6b\xdf\x8d\x3e\x3a\xd0\x75\x1c\xb2\x9d\x74\xd8\x76\xd2\x8f\x1d\x13\x7a\x11\x29\x64\x59\x44\xfe\x9d\x10\x39\x30\x0c\xc8\x2c\x40\x3e\xa9\xae\x0a\x2c\x93\xd6\xce\x3d\x18\x44\x81\x19\xc1\xd8\xf2\x7e\xf2\xa1\x67\x07\xd0\xb7\x7c\x10
\x40\x3b\xf2\xa0\x65\x47\x20\x84\x96\x15\x91\x6f\x09\x6b\x00\xd0\x06\x08\x7c\x40\xe1\x03\x02\x9f\x40\x71\x5d\x10\xc1\xc8\xf2\xbe\xee\x4d\x1f\xc6\x51\x4c\x32\x02\xef\x97\x18\x06\x56\x48\x34\x65\xcf\x3b\xda\x30\xf4\xe2\x9d\x0d\x5d\xdb\x21\x9b\x68\x17\x5a\x5e\x74\x34\x3d\xe8\xf2\x02\xe4\x6b\x97\x2f\x70\x24\x3b\x3e\xbe\x41\x5b\x5e\xc6\x9f\xb7\x30\xd0\xff\xb2\xca\xbf\x06\xab\xc4\x5e\x9c\x22\x24\xb0\xca\x7c\x54\x93\xd7\x3d\x59\x5c\x7e\xf8\xf7\xea\x06\xd7\x5b\xd9\x29\x86\x4e\xe0\x42\xff\x53\x04\xad\x88\x8c\x4b\xe0\x92\x81\x09\xed\x00\x78\xd0\xf7\x3f\x5a\x47\x1b\x06\x8e\xbf\xb3\x1d\x18\xe5\xe6\x90\x01\x6c\x68\x93\x25\xc5\x76\xa3\x4f\x76\x00\x42\x91\xf4\xce\xbf\xff\xc1\x72\xe3\xb1\x95\x42\xd2\xa6\xaf\x1a\x24\x96\x0d\xd7\xdf\x99\xd2\xc2\xb1\x87\x6d\x2f\xa1\x34\x2b\xb2\x3a\xe5\x59\xf1\x45\x57\xd0\x8e\xe3\xf8\x3d\xcd\x1d\x59\x8d\x62\x3b\xfe\xf9\xf7\x11\xdb\xf0\x84\xd0\x77\x02\xe8\xd8\x71\x6e\xc2\xd8\x8f\x88\x3a\xf7\xc9\xb6\xa8\xd1\x33\xf2\xe8\x20\xb9\x0e\x35\x5e\x7c\x0a\xa0\x1f\x44\x20\x86\x91\x4f\xb4\x3e\x5e\xd4\x05\x7e\x67\xf6\x9b\x91\x86\xf1\x09\xf2\xab\x9d\x90\x2f\x39\xaf\x7e\x75\x51\x58\xb8\x49\x7d\x14\x0d\x31\xa2\x3d\x15\x1f\x71\x51\xa6\xa9\x3c\x30\x6f\x9c\xc8\xf9\xc9\xf9\xf7\xbb\x4e\xeb\x0e\x63\xe0\xe5\x26\xd1\xb1\x61\x68\x7f\xb2\x6d\x10\x42\xdf\x0f\x3e\xba\xbf\x44\xd0\x0f\x77\x51\x6e\x3a\x30\xf6\xa8\x4d\xd4\x23\x9a\x78\x00\x43\xdb\x26\xc3\x11\x41\x2b\x70\xe8\x80\xd4\x38\x69\xfb\x1d\xf2\x9b\x9f\xa8\xc7\x13\x37\x17\x99\xbc\x5b\xd0\xbf\x03\xa7\x0f\x77\x30\xf4\xef\xc0\x73\xf7\x6f\xbf\x98\x90\xcc\x41\xfe\xc9\xaf\x9a\xec\xa7\xa1\xe3\x4b\x76\x5e\x17\x92\x7d\x81\x9f\xc7\x20\xbe\x5b\xd4\x60\x97\x94\x67\x05\x4e\x50\xf5\xe1\xae\xf9\xdb\x01\xd5\x78\x81\x4d\x37\x78\x81\x6c\x2f\x77\x5f\xf8\x3e\x8c\xf3\x82\x25\xf9\x36\xc6\x19\x49\x74\x18\x13\x15\x3d\x76\x7a\xf6\xb1\xc9\xf6\x2d\x80\xbe\x17\x7e\x74\x8e\xf4\xf0\x42\x66\x20\x77\x60\x20\x87\x2c\xb1\xbe\xaf\x61\x20\xde\xc6\x0d\x0c\xe4\x48\x0c\xe4\x70\x06\x0a\xc6\x0c\x64\x05\x2e\xa0\x9f\x79\x08\xa3\xd0\x03\xf4\x53\xb2\x6d\xce\x34\xbf\x88\x9d
\x46\x06\x3c\xd5\x18\xa9\xbf\x22\x3f\x32\xfb\x31\x17\xd9\xa9\xa0\x34\x42\xf8\xde\xf5\x44\x74\x4c\x3d\xc4\xb1\x0b\x17\xbb\x7e\x29\x84\xf2\xa9\x0e\xad\x59\xe5\x28\xc1\x3b\x76\xd4\xaf\x3c\x8a\x70\x13\x58\x16\x05\xe8\x35\x21\x7e\x0f\x90\x37\x00\xd2\xc5\x0a\xe5\xde\x1d\x2c\x0a\x9a\x76\x1c\x25\xa7\x69\x32\x46\xfc\x42\xee\xf8\x56\x83\x03\x43\xbc\xbf\xd7\xc6\xf6\xe4\xa1\xe6\x27\xac\xe8\xdd\x71\x3c\x0d\x98\x26\x56\x97\x9e\x0a\x68\xcb\x7d\xf9\x58\xa3\x6a\xf7\x0c\xe0\xc0\x8a\x67\x22\xe2\x43\x00\x35\xb1\x10\x3a\x65\x0d\xcb\xa6\xdc\xc6\x84\xa0\xbf\x50\xdc\xc9\x44\x8a\x9a\x1d\xaa\x6b\xf4\xbc\xf2\x80\xa7\x34\x42\xbb\x3e\x09\x41\x2e\x4c\x85\xe4\xcc\x4b\x74\x01\x2a\x18\x6e\xdd\x93\x34\x52\x71\x22\x8a\x0d\x20\x9f\x43\x9d\xd9\x62\x5d\x24\x18\x7d\x27\x3a\xfa\xd1\xfb\xc9\x42\xed\x36\x4b\xbe\xc8\x6d\xe8\x30\xa7\xa5\xc8\x90\x08\x46\x7a\x18\xb1\xcb\x98\xcc\xa1\xbd\x48\x76\x65\xdd\x5d\x7a\x95\x23\x80\x68\xfc\xa5\xc6\x2f\x98\xa8\x17\xc5\xe5\x4a\xd3\x81\xa1\x99\xfb\x95\x64\x22\xd4\x3a\x68\x49\x25\xa6\xdf\x38\x63\xfe\xfe\x1a\x00\xf2\xd5\x7a\x76\xf3\x5e\xe7\xeb\x25\x05\x56\xa0\x6e\xcd\xd3\x9e\x64\x52\x59\x7f\xaa\xa8\x0e\x6e\x38\x2a\x3c\xbc\x16\x75\xf3\x1b\xf6\x4e\x75\x02\x61\x75\x92\x62\x51\x3b\xea\x3b\x10\x13\x85\x46\x58\x0c\x8b\xbe\xe6\x0d\x08\x8e\x11\x8f\x93\xc1\xe7\x72\xb9\xdb\xd9\xf0\x86\x93\xe6\x29\x9a\x20\x92\x5e\xdd\x1d\x21\x60\x5c\xc3\x48\x86\x34\xb1\x5e\x89\x30\xfa\x9b\x80\x82\xab\x9b\xf6\xe2\xad\x1a\x89\x7e\x62\x48\xa7\xc3\x1c\x6b\x19\x40\x0a\x02\x30\x6e\x53\xb8\x24\xfb\x95\x7a\xc6\x9d\x56\xa6\x4d\x31\xb1\xc6\x2c\x32\x75\x83\xc4\xb4\xc5\x38\x82\x63\xae\x15\xa8\x38\x05\xc2\x26\x1d\x9f\xf4\x28\xe4\x58\x0a\x5a\xa8\xa5\x6b\x85\x5d\xe6\xd8\x94\x47\x7c\x4f\xaf\xd6\xb8\x63\x1a\xca\xa5\x7a\xbc\xfa\x83\xee\x97\x9e\xed\xa9\xad\xb0\x06\xce\xfc\x59\x35\x1d\x22\xac\x88\xc0\x60\xec\x45\x8c\x59\x2c\x1c\x67\x02\x8d\x2e\x63\x24\x0d\x9a\x23\x46\x3e\xd0\xb6\x46\x74\xb8\x5f\x80\x14\x19\x52\x43\x6c\xcd\xcb\x5b\xf3\xf9\xff\x3d\xbe\x9c\xf4\x30\x12\xb7\x71\x2d\xe1\xc1\xb4\xf1\xac\x39\x3d\xb1\xbf\xac\xb5\xbe
\xf6\x21\x57\xdf\x12\x59\x58\x51\x23\x73\x02\xb5\x27\x8e\x0d\x95\xa9\x85\x5d\xb9\xd1\x4c\x2d\x38\xd9\x86\x38\x1d\x81\x51\x1f\xea\xd0\x9e\xaf\x2c\x7c\x45\xea\xcd\x36\xc6\x09\xc6\x6b\xe5\x61\x0e\x09\xe4\xcb\x22\x57\x4f\xb3\xa1\x33\x8e\x73\xa9\x3e\x6e\x27\x26\xaa\x91\x2e\xbb\x64\xb6\xc0\xad\x2c\x60\xfb\x6f\xc7\x24\x1a\xaf\x65\xa3\xe5\x8e\x39\xd2\x04\xda\x99\x99\x0b\x42\xdc\x87\xd5\x51\x1a\xe0\x37\x0d\x19\xdd\xb2\x62\x87\xeb\x4c\x13\xb7\x56\xff\xc0\x90\x1c\x5c\xc7\x9a\x06\xaf\x3c\x34\x32\x0a\xed\x2c\x9e\xd9\xfd\x54\xa6\xf8\x4f\x19\x1d\x7a\x21\xba\x02\xbd\x8f\xce\xe7\x50\x76\xaf\x75\x28\x48\x36\x6f\xad\xd9\x96\x65\xde\x66\xd5\x54\x70\x0d\x34\xa2\x1c\x78\x83\x3c\xf2\xa7\x28\x0c\xfd\xa3\x6f\x8e\x4d\xfe\x98\xcf\xc8\x16\xed\xb3\xfc\x79\xb5\x2f\x8b\x92\xde\xaf\x13\x3c\x49\x6c\xe1\x1e\xe7\x2a\xec\x9e\xb8\x88\x09\x31\x92\xbd\xd9\x98\x3b\xd4\xec\x32\xb1\x57\xc2\xca\xa5\x7d\x73\x4e\x38\xd5\xe4\xea\xd9\x36\x75\xc4\x80\xef\xd2\x46\x51\x8f\x1e\xe7\x4c\x86\xe6\xbe\x2c\xdb\x1d\x41\x8f\xc6\x71\x90\x23\xa9\x7a\x22\x9e\x40\xa4\xea\xe3\xa1\x6d\x71\xdd\x70\x31\x0d\xa3\xc8\x89\x2d\x0d\xce\x0e\x72\xb6\x6e\x20\x3f\x3e\x33\x01\x92\x69\xbb\x67\x51\x1f\x62\x43\x41\xd0\x03\x6f\xb6\xd1\x36\xda\x5a\x93\xb5\x09\xe2\xc5\x61\xbf\x19\x36\x74\x41\x1a\xa1\x28\x9a\xa0\xb4\x49\x23\x0c\xe2\x14\xa4\xd9\x51\x4c\xe6\xef\x72\x0a\x9c\xa2\x79\x12\x9c\xbe\x43\x37\x8d\x07\xd9\xb2\x96\x5f\x3b\x58\x59\x59\x18\x73\x85\x1f\x98\x5d\xe8\xd6\x1a\xda\x6a\xdf\x88\xf5\x4d\x08\xdf\x8e\xeb\x6b\xa1\xf9\xaf\x40\x5c\x02\x80\xfc\x4e\xca\xfd\x1e\x17\x6d\x7f\x3b\x34\xc5\x2e\x0e\xb5\x25\x9b\xb6\xce\x8a\x47\x63\x3a\xcb\x74\x38\x14\x3b\x4d\x37\xc8\xd5\x42\x91\x45\x20\x4d\x3d\xec\x47\xda\x82\x47\x54\x67\x34\x04\xce\x5c\xe6\xd0\x64\x8c\x23\x2f\xf1\xb5\x90\x52\xbc\x5d\x80\x58\x59\xe1\x1a\xb5\x65\x8f\x1a\x76\xc9\x9f\xb6\xe8\x17\xfc\xfc\x54\xd6\xc3\x05\xdc\x6d\x9a\xfa\x7a\x9a\xa1\xb6\xdc\x2f\xe8\xeb\x1e\xb7\x48\xdb\xcf\x16\x3d\x2e\x40\x1d\xb5\x6d\x9d\x6d\x0e\xad\x9e\x56\x7f\x3b\xa0\x3c\xdb\x66\x82\x21\x69\x9b\x20\x3f\xd0\x42\xaa\x6a\x42\x86
\xf6\x79\x01\x55\x37\x87\x2c\x6f\xb3\x62\x7e\x78\xdc\x99\x36\x45\xc6\x66\x6e\x41\x6c\x75\x1e\x2c\x40\x02\x3f\xbf\xb1\x2d\xdb\xb1\xdd\x49\x00\x7b\xd4\x26\x64\x91\xd8\xd4\x28\xf9\x82\xbb\xbb\xd7\x29\x4e\xca\x9a\xaa\xa1\xab\x43\x91\xe2\x9a\xc0\x17\xb6\xa1\xe2\xc2\x5d\x63\x94\xb2\x18\x57\x65\x8a\xf7\x14\xa6\x66\xee\x6f\x14\xb5\x58\x5b\x4b\xa1\x87\x2a\x59\x4b\xea\x88\xe2\x76\x5b\xf9\x41\x20\x30\x22\x7f\xcb\xdb\x93\x05\x13\xb9\x28\xd9\x24\xcb\x6b\xab\x8c\x43\x37\xe4\x4b\x2b\x8f\xa5\x39\x48\xc2\x4d\x64\x77\x61\x07\xc8\x0a\x38\x1b\x63\x41\xff\xd8\xd7\x74\xe4\x05\x2d\xc0\x41\xa5\x97\x83\xd0\xd6\x13\x01\xb2\x26\xc0\x7c\x8f\x50\x75\x37\xbe\x3a\x2d\xeb\x48\xcc\x91\x55\x7e\xaa\x15\x54\x86\x36\x95\xdb\xb8\xc4\x77\xdf\x47\xb4\xf5\x1c\x2f\xf6\x52\x1d\x58\xcd\xae\xf7\x9b\x29\xf0\xc2\x9d\x89\xbe\xd7\x43\xff\x16\x5f\xa2\xd4\x3b\x68\xfb\x74\x2f\xc3\x2c\x07\x7e\x1f\x52\x52\x35\x95\x7c\x9b\xcb\xb2\x8a\xfc\x59\x34\x0c\x68\xaf\x7b\xb2\x48\x2a\x34\xdc\x9a\x1a\x51\x4f\xfb\x20\x98\x3e\xb6\x02\xf3\x16\xd3\x05\xf7\x94\xfd\xc8\xce\xfd\xce\x89\xee\xe2\x6c\x25\x50\xb3\xc5\xcb\x2f\xb1\x71\xf6\x5d\xc3\x7b\xc0\xff\xf7\xe8\x96\x8f\xfb\xaa\x71\x58\x63\x51\xc7\xdb\x18\x47\xea\xb3\x2b\xdd\x6b\x98\x9a\x48\x0f\x6d\x9d\x3d\x3e\xe2\x5a\x1b\x05\xa2\xcb\x33\xbb\xf5\x68\xae\x4c\x17\x01\x5c\x67\x31\x05\x6f\x10\xe2\x1b\xb6\x51\x7c\x1b\x29\x53\x39\x20\x93\xf2\x64\x7b\x2b\xcd\xba\x1e\x14\xaa\x4d\x61\x8b\x1e\x1b\xf6\x8c\xce\x92\xd8\x4b\xb7\x54\x18\xc2\x4e\xa5\x29\xab\xd0\x8e\x1e\x59\xd5\x6d\xb1\x69\x2f\xfc\xe1\x69\x1c\xe1\x3d\x8a\xe5\x2d\xbe\xf0\x09\x7f\x35\x82\xa9\x87\xf7\x37\x34\xaa\x04\x04\xfa\x27\x23\xff\xe4\x59\x88\x1a\xca\xd1\x1f\x75\xfa\x36\x1f\xac\x6f\xf1\x59\xd1\x44\xc7\x59\x0c\x2d\xc5\x4d\x9b\x15\xdd\xb2\x5a\x16\xad\xf9\xc4\xe6\xd6\xc0\xb2\x66\x62\xac\xe1\x34\x6b\x69\x90\xcb\xc9\xc5\x99\x9a\x1e\xa4\x28\xa2\x21\x0f\x6c\x8b\x34\x91\xf0\xbb\xc3\x24\xe9\x0a\x89\x7c\x26\x3b\x17\x5f\x15\x68\xae\x90\xf0\xf5\x87\x69\x91\x73\xe1\x6a\x26\x0f\x0f\xa8\x31\xa4\xb7\x72\x5a\x96\xce\xb6\x41\x95\xb0\x39\xe8\x26\x9d\x65\xfa
\x17\x74\xe7\x8b\xd2\xc9\x8a\xdf\x7e\x99\x28\x5c\x1e\x71\x9d\xa3\xe7\x01\xeb\x6d\x76\xc2\xe9\x5a\xdd\x98\x76\x67\x51\xfe\xfd\x7a\x22\x5c\xe8\xd0\x31\x76\x2a\xc2\xaf\xdf\xe9\x02\x0d\xcc\xa3\xfd\x54\xd6\xfb\x5d\x99\x63\xb3\xac\xb3\xc7\x4c\x0d\xb4\x3d\xb7\x42\x68\xb4\x19\x79\x02\xd3\xba\xdb\x51\xa5\x49\x79\x8d\xd3\x5f\x0b\x01\xb2\xba\xbb\x63\x53\x8f\x99\x0b\x31\x59\x61\x48\xaf\x3d\x91\x3d\xfe\xa1\xe9\xd1\xeb\x9e\x00\xda\x37\xe3\xc4\x71\xa9\x6e\xaa\x1c\x65\x48\x16\xd3\x39\x22\x28\x0c\x7b\x77\xd7\x4f\xaf\x54\x37\x5f\x27\x39\x46\xf5\x6a\x53\xb6\xbb\x39\x28\x37\x2c\xa8\x0b\x1f\xd2\x98\x18\xf5\xbe\xbd\x0d\xce\xcb\xa7\x99\xd6\x7e\x25\x5b\x0e\x13\x9f\x2a\x54\xa4\x38\xfd\xd0\xd6\x07\xfc\x79\x22\x70\x54\x0f\x33\x2b\x98\x7f\xc7\x8d\x60\xe5\x48\x77\x54\xe2\x06\x6b\xad\x9c\x47\x85\xab\xcf\xbc\xd6\x47\x7a\x90\xf5\x42\x64\xda\xb2\xd2\x63\x42\x32\xf4\x68\x48\x4d\xe8\xfc\x53\xe2\x58\xd1\xa6\xc7\x5c\xbf\x38\xc8\x9b\xb6\x51\x76\x47\xc1\xcc\x12\x31\x1c\xf2\x28\xac\xbd\xa4\x82\xf0\x5b\x85\x9d\x4c\xf1\xe9\x84\xef\xe1\xf8\x01\x06\x9d\x4d\x3b\x0a\xb0\x77\x49\x24\x4d\x95\x41\x09\xab\x13\x7d\x05\xda\x52\x03\x11\x22\x04\x04\x4f\x3c\xf1\x7b\xb7\xe5\xf4\xa7\x22\x99\xcd\x0d\x15\xb8\x46\x82\xc9\xdd\x85\x1d\x59\xfa\xed\x45\x97\xa1\x25\x2e\x95\x64\x16\xe3\x62\x44\x5a\xc5\x5f\x4c\x78\xbc\x62\x4e\xe8\xf7\x87\xbc\xcd\xaa\x1c\x33\xdf\xa8\xb3\x68\xeb\xe7\x67\x35\xc3\x41\x04\x4f\x11\x4f\x07\xb4\x23\x2a\xce\xad\xbc\xd2\x94\x4f\xda\xf0\x3e\x4f\x1f\xca\x70\x3d\x44\xa4\x9d\xbf\xad\x28\xbd\x26\x79\x53\x4f\x57\x3c\xb6\xb8\x46\x41\xc3\x78\x76\xcd\x51\x21\xcd\xf8\xc0\x75\xb7\xd7\x57\xf6\x7a\x90\xbf\x5b\x40\xeb\x9d\xd6\xbe\x15\xea\xf7\x02\xfb\xba\x38\x7e\x37\xa2\x32\x8b\xfc\x3f\x13\x41\x7b\x70\x65\xc5\x42\x99\x0e\x92\xc0\x0f\x6f\x97\xd4\x1b\x6d\xe2\xc1\x63\x8d\x9e\x35\x0a\x51\x87\x8e\xeb\xba\xba\xb3\x74\x8f\xfc\x0d\xc2\x08\xbc\x89\xa8\x4b\xea\x31\x9f\x2f\x8a\x6d\x87\xb7\x43\x1f\xe3\x27\x9f\xee\x84\x78\xf6\x7d\xa8\xf1\xbe\x3c\x62\x3a\xb9\xa9\x2e\x6b\xcb\xaa\xb1\xc0\xff\xd4\xc1\xe9\xbe\xa7\x3d\xf4\xf5\xab\x14\x46\x75
\xd2\xc7\x5c\x5d\x79\x13\xc8\xb1\x62\xa6\xe8\x8c\x39\x65\x25\xe0\x4b\xb4\xa0\x2c\x8f\x27\x4e\xdd\xbc\x38\x10\x7a\x6a\xae\x16\xb1\x90\x4c\x1a\x2a\x2e\x37\xea\x65\xda\xe0\x9a\xe7\x69\xd3\xc6\xac\x45\x64\xa4\x4e\x8f\xd9\x6e\xe9\xfb\x6b\xea\x2b\x50\xd7\x15\x61\x8e\xfc\x95\xdd\x11\x77\x3c\x5a\x66\x0e\x12\xbd\xa4\x5f\xac\x17\xbe\x04\x4d\xaa\x17\xcf\x46\x40\xbd\x02\x80\x2b\xc1\x62\x57\xc7\xbd\x99\xe8\xfd\xb7\x6a\x9d\xb7\xe2\x28\x44\x7f\xd6\x81\xe3\x53\xe2\x10\xaf\x5b\x34\x0d\x48\x5a\xc3\xf7\xd8\x85\xcd\xe1\xc4\x2c\x0a\x04\xb1\x4d\x79\x1a\x1e\x2f\x33\x9f\x99\xaa\xca\x41\xf6\xe9\x4d\x52\x97\x79\x4e\x70\x6d\xcb\x43\xb2\xa3\x8f\x11\xf7\x11\x66\xba\x47\x52\x26\x5b\x53\xef\x03\x0c\xd3\x46\x34\x31\x6d\x50\x8f\x2c\xa6\xb4\x72\x65\xa7\xdb\x5f\x08\x9b\x81\x0e\x6a\x51\x76\xd6\x24\x9c\xde\x06\x4b\xab\x00\x33\x8c\xb5\x1c\xcc\xb2\xae\x61\xa5\x31\x21\x2c\xc5\x54\x6c\x80\xfb\x61\xf0\x6d\xd5\x68\xad\x4b\xd3\xeb\x40\xba\xb8\x2c\x93\x30\xfc\x28\x0e\xb7\x9b\xf5\xd8\x9c\x33\x26\xa0\x59\xa0\x7d\x1f\x65\x26\xc5\x5b\x74\xc8\x3b\xf5\xfa\x89\xbf\x3a\xad\x97\x29\x69\x07\xa2\x90\xed\x66\xfd\x75\xd0\x2e\x80\x1d\xd0\xf5\xf9\xea\xf6\x51\x3b\x94\x9c\xba\xf4\x5c\xeb\xac\x3c\x07\x39\x4d\x04\x2d\xc7\x5c\xc9\x61\x84\x93\xbd\xc1\xa6\xd6\xec\x97\x34\xd2\x89\x98\xd4\x80\x67\xdd\xdc\x80\x0e\xcc\x4b\xf1\xd4\x74\x79\x8a\xac\x9c\x3b\xd2\xac\xfe\x50\xb7\xf9\x67\x61\x80\xe9\xf5\xe5\x60\x61\x3d\x2d\x32\xd7\xb8\x60\x11\x10\x2d\xab\xd4\xc3\xbb\x6e\x2f\x85\xab\x28\xc0\x2f\x47\x70\x6a\x3b\x2c\x5e\x81\x79\x71\xe7\x05\x73\x00\xbf\x91\xd0\x6d\xcf\xb3\x22\x6b\x33\x94\xbf\x1c\xf6\x60\x10\xe8\xdf\x1f\x5c\x00\x9a\xaf\xc6\xf3\xb0\x5f\x49\x5a\x3b\x43\xc4\x94\x18\x7c\x3f\x5c\x14\x69\x64\x78\x4c\x4a\xf5\xb7\xe2\xa1\x6d\xed\xfb\xf6\x5a\x47\x67\x22\x4e\xff\x3f\x00\x00\xff\xff\xa3\xa4\xff\x0e\x12\xde\x00\x00") -func web_uiV2AssetsConsulUi07dda31de740f1a5f8d66c166d785a89CssBytes() ([]byte, error) { +func web_uiV2AssetsConsulUi30b6cacf986e547028905bc5b588c278CssBytes() ([]byte, error) { return bindataRead( - 
_web_uiV2AssetsConsulUi07dda31de740f1a5f8d66c166d785a89Css, - "web_ui/v2/assets/consul-ui-07dda31de740f1a5f8d66c166d785a89.css", + _web_uiV2AssetsConsulUi30b6cacf986e547028905bc5b588c278Css, + "web_ui/v2/assets/consul-ui-30b6cacf986e547028905bc5b588c278.css", ) } -func web_uiV2AssetsConsulUi07dda31de740f1a5f8d66c166d785a89Css() (*asset, error) { - bytes, err := web_uiV2AssetsConsulUi07dda31de740f1a5f8d66c166d785a89CssBytes() +func web_uiV2AssetsConsulUi30b6cacf986e547028905bc5b588c278Css() (*asset, error) { + bytes, err := web_uiV2AssetsConsulUi30b6cacf986e547028905bc5b588c278CssBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/consul-ui-07dda31de740f1a5f8d66c166d785a89.css", size: 56224, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/consul-ui-30b6cacf986e547028905bc5b588c278.css", size: 56850, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _web_uiV2AssetsConsulUiE51248f3d8659994e198565dbadc4fcfJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\xfb\x7a\x1b\x37\x92\x38\xfa\xbf\x9e\x82\xc2\xf9\x8e\xa6\x7b\x0c\xb6\x48\x49\xbe\x31\xd3\xf1\x71\x6c\x27\xf1\xc6\xb1\xbd\xb2\x9c\xdd\x59\x2e\x3f\xff\x5a\xdd\x20\xd9\xa3\x26\xc0\x00\x68\x49\x8c\xc4\x07\x3a\xaf\x71\x9e\xec\x7c\xb8\xf4\xfd\x4e\x51\xb2\x99\xc9\x7e\x3b\xb1\xd8\xb8\x56\xa1\x50\x55\x28\x54\x15\x40\xc8\x50\x8f\x71\xea\xbb\x1c\xec\x79\x68\xea\x63\x64\x00\x97\x60\x16\x06\xfd\xd0\x3f\x74\x3c\x67\xc9\x11\x65\x87\x8e\x1b\x00\x38\x06\xe8\x7a\x49\x28\x67\x00\x96\xd6\x59\x2e\x03\xdf\x75\xb8\x4f\x70\xa6\xc2\x82\x78\x28\xd0\x5d\x14\x3f\x7b\x6e\xe6\x6b\xc8\xfd\x80\x1d\xce\x39\x5f\x1e\x2e\x10\x9f\x13\xaf\xaa\x94\x71\x87\x87\xac\xa4\x74\xe1\x5c\xa0\x97\x9c\x53\xe7\x3c\x40\x60\x02\xa7\x21\x76\xc5\x94\x0c\x04\x39\xc4\x90\xc2\x00\x32\x48\xcc\x9b\x0f\xe7\xff\x42\x2e\xb7\x14\xcc\x1f\x29\x59\x22\xca\x57\x06\x82\xe0\xcb\x17\xc4\x7e\x25\x5e\x18\x20\x00\x6f\x2e\x9d\x20\x44\xa3\xfd\xc1\xda\xdc\xbb\x74\x68\xcf\xb7\x75\x33\x87\x31\x7f\x86\x6f\x6f\x93\xde\xcd\x9b\x29\xa1\x86\xa8\xc4\xed\xe1\x77\xfc\x1f\x0e\x9d\x85\x0b\x84\x39\xb3\x02\x84\x67\x7c\xfe\x1d\x7f\xf4\xc8\xbc\x11\xe5\xd8\x8e\xcb\xc6\x7c\xb2\x17\x35\xa3\x3d\x1f\xf7\xb0\xa9\x07\x58\x52\xc2\x09\x5f\x2d\x91\x35\x77\xd8\x87\x2b\x1c\xcd\xd0\x72\x9d\x20\x30\x30\xa4\xe6\xc1\x81\x81\xc6\x74\x62\xe3\x31\x9d\x98\x6b\x8a\x78\x48\x71\x0f\xad\xf7\xa2\x29\xf5\x1c\x05\xb2\x79\xa3\xcb\xb8\x18\x00\xbd\xa8\x02\x9c\x47\xd0\x62\x88\x70\xb8\x40\x12\x81\xa3\xfd\x01\x74\x09\x9e\xfa\xb3\x30\xfe\x7d\x45\x7d\xae\xff\x5e\x9b\x23\x34\xe6\x13\x1b\x43\xb4\x46\xa2\x4b\x27\x0c\xb8\xcd\xa3\xbf\x2c\x74\xcd\x11\xf6\x8c\x9b\x90\x06\x3f\x12\xfa\x9f\x21\xa2\xab\x51\x1a\x65\xd1\xcc\xe6\x3e\xb3\x9c\xe5\x12\x61\xef\xf3\xe9\x3b\x03\x38\x6e\x70\x18\xf8\x8c\x03\x38\x9e\x40\x59\xe8\x06\xc8\xc1\xb2\xbd\x81\x4c\x73\x0d\x53\x1d\x9e\x22\x97\x50\xaf\x6d\xb7\x3e\x9e\x12\x00\xc7\xc8\xf2\xbd\xda\xae\x5f\x51\xe4\x70\x54\xe8\x3b\x8d\xce\x92\xde\x5d\xd9\x4a\x4e\xdb\x31\x6e\xd6\x90\x5b\xaf\x5f\x9e\xbd\x7c\xf5\xe6\xfd\xd9\x9b
\xd3\x2f\xbf\xbc\xf9\x27\xc4\x96\xc3\x39\x35\xa8\xf5\xe3\x87\xd3\x37\x6f\x7f\x7a\x2f\x3e\x9a\xc9\xa8\x9f\x97\x5e\xf9\xa8\x30\xa8\x1d\x37\x94\xed\x6a\xc6\x0d\xea\xc7\x7d\x8d\x02\x54\x36\x6e\x00\x59\xed\xb8\x1e\x62\x9c\x92\x15\x80\x63\xa6\x06\xc0\xd6\xa7\x77\x9f\x7f\x92\xbd\x57\x4d\x85\xd5\x4f\xe5\x55\x40\x70\xd9\x4c\xea\xf1\x2e\x1a\x01\x38\x0e\x5a\xcf\xa2\x01\x21\xa7\xe8\xf7\x10\x31\x9e\xa1\x2a\xb5\xb7\x91\x25\x76\x25\xc4\x36\xb2\x18\x76\x96\x6c\x4e\xf8\x1e\xbb\xf2\xb9\x3b\x37\x90\x45\x55\xb3\xb3\xd5\x12\x99\x37\xae\xc3\x10\x70\x13\x70\xc0\x28\x0d\x41\x01\x5a\x83\x5b\x92\x25\xbe\x77\x16\x82\xcc\xd6\xe9\xca\x5f\x58\xb8\x44\x54\x40\x1d\xac\x0c\xf1\x05\xc6\x2c\xc4\x5c\x43\x39\xc6\x28\xcf\xe8\xd4\x84\x03\xfb\x86\x71\x42\xd1\x08\x41\x31\xef\x11\x87\xbe\x37\xc2\x30\x9a\xfa\x88\xc2\xd4\x9c\x47\x99\xe9\xae\x21\xb3\x6f\x14\x13\x1e\xc9\x59\xa8\xbf\x13\xec\x18\x81\x29\xd0\x35\x4a\xc1\x93\x2a\x99\x23\xc7\x43\x94\xa9\x52\xfd\x23\x5b\xc3\x73\xb8\xa3\x8a\xc5\x5f\x99\xb2\xf5\x5e\x06\x7c\xc1\xd3\xa3\x42\x01\x71\xb6\x7e\x61\x95\xb0\xdd\x88\xb5\x9a\x35\xeb\x71\xeb\xf4\xcd\x7f\x7e\x7e\xf3\xe9\xec\xcb\xe7\x8f\xaf\x5f\x9e\xbd\x19\xe5\xbe\xbe\x3a\x7d\x23\xbe\xea\x19\x62\xcb\x71\x83\x68\xb5\xf0\x1a\xe6\xb1\x94\x99\x5d\xab\x51\x5f\xbf\x79\xf7\xa6\x72\xd4\x4a\xaa\x0a\xac\x8f\x9f\xcf\x3a\x50\x8d\xcf\x2a\x78\x5c\xbc\xd3\x90\xb5\x74\xf8\x1c\x3b\x0b\x64\xdb\x0a\xa3\x4b\x87\x32\x24\x76\x5d\x9a\x84\x53\xbd\xc8\xdd\x08\xa0\x31\x80\x24\x92\x01\xa6\x21\x37\x60\x66\xa3\x41\x00\x4c\xd3\x34\xe3\xee\xe5\x64\x4a\xf7\xbd\xde\x75\x7b\xdd\x66\x94\xda\x54\x65\x13\x32\xb8\x7d\xb3\x86\x8e\xc1\x61\xc2\x28\x94\x2c\x60\x41\x38\xfb\x91\x92\x85\xe8\x10\x99\xa6\xac\x53\x98\x39\xe4\x85\xc9\x57\xf0\xed\x8e\x98\x4c\xf7\x62\xe0\x30\x08\xe0\xa6\xe8\x9c\x3b\xd8\x0b\xd0\x29\x62\x4b\x82\x59\x8e\x35\x50\xc1\x4b\x05\x56\x89\x9c\x09\x0c\x6d\xba\xe7\x4f\x0d\x31\x33\x66\x7d\xf8\x45\x95\xb9\xb9\x59\x06\x62\x8a\xf1\xa6\xd9\x1f\x28\x9a\xdd\x1f\xd8\xb6\x1d\x8e\x42\x5b\xce\x09\x5b\x1f\x4f\xdf\xfe\xfa\xf2\xf4\x9f\x09\x3e\x43\x5f\x6c\x04
\xd1\x81\x6b\x9a\x7b\xe7\x14\x39\x17\x7b\x8a\xac\x45\xa9\xcf\x52\x02\xdc\x70\xcd\x51\x68\xfb\xa2\xa3\x70\x3c\xd0\x6c\xbb\xa9\xcb\xb2\x3e\x33\x58\x74\xcd\x51\xba\x2c\x43\xab\xf9\xb2\x14\xd5\xa4\xe6\xd2\x66\x22\x30\x1c\x27\x94\x34\x49\xa6\xa5\xd7\x6c\x14\xda\xa1\xb5\x70\x96\x46\x09\x65\xc8\x41\x50\xd9\x20\x24\x33\x02\xca\x8d\xb0\x36\xd7\x6b\x14\x30\xd4\x8b\x97\xee\xf3\xfb\x97\x9f\xcf\x7e\xfe\x70\xfa\xf6\x7f\xde\xbc\x56\x8b\xe8\xd9\x18\x5d\xf5\xde\x2c\xce\x11\xb5\xde\x50\x4a\xe8\x1e\x9f\x53\x72\xd5\xf3\x2c\x97\x78\xc8\x46\xd0\xb3\x16\x88\x31\x67\x86\x6c\x0a\xbd\x12\xd6\x21\xe9\x25\x84\x81\xb9\x5e\x9b\x6b\x13\xd6\x9d\x0f\xd2\xba\x7f\xfa\x9c\x80\xc4\xe8\x7d\xc1\xaf\x93\xca\x14\x09\xd5\x0e\x84\x34\x28\x51\xdf\x95\x0a\xf5\xf9\xf4\x5d\x89\xee\xde\x5d\x6b\x87\x28\x27\xf8\x6d\x94\x63\xb3\xa9\x0f\x8a\xdb\xa7\x3e\x9c\xbe\x79\xf9\x3a\xf5\x53\x71\x60\xfb\x92\xf8\x5e\x6f\xb0\xa7\xa4\xeb\x37\x7f\x1e\x80\xcc\x36\x0a\x20\x00\x37\xb5\x11\x00\xcc\x41\x0c\x7e\x4f\xb6\x65\xba\x50\xe3\x07\x84\xa9\x1d\x96\x2e\xd7\x08\x05\x5e\x4a\x9b\x04\xc5\x15\x00\x9e\x0b\xcc\xbd\xba\xe3\x82\x60\x62\x6c\xe9\xb8\x68\x04\x2e\x87\x00\x52\xb4\x24\x23\x45\xc6\x3e\x96\xd8\x60\x88\x5e\xfa\x2e\x32\x00\x43\x9c\xfb\x78\xc6\x40\xac\x6f\x94\x09\xde\x78\xb7\x05\x52\x17\x14\x04\x3e\x43\xdc\x00\xa2\x63\x60\x5a\x53\x1f\x7b\x3f\xab\xd6\x86\x09\x1b\x45\xa7\xd4\xb8\xa2\x33\x43\x86\xdb\x2b\xc0\x7b\xe2\x70\xb1\xa7\x56\x5e\x0e\x88\xcc\x48\x7e\x45\x15\xc6\x6c\x02\xb9\x10\x1c\x0d\x27\x98\x44\x6e\x58\x6c\x19\xf8\xdc\x00\x87\xc0\xb4\x58\x20\x60\x1f\xc0\xfe\xd0\xb4\x5c\x82\x5d\x87\x1b\x63\x00\x26\xa6\xf5\x2f\xe2\x63\x59\xa5\x4e\xca\xa4\x79\xee\x8d\xef\x8d\x00\x58\x67\x64\xc7\x0c\xf1\x9f\x49\x29\xf6\x94\x26\x47\x18\xbf\xbd\x0d\x88\xda\xec\x8a\x2e\x5d\x12\x3c\x02\x87\x87\xe0\x51\xfc\x59\xd4\x5a\xc3\x94\x38\x2d\x83\xce\x43\x82\x0f\x7d\x3e\x7d\xfb\x8a\x2c\x96\x04\x23\xcc\x8d\x72\x80\x97\x64\x69\x08\xb4\x47\xe0\x94\x75\x26\x58\x1d\x8e\x28\x49\x30\x0c\xbd\xca\x02\x16\xd9\x38\xe6\xa6\x25\x4a\x7d\x7e\x73\x7e\x3f\x3c\x38\x50\xdb\x7c\xdf\x4e
\xed\xce\xe1\xe4\x45\xfa\xc7\x08\x00\x75\x0e\x40\x0e\x75\xe7\x1f\x1d\xea\x2c\x14\x61\x31\xf3\xf6\x16\x00\x48\x6d\x00\xc4\x4a\xbc\x28\xd1\x2d\x46\x5c\x08\x5d\xac\xc7\xfb\xc7\xd0\x54\xac\x59\x32\x6c\xc1\xaa\x0d\xf0\x19\x8b\x43\x76\x8f\x93\x9e\xda\xac\xbd\x10\xfb\xbf\x87\xa8\xe7\x7b\xb0\xb7\xf0\x19\xf3\xf1\xac\x27\xf8\xaa\x8b\x30\x47\x14\x98\xa2\x3f\x7a\xc7\xfe\xc4\x1c\x41\x4c\xad\xff\xf1\xe9\xc3\x7b\x8b\x71\xea\xe3\x99\x3f\x5d\x19\x63\x0c\x05\x67\x81\xf1\xd9\x6b\x7b\x88\x1c\x4f\x20\x2e\xb6\x3d\x2a\x6d\x7b\x94\x6e\x7b\x34\x19\xdd\x44\x67\x05\x63\x00\x69\xa2\x26\x8d\x25\xd2\xcf\x43\x3f\x90\xc7\x44\x13\xa2\x89\xb4\x86\x34\x8a\x32\x97\x10\xea\xf9\x58\x9d\xa7\xef\x64\xf1\x4a\xf7\x54\x6b\xc4\xda\x86\xa4\xdb\x15\x79\x54\xcb\xf8\x3b\xda\x89\x12\x04\x1f\x62\xe2\x21\x56\x6d\x2f\xaa\x53\x88\xa5\x39\x50\x59\xf7\x14\xa7\xb7\x59\xa4\x10\xd3\x58\x21\x0e\x73\x0c\x95\x28\x85\xd8\xb1\x9d\x2a\xcd\x4e\xb1\x7e\xa8\x0e\x1b\xd4\xce\x2a\x77\xcc\xf6\x53\xea\x5d\x98\x53\xef\xa0\xc4\x35\xaf\x30\xd5\x09\x15\x5e\x2f\x3c\xeb\x64\xaa\xe3\x62\x39\x98\x38\xbe\x28\xe9\x04\x29\x64\xeb\x32\x43\x83\x44\x8b\x03\x49\xf3\x66\xf1\xdc\x4e\x9b\x24\x4b\xe8\x1b\xa9\x73\x4d\xc4\xf3\xa3\x8f\xbd\x97\x41\x50\x25\xbe\xd2\xd4\xe3\x70\x27\x20\xb3\xc3\x84\x89\x32\xd0\x0c\xb1\x8f\x39\xc2\x45\x3d\xb7\x3b\x77\xc8\x19\xbe\xf5\xd7\x54\xf7\x1d\xed\xde\xf5\x36\xf3\xbf\xac\xe2\x3b\x6f\x15\x77\x09\xc6\xc8\xe5\x09\x89\xd4\xf0\xbb\x8d\xec\xe3\xa5\x03\x6c\x6c\x29\xa7\x5d\x01\x29\x37\xd4\xd2\xc8\xa4\xdb\xcd\x62\x5e\x67\xb9\x2e\x1d\x3f\x36\x4b\xb7\x35\x60\x57\xcd\x68\x13\x5b\xfa\x7d\xcd\xe8\x1e\x2d\x53\x29\x46\x55\x62\x9f\xc2\x8d\xe6\xbe\x0a\xaa\xe1\x09\x8e\x2c\x4e\x3e\x2f\x97\x88\xbe\x72\x18\x32\x4c\x69\xe3\xf8\xf8\xe1\xd3\xd9\xc1\xc1\x26\x76\xc9\x3b\x4d\xb7\x4e\x7b\xc0\x62\x59\x33\xe6\x34\x1c\x69\x0f\x41\xa5\x39\x8d\x95\x9a\xd3\x2a\xac\x62\x9b\x19\xb7\x60\x64\xaf\xcf\x9b\xb2\x68\x4b\x9b\x5a\x47\xe3\x15\xad\x35\x5e\xd1\x82\xf1\xaa\xd2\xda\xf4\xed\xd9\xf7\x63\xda\x49\xac\xfc\x4d\x5a\xc2\xc5\xe5\xc6\xea\x81\x92\xd7\x3e\xfb\x91\x04\x1e
\xa2\x65\x45\xd2\x06\x22\x64\x8d\xc6\xcf\x19\xf9\x0f\x49\x32\x2f\xff\xe5\x5c\xff\xec\xb0\x79\x49\x9b\x0b\xb4\x3a\x23\x2f\x29\x75\x56\x65\x4a\x87\x98\xed\xd6\x6f\xe6\xab\x94\x0c\xe8\x43\xc7\xbc\x89\xe5\x71\xf8\xb0\xf2\x78\x23\x05\xc7\xdd\x01\x05\xa7\x4e\xd1\xf8\x42\xcb\x09\x65\x94\x3d\x32\xdf\xc4\x67\xfd\x12\x49\x01\x54\x21\xb0\x6d\x5b\x99\xe2\xc4\x2c\x3f\x4c\x0d\x64\xbe\x40\xa3\x9c\xa5\x00\x09\x35\x1a\x2a\xf3\x0e\xad\xb0\xdc\x39\x9c\x9c\x03\x13\x76\xd4\x81\x04\xa5\x1a\x03\x18\x24\x93\x16\xfa\x89\x09\x5d\x23\x14\x4c\x08\x5c\xa0\x15\x03\x50\x48\x28\xb3\x4c\x67\xd9\x54\x33\xaa\x1a\x76\xab\x0e\x04\xc5\x41\xb4\xb7\x00\x4b\x34\x00\x13\x86\x75\x7e\x05\xa4\xb3\x5f\x41\xb5\x96\x56\x9c\x0e\x6d\x3d\x1d\x5a\x3f\x9d\x2a\x15\x89\x42\x5f\xed\x15\xc7\x2e\xef\xd7\x2f\xed\x37\xb1\xff\xe0\x64\xb2\x7e\x71\xb2\x07\x07\x86\x63\x51\xe4\x86\x94\x21\x3b\x45\x23\xb5\x40\x97\xf4\x03\x1d\x73\x7b\xb6\x4d\xf1\xa7\x8b\x8c\xe3\x94\xf5\xd6\x6c\xb6\x0c\xef\xe7\xac\x8e\x73\x87\x19\x8a\xf8\x1b\x8c\x1d\x29\xc7\x00\x29\x31\x49\xa2\xae\x38\xb1\xba\xe2\xe7\xd4\x15\x5a\x7d\xfb\x47\x46\x44\xad\x15\xab\x53\x2b\xfc\xc6\xdb\x3f\xdf\x1c\x11\xdb\x15\x1d\x91\xf1\x60\x02\x5b\x75\x99\xd7\x54\x88\x4d\x0a\x9a\x4a\xe6\xfa\x38\x54\xc6\x98\x6c\xbf\x41\xba\x53\x88\x24\x4d\x73\x98\xac\x37\x44\x26\xe4\x35\x1a\x0b\x81\x74\x2b\x1a\x8b\x34\x17\x6f\x4d\x6d\x49\xd8\xb5\xe0\xd3\x64\x6a\x50\x1b\x5b\x17\x97\xd6\x6f\x42\xb2\x99\x2f\x14\x4f\x9e\x21\xae\x66\x01\x34\xaf\x06\xa6\x85\xae\x91\x1b\x72\x64\x50\x73\x24\xb6\xc8\x7a\x8b\x0e\x0d\x91\xd6\x68\xa9\xdf\x7b\xb5\x6a\x97\xdf\xcd\x9d\xa1\x49\x21\xc3\xc4\xbb\xb3\x3d\x57\xf5\xd1\xd9\x92\x1b\xab\x3a\xc1\x0e\xa8\x3a\x6c\xc7\x55\x9d\x8e\xfa\x84\x50\xed\x29\x76\x82\xc3\xd0\x6f\x32\x21\x6f\xa4\x38\xe4\xfb\xaf\x37\xa8\xd4\x31\x6e\x92\xc8\x46\x7d\xce\x24\x45\x2b\x75\xfe\x9c\xe9\x77\x39\x67\x86\x36\x53\xc7\xc4\xa0\x8b\xeb\x45\x8b\x63\x22\x53\xc7\xc4\x92\x6e\x9d\x46\x1f\x87\xca\x63\xa2\xdf\xbc\xe7\xb5\xae\x79\xd7\x6d\x1f\x77\xf3\xd7\x1d\x4e\xc1\x9a\xca\x76\xd6\x9a\x9a\xde\x99\x7a\x85\xb7\xbd\xf9
\xe7\xc8\x09\xf8\x3c\x45\x86\x77\xde\xfa\x50\x5d\x43\x41\xb7\x6c\xf3\x7b\x1b\x6f\x7e\xcf\x1c\x31\xc3\xb1\x9b\x76\xbd\x67\x9a\x90\x19\x0e\x04\xef\x15\xa7\x74\x4d\xe8\xda\x4e\x8e\x0f\xb8\xb6\xdb\x70\x23\xc6\x8a\x7c\x20\x4c\x8f\x02\x91\xf5\xde\x59\xa0\x7a\x06\xe0\xb6\x63\x00\x8c\xdd\xdb\x4d\x4d\xdc\xf9\xd6\x6d\x24\x1b\x32\x0e\xb2\x4b\x8c\xc3\xdf\x59\xc6\xa1\xd7\xbd\x85\x38\xdf\x88\x69\x44\xdd\xb7\x0d\x54\xa8\xbd\x6e\x50\x64\xe1\x77\xb8\x34\xd8\x6b\x98\x56\xc1\xe3\x3f\x73\x3d\x41\xcc\xed\x1f\x17\xba\xfa\x35\xd7\x9f\x7e\x83\x9c\x12\x85\xef\xec\xfb\xea\xb7\x31\xaa\xb7\xf1\x7d\x25\xb1\xef\x6b\xab\x2e\x3b\x28\x60\x44\x31\xde\x92\x6e\xf3\x0a\x58\x07\x3b\x7d\x8d\x57\xe8\x72\x59\xc5\x72\x29\x62\x24\xb8\x94\x66\x6d\xe5\x19\x1a\x10\xc7\xeb\xfb\xd8\xe7\xbe\x13\xf8\x7f\x20\x9a\xad\xae\x76\xf7\x21\xc2\x97\x3e\x25\x58\xac\xf0\x36\xf5\x2c\x75\x08\x7e\x99\x30\xff\x98\x25\x2c\x64\x9b\x8f\x14\x4d\xfd\xeb\x51\x6c\x21\xb5\xd2\x9f\xe1\x92\x78\xbf\x96\x57\xcb\x95\xc0\x53\x0d\xf4\x28\xe6\x3d\x6b\xf3\xbb\xac\xb5\x2a\x80\xe5\xa3\xa4\x3d\x1e\x82\x52\x64\xbb\x91\x7d\x49\x46\xd8\xf5\xa7\x7e\xc0\x05\x7a\x13\xf4\xa7\x11\x76\x27\x8f\x0b\x85\xae\xd8\x9e\x15\x23\x8b\x3b\x33\x21\xaf\x47\x60\x4a\xe8\x02\x40\x37\x70\x18\x13\x1f\xd8\x68\x0c\xd4\x74\xfa\xe7\x0e\x05\x13\x48\xb0\x3b\x77\xf0\x0c\xa5\x7d\x33\xaa\x68\x28\x03\x96\xa8\xdb\x9f\x51\x12\x2e\x1f\x14\xb0\x0c\x24\x99\x59\x6c\x0e\xcb\x72\xd9\xbf\xf4\xd1\x55\x99\x93\xf4\x79\x40\xdc\x8b\x3e\x0b\x08\xcf\x2b\x06\x29\x1b\x82\x84\xd5\x9f\x1a\xf2\x2e\xc7\xf2\x99\xfc\x57\xc8\x83\x94\x84\x1f\x40\x6c\xeb\xef\x5a\xbe\x9b\xdf\xf1\x7f\xa0\xb4\xac\xc7\x42\x30\x0a\xe9\xb8\x17\x5f\x69\xe9\x3f\x54\xc7\x53\x4a\x16\x06\x32\x37\xb3\x21\x50\xdb\x23\xae\xe4\xc7\x56\xf4\xc7\x9b\x00\x89\x7f\x60\x60\x17\xfc\x72\x8c\x82\xdf\xdf\xa0\xd4\xef\x6f\x90\xf6\xfb\x1b\x4c\x46\xe3\x89\x59\xc1\xef\x00\x47\x8b\x65\xe0\x70\xd4\x07\x8f\xd0\xda\x4c\x1b\x0c\x2a\x96\x3a\xde\x98\xf0\x46\x30\x24\x1f\xcf\x46\xfb\xc3\x2c\x25\xc7\x2b\x37\x81\x9e\xef\x9d\x22\x17\xf9\x97\xd2\xa3\x86\xa5\x49\x40\x80\x2f
\x7d\xca\xec\xbc\x85\x4d\x76\x06\xa4\x63\xe8\x5e\xbe\x4c\x8f\x09\xcc\x17\xec\x91\x0d\x7a\xd1\xcf\x91\x81\x6c\x6a\xc9\x86\xef\x7c\xc6\x4d\x8b\xa2\x05\xb9\x44\x5a\xf0\x21\x88\x8d\xc0\x18\xc7\x8d\x65\xdc\x01\x3b\x38\x30\x78\xb6\x91\xe3\x79\x91\xa8\x94\x2d\x58\x64\x7d\xee\xc9\x6b\xee\xb5\x00\xe7\x2d\x66\x88\x46\x8b\x94\x86\x47\x45\x67\x65\xe1\x35\x54\x9b\xd7\x4a\x13\x28\x69\xa4\x91\x50\x85\x82\x47\x09\x84\x7b\x5c\x28\x8a\x8d\x40\xf2\xdc\x94\x9b\xf7\xd9\xb9\xc3\x7c\xb7\xef\x51\xb2\xf4\xc8\x55\x69\x48\x42\xb6\x46\x4d\xdb\x3b\xfb\xac\x55\x36\xd0\x34\x07\xe0\x4d\x56\xa7\x9d\xa1\x52\xef\xeb\x58\x78\x74\x05\x5f\x88\x51\x8e\x30\xef\x23\xb5\x56\x77\x41\x47\xa1\xaf\x3f\x0f\x7a\xb6\x81\x96\x3f\x03\x3a\x38\xf5\x67\xb3\xac\x0e\xd1\x19\x1d\x51\x1f\x3b\x89\x8e\x58\x0e\x37\x48\xe9\x8a\x46\xbb\x08\xb3\xf6\x85\xfd\x33\xaa\x8f\x2e\xf1\x50\x1f\x79\x3e\x27\x0f\x0b\xd7\x82\x78\x68\x04\x52\xa6\xa5\xc3\x7f\x31\x82\x01\x24\xf8\x02\xad\xc2\x65\x67\x28\xf0\xd4\xa7\x0b\xd9\x51\xdf\xf3\xc5\x62\x75\xd5\x21\x37\xd2\xe5\xb0\x9d\xd7\x06\x18\xc2\xde\x4b\xf9\x25\x7d\x02\x8f\x14\x64\xb1\x72\x60\x12\x85\x25\xa5\x51\xfc\x70\x4a\xab\x91\x57\x3c\x62\xbd\x51\xa9\x0f\x9d\xf4\xc1\x98\x1e\x7f\xf0\xb1\xd0\x59\x04\x59\xea\xc5\x90\x5a\x17\xd4\x3f\x2a\xdc\x5c\x74\x29\x30\x21\xa2\x94\x54\x39\xc3\xc8\x32\x60\x66\x89\xff\xca\xe7\xf3\x7e\x7a\xdd\xc1\x04\xea\x78\xcd\x11\x78\x49\x51\x6f\x45\xc2\x1e\x0b\x29\x7a\x01\x60\x32\x23\xa1\xb8\x2e\x11\x5d\x38\x02\x1e\xf1\x43\x1c\xae\x0b\x2a\x5d\xed\x45\xb4\xb2\x36\x39\xd8\x45\x19\x75\x5d\x4d\x9d\x25\xea\x5c\x82\x05\xb8\x3f\x34\xd7\xd6\xb9\x2f\x50\x37\xf7\xa3\x2e\xf4\x55\xb2\x8d\x0b\x25\xba\xa9\x9d\x57\x1a\x95\x55\x86\x2b\x42\x48\x99\x19\x65\xc4\x9b\xb2\x2e\xa6\xe6\x89\x6d\x3e\x1e\x4c\x20\xb5\xb9\x0e\x89\x1b\x9a\x7b\xf9\x49\xa6\xa8\x12\x62\x13\x16\x8a\x63\xca\x80\x34\x0a\xfd\xf3\xd9\x29\x9a\xf9\x8c\x23\x8a\x3c\x03\xe8\xbd\x16\x5d\x99\x57\x80\x3f\x30\x47\x05\x6d\x37\x5a\xf9\xf8\x4a\x3d\x5f\x43\x2f\x26\x30\x4d\x8b\xcf\x11\x4e\x76\x0b\x37\x6f\xf8\xc1\x01\x8a\x1b\x9a\x6b\xd3\x72\x1d\xee
\xce\x8d\x3c\xc2\x22\xd7\x17\x54\x50\xb6\x35\x49\xc5\xa3\xc7\x0a\x75\xca\x56\x96\x59\xb3\x56\x4c\x68\xb9\xea\x9f\x87\x9c\x97\x47\xf9\xba\x81\x2f\xfe\xb7\x3c\x27\x0e\xf5\xaa\xda\xed\xa2\x74\x4c\x22\x44\xfa\x4b\xdf\xbd\xc8\xeb\x46\xa9\xab\x01\xff\xda\xc7\xec\xd0\x0d\x7c\xf7\xa2\x4f\x42\xce\x7c\x2f\x1f\x6d\x71\x1f\xb2\x26\xc5\xae\x62\x69\x1a\x06\x60\x53\x89\xa9\x57\x93\x04\x01\x72\x8b\x91\x2e\xf9\xd2\xda\x96\xbb\xb8\xda\x0a\x08\xec\x70\xff\x12\xf5\x99\x4b\x49\x10\xc8\xa8\x99\x8e\x58\x28\x76\xb0\x8b\xc8\x98\x22\xe4\x9d\x3b\xee\xc5\x43\xe9\x1c\x5d\x68\xfd\xca\xf1\x79\x85\x40\xe5\xfe\x02\x91\x90\x03\x13\xca\x7b\xde\x4b\x27\x90\xde\x4c\x25\x02\x36\x02\x10\x4c\x20\xe3\x0e\x47\x23\x40\x91\xe3\xad\x40\x5a\x88\x0e\x36\x15\xa2\x2c\x74\x5d\xc4\x58\xec\xfc\x25\x7f\x15\xe5\xa4\x60\xd5\xba\x8e\xfc\x3b\xcd\x95\x61\xd4\xac\x60\x61\x91\x2d\x0a\x02\x4f\x02\x01\x20\xd0\xad\x40\x24\xf3\x12\xc9\x20\xd0\x96\x92\x4a\xc7\xe8\x18\xa6\x25\x8f\x1b\x20\x87\xbe\xd5\x58\x4b\xc9\x2c\xa4\x62\x3e\xc4\x57\x60\xa6\x25\x69\xba\x00\x72\x21\xab\xb2\xd2\x2c\xad\x39\xa0\x64\x82\x0a\xcd\x42\x08\x41\x05\x75\x47\x00\x23\x9d\xe9\x9b\x07\xaf\xc5\x26\x0b\x1c\x36\xef\x47\xea\x40\x85\x64\x95\x95\xaa\x5b\xed\x24\x73\xa1\x08\x71\x74\xcd\xbf\xee\xb1\xd3\x47\x81\xc7\x10\xcf\x1f\x3d\x73\x73\xdb\xf8\xfc\x39\x77\xd8\xdc\x77\x09\x5d\xf6\x55\xf1\xf6\xa0\xd4\x39\x26\x2a\x8d\xed\x38\x29\x3a\x27\xde\xaa\x99\xb7\xde\xf8\xec\xb5\xb6\xe0\xfc\xe6\x33\x5f\xae\xf3\xb0\xde\x3e\x9c\x58\x6e\xb5\xe1\xd6\x48\xac\xef\x92\xbf\x5e\x22\xca\x7d\xd7\x09\xfa\x0b\x84\x43\x60\xae\xa1\xd2\xc8\xd9\xe8\x26\x32\x16\x8d\x0a\x5b\x29\xe5\x97\xea\x46\xb7\x02\xc0\xfc\x7e\x70\x70\x90\x67\x06\x85\x09\x03\xb8\x9f\xef\xa3\x58\x47\xa6\xf0\xc8\xad\x25\x32\x6f\x90\xc5\x1d\x3a\x43\xdc\x72\xe7\xc8\xbd\x40\xde\x0b\x23\x0d\x9e\xe3\x79\x0d\xb0\x41\x6c\x31\xbe\x0a\x90\x35\x47\xfe\x6c\xce\xc5\xb1\x24\xfd\xf3\xca\xc7\x1e\xb9\xb2\x7c\x8c\x11\xfd\x59\x7e\x7a\x04\x96\xd7\xc0\x1c\x19\x9d\xb1\xd8\x30\x92\xf4\x6d\x5f\xb7\xa1\x4d\xe9\xa1\x24\xc1\xed\x47\x81\xc5\x5f\xeb\x82\xad
\x64\x2e\x13\x01\x41\x49\x5a\xd2\x72\x08\x90\xd7\xa7\x88\x91\x90\xe6\x5d\xfe\x22\x5d\x6d\xb1\x0c\x39\xf2\xfa\x12\x55\x0f\xa0\x95\x57\x82\x97\x9e\xe8\x04\x3a\x9c\x53\xff\x3c\xe4\x69\x03\x43\x34\x45\xf9\xef\xc8\x18\x40\x9e\x5c\x15\x83\x19\xf5\xbd\x53\x72\xf5\x06\x7b\xc0\x84\x21\x56\x1d\xaf\xb4\x22\x14\x41\x69\x29\xb6\x65\x00\x39\x22\xb3\xfe\x1f\xe4\xb8\x73\xeb\x93\x5e\xe3\x62\xe2\x8e\x8c\x20\xd4\xd5\x22\x67\x10\xe0\x52\x5f\x52\x9f\xcc\x66\x72\x7b\x0b\xae\x1c\x8a\x75\x2c\x10\x5f\xcb\xcc\x3b\x77\x9a\x81\x1e\x65\xe9\xc8\x24\x24\x49\x84\x51\x6e\x36\x6b\x13\x26\xa0\xe7\x06\x33\xc0\xe7\x08\x11\xaf\xe4\x70\xa9\x11\x22\x6d\xe2\x38\x32\x1b\xe5\x19\x84\x6e\x98\x61\x34\xe8\xd1\x23\x78\xa3\x87\x1b\x01\x27\xe4\xa4\x77\xd8\x63\x4b\x07\xf7\xc0\x23\x03\x3d\x2a\x9c\xe5\xe3\x75\x88\x7b\xb9\xbd\x1d\xe8\xab\xaa\xa6\x5d\x18\xc7\x17\xfe\x19\xcd\xaf\xfe\xe5\xaa\xef\x12\x0f\x2d\x7c\xa9\xaf\x65\x36\x66\xb6\xac\xa6\xd5\x2e\x2a\x37\x81\xcf\xf8\x37\x72\x88\xae\xf3\x7e\x8b\xd7\xdf\xf3\x2f\x73\xcb\x9f\x87\x60\xd2\x0c\xf4\x92\x5c\x21\xda\x67\x48\x34\xe9\x2f\xc2\x80\xfb\xcb\x40\x8b\xb0\x38\x37\x6e\x01\x0d\x99\x46\xa9\xba\xdd\x3a\xde\x45\x1a\x29\x85\xaa\x09\x45\xcd\x1d\xfc\x69\x50\x51\x77\x17\xd9\x09\x25\x3b\x7d\x21\x59\xb5\x3f\xb6\xb2\x97\xfe\x54\x5b\x28\x0d\x30\x0b\x67\x33\xc4\x84\xc2\x47\x96\x55\x0c\xf8\x2e\xd8\x2a\x0e\xb0\xeb\xe8\xdb\x74\x97\xed\x3c\xe0\x87\xe7\x68\x4a\x28\xd2\xeb\xc8\x36\xc5\x43\xbe\x9b\x9d\x47\xcb\x5d\xf1\xf1\xa7\x41\xc4\x32\x70\x5c\x34\xd7\x09\x39\x36\x44\x46\xba\x8f\xdd\x47\x48\x9a\x29\x16\x3c\x73\xbb\xe1\xa5\xd8\xd5\xce\xa3\x47\xc5\xe6\xd7\x59\x74\x5b\xa1\x26\xd7\xcd\xce\xa3\xe5\x8e\xda\xdc\x4e\x2b\x71\xd4\xf1\x7c\xf2\x15\x9c\xd8\x8b\x76\xee\xe6\xb9\x72\xe7\xbc\x8f\x9d\xcb\x07\x9d\x27\x96\x93\xe4\xce\x39\x80\xf1\x94\xc5\x1c\xda\xcd\x96\x15\x0f\xd9\x0f\x6b\xbd\x4c\x4f\x62\x02\x53\xc0\x6c\x68\x36\xe1\xce\x79\x18\x38\x5b\xba\x85\x2f\xa9\xae\x62\x11\x31\x42\x1e\xeb\x53\x74\xe9\x04\xbe\xa7\xaf\xd5\xca\x6b\xfa\x1e\xc2\xdc\xe7\xab\xb2\x1a\x81\xb3\x22\x21\x67\x87\x33\xea\x7b\xa5\xb7
\xc2\x15\x26\xd8\x62\x6c\xe4\xef\xcc\xe9\x4f\x1d\x97\x13\xba\xaa\xc9\x1f\x95\x0a\x87\x70\x76\x20\x1c\x22\x4c\xbb\x39\xc5\x33\x47\x3a\xb0\x43\x4f\x13\xdb\x83\xef\xf0\x3f\x78\x34\x29\x1c\xc5\x63\x52\x9b\x8f\xf1\x64\x8f\x5a\x09\x3f\xb2\xd3\x3f\x6e\x6f\xf7\x87\x90\x5a\xe9\x30\x48\x7b\x7f\x00\x81\x9c\x03\xf0\x71\x8f\x1e\x1c\x18\xd4\x8a\xc2\x22\xed\xfd\x41\x35\xf7\xa3\xd6\x05\x5a\x41\x9a\x04\x7d\x25\xf7\xa6\x99\x4c\x41\xf8\xe0\x00\x19\xa9\x28\x50\x88\x4d\x48\xe5\x37\xe9\x44\xb5\x5e\x1b\x66\x12\xe8\xe9\x2a\x38\xfd\xa9\xb1\x6f\xa0\x9e\x8f\x19\x77\xb0\x8b\xc8\xb4\xc7\xcd\x54\x8e\xea\xb3\xd5\x12\xe9\x3c\xd5\xaf\x1c\x8c\x09\xef\xb9\x4e\x10\xf4\x9c\x9e\xdc\x64\x3d\x87\xf5\x9c\x78\x36\xc0\x5c\xab\x70\x67\x63\x00\xfd\xc4\x2e\x6e\xc2\xa9\x9d\xc2\xae\xa6\x18\xf3\xc6\x55\x16\x5a\xa4\x2f\xdb\x2f\xd0\xca\xe6\xea\xcf\xb9\xef\x79\x08\xdb\xfb\x43\xf5\xd3\xe7\x68\x61\x63\xfd\x37\xf6\xd0\xb5\x4d\xf5\x2d\xbe\xa0\x56\x3b\x58\xc3\x65\xc5\x4a\xc6\x98\x49\xc6\xca\x0a\x28\x01\x3d\x4a\x81\x7b\x8a\xa6\x88\x22\xec\x46\x30\x8b\x56\xbd\xb9\xc3\xf0\xdf\x78\xef\x1c\x21\xdc\x8b\x22\xe2\x18\xf2\x7a\xfd\x9e\x0a\xbb\x33\x33\x35\x04\x7e\x90\x17\xdb\xe8\xf7\xf9\xed\x2d\x20\x72\x61\xc1\xbe\xce\xf9\xd2\xe3\x07\x07\x20\xc6\x5a\xf2\xf5\x05\x1a\xf1\xb5\x9a\xa9\x81\xac\x2f\x5f\xe4\x42\x7e\xf9\x72\x7b\xab\x09\x63\x86\xf8\xc7\x68\x6d\x65\x86\x2f\xb3\x2a\x11\x7e\x9e\x4e\x22\x60\xcb\x46\x3d\x38\xc0\x61\x10\xec\xdb\x36\x2f\x5f\xf7\x4f\x02\xc8\x1e\xba\x5e\x52\x15\x6a\xda\x5b\x84\x8c\xf7\x90\xcf\xe7\x88\xf6\xce\x51\x4f\xb4\xee\x11\x9a\x22\x04\xd8\x13\x84\x02\x1e\x45\x23\x98\x7b\x28\x21\xca\x28\x18\x5a\x1d\xd6\x0d\x7e\x70\x90\xa6\xd8\x1b\xc1\x79\x38\x0d\x05\xab\x19\xe9\x0d\x8b\x32\xa1\xc5\xc3\x74\x28\x71\x3e\xcc\x78\xbd\x36\x21\x3f\x38\x30\xf4\x18\x2c\x83\xb0\x17\xa5\x5f\x25\x6e\x46\x29\x74\xdb\xdc\x5c\x1b\x28\x95\x6a\x0a\x86\x06\x82\xe3\x9b\x0b\xb4\x52\xc6\x78\x87\xbf\xe5\x68\xf1\x49\xb1\x4a\x35\xc5\xec\x86\x8c\x82\x59\x59\x81\xee\xcd\x1b\x81\x2d\xdb\xb6\xb9\x8c\x07\xfa\x51\x17\x27\x08\xc8\xa5\x9a\x98\x21\x9e\x0a\xe3\x7e
\x8d\x98\x4b\xfd\x25\x27\x54\xf4\x26\x13\xcb\xab\x78\x2c\xdb\xb6\xe3\x21\xcb\x69\x85\xc7\x01\xc4\x7a\x02\xec\x85\x6a\x3a\x42\x06\x93\x33\x5b\x0b\xf2\x88\xb9\x53\x60\xc6\x51\xbe\xf2\x9b\x0e\x64\x0f\x44\xbf\x51\x4f\x71\x2c\x18\x79\x41\x94\x0f\x28\x35\x47\xea\xe3\xda\x48\xad\x78\x0b\x4a\x4e\x21\x00\x16\x71\x2c\x1d\x6e\xd4\x08\xca\x0f\x55\x4e\x18\x12\x7b\x88\x4e\xfa\xf1\x74\x02\x85\x55\x62\x0f\x07\x83\xc1\xd0\x84\xec\x91\x0d\xfe\xe8\x4b\x86\x31\xea\x81\x47\x64\xbd\x9e\x98\x10\xad\x0d\x13\xa6\xfc\x5b\xe5\xc6\xe0\x74\x95\x68\x8e\x6e\x40\x18\x62\x5c\x48\x15\xe5\xda\x19\x5f\x84\xad\x6b\x33\xdc\xb0\x12\x5f\x3f\xae\xfc\xd3\xea\x6e\x12\xaf\x7c\x8f\xcf\x47\xc3\xe1\xe3\x01\x54\x97\xc4\xa3\xc7\x83\x41\x72\xbf\x48\xd2\xf7\x8b\x88\x2b\x84\x98\x50\x5f\x57\x2a\xdf\xa9\xbc\x0f\x54\xca\x47\xa7\xad\x4f\xb1\xd4\x84\xec\x9c\x57\x78\x4e\x2e\xfc\x4a\x42\x86\xde\x5c\x22\xcc\x4d\xd3\x9f\x66\xcb\xd4\xe7\xf8\x25\x36\x37\xa4\x14\x61\x7e\xa6\x6e\xed\x15\x01\xf9\x53\x83\xef\x17\x43\xcf\x14\x20\xc0\x94\x23\x16\x5c\x79\x75\x29\xe4\x66\xe4\x31\xcc\xbe\xc8\x05\x02\xfb\x76\x61\x9c\x19\xe2\x2f\x23\x4c\x1b\xc0\x97\x9d\x2a\x11\xbe\x30\x00\xa7\x00\xe6\x1a\x98\x90\xe6\x08\x21\x11\xfa\xe8\x3b\x6c\x63\x0b\xa3\x6b\xfe\xc9\x3f\x0f\x7c\x3c\xfb\x4e\xc0\x3c\xb4\x6d\xf1\x95\x78\x48\xf0\xc7\x83\x03\xf5\xb7\x58\x6b\x8b\x93\x77\xe2\x44\x14\xe7\xc4\xe5\x66\xac\xb1\x18\xb9\x71\x21\x08\x03\x60\xc2\xc0\x1e\xc7\xfe\xf8\x8e\xe1\x19\x7f\x9b\x12\xc2\x11\x1d\x53\x12\x20\x1b\xe8\xb8\x25\x99\x6d\x60\xf2\x37\xd3\x34\xc7\x83\x09\x64\xb6\xc4\xdd\x0f\x24\x94\x74\xf4\x2a\xf0\x11\xe6\xa7\xc8\xe5\x86\xd8\x0d\x41\x55\xd9\x1e\xb3\x38\x59\x3e\xa2\x96\x2b\x3f\x2a\x1f\x87\xef\x89\xf8\xf8\x82\xe6\x3d\x29\x9c\x73\x72\x89\x80\x39\xa2\x25\xbe\x0f\xba\x2c\xf6\x72\xf8\xe3\xad\x14\xc7\x72\xc3\xe9\x67\x8f\x2a\x97\x50\xf9\x3e\xc4\x4f\x23\x59\x1e\xe2\x8e\x1f\x1c\x1c\x44\x7f\x29\xd1\x6e\xa6\xca\xac\xc4\x93\xdb\x4c\x7b\xa7\xfb\x78\x66\x2d\x43\x36\x37\x72\x4d\xf7\x44\xe7\xe9\x3c\x79\xa9\xfa\xb2\xc6\x87\x69\xbe\xc9\x77\xfd\xe1\xbe\x6d\xfb\x07\x07\xf9\xfa
\x3a\xfb\x9f\x0f\x87\xe6\xba\xe8\x40\x9f\x54\xb4\x75\x62\x9a\x31\x70\x51\x10\xf4\x95\xc6\x0d\x26\xf2\x81\xa7\x65\xe1\x32\x5a\x6e\x76\x60\xc2\xc7\x03\xdd\x91\xca\x89\x40\xd3\xea\x0b\xb2\x28\x62\xfe\x1f\xc8\xb8\x51\x33\x1d\xdd\x28\x16\x91\xf6\x52\xf9\x2f\xf1\x25\xe2\x17\x45\xf7\x15\x71\x8c\x91\xef\xc5\x48\x76\x51\xb8\x91\x57\xed\x32\x17\xf1\x8a\x56\x6f\x74\x8f\xc5\x7b\x78\xd9\x40\x26\x0f\xbd\xf2\x83\xe0\x14\x61\x0f\xd1\x4d\xdc\x2e\x11\x37\xc0\xdc\x61\x2a\x4c\x85\x81\x52\xc7\x7e\xbd\xcf\x41\xab\x10\xd8\xda\x31\x35\x66\x1c\xcf\x93\xfc\x49\x90\x32\xc2\x88\x1a\x40\x61\x18\x64\xd6\x40\x4f\xd1\xf3\xbd\x97\xcb\x25\x72\xa8\x61\xae\x25\xb0\xd5\xe1\xb4\xba\x7b\xb5\x39\xda\x8c\x20\xe1\x51\xbd\x17\x00\xd1\x75\xc4\xa8\xaa\x6d\xc9\x23\x2e\x59\x7e\x01\xf8\x39\xf1\x56\x7a\x08\x1d\xf5\xa9\xf9\x04\xce\x57\x5d\x38\x3e\xee\x7d\xdf\xf3\xfc\x4b\xa0\xaa\xc8\x47\x6e\xe2\x93\x4c\x25\x53\xd9\x90\x47\x09\x6e\x13\x64\xb8\x0d\x24\x72\x4b\xbc\x0f\x05\x69\x25\xbb\x50\x51\x56\x9f\x99\x7b\x29\xfa\xd0\xe4\xf9\xab\xc3\xe7\xd6\xc2\xb9\x16\x52\xd0\x34\x6b\x76\x19\xd6\x23\xa9\x5d\x11\xef\x2d\xf5\x1a\x96\x50\x23\xe2\xd7\xa3\xd4\xa7\x4f\xd2\xf3\xfb\x23\x61\xbe\x5a\x80\xf5\x1a\x7e\x91\x07\xee\xd3\xf8\xbc\x5d\x58\x1e\x9f\x69\x3a\x40\xde\xed\x6d\xf6\x8b\x8f\x67\xb7\xb7\x46\x44\xc9\x3f\x05\xfe\x62\x81\xe8\x91\x61\xaa\xd7\x85\xa8\x38\x4f\x78\x62\x61\x47\xd9\xcc\x12\x92\x9d\x98\x6b\xa8\xe6\xf4\x0a\x05\x41\xc6\x93\x58\x88\x4b\xd5\xa5\x00\x20\x12\xea\xd9\x9d\xa9\x0b\x21\x88\x5c\x5b\xb4\xbc\x17\x48\x7a\x27\x71\xa4\x8f\xad\x42\x58\x0a\x85\xb3\xbc\xd4\x46\xd1\xbb\x23\x38\x3a\xc9\xdb\x49\xd5\x5f\x9d\x25\x74\x72\x4a\xbb\xca\xd1\x1a\xda\x85\x0e\x25\x57\x7d\x19\xcd\x4e\xb9\xd8\xbf\x43\x53\x0e\xd3\x1f\xce\xc8\x52\xff\x4e\xaf\x5b\xfa\x8b\xa2\x19\x13\xba\xc5\x11\x5c\x12\xe2\xad\xf5\xaf\x33\x73\x69\x34\x2e\x6d\x45\x72\x3e\x36\x42\x5d\xfd\x3c\x9c\x4e\x11\x35\xf7\xc2\xbe\xbd\x84\xee\x23\xf1\x9f\xa4\x92\xfb\x28\x5d\x29\xe5\xef\xec\x25\x2b\xd2\x0f\x15\x6e\x17\xf6\x58\x25\x6d\xe2\xf6\xe0\x3b\xfe\x0f\x57\x5a\x37\x02\x3b\x7c\xc4\x21
\xb3\xb3\xd9\x9e\x3d\x4b\x1d\x17\x5f\x72\x23\x30\x4d\xe8\x4b\x9d\xd6\x1f\xb3\x89\x09\xc9\x0b\x03\x17\x51\x92\xd3\x97\x8d\xa0\x15\xf4\x91\xe6\x27\xc5\x73\xda\xaf\x9a\x40\xad\x9c\x66\xe3\xb2\x08\x04\xea\x5c\x2e\x83\xca\xb2\x05\x17\x68\x05\x20\x33\xa1\x33\x66\x13\x9b\x98\xa3\x85\x92\xcf\x81\x99\x02\x39\x99\x76\x26\xb9\x95\xd0\x33\x9d\xb1\x41\x52\x60\xb1\x31\x9f\x98\xd6\x05\x5a\x4d\x44\xe9\x22\xb2\x0d\xdd\x04\xf6\x42\xbd\x38\x26\x31\x3a\xb7\x33\x88\xda\xcb\xa3\x71\x2e\xe3\xd0\xbe\x1a\xae\x22\x94\x64\x3e\xca\xed\x01\x60\x90\xff\xcc\xd1\x02\xc0\x79\x0d\xb6\x15\x62\xf3\x1a\x56\xba\xd2\xa0\x7c\x5a\x9a\x9b\x8e\x7a\x83\xef\x7a\x9e\xcf\x96\x81\xb3\x1a\xf5\x30\xc1\xe8\x3b\x90\x5e\x9c\x45\x26\xdf\x58\x60\x2f\xc6\x7c\x22\xb1\x7c\xde\x80\xe5\xf3\xfb\xc3\xb2\x92\x18\x53\x83\xc1\x73\x18\x08\xfc\x2a\x24\xc0\x34\x21\x09\x32\x53\xac\xc9\x20\xe6\x3a\xc3\xb4\x6c\x67\x9d\xf2\x69\x96\x91\x5f\xe5\x19\xaf\xb4\x6f\x71\xb9\x26\xaf\x5f\x8b\xf6\xf1\x32\xe4\x40\xbd\xf1\x1b\x38\xe7\x28\xd0\x7f\x3b\xfa\x5f\x1d\x43\xa7\x53\x63\xad\x95\xb8\x8e\x4f\x1d\x6a\x04\xd3\x92\xcf\x37\x7e\x92\x57\x26\x84\x1a\xc0\x51\x6f\xc3\xf1\xe8\x94\x22\xc0\x4d\x0e\x59\x06\x90\x73\x06\xf0\xe6\x3c\x3c\x3f\x0f\x10\x93\xe6\x0e\x19\xfc\x19\x19\x3f\x2e\x7d\x74\xa5\xd5\xbf\xb5\xb9\xc7\x2d\xb1\xc0\xe2\xc8\xaa\xda\x63\x73\xdd\xc6\xd5\x98\x93\x05\x99\x51\x67\x39\x5f\xf5\xe5\x3f\x5b\x77\x83\x2f\x7a\x94\xf6\x86\x4f\x06\x7f\x47\x6b\x88\x73\xc7\xaf\xe8\x0c\xfe\xfd\xe0\x85\x4c\xf2\xf5\x56\x70\xfa\xbf\x23\xf3\x70\x38\x18\x8c\x06\x2d\xc2\x83\x6f\xa4\xe2\x74\x7c\xfc\x04\x26\x50\x8d\x06\x70\xe1\x5c\x8f\xfa\xcf\xa3\xff\xdb\x38\xf0\xd6\xa7\x6e\x80\xec\x31\x37\x86\x26\xe4\x86\x75\xf4\x58\xfe\xa3\xfe\xfb\x54\xfe\x33\x34\x75\x4a\x38\x49\x23\x4c\xd4\xed\xeb\x7a\x7d\x4b\xff\xa3\x6a\xf6\x87\xe6\x64\x0d\x17\x7e\x10\xf8\x0c\xb9\x04\x7b\xac\xa0\xa6\x7b\xbe\x3a\x5e\x33\x00\xc1\xc2\xb9\x2e\x71\x9c\x2d\xc4\xad\x3a\xd7\xb1\xdd\x71\x8c\x8d\xa3\xc7\x10\x99\x10\x1b\x8f\x07\xea\xdf\xa7\xfa\xf7\x70\x20\x3e\x4c\x04\x61\x44\x43\x14\x06\x4f\xf0\x57\x32\xae\x0e\x06
\x96\x90\xca\xf7\x3a\x53\xb5\x25\x3f\xb0\xe2\x8e\x6f\x6f\xc7\x4a\xd7\x14\x3c\xe1\x8d\x93\x8e\x94\xe5\xe6\x4d\x52\xf1\xfb\x8c\x67\xb1\x04\x24\x1d\x54\xa0\xbf\xc1\xa4\x81\xa0\x6b\x6e\xe1\xef\x8f\x9f\x0c\x22\x35\x36\x7a\xb4\x11\x06\xf6\xf1\x93\xc1\x21\xb7\xf0\x9e\x38\xc4\x6b\x77\xe7\x52\x52\x1b\x48\xbf\x69\x6e\xdb\xb4\x3f\xbc\xbd\x95\x92\x9d\x3a\xd8\x23\x0b\xc3\xfc\x47\x90\x3c\xc1\x86\xb3\x09\x8d\x78\x62\x73\xbf\xa1\x44\x06\xa7\x1d\x3f\x19\xfc\x9d\x1e\xc6\x33\x58\x1d\x8d\x92\xa9\x1e\x16\x61\xfb\x7b\x7f\xf8\x64\x00\x05\xcb\x19\x29\xce\x13\x2f\x45\xaa\x1d\x64\x68\x26\xcf\x1c\xdc\xd2\x7f\x69\xaf\xe5\xa6\x2d\xbd\xf2\x51\xe0\x75\x4d\x94\x91\x6a\xb4\x43\xd7\xb8\x98\x0b\x8d\xaf\xf8\xe2\xdd\xd6\x2f\x19\xa3\x81\x62\x56\xd3\x6a\x4a\x87\x8e\x1b\x3f\x9d\x5c\x15\x9b\x5c\xd6\x02\x79\xfe\x16\x96\xa1\xc6\x5a\xd9\x7e\xf2\x72\x2a\x65\x57\x9c\xd2\x66\x28\xe3\xb3\x52\x3d\xe8\x03\x94\x10\xba\x87\xf2\x8d\xf4\x7c\xed\x7e\xaa\xc6\xbf\x57\x6e\xe6\x16\x34\xa5\x6e\x26\x44\x7f\x3e\x62\x19\x6d\x25\x65\xa6\x65\x88\x4b\x55\x21\x5e\x53\x03\xc9\x2b\x32\x98\xd5\xcc\x92\xa3\x66\xea\xd4\x99\x7e\x44\xd9\xd0\x69\x7f\x6f\x44\xe3\x51\x76\x80\xb5\x99\x0e\x08\x2b\x09\xc9\x8a\x2c\xbd\x4a\xb5\xb9\xbd\xd5\x6e\x02\xa7\x61\x20\xe4\x95\xbe\xb7\x59\x47\xb9\x39\xb9\x25\x8a\xb5\x2e\x75\xb6\x5a\x22\x30\xca\xda\x09\x93\x91\xa1\xaa\x0b\xb5\xd9\x38\x9d\xa0\x53\xf7\x3e\x2a\xc4\x9a\x71\xb4\xb0\xf4\xc8\x51\xb3\x3a\xd5\xa7\x48\xe3\x5a\x2f\xaf\xcd\x1d\xa0\x82\x92\xa5\x2c\x91\xa9\x25\x0a\x77\xe4\xa1\x3b\xf5\xa9\x7c\x40\x5d\xb1\xb6\x60\x3b\xe4\xdd\x4c\x36\xa9\xc8\x6b\xa9\x61\xaa\xa7\x47\x46\x37\x82\x4a\x47\x37\x0e\x1b\x01\xf1\x17\x58\x43\xa6\x7e\x45\xc1\x2c\x14\x49\xd7\x37\x79\x6d\x06\x45\x95\x1f\x65\x41\x51\x19\x90\xc7\xe3\x16\xfa\x87\xaa\x17\x6b\x20\x40\x2a\x2e\xd8\x99\xe9\xe4\x66\x40\x29\xfb\x60\x92\x93\xa6\xb1\x2c\x95\x5a\xd3\xc8\xd0\x8f\x33\x83\x97\x41\x00\xf2\x46\x13\xd3\x7c\x04\x7a\x06\x78\x94\xa5\xf5\x32\xd9\xae\x7b\xc9\xc8\xde\xe8\x54\x3e\x42\x5a\x2b\xf8\x61\x65\x28\x7a\x84\xdc\xd4\xfc\x60\x2d\x3b\x31\xf5\x19\xc6\x30\x1f
\x01\x33\x22\x68\x2d\x87\x54\xd3\xdc\xe3\x78\xfa\x31\x11\x8b\x41\x6a\xc7\x97\x72\xf8\x05\x00\x23\x0c\x03\x9b\xcb\x57\x99\x20\x4b\x8a\x02\x51\x14\x40\x22\xad\x65\xa9\x03\x47\x74\x6d\x6c\xf4\x87\xf9\x78\x2b\x99\x2c\xc5\xcc\x56\x8f\x0d\xdb\xc4\x3c\x38\x28\x36\x79\xfb\xba\xa6\xc1\xed\x2d\x00\xfb\xb6\xcd\x22\x45\x4b\xb7\x91\xf8\x30\x45\x41\x9a\x07\x30\x84\xbd\x57\x01\xc1\xa8\xc8\x96\x44\x91\x38\xad\x10\x8c\x00\x44\x75\xf1\x8d\x99\x9d\x97\xbc\xa9\xd8\x4d\x40\xa6\xda\x7d\x1b\x62\x32\x3f\xa1\x06\x61\x59\x27\x10\xab\x44\x69\xf2\x4a\xe2\x5f\x22\x73\xcb\x22\x33\xc5\x5d\x12\x8e\x92\x62\x2e\xfa\xf5\x95\xd4\x16\x51\x4d\xc1\x27\x19\x28\xaa\x36\x25\x24\x25\x15\x5e\x23\xc6\x7d\x2c\x57\x50\xd7\xf2\x6d\x55\xc6\xac\xa9\x8f\x3d\xc1\x7c\x54\x02\x24\x66\x42\xa7\xa2\x88\x98\x7b\xfe\xed\xad\xe1\xdb\x37\xf2\x4a\x99\xad\xa1\xae\x67\x8f\xfd\xd8\x34\xaf\x3f\x99\x26\x74\x6e\x6f\x0d\x47\xd7\x25\xa9\xba\x4e\x49\xdd\x96\xba\x00\x4c\x00\x1d\xf9\x30\x07\xd4\xc8\xc9\xa9\x0a\x72\x27\xbf\x47\x57\xef\x24\x2f\x2f\x89\x26\xfd\xcc\x90\x74\x15\xe1\x21\x45\xbd\x57\x92\xda\x7b\x9f\x54\x7a\x10\xed\x3b\xd3\xfb\x1b\x78\x84\x1e\x81\xbf\x01\xf9\x4e\xaa\x7c\x0c\xbf\xec\xa5\xad\x0a\xe9\x93\xc3\x1f\x2a\x0b\x29\x57\x19\xee\xd5\xa9\x31\xd1\x61\xd4\x3b\x28\x50\xa9\x32\x28\xe6\xf9\x66\xcc\xb6\x21\xd1\x7f\xec\x31\x5b\x30\x6e\x75\xb1\xad\x75\x1c\x9a\xd6\x71\x22\x4f\x0d\xb1\x63\x95\x85\x48\x5d\x79\xd5\xe8\x3d\xaa\x3d\xa4\x25\x7a\x4f\x8a\xd2\x46\x51\xf7\x19\xca\x1a\xf9\x53\x23\x7a\x58\x2a\x76\xec\x61\x07\x07\x46\x9a\x6e\x75\xcf\x91\x1c\x31\xe1\x20\x23\x2d\xf2\x48\x8c\xc4\x63\x44\xa1\xb1\x19\x56\x5d\xb0\x8e\x49\x4c\x91\x31\x69\xc5\xde\x5a\xcc\xe2\xea\x51\x4b\xc3\x2c\x66\xfe\xd2\xda\x84\x6f\xae\x9b\x70\xc1\x0a\x79\xc1\x74\x41\x64\xeb\x8b\x5b\x58\xd1\xc5\x8d\xb1\x89\xf8\xf9\x96\xd5\x3f\xc9\xaa\x21\xb3\x8d\xc0\x8e\x68\x08\xde\x45\xdd\x09\x52\x9a\xce\x5d\x94\x4b\xb5\xe3\x95\x42\xa9\xfe\x6e\x50\x30\x55\xa5\x7b\x50\x31\x9d\x20\x20\x57\x00\x02\x0f\xe1\xd5\x3d\xe8\x96\xec\x7e\xf4\x41\x85\x8e\xa2\x46\x58\xa3\xff\xa5\x05\x4e\x85\x52\x97
\x53\x26\x4b\x75\xc2\x82\x58\x6a\xd9\x57\x85\xba\xa8\x89\x52\x2b\x8c\xed\xf6\xde\xc5\x65\x37\x95\xef\xe2\xf2\x41\x54\xbd\xd2\x8c\x16\xa5\x13\xd9\xc8\x1e\x22\x5f\xf3\xfd\xcb\x1c\xd2\x5e\xb7\xfb\x17\x23\x78\xb4\x3f\x84\x08\xd7\x3d\x58\x7b\xce\x89\x03\x4c\x58\xad\x08\xe6\x05\x90\x4a\xf7\xba\x3f\xcc\x38\xbf\x7d\x3b\x56\x15\x88\x8b\x76\x15\xe5\x97\x59\xb0\xab\xe0\xb4\xce\xe1\x78\x9e\x74\x68\x70\x02\x30\x2a\xe6\x78\x5f\x3a\x14\x61\x6e\xfd\x82\x56\x20\x2f\x55\x53\x82\x17\x88\x72\x08\xc0\x23\x03\x1c\x8a\xcd\xce\x5f\xf0\x11\x00\xe6\x23\x5c\xa2\x95\x48\x34\x16\x8c\x31\x1a\xb9\xf9\xe1\xe5\xe7\xcc\x73\x2b\x1a\xa6\x72\x6b\xce\x6f\x0a\xde\x42\xee\x4c\x9c\x7f\x0d\x33\x9a\x58\x7b\x83\xcf\xc5\xe5\xe1\xb4\x24\x60\xb0\xae\xbe\xd2\x0f\xbe\xfa\x39\x33\x9e\xc9\x1d\x35\x15\x2a\x7d\x6e\x38\xf5\x17\x05\x8e\x73\x2f\xa6\xf2\x2a\x15\xa2\x4e\x5b\xa8\x94\xa9\x32\xa9\x2c\x0c\x12\xb1\x49\x85\xd8\xa4\x5a\x6c\x4a\x77\xbe\xac\x58\xcf\x08\x2b\x45\xfe\xf1\xfb\xbe\xfa\xa9\x5f\xe9\x54\x50\x21\x05\x83\x9c\x14\x6c\x4f\x65\x94\x10\x5e\x9a\x10\xa0\x85\x58\xfc\xea\xb4\x26\xdf\xdc\x6c\x4f\x6e\x2a\xc7\x4e\x8a\xea\xee\x21\x8b\x53\x0d\x59\x6d\x7a\xa3\x4b\x82\x70\x81\x99\x3d\x3e\x7a\x0c\xa3\xff\x9f\xac\x2b\x53\x37\x19\x9a\x54\x91\x57\xe2\x3b\x59\x48\x5e\x14\xd7\x35\x8b\x37\x90\x25\x8d\x10\x04\xe2\xb0\xab\x47\x06\xea\x96\xef\x6b\xcd\xe3\xe7\xcc\x2c\x36\xd2\x6f\x55\xa6\xb0\x4a\xfd\x56\x6e\xd4\xac\x75\x93\x78\x5d\xf4\x5a\xa9\x70\xa8\x0c\x54\x46\x6b\xbd\x53\x51\x35\x9b\x93\xab\xbb\xf2\xd0\x74\x40\x5c\xb1\x74\x86\x78\x3f\xbe\x5a\xad\x09\x9c\xdb\x58\xbd\xcb\xb2\xb8\xbb\x9c\xe3\xea\x98\x70\x85\x4a\xd5\x66\x7f\x15\x92\x83\x4a\xaf\x17\xe4\x9d\x39\xe7\x20\x4a\xec\xd5\x57\x09\xc8\x80\x59\xc3\xee\x23\x12\x33\x72\x34\x66\x96\x5a\xcd\x8b\x54\xa5\x2d\x4b\xd5\x84\x65\xde\xde\x16\x5b\x7d\x24\x94\xcb\x26\x9f\xa4\x4d\xa5\x52\x38\xd0\x06\x7d\xae\x16\x0b\xb1\xe7\x91\x56\xad\x94\x4e\x9e\x55\x37\xe3\x59\x7d\xb8\xc2\x88\x6a\x9f\xd1\xef\xc6\x5f\x3b\x8f\x7f\x60\xfc\xcd\x4a\x05\xd6\xf6\xa4\x7b\xd4\x58\x46\x77\x01\x19\x4f\x0d\x26\x23\xed\xc7
\xd5\x7b\xd4\xf3\xfc\xcb\x9e\x8c\x84\xf9\x9b\xa9\xce\xd1\x59\x3f\x90\xe4\x25\x76\x03\x99\x7b\x38\x1d\x1a\x17\x3d\x92\xde\xc3\x89\x5f\xf6\xc1\x81\xa8\x97\xf1\xd3\x36\xd7\x90\x11\xca\x55\x82\xb9\x1f\x56\x6f\x17\x62\x5f\x4b\x67\x8a\x32\x82\x2a\xcd\x61\x07\x69\x5a\x63\x4e\x65\xda\x8b\x34\x6d\xad\x64\x47\xb9\xf0\x46\xc5\xdc\x78\xf4\xc5\x60\x34\x54\xaa\x6d\x9c\x97\x6f\x54\x92\xa8\x4f\xd4\xeb\xeb\x8a\x51\xc2\xbe\x51\x64\x44\x2c\x1f\xa6\x5f\xd1\x6f\x6f\x18\xbf\x32\x18\x39\xb4\xc4\x31\xa2\x83\xb6\x0a\x71\xf4\xc8\xee\x57\x94\xf6\xf9\x17\x2d\xd2\x2e\x3a\xc8\xa2\xc8\x0b\x5d\x94\xbe\xe4\x4b\x9e\x27\x8d\x9d\xc4\x51\xea\xb0\x80\x21\x17\x27\xad\x81\xb9\xce\x44\xf8\xa4\x6e\xad\xe3\x8d\x2d\xbd\x95\xd4\x16\x88\x88\xfe\xe4\xef\x46\x55\x54\x90\x1c\xcd\x45\x7e\x10\xef\xa1\x43\x19\x45\x24\x0e\x8e\x6a\x6f\x61\x13\x06\xf6\x00\x32\x7b\xf0\x5d\xf0\x0f\xfc\xdd\xa3\x47\x01\x64\x8f\x6c\x6e\xd2\x71\x30\xb1\x91\xc5\xc2\x73\xc6\xa9\xc1\x60\x12\x8f\x47\xd7\x06\x87\xc7\x91\x4d\xae\x3f\x34\x1f\x0d\x07\x7f\xc7\xeb\xf4\x8b\x6c\x79\x39\xad\x26\x6f\xcd\xf9\x22\xf8\xe4\x4c\x91\xa1\xe2\x4a\x46\x3d\x69\x36\x5f\x5e\x83\xd2\x97\x34\x6a\x04\xc2\x03\x09\xf8\x6e\x86\xab\x9c\x80\x87\x9c\x70\x27\x90\xae\xa3\x05\x75\xe8\x66\xe1\x5c\x7f\x54\x3b\x06\x2e\x9c\xeb\xff\x52\x9b\x4a\xfc\xf9\x4a\x6f\x98\x75\x85\x17\x5b\x6c\x47\x4c\x7a\x50\xde\x76\xba\x0f\xf5\x23\xea\x05\x4c\x0a\xb4\x98\x79\x2a\xf7\x11\xcd\x9c\x36\xb0\x20\xc2\xe1\xe0\xb9\x74\x55\x9b\x97\x4f\x3d\x81\xaa\x4c\x97\x0b\x0a\x11\x44\xa9\xfa\xa6\xe8\x97\xa2\x85\xe3\x8b\x99\x6e\xd2\x7d\x3d\x35\xb9\x4e\xe0\x1a\x8f\x07\xff\x77\xaf\xdf\x03\x8f\x94\x6b\x1c\x09\xb1\x57\x3b\xa5\xc3\x23\x53\x50\xa0\x29\x93\x82\x26\x38\x6d\x61\xee\x8d\x24\x4e\xa1\x7b\x6d\xf1\x85\x40\xb1\xf7\x68\x95\xf4\x00\x7a\x9d\xb6\x38\x40\xb4\xf2\x7a\x80\x68\xed\xb7\x38\x42\x4c\x4e\x62\x08\xcd\xe9\xcb\x97\x2f\x4d\x95\x65\xd4\x41\x8b\x4f\x97\x24\x2d\xe4\x3b\xb1\x50\x4b\x98\xca\xfe\x63\x42\x6f\xdd\x7f\x8c\x20\xd9\x7f\x24\x92\x2a\x07\x88\xa1\x6d\x3f\x42\x82\x20\xb3\xce\xdf\xb1\x5c\x8a\xd5\x69\xf7\x4a\x3b\x67
\xe1\xe2\xc3\x34\x39\x6a\x95\xbc\x04\x1e\x31\x9e\x4d\x25\xe0\x66\xba\x7d\x8b\x33\x2f\xdd\xfd\x33\x6f\x36\x69\x32\xb2\xd4\x9e\x30\xbf\x1f\x3c\xec\x89\x57\x08\xac\x8a\xa9\xd4\x1f\x7a\x2b\x0c\x50\x90\x25\x32\x91\x24\x85\x4c\x14\xb2\xda\x43\xaf\x55\x7b\xf2\xcd\xdb\x9f\x0e\x0e\x6a\xcc\x5b\x3a\xc5\xb2\x09\x49\xe5\x89\x58\xbd\x56\xa0\x13\x52\xd4\xbe\x68\x90\xa9\xb9\x33\x2e\xc1\x73\x14\x2c\x05\x4b\x70\xce\x4b\xd3\xe3\x2d\x1c\xb9\x7b\x55\x9d\x74\xdd\x6f\x11\xc0\xea\x3e\x25\x74\x2d\xfb\x73\xce\x59\x23\xb2\x5c\xd2\x01\x5b\xa2\xf2\x8e\xa1\x4b\xc2\xd7\x16\x5f\x2e\x69\x85\xb0\x79\x27\x8c\xcd\x77\x10\x65\xf3\x4e\x38\x9b\x37\x22\xcd\xf3\xda\xa3\xcc\xf3\x76\x0d\x61\x02\xba\xb6\xe8\xf2\xbc\x46\x64\xe1\x52\x64\x71\x1a\x96\x61\x0b\xef\x1c\xb6\x70\x07\x6c\xe1\x66\x6c\x2d\x97\xfd\x4b\x44\x59\x21\x1d\x5b\x46\x5f\x9c\xfa\xb3\x43\x84\x2f\x7d\x4a\xb0\xf6\x48\x4e\x64\x5e\xaa\x87\xe8\x9a\x0c\xcd\xd0\x75\x3e\xe7\xe5\x86\x57\x64\xce\x72\xf9\x9b\xea\xdc\x0e\xf6\x22\x2d\x22\xba\x97\x79\xf9\xf1\xa3\xa5\x87\x4e\xd2\x73\x05\xf1\x39\xb5\xf0\x0c\xfb\xb0\xf4\x19\xf6\x61\xfa\x19\xf6\xe1\x64\x74\xb3\xde\x8b\x2d\x28\x73\xdf\x43\x9f\xe6\xce\x0b\x6a\x2d\x1c\x75\x6f\xac\xc7\x3b\x45\xb3\x37\xd7\x4b\x73\x3c\x98\x8c\x54\x2d\x3d\xcb\x54\x4d\x36\x77\x52\xb5\xe8\x3a\xaf\xa4\xfe\x2c\x97\xc0\x52\x2b\x61\x04\x8d\xeb\x84\xca\x09\x5b\x9a\xcb\x99\x20\x85\x22\x75\xab\x46\x3b\x46\xe0\x1a\xd2\xb6\x34\x2e\xab\x37\x92\x39\xa5\xce\xaa\x2b\xf6\x64\x9b\x1d\x43\x9e\x82\xb3\x2d\xee\x44\xed\x46\xd4\x31\xbf\x34\x53\x63\xb9\xf4\x11\x95\x77\x0c\x65\x12\xbe\xb6\x18\x63\x3e\x6e\x83\xb0\x0e\x2a\x8e\xac\xbd\x7b\x28\xeb\xa0\xe2\x88\xda\x8d\x48\xe3\x4e\x07\x2a\x13\x95\x77\x0c\x65\x12\xbe\xb6\x18\xe3\x4e\x33\x95\x71\x07\x1f\x75\xc2\xd8\xd1\x0e\xa2\xec\xa8\x13\xce\x8e\xda\x20\xad\xc3\xd6\x94\xb5\x77\x0f\x69\x1d\xb6\xa6\xa8\xdd\x8c\x34\x72\x5e\x6f\x17\x94\x35\xb6\xff\x38\x7b\x55\x2e\xce\xc2\x65\x6d\xa4\xae\xed\xf9\x53\xe3\xd3\x6a\x71\x4e\x02\xcb\xe7\x88\x3a\x9c\x48\x9f\x4b\x9d\xe7\x22\xa9\x58\x76\x5f\x33\x9e\x40\x6a\xef\x0f\x60\x60\xef\x0f\xe3
\xbb\x99\x3d\x4e\x57\xf1\x2d\x16\x81\xbe\x8d\xc6\xb9\xfe\x27\x86\xf9\xdd\xbe\x41\x6d\x83\xd8\xbe\xcc\x7a\x67\x98\xa6\xe5\x11\x8c\xcc\x83\x03\x03\xab\x6c\x2e\x44\x5f\x54\xc3\x7d\x7e\x7b\x8b\x93\xcc\x42\xdc\xfc\x4e\x0c\x69\x7e\x97\xe4\x4b\x0c\xc4\x14\x98\x8d\xd6\x53\x1f\x3b\x41\xb0\x92\x19\x16\xf7\xe9\xc1\x81\x6f\xa9\xb9\x27\x7f\x19\x66\x5c\xc9\x9f\x1a\x81\x4e\x41\xca\xe2\x6b\x4c\xac\xe2\xf1\xf6\x4a\x53\x93\xbe\xc5\xd2\x7d\xb6\xe7\x70\x8e\x16\x4b\xde\xe3\xa4\xe7\x21\x95\x3e\x34\xa4\xa8\x87\x09\xee\x4b\x08\xcf\x83\x24\x5b\x21\x30\x65\x1e\xdc\x7a\x35\x36\x7f\x69\x48\x6d\x81\xe5\xa1\x4c\x70\x55\xe5\x7b\x96\xb5\x26\x06\x55\x76\xeb\x88\x1e\x55\x18\xfe\x52\xba\x61\x34\x85\xe9\x97\xb6\xd9\xad\x1d\x2d\xa7\xfe\x51\x43\xdb\xb2\xdf\x54\x9b\xa6\xdd\xad\x12\xa3\xf4\x9d\x20\x28\x57\x8f\xb1\xca\xc0\xe8\xae\xca\x5a\xec\x16\x26\xd5\xc4\x5f\x0a\x48\x5b\xf6\x1a\xb7\x68\xc4\xe2\x39\x2d\x35\x0a\x97\xca\x15\x59\x79\xc7\x50\x27\xe1\x6b\x8b\xb5\x73\xda\x68\x65\x76\x91\x5f\x4a\x70\xe5\x08\x13\x95\x77\x0c\x61\x12\xbe\xb6\x08\x43\x7e\x33\x85\x25\x51\x12\x35\x21\x14\xe9\xa8\x88\x92\xa6\x3b\x86\xc3\x04\xe4\xb6\x88\x8c\x03\x06\x1a\xb1\x19\xe2\x8b\x8e\xf6\x00\xd5\x66\xd7\x50\x28\xe1\x6c\x8d\xbe\x10\x5f\x34\xa2\x2e\xf8\xe3\xb8\xfd\xc1\x43\xd5\xde\x31\xa4\x29\x08\xdb\x22\x4d\xd4\x5e\x57\xbc\xeb\x1b\xa3\x81\x2c\x96\x4e\xc5\xbd\x61\x0d\xc5\xe9\x56\x3b\x86\xbe\x08\xd6\xb6\x08\x54\xf5\x1b\xe9\x4e\x5d\x69\x6f\x80\xc2\xf0\x1b\x7d\x08\xb0\x1e\x85\x12\xd6\x0e\x28\x0c\x39\x6a\x46\x21\xe6\x8e\x5f\xfe\xfc\x59\x2d\x0e\x75\xb3\x5d\x43\x62\x04\x6d\x6b\x2c\xaa\x06\xcd\x68\x6c\x7f\xe7\xbb\x7b\x57\xbe\x5d\x6e\x7c\x5b\x5c\xf8\x76\xba\xef\xdd\xc1\xeb\xde\x4e\xb7\xbd\x6d\x2e\x7b\x3d\xe4\x76\xdc\x9f\xa2\xc5\x6e\x21\x4d\xc2\xd8\xba\x3f\xb7\x11\x65\xfe\x65\x6b\x12\x13\x75\x77\x0c\x59\x02\xba\xb6\xfd\xf9\x97\x8d\xc8\xa2\xa4\xf4\x69\xc3\x3a\x02\x13\x4d\x76\x0c\x69\x12\xca\xb6\x1d\x52\xb2\x6c\x42\x5b\xc9\xf3\xba\x3e\x6b\xf9\x50\x64\x9b\x3e\x76\x0b\xbd\x12\x92\x8f\x02\x10\x95\x95\xf7\x2d\xfb\x49\x61\xa2\xe5\x18\x15\xed\x37\x5b
\x84\x28\x38\xe8\x8e\xeb\x10\x77\xb3\xeb\x4b\xf1\x29\xc6\xc7\xc6\xab\x11\x75\xb1\xc1\x82\x70\x1a\xa2\xbe\x4a\xa9\xd3\xf7\xa7\xfd\x25\x45\x4c\xba\x8c\x6c\xbe\x36\x15\x3d\xee\xf6\x32\x9d\xd1\x10\xa9\x00\x82\xb7\xd3\x8f\x11\x8e\x36\x5c\xaf\x92\xbe\x1a\x17\x0e\x5f\x76\xf0\xf7\xd9\x42\x50\x32\xc2\x97\x36\xfd\xeb\x8e\x67\x37\xee\x78\x62\x67\x2a\x9a\xbd\xc7\x39\x32\x61\x60\x53\xfd\x58\xc9\x78\x38\xa9\x74\xd8\xee\xa9\x97\xf1\xe2\xad\x31\x0e\x26\x2f\xd2\x3f\x46\xa4\xc1\x1d\x8a\x36\x90\xef\xef\xed\x7d\xfc\xd0\xef\x61\x21\xa7\xd4\x37\xcf\x2d\xe4\x9c\x5b\xf3\x03\x51\xbb\x71\xc7\x5f\x97\x2a\x2a\xa5\x4a\x72\xd1\x7d\xef\x9b\x47\xd8\x75\x07\xe5\xe3\xba\x59\xd1\xb8\x5e\x2e\x86\x5d\xd0\xb5\x18\xee\x1e\xc2\x04\x84\xed\x51\xb6\x18\x36\x21\x4d\x45\x7e\xf4\xcf\xbb\x3a\xda\x25\xed\x76\x0b\x85\x51\x6a\xb8\xd6\x9d\x46\x0d\xda\x21\x72\x23\x2c\xee\x24\x0a\x3b\x22\xb0\x19\x7d\xd8\xdb\x84\x0a\x55\xab\x5d\x43\x20\xf6\x3a\x51\xa0\xa8\xde\x8c\xc0\x6b\xe4\xf5\x67\xd4\xf7\xa2\x17\xc3\xea\x5f\xea\xce\x3c\x97\xbd\xf5\x04\x36\xd5\x0e\x26\x91\xae\x91\xcd\x3e\x26\xd4\x13\x34\x1e\x4e\x9a\xfc\x48\xa6\x81\xd0\x8c\x2a\x5e\x21\xaf\x26\x13\xdd\x6a\xc7\xc8\x24\x82\xb5\x2d\x9d\xa8\xfa\x8d\x84\x12\x10\x52\xca\xa7\x4a\xc5\xa4\xaa\xbd\x6b\x88\x93\x10\xb6\x46\x1b\x21\xcd\xec\x49\xe6\xc3\xec\x63\xf9\xa2\x5f\x65\x24\x61\xba\x52\x45\xd3\x1d\xc3\xa4\x9c\xfb\x7b\x0d\x75\x5b\x84\xa6\x1a\x35\xe2\x55\x46\xd9\xb7\xa7\x46\x55\x7d\xc7\x90\xa8\x61\x6c\x8b\x3e\x59\xbd\x09\x71\xd2\x00\xd9\x5d\x60\xc6\xcd\x76\x0b\x85\x72\xda\x1d\x44\xa6\xae\xdf\x88\xc4\x52\x21\x59\x7e\x22\x9d\xed\x9a\xf1\x6a\xd6\xbe\xbb\x59\xa3\xe9\x69\x56\x7e\x89\x5f\x85\xaa\x5d\xbb\xbb\x9f\x75\xb8\xb7\x9f\x35\xdf\xd9\xcf\x1d\xd6\xc7\xe8\xba\xab\xeb\x48\xdc\x6c\xb7\x90\x37\x77\xd8\x7b\x09\x6c\xcb\x3e\x75\xfd\x36\x48\x5c\x52\x74\xe9\x93\xb0\xab\xf3\x43\xa6\xe9\xce\x21\xf3\x63\x0c\x74\x7b\x84\x46\x6d\x1a\x91\x4a\xd1\xb4\xcf\x49\x19\x3e\x75\x51\xa1\xea\x8e\xe1\x8f\xa2\xe9\x19\x69\x8f\x3a\x59\xbd\x11\x6b\xab\x65\xf9\xeb\x72\xa5\x4a\x8a\xaa\xbd\x63\x68\x53\x10\xb6\xc5
\x9a\xa8\xdd\xe4\x3e\xe7\x2f\xc2\xf6\xde\xc2\xb2\xf2\x6e\xa1\x4c\xc1\xd7\xb2\x43\x51\xb9\x89\xca\x7c\xdc\xd5\x89\x44\xb4\xd8\x31\xa4\xe1\xf6\x4e\x24\x3e\x6e\x74\x22\xf1\x31\x47\x94\x55\x25\x77\xa9\x43\x5c\xd4\x6e\xd7\xd0\x17\xc3\xdb\x1a\x89\xba\x45\x33\x2a\x2f\xc9\x45\x57\x4f\x4d\xdd\x68\xd7\x90\xa8\x20\x6d\x8d\x41\x51\xbd\x11\x7d\xac\x5f\x19\xaf\x5e\xae\x25\xc7\x2d\x76\x0c\x7b\xea\x7a\xb7\x3d\xfa\x54\xfd\x16\xf8\x73\x03\x7f\x79\x4e\x1c\xea\xf5\x59\xb8\x14\x18\x2c\xf7\x0e\x71\x03\x3f\xa9\xda\xd4\x7e\xd7\x70\xfb\x2a\x82\xe1\x53\x82\x82\xd6\x88\x2e\x36\x6e\x81\x75\x7d\x6b\xd9\x81\x6a\x77\xf1\x6e\xd6\x67\x6f\x3a\xdd\xce\xea\xfa\x2d\xf0\x27\x34\xc8\x7b\x7c\x8b\x57\x1b\xd2\xa3\x47\x42\x28\x09\x79\xe5\xdb\x20\xaa\x10\x98\x50\xbb\x96\x17\x5f\xdb\x88\x82\x37\xd3\xcf\x59\x47\xad\x4c\xb1\x55\x5d\xee\x5f\xa2\x28\xa3\x5e\x06\x92\x07\x4c\xb8\x8c\x4c\x73\x0d\x09\xfe\x7c\xfa\xee\x95\xca\x32\xad\xc0\x25\xe7\x02\x52\x44\xa3\x29\x5b\x2a\xca\x91\x7f\x3e\x7d\x97\x49\x94\x27\x41\xa3\x48\x23\xc1\xa8\x4e\xaa\x18\xad\xe2\xbf\x48\x79\xb6\x8c\x1a\xc1\x27\x9b\xec\xd6\x16\x50\x50\xb6\xec\x50\x54\x6e\x22\xfe\xc0\x61\x7c\xab\x94\x2f\x3a\xb4\x71\xfe\x21\xfd\xbf\xfc\x8d\xbe\x71\x7f\xa3\x6c\x52\x73\x1d\x37\x5e\x48\xa3\x1c\xe5\x16\xdf\x1f\xe8\xac\xdf\xfa\x05\xbf\x38\xeb\x39\x8d\xb2\x7a\xd3\x28\x71\x74\x7f\x68\xae\x1b\x5c\x8e\x70\x03\x89\xa2\xa9\x7e\xfb\xa5\x36\x17\x42\x52\xed\xaf\x84\x08\xff\x1e\xc4\xbb\x49\x42\x84\xb4\x23\x5d\xc9\xcb\xc6\xe3\xe1\x04\xfa\x49\x01\x11\x05\xa4\x34\x5b\x02\x83\x7e\x93\x48\x0a\xc8\xac\x5f\x7a\x18\x2b\xb5\x9e\xa8\xda\xbb\x25\x8e\x02\x32\x7b\xd3\xba\x43\x51\xb9\x51\x1c\x91\xd9\x70\xd0\x05\x65\xc3\xc1\xee\xa1\x4c\x40\xd8\x1e\x67\xc3\x41\x1b\xa4\xb5\x77\x31\x54\xb5\x77\x0f\x69\xed\xdd\x0c\x65\xed\x16\x48\x6b\x1f\x4c\x2d\x2b\xef\x1c\xca\xda\x87\x52\x8b\xca\x8d\x08\xeb\x70\xcf\x1a\xec\x9a\x4d\xae\x43\x77\xcd\x89\x9c\x83\x2e\xf7\xac\xc1\xce\xdd\xb3\x06\x1d\xee\x59\x83\xe6\x7b\xd6\x85\xb3\x81\x0b\x84\x6e\xb4\x5b\x88\x5b\x38\x5d\xdc\x1f\x64\xed\x16\xc8\xeb\x8e\xb9\xdd\x43\x5b\x17
\xa4\x35\xa3\xec\xba\x35\xdf\x17\x75\x77\x0d\x59\xd7\x1d\x90\x75\xdd\x88\xac\x0e\x29\x48\x17\x3b\x67\x4d\x59\x74\x30\xa6\x2c\x9a\x6d\x29\x8b\x0d\xbd\x77\x93\x76\xdf\x84\x0f\x6f\xd3\xb1\x66\x41\xda\xfb\xfa\x89\xba\x3b\x46\x13\xa4\xbd\xb5\x7e\x41\x1a\x8d\xf3\x8b\xb0\x5c\x6f\x2a\xc7\x56\xb8\x73\x8a\xd3\xa2\x4b\x87\x8b\x16\xaf\x60\x6c\xe0\x72\xb5\x83\xee\x56\xb8\x8b\xaf\x15\x6e\xe1\x68\x85\x09\xef\x77\x89\xcd\x53\xf5\x77\xef\x0e\x08\x13\xfe\xe6\xf7\xf6\x98\x13\xb5\x5b\xa0\xae\x13\xde\x76\x0f\x63\x5d\xf0\xd5\x84\x2d\xf5\xba\x4e\xdf\xe9\xba\x49\x93\x76\xbb\x85\x3e\x35\xef\x97\xed\x3b\x8d\x1a\x34\x22\x72\xc9\xd5\xd3\xf7\x1d\xf1\x18\x35\xdb\x31\x34\xc6\xd0\xb6\x45\xa3\x6e\xd0\x88\xc6\xd2\x48\x8e\xf2\xcd\xbb\x73\x41\x30\x1d\x22\x60\x74\xf8\x4b\x8d\x2f\xdf\x12\x51\x17\x61\xee\xcc\x50\x5f\x3f\xa9\xd6\x4d\x4d\x2d\xb6\xff\x26\xd4\x55\x19\x72\x06\xd1\xf8\xa8\x51\x71\x5d\x22\x3a\x25\x74\xd1\x36\xdd\x6e\x54\x7d\xb7\x48\x26\x06\xb2\x65\x9f\xba\x7e\xd3\x36\x5b\xfa\x4b\xd4\x57\x0f\x47\x77\x64\x58\xe9\x96\xdf\x22\x2a\x1b\xc1\xde\x00\xde\x6f\x12\xd0\x1a\x9a\x91\x50\xb6\x25\x18\x7f\xd9\x68\xd3\x5b\x06\x21\x75\x02\xff\x0f\xd4\xbf\x22\xb4\x3c\x3d\x50\x54\xa3\xd8\xe6\xbe\xb9\x4a\x72\x89\xd7\x0e\x86\xb2\xe9\xfb\x78\x2a\xd8\x23\xa1\x87\x81\x7f\x7e\xbf\x20\x24\x4b\x58\x3b\xdb\xfc\x13\xa2\x35\xc7\x4c\x51\x77\xc7\x08\x54\x40\xd7\x96\x3e\xc9\x55\x23\x79\x6e\x16\x91\xb2\xa3\xd1\x28\xcb\xae\xa1\x28\xcb\x96\x71\x28\xbf\x87\x28\xec\xca\x1d\x55\x9b\xdd\x42\xa0\x86\xb3\x65\x8f\xb2\x76\x13\xea\xa8\x83\x3d\x52\xaa\x89\x94\xee\x58\x5d\x7d\xb7\xd0\x16\xc1\xd8\xb2\x4b\x55\xbd\x05\xe2\x66\x5d\x69\x4e\xb5\xd9\x39\xe4\xcd\xda\xd3\x9c\xac\xdd\x88\x3a\xf9\x10\x7c\x57\xdc\xa9\x46\x3b\x86\x3c\x0d\x69\x5b\xec\xc9\xea\xcd\xe8\x93\x06\x8c\xce\x57\x95\x49\xbb\x5d\x43\xa2\x28\xe8\x70\x61\x19\x35\x68\x46\xe4\x12\x75\x36\x1f\xe9\x46\xbb\x86\x42\x05\x69\x6b\x04\x8a\xea\xcd\xe8\xbb\x44\x94\x75\xdf\xc7\xaa\xd5\xae\x21\x50\xc3\xda\x1a\x83\xb2\x7e\x23\x0a\xfd\xd9\xbc\x8d\x4b\x69\xaa\xde\x5f\x3e\xa5\x7f\xf9\x94\x7e\x0b\x3e\xa5
\xdd\x52\xad\xec\x62\xa6\x95\x6e\x89\x56\x5a\xe5\x59\x61\xf3\x70\x3a\x0d\xba\xb2\xcc\xa8\xd5\x6e\xa1\x2f\x86\xb5\x65\x9f\xba\x7e\x23\x0a\xfd\x59\x7b\x4f\x10\x59\x79\xc7\xd0\x26\xe1\x6b\x8b\x33\x7f\xd6\xe8\x0c\xd2\xe5\xf1\xde\xdd\x7b\xbb\xb7\xcb\xd3\xbd\x2d\x5e\xee\x65\x3e\x9e\x85\x81\x43\x3b\x59\xdb\xd2\x8d\x1e\xdc\xde\xc6\x02\xbf\xf3\x59\x4a\xb5\xd9\xb1\x85\x56\x70\xb6\x5d\x6a\x51\xbb\x71\xb1\x83\x70\xe6\x4f\x57\x5b\x8d\x3a\xd3\x7d\xfe\x15\x78\xb6\x2b\x7a\x56\x6d\xe0\x59\xa4\x1c\xe5\xa2\xcf\x4c\x71\x42\x0a\x1c\x17\x19\x87\xbd\xc3\x19\x04\x7d\x60\x5a\x9c\xbc\x23\x57\x88\xbe\x72\x18\x32\xcc\xbb\xc5\x99\x31\x42\x37\x38\xde\x47\xad\x76\x6c\x5b\x13\xda\xe5\x68\xaf\xaa\x37\x6e\xec\x65\xe0\x6f\x37\x98\x54\xf6\xf8\xd7\xa6\xde\xf5\x4d\x7d\x64\x42\x6a\xe3\xc2\x4b\xc4\xe3\xc9\x88\x42\x66\xe3\x62\x3e\x7b\x98\x4a\x68\x1f\x28\x2a\x30\xc8\x5d\xb7\xf7\xef\x1d\x5e\x8a\x95\x95\x77\x6c\x4b\xff\xde\xe1\xa5\x58\x51\xb9\x71\x3b\x87\xe7\xed\xf1\x15\xde\xfd\x1d\xf2\x07\x46\x97\x80\xae\x2d\xb6\xc2\xf3\x26\x64\x71\xa7\x73\xde\x1d\xd9\x64\xb7\x90\xa6\xa0\x6c\xd9\xa1\xa8\xdc\x8c\xb6\xf6\xa7\x24\x51\x77\xd7\xd0\xd5\xfe\x94\xc4\x9d\xc6\x53\x12\x77\x70\xfb\x17\xfd\x64\xe5\x9d\x43\x57\xfb\x17\xfd\x44\xe5\x26\x8f\x37\xee\xb0\x8a\xb7\x86\x8b\xde\x5e\xb2\xee\xae\xe1\x8b\xb5\x7f\x62\x58\x54\x6e\x24\x30\x32\x9b\x05\x1b\xba\x79\x65\xdb\x7e\x8b\x88\x6c\x01\xfa\x46\x30\x7f\x93\xc0\xd6\x50\x8d\x86\xb4\x2d\xdd\xc8\xea\x8d\xe8\xa3\x61\x79\xe2\xc3\x72\xde\x24\x6b\xef\x18\xda\x14\x84\x6d\xb1\x26\x6a\x37\x21\x2d\xc4\xdd\xb7\x99\x6a\xb3\x5b\xa8\xd3\x70\xb6\xec\x51\xd6\x6e\x42\xdd\x95\xcf\xe7\x95\x9e\xcb\x95\xc8\x8b\x5a\xed\x16\xfa\x62\x58\x5b\xf6\xa9\xeb\x37\xa1\xf0\xba\x8b\xe7\xfc\xf5\xce\xb9\xce\x5f\x77\xf0\x9d\xbf\xae\x7e\x3b\xc2\xc7\x3e\xf7\xa5\x4f\x27\x65\x87\xce\x72\xd9\xbf\x44\x94\x55\x6d\xdb\xc0\xef\xa7\xaa\xa4\xdb\xf6\xa7\x8e\xcb\x09\x5d\x75\x7b\xef\x0f\xe2\x0d\x2f\x9a\xa3\xc4\x4d\xf1\x79\x7b\x0f\x47\x48\xb3\x5e\x7e\xfc\x78\x70\x60\x50\x3b\xf3\xc5\xc2\xce\x02\xc1\x20\xf7\x51\x03\x92\xb6\x8e\xdf\x88\x8a
\x23\xf0\x72\xb9\xec\xfd\x16\x61\x22\x81\x73\x94\xbd\xcf\xa4\x30\x30\xd7\xcd\x68\x95\x3b\x16\x23\xcc\xfb\x4b\x4a\x96\x7d\xbe\x5a\xa2\x52\xd7\xcc\xa4\xb4\x45\xfb\xdd\xa2\xd6\x04\x9e\x0e\x29\x4d\xa3\x26\xad\x68\x57\xbf\xb1\x8e\x68\xdf\x43\xe7\xe1\xac\xef\x78\xce\xb2\xe2\xad\x2b\x8a\x18\x09\x2e\x11\x3d\x8c\xfe\x60\x87\x6e\xe0\x30\xe6\xbb\x95\xbd\x6c\xf1\xfe\x45\x53\x58\xe5\x7c\x53\xd4\x96\x42\x8b\xa0\x7a\x64\x3b\x74\x16\x8a\x7d\xc4\xc6\xc3\xc9\xed\x6d\xf2\x6b\x30\xd9\x43\x16\x45\x33\x9f\x71\x44\x8d\xaa\xbe\x47\x0b\xc7\xc7\x20\x45\xc0\x10\xe9\xf4\x8c\x4d\x4d\x80\x98\x33\x5b\x3a\x2e\x02\x10\x38\xcb\x65\xe0\xbb\x8e\x98\x98\x2a\x36\xd7\x2d\x36\x41\xe1\x64\xd2\x74\x70\x69\x68\xfd\x17\xfd\x97\x61\xc8\x73\xb8\x53\x86\x58\xf1\xfd\x90\x21\x1e\x2e\xfb\xf1\x4a\x67\x0a\xef\x81\xc2\xd3\x33\x4a\x11\x75\x06\x61\x4d\x40\x49\x30\xfa\x29\x8a\xeb\xcf\x02\x72\x9e\x0f\x2a\xec\xf4\xc4\x6c\xca\x88\xdb\x62\x5b\xf9\x53\x63\x7f\xb8\x6f\x27\xb7\xa6\x96\x1a\xf6\x65\x32\xa5\x9f\xe4\x8c\xb4\x3d\x58\x34\x00\x21\x56\x70\x79\x60\x3f\x4a\x1c\x78\xe5\x63\x8f\x5c\x99\xd8\x56\x7f\xec\xa1\x80\xa1\x5e\x45\x5d\x05\xa2\x89\x6d\xf5\x87\xac\x7b\x93\xad\x1b\x27\x24\x64\x28\x98\x6a\xdb\xfe\x1e\xb6\xc5\xaf\xb5\x14\x90\x30\x68\x9e\xf2\x1e\xb5\x0b\x09\x0e\x83\x17\x81\xce\x64\xaa\xde\xfd\xb5\x14\x67\x9c\xae\x92\xb0\x12\x6b\x21\x57\xff\x23\x45\x53\xff\xda\x84\x78\x4c\x27\xb7\xb7\x86\xf8\xc7\x46\x50\xb0\x21\xb2\x44\xd8\xb8\xb9\xf2\x83\xe0\x35\x62\x9c\x92\x55\x9a\xbe\x65\xe2\xd3\x2f\x2c\x5c\x22\x1a\xa5\x70\x9d\xfb\x0c\xc6\x48\x17\x34\x11\x20\x8e\x7a\xa2\xc3\xf5\xda\x34\xd7\xeb\x0d\x48\x31\xa1\x22\x1b\x97\x50\x66\x35\x59\xa5\x08\x15\xb7\x20\xd0\x69\xe0\xb0\x79\x7f\x81\x18\x73\x66\x79\xa1\x7e\xdf\x0f\x1f\xa7\x60\x24\xda\xfd\x4e\xad\x9c\x87\x96\x14\xb9\x0e\x47\x90\xea\x2f\x62\x09\x67\xf8\xf6\x56\xfd\x5a\x20\x3a\x13\xca\x10\x18\xa7\x9e\x05\x13\x80\x4c\x7a\x3f\x86\xf2\x12\x44\x2b\x46\xac\x47\xa6\xbd\x5c\x9d\x9e\x58\xd7\x1e\x26\xbd\x80\xe0\x19\xa2\x3d\x25\x40\x7a\x7c\x8e\x7a\x3a\xc7\x6f\xcf\x09\x39\x59\x38\xdc\x77\x9d\x20\x58
\x59\xbd\xb7\x98\x71\xe4\x78\xb0\xb7\x22\x61\x8f\xcd\x49\x18\x78\x3d\x74\x2d\x50\xef\xf3\x60\x15\x75\xe0\xf3\x9e\x8f\x39\x11\x95\x68\xef\x94\x84\x1c\xc1\xde\x2b\x82\x39\x25\x41\x80\x68\x8f\xd0\xde\xab\x48\x0d\xea\x89\x13\x40\xef\xff\x94\xe5\x17\xfe\x3f\x16\x80\xcc\xbe\xe1\xfe\x02\x91\x90\x8f\x8e\xd1\x31\x54\x59\x89\x91\x77\xa6\xbf\x0d\xe0\x92\xfa\x84\xfa\x7c\x35\x1a\x0e\x06\x90\x71\xdf\xbd\x58\x8d\xf6\x87\x90\xcd\xc9\xd5\x47\x4a\x66\x14\x31\x26\x7e\x8b\x1d\x31\x02\x3e\x9e\x12\x20\xff\x66\xa3\x31\x60\xa1\xeb\x22\x26\x56\x57\x7d\x07\x57\x0e\xc5\x62\x03\x41\xe0\x39\x02\x1f\x42\x3e\x06\x88\x72\x00\x01\x43\x2e\xc1\x9e\x43\x85\xac\x52\xb3\xf4\x09\xfe\x51\xea\xc9\xbe\xec\x4c\x66\x08\x56\x84\xa2\xc1\x04\x10\x5c\xfa\xe8\x4a\x7e\xd3\xc0\x82\x09\x5c\x52\x74\x89\x30\x7f\x1d\x2a\x72\x45\x62\x76\xeb\xe4\x32\x8a\xb4\xe0\x63\x90\xd8\xc9\x06\xbe\xbd\xbd\x59\x9b\x96\x5c\xcd\x5f\x15\xe5\xbe\x56\x05\x0c\xfa\xb6\x41\x6e\x6f\xc7\x13\xd3\x2a\xce\x18\x3a\x36\x35\x18\x24\x26\x0c\xed\x7d\xc3\x3f\x38\xf0\xa3\x24\xca\x79\xe5\x63\xea\xcf\x46\xf9\x9d\xe1\xc0\x1b\x75\x9b\xc6\x7d\x87\x23\x01\x41\x5a\xfd\xd0\xcb\x57\x68\x05\xca\x66\xa9\x50\x56\x32\x88\x09\xb1\x11\xc0\x10\xde\xf8\xde\x08\xe4\xe8\x36\xd9\x15\xfd\x18\x34\x7d\x6a\xf1\xc5\x48\x21\xe6\x7e\x30\x02\x47\xd6\xc0\x1a\x80\xb5\x09\x9d\x12\x0c\x58\x53\x42\xdf\x38\xee\x3c\xf1\x93\xe4\xe6\x4d\x0c\x04\xcf\xce\x96\x49\x12\x28\x05\xcb\x5c\xa7\xef\xfa\x34\x53\xca\x83\x9e\xe2\x44\xa4\x8a\x13\xa9\xdb\xc9\x7e\x57\x45\x20\xa9\xdf\x67\x9c\x50\xd4\xd7\xf3\xfc\x86\x94\x80\x6a\xc8\xda\xbc\x0e\xb4\xad\x57\x81\x2a\xc0\x89\xa7\x50\xa6\xab\xab\x34\xe4\x65\xb2\xfa\x47\x87\xf1\x1f\x08\x89\xef\xdd\xe3\x26\x38\xf2\xb6\x05\x2f\x81\x6d\xdb\xd8\xe2\x0e\x9d\x21\x6e\x71\x67\xf6\xde\x59\xa0\x17\xd1\x87\xfc\x20\xc8\x72\x03\xc2\x10\xe3\xb1\x1b\x40\xf4\xc1\x00\x0e\x30\xf7\xa6\x84\x1a\xc8\x46\xd6\xd2\xa1\x08\xf3\x37\x01\x12\xec\xe0\x3b\x74\x70\x00\x5e\x82\x7d\xdb\x46\xd1\x00\xdf\x99\x85\x5a\xd1\xed\x34\x5a\x1b\xd1\xe8\xe6\x1e\x3d\x38\xc8\xc5\xa7\x43\x0c\xa9\x69\x2d\x9c\xd5\x39
\xfa\xd9\xc1\x5e\x80\x0c\x73\xbd\xe7\x11\x57\x32\x1e\xeb\x9c\x78\x2b\xcb\xf1\xbc\x37\x82\x7d\xbd\x13\xfc\x01\x4b\x0e\x11\xf8\xee\x05\x80\xf2\xdc\xdd\xa0\x2d\xe8\x59\x64\x7b\xa4\x68\x41\x2e\x51\x65\xa7\x8d\x1a\x86\xd0\xa7\x2b\x8e\x2c\x0b\xff\xda\xc7\xec\xd0\x71\x03\x69\x4c\xd3\xe6\xfe\x4a\xc1\xae\xab\xcb\xaa\x53\x84\xbc\x73\xc7\xdd\xc2\x25\x4b\xde\x0d\xe0\x57\x31\x8a\xe5\x52\xe4\x70\x94\x70\x71\x78\xc3\x10\xe7\x3e\x9e\xb1\x8a\x4c\xfb\x51\x31\x30\xa1\x06\x63\x74\xa3\x3a\x29\xe6\xdc\x97\xa9\xf6\xf7\x54\x3f\x33\xc4\x15\xc6\x40\x0c\x92\x69\xa1\x6b\xe4\x86\x1c\x19\xc5\xb5\x49\x35\x92\x11\x21\x04\x98\xd6\x52\xe8\x0f\x8c\x1b\xc8\xb4\xf8\x1c\xe1\x92\x56\xdc\xe2\xd4\xc1\xcc\x17\x5f\xcf\x88\x01\x3c\xd7\x72\xdc\x40\xb1\x46\x08\xfe\x29\x94\x80\x97\xaf\xde\xf5\x38\xb9\x40\xb8\x37\x77\x58\xef\x1c\x21\xdc\x73\x3c\x0f\x79\x16\x80\xe0\x6c\x8e\x28\xea\x5d\x39\xac\xe7\xe0\x1e\xa2\x94\x50\x51\xe6\xe3\x99\xd2\x1f\xe2\xa6\x16\x30\xd7\x30\x5c\x7a\x3b\x08\xb4\x00\x8e\x39\x97\x95\xf0\x32\xe7\xb2\x02\x5e\xa5\x40\xdf\x3b\xbc\x6a\x1b\x96\x81\xab\xb3\xa8\x4b\xef\x75\x8e\x04\x87\xd1\xe9\xd4\x35\xc0\x96\x8f\x3d\x74\x0d\x46\xa9\xb8\x96\x29\x45\x6c\x6e\xc8\xcb\x4e\x41\xdc\xa3\x46\x8c\x55\xa1\x4c\x01\x5f\x85\x34\x59\x5a\x81\x36\x57\x48\x9c\xa0\x70\x56\xa9\x9a\x00\x0c\xd9\x3d\xe2\x38\xd9\xbc\x31\x5d\xdd\xc8\xa9\x8e\x92\x6a\x08\x82\xb7\xaf\x05\xf1\x14\x16\xa0\x04\x6d\x9a\x2d\x44\xc4\xf6\x9e\x5c\xf5\x42\x26\x30\x21\x38\x7a\x8c\x88\x72\xac\xa9\x8a\x7c\xee\xf0\x54\x4d\x81\xb1\x80\xe0\xfb\xa7\x33\x39\xca\xb7\x46\x66\x72\x52\x55\x54\x26\x0a\xcb\x89\xac\xca\x94\xa3\x05\x89\x14\x62\x7d\x12\x72\xe6\x7b\x68\x7b\x9e\x86\x79\xff\x42\x64\xde\x18\x05\xd7\xc2\xd8\xa3\x6d\x9f\x6b\x81\x7f\x7b\xbb\x1f\x0b\x5e\x6d\x35\x62\x46\x54\x68\x42\x6a\x23\xdb\xb6\x93\xca\xa8\xa4\x92\x56\x23\xf6\xf1\xc1\xc1\x3e\xd5\xee\x79\x42\xf5\x37\xf2\xd4\x81\x94\xda\x01\x4c\x88\xcc\x83\x03\xb9\xf1\x08\x3e\x0f\x42\x6a\x20\x73\x5d\x88\x5a\xca\x48\xc4\x1b\xa1\x85\x75\x33\x31\xc8\x1a\x73\xa9\xb0\x50\x9b\x5b\xe7\x3e\xf6\x64\x1d\xf9\xa4\x8b\xab\xde
\x73\x49\x75\x28\xbe\x8a\xb9\x64\xbf\x79\xbe\xf7\x16\x33\x44\x23\x95\xa9\x68\xae\x95\xfb\xa0\x79\x36\x0a\x26\x1a\x62\xe5\x45\x29\x8b\x53\x7d\xc5\x6b\x50\xa9\x49\xa1\x08\x16\xb9\xb7\x53\xaa\x54\xc9\xcc\x5a\xd8\x5f\xa2\xf1\xea\xf4\xac\x34\x06\xcd\x06\xaa\xf6\x31\x47\x58\x0c\xbf\xb9\x4e\x55\x8c\x5d\x9c\x73\xbe\x3c\x64\xdc\xe1\x85\x7c\x15\x9b\xdc\x2a\xb5\xd6\xb9\x76\x42\x93\x8a\x11\x9e\x51\x2d\xe2\xaf\x45\x7d\xaa\x70\xae\x90\x7c\x8c\x1d\x1c\x44\x7f\x8d\x07\x93\x08\xbe\xd4\xa7\x3d\x7f\x6a\x2c\x1d\xca\xd0\x5b\xcc\x0d\x6e\xa9\xd5\x30\xe5\x09\xe6\xed\xfb\xb3\x37\xa7\xef\x5f\xbe\xfb\xf2\xe9\xcd\xe9\x6f\x6f\x4e\xbf\xbc\x39\x3d\xfd\x70\x7a\x70\x30\xb0\x95\x29\x95\x3b\x7e\xa0\xd8\xf3\x87\xa9\x01\xbc\xc8\xb2\x91\x9a\xe3\x94\x84\xd8\x1b\x81\xc8\x71\x19\xbc\xc4\xa9\x42\x27\xa0\xc8\xf1\x56\x3d\x74\xed\x33\xce\x7a\x53\x42\x7b\x02\xcf\xbd\x4f\x24\xa4\x2e\xea\x0b\xea\xf7\xb1\x34\xec\xf5\x96\x8e\x4f\xad\xde\xc7\x00\x39\x0c\xf5\x10\xe6\x88\xf6\x9c\x9e\xe7\x4f\xa7\x48\x9c\x76\x7a\x2e\x59\x9c\x47\x55\xc9\xb4\xf7\x49\x4b\x49\xd8\x13\x1a\x16\x72\xa8\x3b\x97\xa6\xad\x04\xa3\x3d\x4e\x7a\xc8\xf3\xb9\xe4\xf6\x62\x78\xc1\xe8\xe3\x62\x0b\x68\x26\xd7\xa4\xa1\xa6\x5b\x7c\xe3\x2a\x6a\x13\x35\x75\x51\x54\x53\x60\x7f\xeb\x8a\x6a\x02\xf6\x9d\xf4\x88\x0c\xf6\xaa\xd0\xd7\x45\x69\xcd\xa2\xb0\xad\xd2\x9a\x9d\x46\x3d\xbf\xbe\xb8\xfc\x7a\x87\xdf\x62\x1c\x79\xce\xb4\xa9\x6d\x8e\xdf\x0f\x0e\x0e\xd4\xd5\xff\xbe\x6d\xa7\x2d\x9d\x2f\xd2\x3f\x46\xe0\x10\x40\x9e\xb1\x8b\x6a\x8d\x04\x1c\x02\xdb\xb6\xd1\x0b\x2e\xb1\x73\x71\xa9\xd7\xd8\x1c\xc5\x1f\xa6\x24\xf0\x10\x05\xb0\x51\xff\x68\x25\x1d\xa2\x68\x0b\xba\x15\xda\xa6\x1d\x76\x34\x36\x32\x7b\xe2\x17\xb4\x02\x26\xa4\x19\x0a\x51\x0a\x10\x35\x53\xdb\xfb\x02\xad\xba\x1f\xbb\x2f\xd0\xaa\xfc\xc0\xbd\xd3\xd0\xb7\xe5\x6d\x11\xf4\x05\xae\x76\x5f\xd0\x37\xf2\x35\x5a\xc6\xd7\x12\xd2\x1e\xa5\x3e\x65\x59\x1c\xad\x66\x71\x9d\x10\x9a\xc5\x68\x57\x4e\x17\x21\x34\xcf\xe3\x24\x42\xeb\xa6\x53\x60\x81\x29\x0d\xdf\x5c\x43\x5f\x45\x00\x39\x1c\x7d\x42\x8c\xf9\x04\x57\x48\x20\x2b\xb9\x98\x81
\xd1\xdd\x5a\xb2\x6c\x4c\xb5\x3d\x95\x6b\xb1\xd9\xa2\xe2\x9a\x25\x54\x6c\x2f\x03\x9c\xcf\xd1\x22\x1e\x8a\x29\x03\x80\x86\x00\x40\x1c\x06\x41\x7c\x77\x8a\x2c\xfd\x1d\x26\x95\x79\x3c\x65\x5d\x59\x2e\xce\x99\xbc\xb7\x93\x9f\xe5\x52\x24\xb8\xa9\x5a\xa3\xb8\x86\xb2\x09\xc4\xcd\x5b\x1c\x6f\x73\x3a\xfd\xbd\xbd\x47\x9a\x3d\x1e\x7e\x89\x46\xac\xb0\x93\x26\x4b\x05\x3b\x9f\x24\xf7\x0a\xcb\x24\x57\xff\x4b\xaa\xcf\x8c\x36\xc3\x8a\x04\x02\x6f\x34\x81\xa4\x07\x46\x11\xd5\xe8\x61\x11\x1c\x4f\x04\x39\xba\x0e\xd7\x31\x80\x4b\x4a\x38\xe1\xab\x25\x52\x41\xc9\x96\xeb\x04\x81\x91\x3a\xc5\x8d\x79\x8a\x7c\x27\x6a\x2f\xb6\x59\x1d\x3f\xe0\x48\xba\x04\x6c\xdb\xfa\xa0\x9b\x45\x57\xd1\xe9\x1e\x93\x17\x5a\x87\xdf\xf1\x7f\xe4\x45\xbd\x7c\x97\x55\xdb\x26\x12\x49\xce\x27\x7b\x51\x33\x19\xe9\x88\x4d\x3d\x40\x82\x98\xb9\xc3\x3e\x5c\xe1\x68\x86\x0a\x43\x18\x52\xf3\xe0\xc0\x40\x63\x3a\xb1\xf1\x98\x4e\xcc\x75\x7c\xd1\x91\x57\x3d\x50\x72\xe2\x91\xde\x83\xd8\x06\x20\x7a\x20\x12\x59\x62\x04\xcd\x54\xa9\xe3\xf9\x44\x73\x53\x75\x66\xd0\x3f\x38\xba\xe6\x60\x84\x6d\xa4\xa2\x20\xd7\x25\x31\x98\xe2\xb0\x1a\x29\x8f\x02\x0c\xf4\xa2\x0a\xbd\x3c\xc2\x29\x86\x59\x07\x22\x75\x17\x1a\xc6\xbf\xaf\xa8\xcf\xf5\xdf\x6b\x73\x84\xc6\x7c\x62\x63\x88\xd6\xc6\xcd\x5a\x0e\xd7\x64\x4d\x51\x14\xc0\x46\x37\x6b\xa8\xfe\x44\x9e\xde\x38\xfa\xb1\x5a\xcf\x90\x9c\x48\xde\xce\xaa\xba\x99\x97\x6d\x13\xd3\x07\xe4\x85\x8d\x11\x35\x88\x0c\x43\xbd\x7c\x05\xd5\xb3\x69\xa9\x8a\x46\xfa\x8a\x2c\xbe\xe3\xd2\x65\x18\x72\x95\x4d\x86\x21\xae\xf1\xe4\x23\x76\x47\x2b\x4c\x32\x93\xdf\x43\x44\x57\x1f\x1d\xea\xa8\xf9\x94\xdc\xfc\xa6\x7d\x2e\xb4\x3d\x36\x81\x4f\x2f\xe3\x05\x5a\x31\x83\x97\xb7\x4f\x58\x02\x86\x1c\xa6\xfb\xe1\x42\x19\x81\x19\x46\x1f\xa3\x1a\x4b\xc6\x1d\x6b\x9a\xea\x7b\x41\x82\x29\x5d\x03\x06\x36\x16\xb4\xaa\x8d\x71\xe9\x39\x05\x25\x73\x42\xe9\x39\x51\x88\x20\x00\xfb\x76\x30\x46\x93\x17\xe2\x3f\x23\x2d\x35\x60\x81\x95\x45\x53\xe3\x92\xc6\xe2\x47\x9b\x63\x64\xc0\xc0\x6c\x27\x1b\xe6\xc8\x09\xaa\x98\x50\xe5\xd1\x23\xa9\x5c\x30\x12\x85\xee\xd4\xa7
\x8c\x03\x08\xd4\x8e\xc9\x67\x95\x97\xd7\x97\xdd\x19\x5a\xdb\xe3\x40\x8a\x84\x46\x37\xca\x3a\x32\xba\x71\xd8\x08\x68\xbb\xd5\x1a\xea\xdf\x0a\x06\xb0\x5e\x43\x85\x81\x1f\xf5\x1e\xac\xd8\x77\x75\x6a\x42\x7a\x17\x41\x6e\x8f\x01\x80\x60\x29\x98\xae\x44\x50\xe2\xae\xe2\x52\x5f\x7a\xe8\x80\x89\xb5\x70\x96\x05\xba\x0e\xec\x82\x81\x18\x88\x13\x53\x7c\xd5\x9c\x21\x7b\xc5\xa8\x41\xe4\x9c\x0d\x5e\xcd\x91\x7b\xc1\xc0\x23\x63\x00\x71\xe2\x2e\x1d\x9b\x83\x7b\x48\xe7\x24\x4c\x5b\xa1\x71\x34\x70\xd2\xb3\x58\x9d\xb8\xc9\x23\x23\x3e\xef\x25\x4e\x6a\xaa\x56\x34\x5e\x91\x6d\xa0\x14\xdb\xf8\x24\x91\x2e\x60\x58\x9b\x91\xe7\xca\xed\xed\x60\x0d\x07\xa6\x8e\x17\x57\x55\x6f\x5c\x12\x62\x3e\x0a\x60\xe0\x9c\xa3\x60\xa4\xe1\x7e\x01\x5e\x06\x01\x18\xe5\x21\x32\x1f\x81\x9e\x21\x01\xa5\xc9\x67\xb5\xb7\x16\x0e\x37\xcc\x47\xc0\x04\x50\x91\x0e\x5f\xaf\x63\x68\xf8\x78\x30\xb1\x64\xff\xb6\xe8\xb7\xd8\x85\xce\x35\x62\x0c\xcd\x12\x54\x25\xea\x2f\x7a\x24\xc4\x7c\x88\xb9\x80\x22\x37\x2a\xaf\x7c\x53\x7c\x41\x3c\x14\xc8\x5b\xee\x4a\x17\x11\x59\x25\xfb\xc9\xe1\x3c\xef\x08\x1d\x2d\x1a\x4d\xbc\x91\x82\x87\x95\x6a\x1b\xe8\x8a\x9f\xde\x7d\xfe\xe9\xcb\x2f\x6f\xfe\x69\x23\xeb\xe3\xe9\xdb\x5f\x5f\x9e\xfe\x53\xfe\xd2\x81\x04\x02\x1c\x96\x2b\x02\xa1\xef\x01\x48\xec\x54\x5b\xf0\xf6\x35\xd8\x2b\x49\xaa\x13\x3d\x82\x6f\x04\x06\xb5\x6f\xd6\x90\xc1\x2c\xc5\x44\x0e\x97\xa6\x09\x03\x83\x42\x52\x5f\x0c\xc4\xd1\x0d\x54\xd4\x81\x37\xfa\xd3\x6f\x12\x3a\x00\xd6\x51\xab\xb3\xd5\xb2\xb2\x55\x54\xe7\x34\x0c\x10\x6b\xaa\xf4\x4a\xb2\xb4\xb7\xf2\x6c\x98\xaf\x8a\x43\x41\x1a\x71\xd5\x5f\x89\xe7\x4f\x57\xad\xaa\xbe\x76\xb8\xe3\x4a\x53\x6c\xcd\xf8\xf2\x3c\x5e\x4d\xbb\x2e\x21\xd4\xf3\xb1\x53\xfe\x14\xe5\x5f\x24\xdc\x86\x84\xdf\x13\x0f\x3d\x04\x11\xbf\x22\x32\x49\x7e\xb6\x52\x54\xf8\x09\xcd\xe4\xc5\xdf\xc6\x94\xe0\x95\xc6\x65\xd6\x52\x40\xe6\x0b\x45\x81\x34\xff\xb3\xb9\xbf\x2c\x5e\x25\x09\xfd\x40\x8a\xa5\x84\x3c\xd8\x2e\x91\xc7\x8f\x1f\x4e\xdf\xbc\xfd\xe9\x7d\x3d\xb1\x90\x52\x62\xf1\x6d\x23\xdb\x3e\xb3\x71\x33\x84\x24\xb8\x94\x59\x4b\x49\xcc
\x08\x04\x25\xd5\x90\x0a\x33\x02\xe8\xd7\x17\x83\xe8\x7a\x46\x52\x0b\x15\x07\xbc\x5f\x1d\xbc\x32\x63\xff\xd1\xb8\xa2\x20\xed\x62\x2d\x2c\x08\xde\x54\xea\x68\x0d\x45\xc5\xb6\xf2\x0d\x08\x2b\xaf\x81\xca\x9a\x87\xd1\xd2\xfe\x39\xc8\xeb\xe5\xd9\xd9\xe9\x27\xbb\x0d\x17\xaa\x22\xac\x9c\x20\x85\xce\xbd\x51\xcc\x6b\xc4\x5c\xea\x2f\xd5\x6a\x36\x10\x97\xbc\x3c\x7c\xff\xa9\x65\xbd\x1a\xc9\x9c\x1a\x3c\xba\x87\x6c\x53\xfd\x23\x45\x2e\xf2\x10\x76\x8b\x35\x13\x09\x9a\x9a\x42\x9d\x98\xcf\x2b\x07\x8a\x2e\xa5\x8a\x20\x7b\x78\xe9\xd6\xa1\x24\xdf\xda\x43\x78\x95\xb4\xd5\x7e\xd8\x2f\x3d\xaf\x4e\x80\xa7\xab\x7e\x24\xb4\xc8\xe1\x73\x30\xfd\x8a\xb8\x53\x22\x24\x54\x37\x6d\x34\x06\x59\x53\x69\x2c\xde\xcb\xe2\x70\x9e\xd0\x15\xa2\x6a\x9f\xe5\x0d\x45\x63\xb5\x76\xfa\x8f\x9a\x7e\x3b\xfd\xc7\x14\x6c\x52\x6d\xa0\xac\xc2\xef\xc0\x71\xbc\x28\x19\x12\x2b\x52\x51\x66\xf9\x33\x14\x3e\x49\x33\x61\xa7\x8e\xc7\x5d\x5c\x6e\x81\xb9\xf9\xec\x47\x75\x93\xf0\xa7\x60\x6b\xdb\x61\x67\xbf\xa0\x55\xbd\x4e\xb5\x1d\xbe\xf6\x8e\xb8\x17\xad\xe8\xf2\xc7\xc0\x99\x15\xf5\xfc\x5c\x25\xb9\xcd\xdb\x6d\xad\xf2\x41\x73\x55\xeb\x36\x43\x41\xa4\xeb\x3b\x89\x06\x46\xd2\x9a\x03\xc4\x44\x09\xf3\xd6\x13\xb1\x34\xb0\x70\x0b\x93\xdd\x86\x79\x4b\x8a\xbc\x50\xba\xbd\x05\xc0\x5c\x37\x2b\x0e\x52\xbf\xb8\xfb\xb6\x62\xe1\xe2\xc3\xf4\x33\x56\xa6\xa0\x55\x99\xef\x93\xc3\x3e\x95\x7b\x3e\x41\x0a\x75\x24\x22\x4b\x07\x08\xed\xfe\xd6\xf3\x4b\xb7\x9e\xd3\xe9\x48\x4e\x0c\x26\x76\x5e\xcd\xd6\x22\x06\x83\x4e\x7d\x31\x10\x62\x4f\x86\x80\xd5\x57\x93\x87\xab\x86\x3a\x15\x22\x4f\x96\x65\x54\xdd\x92\x72\x6d\xf2\xaa\x2a\x6d\x25\xb8\xd4\x24\x5a\x09\x2e\x59\xf5\xcc\x99\xcd\x90\xa7\x11\x50\x3d\xb5\x56\x7b\x55\x03\x59\x7a\xf8\x8b\xa1\xa8\x38\x38\xca\xc2\x64\x13\xc0\xa2\xb9\xcf\x18\xc0\xa0\x7a\x53\x47\xe6\x42\x88\xc4\x76\x96\xbd\xf9\xec\xe7\x68\xbb\xe5\xd9\x46\x84\xe9\xe2\xfd\xed\xc0\xb6\xed\x7a\xf6\x11\x8d\x24\xb9\x87\x1e\x28\xb5\xb3\x5b\x0f\xd5\x72\x94\xef\x07\x52\x49\xab\xe7\x52\xd1\x59\xe9\x2f\xbb\xc9\xa6\x76\x13\xa1\x85\x3d\x80\xdd\xe4\xac\x44\x72\x67\x15
\xf3\x02\x9d\x8c\x65\xc8\xb4\x6a\xfe\x8b\x8f\x0b\x9b\x27\x3f\x44\x33\x33\x93\xd5\x1a\xf4\x77\x59\xe7\x0d\x16\x6b\x74\xe6\xcc\x3e\x5c\x22\x4a\xfd\x12\xee\x77\x4e\x48\x80\x1c\x7c\xcf\xd6\x45\xb5\x19\x3e\x46\x57\x1e\xe5\x56\x27\x55\xe9\x55\x74\x07\x52\x5b\xeb\xbf\xa2\x3b\x93\xf2\x4a\x89\xa9\xa1\xa4\xb0\x15\x2b\x8c\xbb\xa9\xb6\x91\xe9\x1d\x5b\x37\xcd\xaa\xd2\xf8\xf2\xa7\x9c\xd9\xc4\x88\xaa\x66\x3e\x37\xea\xb2\x64\x14\xdf\xbf\x94\xf3\x9f\xa8\x27\xf3\x45\x43\x79\xea\xee\xa6\x8a\x93\xe9\x8b\x9c\x1f\x56\x06\x88\x98\x7c\x0c\x88\x99\xdc\x37\x25\xc4\x5e\x21\x0d\xd4\x05\xfe\x20\xbe\xb6\xd7\x37\xf6\x51\x57\xa3\xe2\x0d\x75\x6e\xae\x7b\xe7\x14\x39\x17\x7b\xb2\x55\x7c\x65\x56\xd9\x2c\x26\xa8\x4c\xbb\xe8\xd2\xad\xb2\x59\x44\x61\x99\x56\xa2\xfa\x30\x72\x19\xe0\x82\xb3\x2b\x4b\x68\x49\x3e\xc8\x98\xb1\xb3\xaa\xd4\x4d\x7f\x31\xf6\xdd\xb8\xd3\xa9\xe3\x06\x0f\x73\x39\x23\x0e\x96\xaf\x51\xe0\xac\x9a\x2a\xfe\x80\xe6\xce\xa5\x4f\x1a\x19\xdb\xd9\xd9\xbb\x76\x2c\xfb\x0e\xa2\xee\x4e\x37\x4a\x51\x16\xa8\xba\x54\x51\x0f\xf7\x00\x87\xf4\xd8\xa4\x0f\x98\x49\x24\x93\x3a\x44\x26\xde\xa0\x11\xc1\xdf\x04\x44\xe7\x79\x4a\xf6\x42\xf4\xe9\x6c\xb5\x44\x90\x12\xc2\x3f\x9f\xbe\x4b\x15\xeb\x2f\x6b\x73\x0f\x67\xfd\x0a\xb4\x33\x9d\x04\xcf\x00\x9e\x0b\xe0\xcd\xd2\xe1\xf3\x11\x38\x1c\x79\x2e\x58\xc3\x8a\x8a\x71\xd8\x62\x5c\x3d\xfe\x52\xdd\x66\x2e\x9f\xd3\xd5\xf5\xff\x8e\xc5\x7e\x93\x37\xe1\xe9\x4a\x58\x89\xed\xa8\x96\xfa\xd9\xb2\xcb\x51\x79\x97\x29\x1f\xfb\xb8\x6a\xea\x5b\x65\xe7\xc8\xf3\x79\xaa\x73\xdf\x03\xb9\x8e\x95\x67\x49\x52\x45\xff\x2e\x4c\xe0\xe2\x32\xa9\x73\x71\x59\x3d\x60\xe4\xda\x1e\xa3\xe8\x02\xad\xf2\x63\x66\x27\x25\x6a\x1c\xca\x4f\x0d\x53\x93\x15\xe3\xf9\x65\xaa\x0a\xd2\xe8\xb7\x05\x45\x86\x5f\xc6\xb5\xe4\xaf\x7b\xc1\x5f\x61\x09\x25\x8b\x8c\x2a\xe6\x3b\x8a\xa3\x72\x53\xd4\xa8\xbf\x14\xc8\x8b\x4f\xf5\x43\x5f\x11\x66\xc4\xbf\x0a\xd0\x84\x17\xe0\x6a\x1e\x20\xd3\x31\x46\xf9\x89\xb6\xee\x8d\x19\x07\xf9\x45\x7f\x44\x79\x1f\xf2\xbe\x4d\x92\x23\xc4\x0c\xa1\xb3\x83\xec\x1a\x52\xb4\x24\x15\x9e\xb7\x9e\x9b\xce
\x4d\x10\x10\xc7\xf3\xf1\x6c\x54\xf4\x09\x4a\x9c\xd9\xc2\x20\x90\x19\xb6\x3c\x17\xec\xdb\x36\x4e\xfb\xb7\x2b\x21\x2f\x27\x24\x15\x9d\x1f\x09\x55\x23\xdc\xde\xde\x78\x2e\x93\xde\x6b\xd0\x73\x47\x37\xa2\xba\xfc\xb5\x5e\xef\x05\x05\x8d\x4c\xfb\xd6\xcf\x10\x7f\xe9\x72\xff\x12\x19\xcc\xf2\x5c\x4b\xb4\x81\xe2\x2f\x16\x3b\x8c\x6a\xf4\x7c\xfa\xed\xa3\x35\x77\xd8\xdc\x88\xe7\xbf\xcf\x55\xd2\xac\x77\x3e\x4b\xc5\xcd\x6a\x81\xa2\x2b\x01\x53\x4c\x25\x28\x84\x6e\xe3\xc8\x03\x2f\x8e\x06\xcd\xf9\x7a\x05\x36\x4d\xf9\xf5\x4a\x18\xd3\x64\x62\xee\x05\x56\xc6\x1d\xd2\x90\x59\x36\x96\x94\x2c\x7c\x86\x2c\xfd\x6a\x46\x36\x54\x3c\x99\xac\xf6\x46\x2f\x4c\x35\xdf\x67\x02\xea\x50\x80\x81\x2d\xcf\x55\x7b\x69\x6d\xc2\xfd\xc1\x1a\x4a\x47\xf1\x0a\xa7\x7a\x88\xed\xc8\x15\x0e\x59\x2e\xf1\xd0\xed\x2d\x00\x50\xa7\xbd\x19\x21\x4b\xff\x75\x7b\x8b\x74\x98\xe0\xed\x2d\x90\x2f\x77\x80\x75\xe2\x3d\x56\x12\x9e\x78\x70\x60\xe0\x74\x6c\x22\xc4\x51\x57\x36\xb6\xb8\xcf\x03\x74\x7b\x8b\xf3\x5d\x9a\x10\xa8\x84\x2b\x6a\x46\xf2\xdd\x92\xa8\x55\x5c\xa5\xb0\xd0\x0a\x3c\x2c\x40\x1f\xec\x27\xad\x2d\x4e\x54\xce\x34\xc3\x4c\x42\x1b\x1f\x97\x1c\x87\x0a\x24\x66\x2a\x72\xcc\x13\x03\xaa\x23\x06\x9e\x27\x03\xa4\xa6\x9b\x5b\x2a\x24\x57\xc5\x52\xaf\xac\xa4\x9a\xdf\xb9\xe3\x08\x0b\xc9\xb2\x57\x39\x78\x6a\x6e\x96\xf5\xd3\xf8\x37\x71\x29\xaf\xe7\xa6\x4d\xac\xb1\x7d\x82\x17\xc9\xf1\x0a\x3b\xae\x18\xb4\xa2\x48\x6f\xaf\x8a\x85\x49\x3e\x29\xb8\x84\xf7\x32\x08\x8c\x62\x66\x09\x9a\x8b\x47\x4a\x9a\x4a\xd7\x5f\x0a\x6f\x24\x3b\x10\x1d\xfc\xb0\xfa\x14\x84\x33\x03\x59\x9e\x0b\xa9\x62\x9c\x2a\xc2\x4a\x66\xa1\x4c\x32\xbb\xe5\xc2\x7a\x9a\xfd\xb4\x51\x8e\x10\x79\x65\x5c\x43\x4c\x78\x52\x7f\x88\xa4\x7e\xbd\x4b\x71\x21\xf3\xcf\xd6\xb3\xf9\x64\x48\x20\xe5\x2c\xcc\xd1\x62\x19\x38\x4a\x9a\x8d\x40\x34\x69\xa5\xdb\xd4\x10\x8a\xca\x53\x01\xcf\xd1\x94\x50\xf4\x6b\x96\x0c\xa2\x6d\x5e\x60\x3d\x49\x94\x8f\x61\xae\xf3\xc4\x93\x1c\xf3\xc5\x5a\xf8\x1c\x2d\xaa\x84\xa4\xf6\x78\xce\x7b\x84\xcb\x36\xd9\x43\x59\x89\x58\x8e\xc4\x6a\x09\x83\xd5\xd1\x93\xfb\x03\xe8\xb3
\x77\x89\xa8\x11\xdd\x8e\x92\x01\xa2\x9c\x7c\x0b\x07\x3b\x33\x95\x3d\x02\x02\x37\xf0\xe5\xa1\xe8\x9e\x28\x0d\x7a\x48\x10\xc6\xa5\x93\x0d\xd7\xc9\xe2\x47\xc3\xef\xb3\xf7\xe8\x0a\x44\xd9\x2c\xc4\x37\xcb\x53\x69\x19\x4e\x91\x4b\xa8\x67\xb4\x25\x5c\x45\x03\xdf\x28\xd9\x36\x93\xe6\x5d\xb8\x58\x15\xa3\x2a\xd2\x45\x05\x89\x66\x58\x91\xef\xd5\x13\xe2\xd7\xa0\xa8\x76\x24\xa0\xcf\x25\x3b\x4b\x03\xd9\x48\x88\x4c\xd0\x03\xd4\x8f\x1e\x8a\x21\xab\x79\x51\x33\x05\xb0\x3a\x12\x78\x19\x04\x3f\xac\x12\x86\x64\xd4\x91\xc1\xd7\x5a\xe8\xc4\x54\xd0\x4e\x54\x55\xa4\x55\xd9\x92\x3d\x68\xc7\x75\xab\x3a\xc1\x9a\x42\x74\xa3\x78\x4d\xe7\x4d\x80\x91\xe9\xe9\xb4\xba\x7e\x92\x66\xeb\xdf\x51\x24\x97\xef\xc3\x34\xda\x36\xd8\x8f\x50\xad\x81\x60\xcc\x4e\x10\x90\x2b\x00\x95\x3f\xe2\xa4\xec\xc0\x14\x87\x85\xdf\xac\x21\x82\x37\x6a\x42\x63\x75\xe6\x07\x7f\x07\xeb\x38\x8a\x16\xc9\x19\x8b\x43\x9b\x7a\x4b\xb3\x26\x76\x48\x6c\x3d\x8c\x5c\xf9\xfc\xc2\xf5\x0a\x64\x2e\xc0\x90\xbe\x67\xd5\xda\xed\x9f\x43\xeb\x28\x6c\x90\xbf\xf8\xd0\x16\xf8\xd0\xfd\x70\x99\xaf\xad\x33\xfd\xb5\xe5\xbf\xba\xb6\xd0\x46\x39\xbc\xa7\x4d\xba\x0d\x15\x31\x43\xfc\xf7\xa0\x28\xfe\x19\x74\xc3\x8b\xcb\x76\x3a\x61\x3e\x75\xd3\xd7\xb3\x5e\x5c\x5c\x36\x2b\x57\x17\x97\xdb\x55\x93\x52\x19\x1b\x2e\xd0\xea\xf6\x16\x1c\x82\x92\xc4\x2d\xaa\x27\x48\xcb\xee\x0a\xa2\x85\xde\x2b\xe8\x5b\xb8\x83\x6a\x45\x37\x56\xa0\x54\x0e\xee\xac\x09\x8d\x43\xfa\x67\x51\x2c\x62\xa2\xe8\x44\xc4\x45\x07\x68\x87\xb9\x08\x7b\xdb\x7b\x4d\xec\xdf\x42\xc9\x90\xbb\x2d\x95\xb5\xa8\x52\xb7\x50\x5e\x44\x55\x46\x65\x1e\x5d\xc6\xc9\x4d\x06\x4b\xaf\xdc\xe2\x2b\x33\xd2\xd5\x02\x9d\xd9\x1c\x7a\x37\x90\xf4\x6e\xc8\xba\x77\x04\x70\x68\xaa\x7d\xce\x94\x2a\x92\xad\x1c\x40\x56\x34\x62\x97\x18\xc7\x91\xa6\xf1\x28\x18\x21\x9e\x1f\x7e\x91\x9f\x21\xd5\x6a\x86\x46\xd3\x28\x93\xa1\x29\x93\x13\x4a\x4f\xe4\x17\xb4\x32\xb0\x9c\x87\x39\x42\x5f\x51\x7e\x44\x97\xfe\xe5\x5b\x2f\x53\x55\x29\x14\xf7\xe1\xf2\x12\x5f\x7d\x94\xc9\x0b\xad\xc7\x54\x08\x84\xb6\xb9\x98\x64\xbd\xa5\xd4\x20\x34\x3d\xa6\x52\x9e\x99
\xf2\xf6\x58\xe5\xfe\x53\x32\x02\x87\x41\xa0\x7f\x98\x69\xa6\x5f\xc8\xa0\x98\xe4\x08\x6c\x85\xee\x36\x4a\xd9\xb6\xa5\xf5\x9f\xee\xc8\xd4\x5a\x1b\x6c\x62\x7b\xb5\xac\x8c\xa6\xf5\x85\xa0\x96\x9f\xb1\x6d\xf0\x33\x96\x66\x51\xf2\x0d\xc9\xea\x53\x4d\xbe\xc3\xec\x29\x87\x25\x0a\xac\xec\x2c\xcd\xd1\xd4\x60\x51\x4a\xbc\xa0\xe4\x0e\xb8\x3a\x69\x69\x8a\xd6\xa3\x6b\xbb\xd8\x4d\xa3\x78\xb9\x5f\x95\x31\xf8\xe0\x00\x9c\x0c\x4e\x80\x9d\xbe\x96\x8f\x92\x04\xb7\xdc\x6a\x7b\x7c\x4e\xc9\x55\x0f\xad\xbf\x1a\xdb\xcc\x78\x2d\xb5\xe0\x9d\xba\xea\x7d\x32\xcf\xa6\x59\x4b\x5f\xb6\x22\x03\xda\x72\x92\xbd\xd6\x57\xd9\xca\xb5\xee\xaf\xa3\x5d\xed\x6a\x29\x07\xc3\x72\xfa\x52\x3a\xaf\xe7\xab\x37\x91\x4a\x8a\x38\x59\x90\x19\x75\x96\xb9\x60\xc1\x16\x09\x7a\x37\x4c\x80\xa5\x3c\x91\xbe\x79\x39\x03\x99\x9d\xcb\xd7\x14\xff\x59\x2f\x82\x68\x2b\x85\x3a\x22\xec\x2e\x3a\xf5\xa6\x9b\x20\x9b\x98\xb4\x42\x3a\xd5\x9c\x75\xeb\xb3\x97\xd6\x6d\xb0\xbc\x3f\x87\xcc\x85\xc8\x8b\x32\x4b\xfb\xea\xa5\x35\x7e\xac\xd5\xea\x57\x71\xb2\x9e\x42\xe6\xbf\x64\xb0\x40\x48\x36\x0c\x6f\x12\x62\x4e\xed\x6c\x92\xc4\x7c\x7c\x3f\x7c\xc1\xa2\x69\x90\xac\x8f\x71\x5e\x6c\x6a\x6b\x9d\x7c\x7d\x4c\xea\xdf\xd2\x07\x30\xcf\x35\x70\xac\xfc\xc7\x46\x55\x8d\x1d\x36\xa2\x1a\xf6\xf7\xc4\x43\x46\xb1\x89\x74\xd3\x37\x25\x36\xee\xed\x88\x1e\x0b\xde\xd6\x69\x6b\x2b\xa9\x83\xe7\x92\xda\x36\x91\x45\x21\x7b\x63\x6d\x52\xdb\x7c\xf6\x36\x14\xe1\x67\x2f\x95\x55\xb8\x2a\xdd\x6d\x5c\x25\x85\xef\x00\xf2\x4a\x97\x38\x95\x35\x31\x9a\x2f\x93\x69\xba\x1f\x32\xa3\x6d\xc2\xbf\x23\x53\xf7\xb7\x23\x70\x53\xb7\x03\x9b\xcb\xdc\xf2\xc4\x82\x4d\xda\xae\xdc\x5a\x68\x67\x64\x6f\xbc\x76\x79\xf1\xfb\x97\x7b\xe2\xbd\x93\xd8\xa6\x3e\x8a\xd5\x32\xa9\x96\xc0\x2a\x4f\x58\x3c\x73\xa2\x52\x96\x20\x4b\x06\x75\x7e\xa5\xcb\x9d\x6f\x86\x8f\x48\x1f\xd4\x4e\xca\x78\x95\x2e\x9e\x72\x75\x96\x67\xc9\x29\x47\xf4\xd7\x4a\xdb\xbd\xe8\x69\xaf\xfc\x64\x98\x84\x03\x65\xec\x6e\x2a\x01\x59\x13\x62\x93\x98\x8c\x07\xc2\x6d\x85\x25\x4b\xb1\x56\x8c\xae\x34\x12\xa5\xa3\xb9\x01\x3e\x3a\x33\xd4\xc3\x84\xab
\xa7\x6f\x92\xb3\xaf\xf4\x92\xb7\x4f\x06\x27\xb0\xf2\x25\x78\x0d\x60\x12\x9e\xf2\xf0\x2f\x76\xec\x38\xb3\x4b\x19\x9d\x3c\xb7\x46\x93\x97\x7b\xa2\x96\x31\x56\x79\x33\x66\xd2\x51\x6f\xb2\x89\x62\xb7\x6c\xe8\xb9\xc5\x53\xaf\x9a\x75\xba\x5a\x81\xe7\xf1\x7a\xab\x92\x04\x3d\xfb\xbc\x60\xdc\x69\xb2\x81\xa5\x0a\xcd\xef\xd9\xa1\x3b\x51\x7a\x1f\xfc\x61\xa4\xe8\xc1\x8a\x68\x21\x7b\x57\x42\x5f\x6c\xfb\x06\x48\xd4\xea\xab\x3c\x6f\xa4\xa6\x9e\xcc\x7c\xee\x5c\x22\xf5\x84\x09\x45\x0c\xf1\x8a\xe9\xcb\x32\x5e\x0a\x41\x15\xbb\x61\x88\xc6\x8f\x32\x17\x92\x08\x57\x54\x4b\xc7\xb2\x95\xa7\x22\xde\xea\x2b\x6e\xc5\x1b\x87\x25\xf5\x17\x0e\x5d\xfd\x82\x56\x23\x9c\x8e\x04\x6f\x03\x63\x79\x20\x5e\x26\x92\x3e\x5d\x9f\x22\xb6\x95\x17\xfe\xab\x81\xc1\x84\x2e\xe4\x68\xa7\x88\x2d\x09\x66\xd9\xe7\x66\xa2\x3c\x50\x69\x23\xab\xda\x82\xb2\x54\xa5\x41\x22\x36\x57\xda\x92\x3c\x1c\xfa\xea\x70\x18\xf7\xfb\xd1\x59\x05\xc4\xf1\x0c\xd5\x13\x24\x82\x07\xb3\x8a\x88\x7f\x06\x49\x34\x71\xbf\x53\xc4\x3f\x1b\x93\x89\xed\x43\x66\xca\x41\x54\x98\x3e\x24\xd0\x5f\xc3\xfc\x3c\xca\x1c\x9c\x2a\xa5\x61\x7a\x29\xaa\xf2\x04\x6f\x44\xa5\xa9\xce\xbe\x5d\x62\xcd\xe7\xc3\x6d\x86\xf4\x5e\x29\x35\x05\x89\xce\x7c\x50\xb3\xb6\xa9\xec\x13\x8a\x49\xea\xbc\x1d\x58\xe7\xed\xd0\xb2\x2d\x7e\xa7\x08\x55\xd9\x7f\xb8\xa0\xf0\xf4\xd1\x82\x42\x90\xcc\x04\x98\x30\xb0\x11\xc4\x82\xa8\x79\x05\x51\x8b\x4d\xa4\x61\x0c\x3a\x11\x35\x1f\xe3\x89\x1d\x40\xae\x0e\x81\x10\xc3\x60\x9d\xd2\x42\x5a\x2c\x60\x45\xfa\xd9\x8d\x28\x36\xe9\xeb\x2b\x11\x2c\x74\x38\xa7\x6c\x84\x55\xce\xcb\x16\xd0\xe7\x13\x53\x6e\x04\xf6\xc5\xe5\x37\xbc\x41\x8b\x59\x02\x37\x82\x51\x76\xf3\x15\xa1\x2c\xc9\x43\x93\x9e\x7d\x69\x96\xb1\x8d\x00\x8d\x7a\xfa\x76\x57\xb4\x34\xf1\xce\x86\xb0\xaa\x9e\xbe\x39\x58\x95\x99\x4c\x65\x65\xa8\x38\x5c\x56\x69\x72\x5b\x38\x56\x6b\xd3\x7c\x3c\x7f\xc6\x09\x45\x55\x27\x30\x51\x06\xcc\x92\xc7\xc9\x2b\x9e\x1c\xd2\x0d\xf4\x3b\xe3\x40\x2a\xb4\xe9\x53\x10\x4f\x63\xc8\x34\xd7\xb0\xc4\xba\xd9\x65\x1c\x69\x30\xd3\xe3\x88\x13\x17\xaa\x39\xa8\xf1\x86\x87\x83\x78\xd6
\xf1\x30\x32\x82\x27\xc6\xb1\xf2\x57\xf3\x6a\xa7\xa6\xdd\xf9\xf4\x04\x7d\x6f\x84\xa0\xe7\x8e\x78\xa3\x93\x82\x7e\xa8\x28\x13\x63\x62\x42\xe9\xf2\x94\x7f\x0e\xb3\x79\x2d\x64\x83\xf4\x4c\xcc\x35\xd4\x87\xb4\x52\x5d\xd0\x12\x27\x34\x43\xa6\x9b\x10\xa7\xa1\x12\x33\x63\xe4\xea\x11\x9d\xbd\x55\x6e\x28\xe9\x08\x94\xf1\x63\x3c\x38\x30\xa8\x8d\xd4\x0b\x46\xea\xe6\xc8\x84\x80\x48\x0a\x06\x76\xe4\x07\xce\x57\x4b\xf4\x61\x6a\x48\x63\x43\xc9\x2d\x8a\x86\x62\x89\xd0\x45\x06\x9b\x74\x9c\x21\xa6\x89\x69\xca\xb4\x8e\x19\x2f\xca\x06\x2c\xeb\x17\x6f\xa2\x21\x42\x2c\x54\x28\xdd\x56\x19\xd2\x93\x0b\x8f\x3a\x7f\xda\x6c\x07\x2f\x83\x20\x42\x73\x23\x0f\xf8\x97\x73\x5d\x76\x0a\x12\xdf\x73\xb5\xee\xcc\x07\x2a\x1b\x68\xf6\x00\xe0\x4d\x56\x35\x9b\x21\x5e\x16\x35\x15\x67\x5c\x6a\x04\x8e\x93\xf3\xfa\x6b\x78\x59\xe3\xde\x19\x5c\xc9\x93\x80\x79\x58\xb4\x35\x45\x91\x71\x3a\xe3\x4a\x13\x8c\xe7\x9c\x38\xf5\x30\xca\x1a\x3b\x0d\xa3\x4b\x3c\xd4\x5f\xf8\x32\x73\x45\x06\x54\xff\x72\xd5\x17\x85\xaa\xac\xbc\xc1\xee\x91\xad\x3c\x91\xd0\x45\xfd\xaa\x46\x95\x1e\x7e\x61\xd3\x69\x79\x13\x17\x0c\xc1\xae\x9a\x01\x4b\x3c\x07\xee\xcf\x6a\xbf\x89\x7a\xb1\x1d\x25\x20\x6d\x9e\xd0\xba\x40\x23\x4e\xee\x21\x91\x4a\x91\xf6\xb2\x24\xe2\x4f\xf5\xab\xa3\x3e\x53\xa1\x53\x28\x7a\x97\xbf\x87\xf6\xfc\xa9\xf1\x69\xb5\x38\x27\x81\xe5\x73\x44\x1d\x2e\x6f\xd4\x7b\x6a\x26\xa9\x8a\xd9\x1e\x95\x4c\x1e\x4f\x20\xb5\xf7\x07\x30\xb0\xf7\x87\x90\x45\xe9\x1a\x39\x5d\xc5\x77\x06\x04\xfa\x36\x1a\xe7\xfa\x9f\x18\xe6\x77\xfb\x06\xb5\x0d\x62\xfb\x2a\x7f\x8d\x69\x5a\x1e\xc1\xc8\x94\x19\x7c\x96\x21\x9b\x1b\x44\xbd\xb9\x69\xc2\x7d\x7e\x7b\x8b\xf5\x5d\xc3\xbe\x6d\x73\xf3\x3b\x31\xa4\xf9\xdd\x5a\xf9\x43\x22\xf3\x26\x10\x53\x60\x36\x5a\xeb\xf4\x48\x37\x62\x02\xfb\xf4\xe0\xc0\xb7\xd4\xdc\x93\xbf\x0c\x33\xae\xe4\x4f\x8d\xc0\x54\x57\x34\x6c\x1d\x1d\xf0\xb1\x7e\x49\x4e\x7d\xc7\xe8\xaa\x77\xb6\x5a\x22\x7d\xc1\xf3\x56\x09\xe6\x9e\xc3\x39\x5a\x2c\x79\x8f\x93\x9e\x94\xfd\xa1\xcb\x43\x8a\x7a\x98\xe0\xbe\x84\xf0\x3c\x40\x3d\x1f\x6b\x6f\x2f\x73\xbd\x36\x8a\xde\x4a
\x9b\x50\x6c\xfb\x3c\x0c\x95\xea\xa3\x3f\x95\xf6\xff\x7d\x3b\xd1\xaa\xf4\x45\xac\xa1\x8d\x3b\x48\x7a\x58\xe3\xe2\x2b\x81\x91\x7f\x49\x34\x48\x6c\x72\xcf\x5e\x3a\xe0\xe8\x32\xaf\xa0\x6e\xc6\xd9\x68\xa5\xcb\x78\x59\x8b\xb5\xb9\x56\xa6\xa3\xdc\xd5\x5a\xe2\x7f\xa2\x9c\x4f\x6d\xe9\x96\x2a\x35\x2f\xee\xf8\x81\x9d\xbf\x79\x83\x1f\x75\xba\x2c\x8a\x24\x0d\x2b\x97\x57\x36\x1a\x53\x99\x04\x22\xbe\x0b\xa9\xcb\x54\x56\x0d\x78\x34\x9b\x68\x10\x27\x08\x8c\x31\xba\xbd\x0d\xd2\x57\xda\xea\x9e\x09\xdf\xde\x4a\xab\x6c\x7c\xa5\x33\xa9\x8c\x65\x10\xaa\xf7\x91\x09\x99\x8d\xc7\x83\x09\x24\x36\x4e\x9e\xba\x8f\x9d\x6a\x64\xcf\x0c\x92\x12\x57\xe0\xbc\xb7\x07\x81\x40\xbe\x9c\xa9\x76\x71\x32\xe9\xa0\x62\xd9\x50\xb2\x6c\x52\xe3\x4f\x4e\x49\x1d\x94\xfe\x08\x4c\x75\x75\x5f\xa5\x06\x23\x8b\x11\xca\x63\x8a\x33\x5b\x70\x4c\x54\xd4\x03\xf2\xc2\x11\x6d\x47\xf2\x3f\xa0\x68\x2c\x7b\x47\xbb\x44\xe8\x3b\x41\x20\x18\x4a\x5f\x9c\x58\xee\x1f\x3e\x4c\xb8\x3f\x5d\x55\x3d\xb4\x1d\x38\x6c\xfe\xab\x4a\xaf\x26\x5f\x85\x25\xb3\x19\xa2\x15\x95\x55\x21\x30\x61\x11\x65\x99\x67\x2e\xf2\xaf\x6b\xcb\x17\x40\xb4\xcf\x3d\x80\xfb\x03\x53\xbf\xdc\x92\xc5\x2f\x36\xa1\x9f\xfb\x44\x4d\xe8\x14\xb6\xad\x82\x27\xa1\x7f\x54\x38\x9f\x99\x37\x8e\xe5\x78\x9e\x71\x23\x10\x3c\x02\x2c\x74\x5d\x99\x50\x3d\xca\x68\x47\x8c\xd2\x0c\x6c\x28\x7d\x22\x63\x30\x06\x37\xbe\xfb\x43\x26\x04\x67\xb1\xfb\xc3\xcb\x73\x42\x39\xf2\x54\xdc\x0a\x76\x16\xe8\x45\x8b\x51\x47\x99\x3a\x7a\x13\x44\x35\xfc\x28\x35\x5c\x49\x4e\xc0\x4a\x6c\x0e\xdb\xec\x36\xb9\xcc\xfd\x45\xb4\xce\x25\x47\x45\x37\xf0\xfb\xb2\x56\x65\xa3\xdd\x53\xc1\xd3\xc9\x5f\x6b\x0d\x64\x55\x26\xf9\x9d\xd6\x63\x53\x77\x16\xdf\xb2\x49\x2b\x3d\xcd\x6f\xc1\xb0\x95\xcc\x67\xd7\xcd\x5b\x29\xcc\x7e\x45\x23\x57\x03\xf2\x53\xb6\xae\x34\xe6\x9b\xb6\x76\xf5\x95\x54\xee\x19\xb4\x07\xb8\x89\xba\xdb\x46\x2f\x6e\x20\x2c\x95\xfa\xbc\xf2\x11\x29\xb3\xff\xf1\xe9\xc3\x7b\x4b\x65\x50\xf7\xa7\x2b\x63\x8c\x21\x9a\x98\x65\xda\x6d\x09\x45\x08\xac\x25\xcf\x77\x07\xb7\xb7\x86\x0e\xad\x2b\x89\xa3\x0f\x54\x98\x1a\x44\xb9\x8f
\xe9\x2d\x87\xcd\x4c\x54\x7d\xa2\xa3\xcb\x0c\xed\x46\x90\x4b\xcc\xdb\xc0\x09\x64\xaa\xec\x88\x05\xe0\x4d\x58\x00\xd6\x2c\x20\x13\x81\x37\x2a\x3b\xe5\xa6\x3c\xbe\x74\xe8\xe9\xc1\x81\x81\x6c\x00\xcc\xd4\x63\xf9\x39\x86\x9a\x99\x1f\x87\x0c\x2d\x1d\x79\xf4\x55\x49\xa9\x8b\xa1\x17\xd1\xf9\xb3\x90\xd2\x24\xf5\x56\x78\xf6\xe5\x0e\x1d\x19\xb8\x36\xeb\x63\x28\xaa\x99\x5f\x41\xa7\xa1\x92\x96\x68\x1c\x0a\x48\x4b\x42\x01\x69\x31\x14\x50\x07\x0e\xe4\x69\x8d\x4b\x5a\x63\x76\x29\x47\xc8\x13\x5a\x60\xee\xb1\x83\x03\x56\xc8\x48\xa0\xce\xe1\x74\x0b\xac\xfa\xe2\xf2\xae\x3c\x9a\x7f\x6d\x1e\x2d\x77\xe4\x18\x6f\x83\x39\xf3\x7b\x65\xce\x12\xd7\x4d\x5c\x59\x6b\xcc\x0f\x67\x15\x2c\xb1\x57\x37\x4e\x52\xbf\x32\xf0\x4d\x59\x2e\x53\x36\xd5\x8a\x9a\x69\xab\xeb\xb6\x34\x44\xe5\xf5\x10\xd9\x38\xab\xd5\xb9\x02\xcb\x6c\xc7\xcf\x75\xf7\xed\x95\x3a\xcd\x50\x32\xa0\x96\xc7\xad\x54\xab\xb2\xc8\x4a\x45\xb6\xd9\x5c\x5b\x3e\x9a\x68\x22\xf1\x61\xff\xa6\xc8\x62\x3b\xcb\x1c\xbb\x7d\x7c\xcb\xc7\x80\x64\x92\xb5\xf4\xa2\x98\x76\x21\xa0\x4d\xec\x09\x9c\x95\xa4\xa5\xb6\xb2\x4c\xa0\x9a\x7e\x25\x8c\x9b\x10\xb7\x22\x91\x82\x43\xc9\x37\x42\x21\x2a\x34\x6f\x03\xdc\x27\x41\xb0\xed\x76\x69\xa5\x6b\x5f\xa3\x5a\x12\x11\xc9\x2f\x68\x75\x27\x1a\xe9\x32\xdb\xc6\x83\x62\xad\x4e\x90\x3c\x6c\xb0\x65\x49\xdc\x4c\x67\xc5\x80\x91\x07\x20\x34\x67\x86\x46\x57\x3e\xf6\xc8\x95\x7c\x4e\x28\xf8\xa4\xbe\xc9\x75\xfb\x19\x39\x1e\xa2\xac\xa8\x38\xdc\x80\xff\xee\xbf\x52\x20\x9c\x91\x0b\x84\x41\x31\xe9\xa0\xea\x46\x85\x2b\xbc\xe5\x68\x61\x00\x2e\x6b\x9a\xeb\x66\x73\x74\xe9\xa1\xe2\x46\xb6\xef\x32\x50\x15\x8b\x6a\x33\x5a\xf3\x30\xc8\xac\xd0\x41\xf3\xac\x2a\xd7\x41\x44\x60\x7a\x35\x2f\xd0\x8a\x19\xc8\x2c\x72\x60\x6c\xde\x88\x7d\xaf\x06\xc3\x10\x8d\xf1\x44\x50\x50\xcd\x9c\x51\x49\x9c\xc3\xdd\x60\x55\x3b\x25\x83\xd5\x16\x84\x2c\xb7\x40\xa5\x6f\xbe\x2c\xbd\x57\x37\xe7\xbc\xb7\x59\x8a\xdd\xc8\x33\x9e\xe3\x39\x4b\xae\x1e\xd4\x40\x91\xab\x99\x84\xfc\x26\xf6\xc1\x1f\xa1\x35\xe4\xea\x44\xf8\xc5\x17\xdc\x03\x3b\xc1\xaf\x3a\x9c\xf1\xad\x27\xbb\xd4\x67\x92
\x4f\xd8\x59\xb2\x39\xe1\xc6\xcd\xba\x05\x6e\x38\xba\xe6\xfd\x05\x72\x58\x48\xcb\x1f\x39\xcb\x54\xa8\x6a\xb7\x7b\x16\x59\xee\x2f\x10\x09\x2b\x93\xcd\x29\xdb\x8d\x7e\x19\xc7\x3b\xd3\x95\xb7\x14\x0d\x97\x35\xa8\x14\xf7\x41\xf3\x8d\x6e\xc9\xb5\x07\x4f\xe5\x3a\x95\x29\x18\x2a\x30\x10\xe5\xd6\x62\x87\x7d\xfd\x31\x7a\xca\xe7\xfe\xd8\xfc\xcf\x67\xbf\xbe\xfb\xc1\xa1\xcc\x8a\x06\x37\x84\xdc\x04\xff\x33\xfc\x69\x1e\x1c\x0d\x7e\x01\xf0\x3c\x20\xee\xc5\xe8\x6f\x37\x80\xc9\xbb\x7c\x06\x46\xe3\x89\xd8\xf6\x0e\x97\x89\xf6\xc5\xef\xf1\x13\x08\xd8\xe5\x0c\x4c\xe0\xf8\x39\x04\xd7\x8b\x40\xe6\xfe\x9b\x73\xbe\x1c\x1d\x1e\x5e\x5d\x5d\x59\x57\xc7\x16\xa1\xb3\xc3\xa3\xc1\x60\x70\x28\x2a\x56\x16\xca\xb6\x87\xe9\x8e\x46\xd7\x81\x8f\x2f\x4a\x5b\x0c\x9f\x3f\x7f\x7e\x58\x5d\x5c\xec\xf0\xca\xf7\xf8\x1c\x40\x70\x72\xb2\xbc\xd6\x9f\xe6\xc8\x9f\xcd\x79\xf6\xdb\xa5\x8f\xae\x7e\x20\xd7\x00\x82\x41\x6f\xd0\x3b\x39\xe9\x9d\x9c\x44\x25\x82\x83\x4b\x07\xe0\xa1\x35\x14\xdf\x9e\x4e\xe0\x78\x00\xc1\xff\xfe\x2f\xee\xf5\x7a\x3d\xf1\xe5\x09\x04\xb3\x62\x49\xaa\xd4\xf5\xa9\x1b\x20\xdd\x21\x15\x5d\xe9\xbf\x5d\x31\xe2\xd1\xd3\xe8\xd7\x4a\xfc\xd2\x3f\x18\x5f\x89\x75\x04\x32\xac\x76\x4a\xe8\xa2\x4f\xa8\x3f\xf3\xf1\xa8\x77\xf4\x74\x79\xdd\x3b\x52\x53\x17\x43\x3e\xdb\x6c\xd8\x61\xc7\x61\x87\xdb\x19\x36\x0b\xed\x49\x4b\x70\x4f\xb6\x0d\x6f\x8b\x81\x87\x5b\x1a\xf8\x28\x3d\x6e\x3c\x8b\x1a\x80\x97\xd7\x72\xec\x6d\x0e\x7b\xd4\x6e\xd8\xa3\xbb\x0f\x7b\xd2\x15\xdc\x93\x2d\xc1\x7b\xd2\x15\xe0\x93\x2d\x41\x7c\x7c\x9c\x21\xac\xc6\x71\x8f\x8f\x05\x5d\xdd\x9d\x9e\x87\xdd\x86\x1d\x0e\xb7\x32\x6c\x0e\xda\x41\x4b\x70\x07\xdb\x86\xb7\x79\x60\x05\xf0\xdd\x07\x8e\x87\x52\x14\x3d\x6c\x26\xac\x81\xa0\xe8\xe1\xdd\x07\x4e\x8f\x1b\xe3\xbd\x66\xdc\xe5\xb5\xc4\xf6\x56\xe1\x6d\x33\xee\x60\x4b\x03\x77\xc5\x73\x2d\x9a\x41\xe9\x97\xee\xb2\xfa\x28\xc3\x54\x8f\xba\x6d\xba\xa3\xa3\x4d\x37\x5d\xdd\xb0\xcd\xb4\xaf\xc6\xdd\x88\xf6\x8f\x2a\x97\xe4\xa8\x59\x66\x8a\x25\x39\xda\x48\x64\x1e\x55\x93\x60\x9b\x71\x07\xdb\x19\xf8\x79\x7a\xdc\xe7\x8d\xc3\x3e\x5f
\x5e\x8b\xff\xdd\x71\xd0\xe3\xc7\x99\x0d\xf7\xb8\x99\xa5\x3e\x16\x1b\xee\xf1\x96\x07\x6e\x06\x57\x8e\x7b\x77\x78\x9f\x77\x04\xf7\x79\x2d\xb4\xf7\xb3\xcf\x9f\xa5\xe7\xf8\xac\x79\xbf\x3d\x5b\x5e\xf7\x9e\xdd\x19\x33\xc3\x27\xdd\x86\x1d\x3e\xd9\xca\xb0\x59\x68\x8f\x9f\xb4\x03\xf7\xf8\xc9\x96\xe1\x6d\x31\xb0\x04\x78\x0b\x03\x67\x00\x3e\x6a\x46\xb4\x80\xf7\xe8\xee\x88\xce\x0c\x3b\x6c\x06\x57\x0c\x3b\xbc\x3b\xb4\xc7\x4f\x3a\x82\x2b\x30\xbc\x0d\x78\xb3\x03\xb7\x00\x58\x0e\x5c\x0d\xf1\xb6\xf6\xfa\xe3\x6a\xe1\x3a\x6c\x71\x22\x95\x27\x96\x8d\x84\x4d\xcd\xc0\xc7\x2d\x07\x3e\xbe\xfb\xc0\xc3\xa3\x8e\xe2\x75\x78\xb4\xb1\x78\xcd\x0c\x7c\xdc\x75\xe0\xe3\x2d\x0d\x3c\xcc\x48\xba\x61\xb3\xcc\x19\x0a\x51\x37\xdc\x48\xc4\x66\xd7\x38\x23\xec\x8e\x9a\x65\xec\x91\x90\x76\x47\x1b\x09\xd9\x9a\x81\x5b\x40\x2c\x07\xde\x02\xc4\x59\x54\xb7\x80\x58\xa2\xba\x1a\xe2\x6d\xed\xf9\xe7\xd5\x5b\xaf\x05\x21\x1e\xd5\x13\x62\x76\x92\xd9\x5f\xf2\xc3\x04\x82\xb9\xc3\xde\x5c\x3a\x01\x18\x4d\x9d\x80\xa1\xf5\xdf\xe0\x02\x71\x67\x74\xb3\x90\x16\x5b\xf5\xb6\x42\x0b\xbb\xb0\x35\x3f\x67\xa0\xd9\x9a\x5c\x91\x06\xe9\x61\x2c\xc9\xfe\xe7\x1f\x4f\xe6\xa7\x27\x3f\xb6\xb3\x24\x9f\x40\xe0\x4f\x01\x1c\x8f\x8f\x1e\x4b\x27\x7e\xf9\xe7\x00\x8e\x41\x64\x09\x9f\x4c\x26\x10\x87\x41\xa0\xff\x81\x37\xb9\x0e\x06\x10\x44\x6b\x30\x84\xe3\xe1\x33\x08\x48\xc8\x03\x24\x2f\x08\x04\xaa\x53\xeb\x32\x81\x40\x3e\x37\x81\x38\xa2\x72\x32\xeb\x42\x6f\x27\x72\xa5\xe6\xbe\x4b\xe8\x52\xe3\x1e\xa8\x71\xc7\x63\xe0\x7b\x00\x02\xcf\x15\x43\x81\x2b\xea\x2c\x97\xf2\x96\x46\xce\x56\x7c\x15\x33\x2d\xe9\xcf\x59\x2e\xfb\x97\x3e\xba\x4a\xfa\x91\xef\xf8\xcb\x5e\x34\x90\x3d\x99\xd5\xb4\xbc\xb9\x44\x62\x9f\x05\x12\x35\x62\xb5\xb9\x7c\xe4\xb6\x16\x19\xe9\xdd\x30\x3c\x8a\xef\x52\x92\xcb\x85\x49\x3d\x56\x34\xbe\xef\xfa\x3d\x43\xf7\x9c\x86\xed\xc9\x3e\x45\xc0\x2d\x49\xde\x25\x8b\x25\xc1\x02\x0f\x87\x8e\x1b\xf4\xa3\xbc\xa6\x0f\x4d\xfc\xff\x7d\x7e\x7d\xbc\x40\x74\xda\x8e\xf8\xb3\xb4\x2b\x76\xc0\x94\x22\x24\xaf\xf3\x22\x00\x22\x9a\x21\xd8\x9d\x3b\x78\x26\xf8\x93\x9c\x15
\x80\x40\x66\x6c\x9d\x2b\x87\xe6\x89\xde\x41\x2a\x1d\x9f\xd8\x44\xc3\xe7\x70\xa0\x16\x5a\x12\x68\xdc\x3e\xde\x4f\xba\x80\x21\x87\xba\x73\x49\x06\x9f\xe4\x9f\xbd\xf3\x55\x0f\x3b\x0b\x74\xa8\xee\x74\x45\xfd\xec\x46\xca\x4d\x99\x3a\x9e\x4f\xfa\x33\x4a\xc2\x65\x32\x5d\x2c\x63\x0c\xe3\xa9\xca\x94\xad\x00\xa6\x66\x01\xc7\x40\x06\x3a\xe9\x59\xa8\xa0\xa7\x68\x4e\x0a\x78\xa6\x3e\x74\x83\xaa\x38\xdd\xbb\xb0\xdf\x52\xaa\xda\x84\x22\xc5\xfc\x23\x1c\x3d\x34\x4d\xce\xfc\xdf\x97\xbf\x9f\xbe\x5e\x94\xd2\x24\x38\x88\x6e\x75\xcb\x2e\xf9\x7c\xbc\x94\x57\xad\x42\x3c\xaa\xe5\x52\xcb\xad\x3f\xe9\x55\x4e\x5e\xc5\x19\x0f\x07\x50\xb2\xc9\xf1\xd1\x13\x38\x8e\x0a\xbe\x00\xc5\x98\xf5\x6b\x45\x62\x89\x64\x45\x77\x8e\xdc\x0b\x24\x6b\x3f\x4e\x8b\x01\xf4\x7b\x2c\x05\xa2\x2a\x62\x0d\xe3\xbf\x35\xfd\x16\x3e\x68\xb2\x16\x5d\x27\xdb\xa5\xf3\xb6\x48\xf5\xa2\xc9\x37\x3d\xf9\xa8\xf8\x69\x41\xe6\x3f\x81\x20\x70\xce\x51\x10\xa1\x61\x2a\x23\x19\x9b\xf0\x50\x7a\xa1\xc8\x96\x0e\x4e\xe9\x37\x1f\x96\x08\xd7\xe9\x18\xf9\xd1\x9f\xeb\xc1\xe3\x71\xdd\x80\x30\x54\x54\x98\xca\x47\x7b\x15\x55\xae\x1b\x6e\x38\x84\xc3\xed\xef\xb0\x64\x97\x6c\xb0\xc7\x62\x31\xfb\xd0\xfb\xeb\xe5\xff\xa0\xe7\xbf\xfd\xc7\xaf\x1f\xcb\xf7\x97\x8c\x97\x93\xfe\x0c\x7a\xaa\x00\xc6\xdf\xaa\xf7\xde\x70\x08\x4f\x32\x08\xbf\xb3\x9e\xf4\x04\x82\xb9\xf4\xd8\xca\xd2\x81\xee\x1b\x39\xee\x3c\xee\x32\x1b\xfe\x09\xc1\xef\x21\x0a\x93\x1d\x52\xa6\xa4\x64\x82\x02\x13\x29\xa0\xe0\x9c\xa8\x6d\x37\x14\xdb\xae\x44\xc7\x49\xe9\x2c\x9a\x20\x97\xd1\x16\x52\x8a\x52\x39\x7b\x18\x3e\x87\x47\xd1\x5c\xcf\xb4\xf4\x00\x9f\x74\x78\x65\xcc\x22\xe2\x78\x4b\x10\xc5\x0d\x67\x36\xf9\x53\xbd\x03\x38\x25\x38\x3e\x51\x0c\xef\x32\xa0\xfe\xb0\x0f\x20\x90\x21\xed\xfb\x71\x51\x24\x95\xf4\x46\x8a\x35\xd6\xe7\xf0\x18\x8e\x41\x84\xbb\x49\xa1\x62\xa9\x92\x76\x04\x8f\xab\xf4\xae\x61\x54\x00\xd3\xfa\xc5\x13\x08\x3c\xff\xb2\xf6\xd0\xa4\xcb\x9f\xc7\x78\x4f\x33\xf6\x62\xab\xa8\xe5\x09\x04\x2b\x1f\x05\x5e\xac\xa3\x26\xad\x4a\xc9\x45\x92\x76\x95\x12\x59\x36\xb5\xca\xa3\x68\x25\x40\xa9\x3a\xd8\x89
\x80\x72\xa8\xef\xf4\x15\x8f\x84\xe0\x07\x8a\x1c\xcf\xa5\xe1\xe2\xbc\xba\x87\x4a\x00\xcf\xe3\xc6\xdb\x01\xb2\x1a\xd0\xca\x29\xc4\x5b\xf9\x3e\x50\x5c\xfe\xa5\x30\x07\x4e\x48\x70\xee\xdc\x6d\x12\x79\xc9\x52\x5d\xbf\x6c\xbd\xb3\x8c\xb1\x8c\x1b\x36\x71\x9b\x4d\x4e\x47\x0d\x5d\x16\xf0\xd4\x70\x66\x6b\x89\xa7\x92\xe3\x55\x01\x75\x9b\x1d\xb6\x4a\x84\x68\x77\xf1\xeb\x3a\xdc\x09\xc8\xec\xab\x1d\xbc\x9e\x53\xe7\x7f\x9e\x0f\x87\xff\xb9\xf5\x83\x57\xd9\x69\x2b\x7b\x90\x69\x38\x47\x75\x57\x41\xef\xef\xc8\xa5\x02\xf9\xa2\x43\x97\xfe\xf5\x4d\x1f\xbb\xb2\x74\xb5\x01\x5d\x12\x0f\xf5\x91\xe7\x73\xf2\x15\x88\xf2\x02\xbd\xf9\xe3\xc9\x25\xbe\x6a\x47\x94\x91\xde\x91\x49\x7c\x56\x24\x44\x21\x7c\x08\x0e\x56\x20\x3e\x7f\x45\xe2\x9a\x2c\x75\x5a\x03\x55\xf7\xb3\xcc\xc8\xef\x25\x24\xaa\x7a\x88\x57\x31\xee\x28\xfe\x22\xfb\x8b\x7f\x69\x33\x95\x26\x86\xb9\x54\x59\xa3\xc9\x04\x3e\x46\xef\x43\x01\xbb\x18\x6e\x21\x63\xba\x00\x9f\xa3\x85\xa4\x33\xc1\x7e\x74\x27\xb2\x48\x93\xc2\xdc\x97\x04\xd3\x44\x5b\x17\x68\x15\x2e\x1f\x82\xb4\x12\xd2\x48\xe8\xaa\x24\x59\x6e\x45\x63\x99\xa3\x4d\x9a\xa8\xfa\x9e\x2f\x68\xf4\xe1\xe9\xeb\xc3\xd9\xf4\xa7\xf3\x77\x4f\x48\xd7\x93\x7d\xee\xf4\x96\x3b\x5d\xc8\x9d\xa2\x16\x63\x89\xe8\xc2\x91\x87\x96\x68\xdd\xd2\x47\x0f\x8d\x83\x92\xd3\x47\xcd\x51\xa1\x4c\x57\xd4\x2c\x52\x99\x3a\xd5\x8b\xc0\xf9\x41\x52\x64\xe9\x60\x57\x9c\x73\x3b\xd8\x85\x0b\x87\xd5\x4e\x86\xcd\xbc\x8e\x51\x02\x76\x33\xa0\x31\x89\x54\x02\xaa\xfd\xc8\x4b\x00\x8d\xb6\x52\x72\x44\x50\x1f\x14\xf2\xde\xab\x4d\xfb\x50\xd8\xd8\xd2\xd6\x2b\xec\x9e\xee\xac\xdd\x8b\xa3\xb7\xfa\x4b\xdf\xbd\xf8\x1a\x5a\x07\xf2\x7f\xff\xef\xc1\xc5\x4f\xa8\x7c\x03\x0a\x11\x5c\xdc\x7c\xf9\xd3\xb6\x12\xd4\x6d\x15\xd6\x27\x10\x04\x7e\x9b\xf3\xb1\x56\x4a\x02\xe4\x0a\x19\xa0\xb3\x7e\x49\xcb\x9b\x3c\x8a\x83\x2c\xd9\x40\xe0\x33\xf9\xba\xf5\x25\x2a\x3b\x23\x57\x1e\xaf\x1c\x90\x18\xdc\x02\x5f\xe6\xb8\xba\x83\xbd\x6d\x4e\xd1\x54\xf7\x20\xfe\xec\x73\x22\xb7\x4e\xfa\x35\xa9\x8a\xe9\xa7\xe6\x3a\xcc\xd7\x29\x39\x50\x97\x9f\x7d\x8a\x9b\x61\xb8\x55\xaa\x2f
\x10\x6c\x77\x9a\x8f\x72\x89\x7d\x3d\x91\x83\x3e\xbc\x1f\x1c\xbe\xbb\xd8\xae\xc8\x49\x93\x2c\x77\x14\x13\x8c\x6d\x37\xad\x39\x5b\x81\xe9\x66\x7b\x28\x3b\x79\x0d\x37\x3a\x79\x95\xb1\xfb\x06\x60\x32\xe6\xa7\x0d\x40\x49\xb7\xdf\x22\x20\x55\x37\x77\x5d\x54\x82\x52\x78\x85\x72\xb9\x02\xad\x94\x82\x3a\xb0\x9b\xf5\x83\x78\x8d\x23\x16\xa3\x31\xd5\x30\xa8\xa6\x47\x7d\x65\x1c\x8b\x56\xcd\x2b\xee\x84\xcb\xbb\x33\x8a\xdc\x2e\xdf\x80\x4d\xe4\x4f\xb3\x0f\x1e\x4e\xf6\xcf\xff\x99\x7f\x38\x62\xcf\x3b\x1f\xc7\x73\xf7\x18\xd1\xd1\x86\xaf\x96\xa8\x1f\x1d\xb1\x6b\x6c\x98\xb9\xbb\x8c\x4f\x71\x8b\x4a\x23\x62\xc9\x15\x57\x32\x8e\x92\x6c\xfa\xf7\x9d\x44\x5b\x74\xac\xd9\xb8\x93\xe4\xba\x2d\xbe\x68\x8b\xae\xa7\x8e\x9e\x88\x8e\x9e\xc1\xf8\x80\x17\x5d\xb0\x65\x6c\x16\x49\xb5\xcc\xc5\xb1\xac\xfc\x1c\x02\x27\xe4\x64\x4a\xdc\x50\xda\x7d\xe3\xbf\x5b\xf9\xbb\x6c\x87\xe6\xb3\x24\xdb\x9d\xe6\x8b\x7e\x1b\x0f\x4d\xf4\xc1\x74\xfa\xf4\x28\xf8\xb1\xf3\x45\x6b\x96\xfc\x93\x5b\x9a\xe7\x10\x50\x22\x5d\x92\xce\x1d\x8c\xf3\x37\x37\x55\x9a\x58\xb9\x12\x95\xbd\xbe\xcc\x5d\x7f\xc4\xa1\x9c\x51\xc0\x64\xec\x25\x1a\x87\x4b\xc6\x21\x55\xdb\x89\xf6\xd4\x23\x2f\x1d\x3e\xd7\xfd\x0a\x1d\xf5\xd7\xe1\xb1\x75\xf4\xec\xa4\x37\x7c\x62\x0d\x9f\x3e\x73\x8e\xac\x67\x4f\x9f\xf4\xd4\x7f\x07\xbd\x61\x6f\xd8\xb7\x06\x83\x67\xfd\xc7\xd6\xd3\xc7\xc3\x5e\xb6\x50\x14\x8b\xc2\x9e\x28\xfc\x63\xf1\xd8\x7a\xfc\xfc\x49\x7f\x68\x3d\x3e\x79\xea\x0c\xad\xe3\xe3\xe3\x9e\xfa\xaf\xec\xa5\x37\xe8\x1f\x59\x4f\x9e\x3c\xed\x65\x4b\x44\xd9\xa0\x27\x4b\xfe\x58\x9c\x58\xcf\x1e\x8b\xa2\xa3\x93\xe7\xce\xd0\x3a\x7a\x3a\xec\xa9\xff\xaa\x0e\xac\xc1\xd1\xd3\xbe\x35\x1c\x3c\x75\x07\x3d\x6b\x70\x2c\x3e\x5b\x83\x27\x4f\xfb\xe2\xbb\xf8\xfc\xc7\xa2\x6f\x3d\x3f\x7e\xda\x3f\xb6\x4e\x8e\x9f\x94\x4c\xc0\x7a\xfe\x4c\xce\xee\xf9\x63\xd7\x1a\x1c\x1f\x5b\xc3\xa7\x47\xf2\xdf\xe3\x93\x67\xa2\xab\xc7\x47\x7d\x6b\xf0\xd4\x7a\x7c\xdc\xb7\x4e\x9e\x3c\xb6\x9e\x3f\xe9\x8b\x06\xbd\xa1\x35\x10\xb0\x9d\x58\x4f\x8f\x7a\xc7\xd6\xd1\xf3\x62\xc7\x7d\x51\x45\xf6\x7c\xfc\xac\x04\xba\xa1\x35
\x1c\x8a\x6e\x4e\x86\x4f\xad\xe3\x93\x23\xf1\x3f\x59\x32\xd0\x13\x3f\x9a\xf7\xad\xc1\xf0\xf8\x8f\x85\xe8\xe7\x59\xff\xd8\x3a\x3e\x2e\x99\xbc\x28\x7b\x26\xc6\x38\x3a\x71\xad\xc1\xf0\xc4\x1a\x0e\x4f\xe4\xbf\x47\x47\xcf\xc5\xec\x8f\x4f\x8e\x9c\xc2\xd8\xfd\xa1\x35\x1c\x1c\x89\x19\x3c\x3b\x9a\x8b\xda\x12\x45\x47\x8f\x7b\x4f\xc5\x7f\xcb\x50\x34\x7c\xf2\xb8\x2f\xd6\xcf\x15\x73\xb2\x86\xcf\x8f\xfb\x02\xc5\xd6\xf1\xb3\xbe\x28\x12\x25\xa2\x8b\x93\x67\xfd\xe1\x91\x35\x7c\x3e\x2c\xeb\xe2\xf1\xe0\x69\x7f\x68\x3d\x1b\x9e\xb8\x96\x98\xde\xf1\x53\x6b\xf8\xfc\x99\xf5\x58\x4c\xf9\xc9\x89\xf5\xf4\x99\x58\xb0\xe3\x67\xd6\x89\xe8\xf3\xe8\xd9\x73\xeb\xd9\xd1\xb3\xbe\x25\xe9\x62\x70\xf2\xf8\x52\x8c\xfb\xec\x0f\x41\x92\xa2\xcb\xa3\x27\x82\x30\x5e\x3d\xb6\x9e\x3f\xd5\x7f\x0b\x62\x19\x58\x4f\x9e\x8b\x3f\x74\xa5\x41\x4f\x96\xcb\xff\x24\x1f\xdd\x23\xeb\xf9\xd1\x73\xd1\x9d\xa0\xce\xa7\xcf\xac\xe7\x8f\x1f\xf7\x9e\x59\x83\xe7\xcf\x7a\x47\xd6\xd3\x93\xa3\x77\xc3\xe7\xd6\xb3\xde\x89\xf5\xec\xb9\x33\x1c\x28\xc2\x1c\xe8\x01\xc4\xda\x0c\x9f\x5a\xc3\xe3\xe3\xde\x33\xeb\xe4\xe4\xa4\x57\x52\xa1\x27\x2b\x3c\x15\x15\x9e\x0e\x83\xa1\xf5\xe4\xe8\x69\xef\xc8\x1a\x1e\x3b\x62\x33\x0d\x9f\xf5\xf4\x3f\x6a\x19\xe2\x61\x8f\x8f\xff\x88\x5c\x05\xfc\x20\x00\x10\xfc\x5f\x3f\xfe\xf8\x63\x9a\xdd\x3f\xeb\x28\x34\xa5\x33\xc6\x39\xb9\xce\xba\x86\x2c\x10\x0e\xf5\x17\xe9\x3d\xb7\x70\x7c\xdc\xc7\xce\x65\x9f\x93\xd9\x4c\x79\x68\xb6\x73\xd7\x00\xb1\x15\xbb\xc0\xc1\x2a\x67\x59\x74\x88\x28\x19\xbe\xf6\x78\x9b\x53\x27\xce\x64\xab\xde\xaf\x1a\xa8\xa6\xf3\x64\xae\xb3\x36\xd7\x95\x77\x98\x72\xf5\xb4\x4b\x3c\x3a\xf2\x8d\xaa\x4b\x92\x3b\xd4\xba\x7b\x37\xe5\xff\xd8\xca\x29\x31\xd5\x71\x18\xd4\x83\xd3\x6b\x69\xf4\x90\x66\x14\xf6\x9a\x92\xa5\x47\xae\xf0\x6f\x3e\xf3\xcf\x03\x75\x0c\x69\x61\xd5\x28\x1c\x46\x67\x3c\x05\x96\x10\x75\xea\x49\x02\xd1\xdf\xa0\xd5\x49\xa6\x06\x0c\x29\xa5\x8f\xcb\x29\xdc\xd3\xf3\x07\x93\x94\x23\x82\x46\x2e\xac\x31\x66\x74\xbd\xa4\xac\x9b\x5a\x5b\x2b\x4c\x61\x5e\xe5\x86\x98\x4d\xa7\x5f\xa0\xaf\xb2\xd5\xdd\x6c\x11\xf4
\x2d\x4b\x89\xf1\x30\x76\xf3\xd5\x37\x67\x29\xe3\x59\xca\xfb\x92\xe0\xf3\x20\xa4\xc9\xa5\x8a\x20\x91\x58\x7d\x97\xdb\xa0\x9a\x87\x25\x2b\x1c\xbb\x60\xb6\xac\x59\x71\x09\x52\x7d\x48\xad\x02\xbf\x7c\xa3\x77\xd9\x6c\x8f\xe5\xae\xd2\x44\xd2\x96\x2c\xba\x9a\x17\x1f\x82\x3e\xe5\xf9\x50\xb7\x68\x85\x99\x7b\xc0\x5d\x94\x76\xf2\xeb\x22\xae\x71\x16\xd9\x21\x55\x8a\xbc\xaf\x85\x32\x99\xe4\xf9\xeb\xe2\xab\x7e\x0a\xd9\xf1\x7e\x41\xab\xc3\xdf\xd4\xb9\xfc\xc1\x10\xa6\xcd\x75\x05\xd4\xe9\x57\x21\x2b\x67\x5e\xd1\xc4\xf3\x99\x73\x1e\xa4\x1c\x6d\x4b\xea\x85\xd8\x09\xf9\x9c\x50\xff\x8f\xa4\xde\x83\xae\x49\x13\x6c\xd9\x11\x5f\xbe\x7a\xf7\xd5\x08\x38\xf3\x00\xc1\xd7\x25\xe4\x76\x53\xc9\x8e\xfb\x36\x69\x73\x47\x0c\xb6\x77\xfe\x2a\x89\x6b\xe9\xa6\xb1\x56\x60\xa8\xb3\x02\xba\x39\xee\x9f\x44\xb7\x03\x58\x26\xa9\x7f\xf5\xe1\xfd\xa7\xcf\xef\xbe\xbc\xfe\xf0\xea\xf3\xaf\x6f\xde\x9f\xbd\x3c\x7b\xfb\xe1\xfd\x97\xcf\xa7\xef\x12\x4f\xd2\x43\x69\xb2\xb1\xe6\x7c\xa1\x2e\xbb\xa5\x39\x48\x3a\x2f\xce\x51\xb0\xec\x61\x42\x96\x08\x23\xda\xc3\x84\xa2\x29\xa2\x34\xb6\x19\x71\x87\xce\x10\x07\x10\x7c\x39\x0f\x1c\x7c\x91\x9a\xf6\x6b\xe2\xca\x77\xf9\xa2\xd7\xc4\xbf\x02\xfd\xc7\x89\x2c\x1f\x8e\xce\x0b\x43\xe6\x75\x80\xb8\x78\x8b\xd4\xdc\x5c\x52\xfc\x5a\xfe\xe5\x89\x3a\x03\x16\x4f\x60\x99\x2b\xb5\xda\xf6\x53\x42\x78\xde\xa4\xa8\xbd\x12\x7d\x3c\x25\xdd\xec\x8a\xe5\x84\xfc\xea\xc3\xc7\x7f\x9e\xbe\xfd\xe9\xe7\xb3\x1c\x11\xe7\x48\x77\x13\xaa\xfd\xff\xfe\xdf\xb4\xe2\x5e\x35\xec\x3f\xdf\xbc\x3c\x2d\xf8\x5a\x0b\xbe\xf1\xb3\xc3\xe6\xfe\x2b\x42\x97\xb5\x87\xf3\x65\xfa\xbc\x2c\xed\xc7\xd5\x83\xfe\xf6\xe6\xf4\xd3\xdb\x0f\xef\xab\x3c\xbb\x37\xc6\xe1\x37\xc7\x0c\x52\x18\x70\x09\x76\x1d\x79\x25\xf7\xbf\xd7\xc7\xee\x7e\xbf\xdf\x03\x65\xa8\xf9\xe9\xed\xd9\x97\x4f\x3f\xbf\x4c\x26\xde\xef\xff\xef\xf5\x71\xb2\xb5\x39\x0d\x8b\x5e\x8d\xcf\xb6\x73\x83\x90\xbf\x00\xd8\xe0\x0a\x01\x39\x01\x9f\x4b\x4b\x56\x3f\x76\x95\x7c\x68\x47\xd6\x77\xa7\xff\xf9\xfc\x3f\x38\x2f\xbf\x44\x28\xc6\x33\x94\x04\x63\xe4\x7d\x04\xa2\x83\xad\x7e\x59\x6c\x0c\x7c
\x1c\xf8\x18\xb5\x0a\x38\xd5\x87\xd3\x6a\x37\x27\x97\x2c\x57\xfd\xf3\x90\x73\x71\x84\x8d\x46\xca\x4f\x53\x08\x06\x7f\x79\x4e\x1c\xea\x9d\xa1\x6b\x41\x8e\xdc\xe7\x81\xf6\x9e\xad\xb8\x0f\x8b\x43\x46\x1a\xce\xca\x3a\x30\x23\x1f\xd7\x48\x42\x2e\x6d\x94\x13\x35\xc5\x9e\xfa\xdd\xe3\xa4\x17\x4f\xa5\x0c\x01\x39\x4d\xe3\x95\x68\xfa\x41\x36\xed\x74\x85\x3e\x84\x47\x19\xf7\xb1\x2c\x52\x1b\xfc\x12\x2a\x22\x62\xca\xe5\xe1\x2b\xb2\xf4\x91\xa7\xc1\xdb\xef\xe0\xdb\x52\x33\xbf\x5a\x67\x83\x4e\xb3\xfb\x44\x28\x5d\xc1\x1e\x23\x0b\xc4\xe7\x3e\x9e\xf5\xae\x10\xe6\xbd\x2b\x4a\xf0\x6c\x93\xb9\xd6\x87\x07\x94\x28\x73\x91\xe9\x95\x83\x94\x77\xd0\x33\x6d\x9e\xae\xe2\xdc\x51\x23\x2f\xdf\x88\x70\xf5\xe2\x59\x6d\x2b\x9e\x9a\xc5\x07\x4d\x84\x8d\x23\x94\xcb\x24\x1a\x9b\x7b\x9f\x08\x2a\xf6\x50\x6e\x42\x11\x8d\x67\x66\xd4\x9c\x9b\x60\x3b\xdc\xb6\xc0\x2b\xef\xc4\x6f\x91\xd7\xa7\x88\x91\x90\xba\xe8\xe1\x79\xee\xe0\xea\xe8\x9c\xfe\x1c\x7c\x2e\xe7\xb9\x72\x7e\xed\x6f\x6d\xab\x7d\x13\xf2\x51\x66\x62\x09\x1d\xcf\xa3\x8a\x15\x74\xd6\x22\x44\x73\xf9\x67\x3b\x57\xbd\xf4\xa5\x40\x9b\x5d\xd0\x51\x47\xcd\x1f\xa5\x8a\x77\x04\x71\xb8\xc1\x1d\x3c\x1d\xc5\xac\xa3\x7e\xba\x39\x28\x76\x44\x5a\xd3\xba\xc5\x93\xa8\x46\x5e\x39\xba\x5a\xb2\xbb\x12\x49\x9b\xf1\x1a\x0d\xb1\xda\x3c\xab\x3b\xe2\x53\x79\x4a\x7e\x8a\x23\x41\xbe\x1a\x4e\xf3\x13\xa9\xc7\x6b\x0d\x5d\xb7\xf5\xfd\xdc\x7c\x79\x86\x65\x9e\xe1\xd9\x8b\xa3\x68\x71\x36\xbb\x3c\xca\x2c\x57\xca\x0b\x6a\xe9\x30\xe6\xe3\x8a\xd4\x37\xf7\xbc\x40\x95\x60\x6d\xb4\x50\x03\x08\x08\x9f\x23\xda\xd3\x20\xa5\x8f\x1a\xcb\x20\xa4\x4e\xe0\xff\x81\xfa\x57\x84\x7a\xb5\x28\x8d\x79\xf3\x78\x0c\xc8\xc2\xe7\xaf\x48\x28\x03\xed\x64\x04\xca\xe4\x9e\x96\xbf\xd6\x6d\x32\x25\x15\xb6\x2f\x68\x53\x42\xb2\xbb\xb0\x8d\x6d\x7d\x5f\xcd\x29\xf0\xc7\xf0\xec\xf9\xea\xec\xbc\x65\x66\xa0\x6f\x36\x39\xca\x27\xb9\x00\x3d\x42\x7b\xaf\x11\xe3\x3e\xd6\x07\xe8\x7b\x0b\xda\x4b\x7c\x60\x07\xa9\x03\xd9\xb7\x1c\xb4\x97\x27\xb5\xee\xc4\x1a\xf8\x8c\xf7\x5d\x12\x04\xc8\xfd\x3a\x59\xac\x7e\xfb\xe5\xf2\xbf\x5e\xe3\xd3\xdf
\x2a\x54\x42\x24\x7d\x66\xaa\x5d\xfa\x4e\xa2\x27\x3b\x04\x75\x5c\xa2\x3e\x73\x29\x51\xef\x7b\xa7\x8e\xe3\xce\xec\xbd\x0e\xe0\x53\xb6\xb8\x3e\xf3\xff\x90\x0e\x9e\xb2\x72\x3f\x40\x53\x9e\xfc\xe2\x64\x19\xff\x78\x15\x51\xbb\x1b\xf8\x08\xf3\x4f\xfe\x1f\xe8\x55\x42\x2f\xd2\xf5\x51\x2e\xf9\x17\xdd\xb1\xa8\x90\x50\xcc\x17\xd5\xc9\x3b\xd1\x7d\xfe\xe3\x19\x59\xd6\xde\x60\x67\xc6\x6f\xbc\xc5\x2e\x99\x9e\x26\xbc\x3a\x27\xf0\x8c\xb1\xfd\x59\x49\x7e\x8a\x2f\x02\xfd\x35\x8a\x64\xa2\xe9\x3c\x8e\xb3\xba\x45\x7a\x81\xfa\x19\xa7\x5e\x19\x0e\xe1\x51\x9c\x9e\x42\x07\xec\x24\x31\x32\xe9\x44\x2d\xcf\x6a\x83\x43\xee\xcf\x17\x3c\xb7\x13\xba\x6f\xa5\x0c\xdf\x79\xf0\xdc\x28\xaf\xff\xf8\xe9\xe4\xfd\xcb\xd7\x5d\x02\xa4\x4a\xb3\x56\x24\x86\x6b\x09\x8f\x02\xa7\x90\x70\x28\x29\x8b\x72\xed\xe8\x90\xd6\x49\x63\xd2\x93\xee\x61\x58\xe9\x64\x3f\xf1\xbd\xc7\x13\x9d\xcc\xaa\x2f\xa7\xd2\x93\x18\xeb\x27\xe4\x97\x78\x68\x27\x13\x92\x1d\x56\xea\x72\x75\x29\x98\xc4\xc8\x3a\xc4\x3c\x75\x9c\xc3\x25\x09\x8c\xb2\xa3\xa7\xab\x34\x24\x61\x4a\x9b\x7e\xb3\x51\xc4\x49\x2f\x39\xd0\x22\xd3\xef\xbd\xa6\x67\x2a\x77\xfc\x6b\x3c\x16\xe8\x45\xdb\x4a\x4c\x58\x4e\xd9\xdb\xb2\x0c\x4d\x6d\xdb\xee\x7b\x9e\x3b\xe7\x7d\xec\x5c\x3e\xfc\x7e\xff\xe7\xbf\x5e\xfb\xd7\x6f\xc9\x1f\x9b\xef\xf7\x0a\x3b\xc2\xc3\x87\x4c\x46\x62\x90\x05\xe1\xcc\x9f\xae\x40\x3a\x8b\x51\xce\xbf\x21\x69\xd3\xe6\x06\xb3\x8c\x7f\xa4\x92\x85\x55\xf1\x30\x08\xbe\x80\xc6\x19\x4d\x32\xac\x92\x3b\xe7\xd1\x00\x32\xfb\x8d\x50\x07\x28\x09\xd8\x36\x86\x82\xe0\xcb\xd2\xc1\x3a\x10\xbb\xf9\x3c\xe9\x14\xf6\xe2\xfd\x1f\xce\xef\x73\x7b\xea\x1d\xb6\xd9\xd6\x64\x5f\x4b\xab\x45\xc3\x85\xfb\xcf\xcf\x3f\xfc\xf7\x5d\x52\x01\x6e\x87\x7f\xd7\x4b\xb3\x66\x61\xde\x4e\xee\x97\x12\xb2\x62\x21\x5e\xf6\x1c\x54\x1e\x50\xa5\x6a\xd5\x49\xca\x3c\xcb\xa8\x92\x7a\xa5\xf9\x02\x0b\xba\x0d\x77\xce\xf5\xa6\xfa\xff\xd9\xfb\xd6\xed\xb6\x6d\xac\xd1\xff\x7e\x0a\x94\xdf\x3a\xa9\xd4\x50\xb2\xe4\xdc\x1a\x77\xd4\x2c\xd7\x71\x1b\xb7\xa9\xe3\x71\x94\x74\x66\xfc\x79\x75\xc1\x22\x2c\xb1\xa6\x48\x05\x80\x6c\xab\x49
\xde\xe5\x3c\xcb\x79\xb2\xb3\x70\x23\x41\x12\xbc\x8a\x92\x6c\x8f\xfb\xa3\xb1\x48\xe2\xb6\xb1\xb1\xb1\xef\xbb\xda\x0a\xd5\x5c\xf3\x0e\xa5\x95\x8e\xfd\x6f\x4c\x47\xa1\x21\x75\xad\x13\x31\xf7\x20\xde\xa8\xb8\x37\xbc\xf8\xe5\xf8\x9f\x2f\xdf\x6d\xd7\x14\xf7\x98\x18\x61\x52\x8a\xd3\x09\x82\xf9\x36\x1e\x9a\x61\x33\x00\xb5\x33\x7f\xed\x94\x4a\xba\x15\x57\xcd\x4f\x20\xd9\x53\x59\xdb\x4a\xfa\x55\x87\x2b\xb4\x8a\x72\xc6\xed\x45\x4f\xea\xa4\x14\x55\xa1\x23\xa9\x64\x96\x55\x08\x51\x0e\x19\x52\xba\x17\x2b\x11\xfc\x52\xc9\x2b\xad\xc8\x50\xa2\xb8\x98\x4d\xe9\x04\xe8\x79\xe0\x2c\xee\xae\x5a\xc0\x78\x60\x6a\xaa\x06\x44\x07\xf9\xaa\x81\x52\x19\x2d\x2c\xf1\xbe\x34\xcf\x97\x3a\xca\x38\xb8\xce\x3b\xc7\x65\xf4\x12\x6b\x3d\xea\x4e\xe1\x51\x2f\x9b\x41\x51\xb5\xcc\x88\xeb\x8f\x09\x9d\xc9\x35\xc7\x14\xb4\xe1\xb9\xb6\x93\xf9\x82\x0b\xc3\xff\x1b\x81\x6f\x36\xbf\x5a\x89\x70\x6c\x46\xb5\x94\xbe\x78\x6b\x5c\xde\xc1\x34\x18\x63\x38\x9b\x2c\x3a\xfc\x9f\xf5\x5f\xdd\x37\xb4\xdf\xef\xcf\x4f\x4e\xaa\x88\x9c\x51\xd0\x33\x3b\xe9\x32\xea\x39\x62\xfb\x88\x20\x8c\x8a\x12\xa8\x20\x68\xe3\x07\x46\x7f\x93\x71\xe2\xa4\x44\x50\x52\x63\x86\x45\x26\x42\xa5\x11\x7b\xc0\x57\x25\x4f\x05\x63\x0e\x43\x0e\x53\x52\xea\x9d\x50\xf8\xb2\x41\xa9\xcf\xda\x59\xe2\x99\x95\x5b\x46\x43\xfb\x22\x56\x4a\x43\xad\xe7\x1c\x8e\x2e\x19\x33\xea\x3b\x6a\x3d\x61\x82\x01\x71\x0c\x45\x23\xdb\xea\xe9\xba\xae\x5c\x7d\x8d\x71\x1c\x78\xe3\x92\x82\x11\xfa\x2b\x1f\x61\x67\xe5\x23\x3c\x59\x6e\x84\xf3\x00\x4b\x33\x57\xce\x18\x4f\xf3\xc7\xc8\x16\xb6\x0d\xe8\xec\xb9\x3e\x22\xc5\x6a\x1a\xc7\x25\x14\xfa\x3c\xbc\xaa\x8a\xaa\xc6\x47\x99\x87\x04\x07\x54\x9e\x10\x49\xa9\xc5\x03\x4e\x6b\xdb\xd1\xd5\xbd\xd8\x09\x0f\xab\xf8\x6c\x11\xee\x21\x7b\xed\x40\x0a\x3b\x3e\xcf\x59\x18\xfb\xca\x49\x7d\xa4\x56\x90\xfc\x50\x3d\x4e\x7e\x4f\xd0\x78\xca\x53\x7c\xc7\x3e\x57\x4f\x8d\x1b\x50\x46\x83\x51\x7d\x83\xe0\x39\xf2\x0a\xf2\x36\x67\x61\xd3\x2c\x70\x7d\xc5\xa0\x87\xf5\x7d\x0a\x11\x33\x45\xf1\x04\x6b\x54\x44\xeb\x7a\x82\x8e\xf5\x94\x6a\x96\x48\x9a\x91\x43\xb9\x32\xf0\xe5\xa5
\x6d\xdd\xec\x58\xb6\xf5\xa2\x97\x3f\x5f\xad\x29\x45\x37\x6a\xa1\x37\xac\xa5\xaa\x5d\xb4\x10\x93\x10\x69\x29\xd8\x8f\xee\x93\x1d\x7e\x8b\xe8\xe9\xc1\xd9\x7a\x20\xed\xf8\x3c\x09\x66\x88\xf2\x53\xd7\xf3\x5c\x82\x46\x81\xef\xa8\xa5\x70\xe7\x04\xf1\x35\x67\xc5\x7b\x76\xaf\xd7\xeb\xf6\x7a\x49\x8b\xf1\x34\x27\x02\x21\xff\x4d\xb3\xe0\xef\xdf\x1f\xf0\xf7\xef\x20\xf8\x77\xee\x0f\xf8\x77\xee\x20\xf8\x9f\xdc\x1f\xf0\x3f\x59\x29\xf8\xcb\x68\x3c\x9a\xd7\xba\x27\xa4\x8d\x92\xf2\x8a\x33\x5a\xbf\x58\x42\x2f\x0e\xcf\x3f\xbe\xa6\xcf\x4a\x97\x41\x2b\xac\x3b\x46\x72\xaa\x8f\x25\x73\x10\xe4\x7b\x58\xd4\x28\x94\xb6\xa4\xd4\xe9\x8c\x4a\x6f\xd5\x36\x1c\x79\x64\xbb\x23\x8f\xec\x9a\x77\xed\xc5\xaf\x9f\x9e\xe2\x7f\x3d\x3d\x32\x0b\x93\x2a\x45\xaf\xad\x72\xf3\x46\x99\x03\xed\x30\x51\xb1\xad\xe2\x6c\xd2\x32\x27\x5f\x53\x96\xcc\x78\xe1\x22\xcf\x21\x88\xe6\xaa\x8b\x0b\x9c\x0e\x38\x51\x49\xa7\x46\x61\x32\x70\x14\x8a\xa3\x5c\x63\x2d\x30\x81\xa4\x13\x4f\x4c\x99\x4d\xf5\x2c\xa3\x13\xa7\xe8\x2b\x8f\x72\xa8\xac\xe6\x5c\xed\x1b\xa2\xb4\x52\xba\x2a\x75\xaf\x9e\x70\x2e\x36\xe9\x70\xb2\xe9\x2f\x8d\x0e\x77\xf9\x04\xab\x9c\x3f\x49\x56\x86\x99\x38\x18\xc3\x1a\x30\x69\x30\xa6\x75\x81\x26\xf1\x88\xed\x58\x79\x25\x9c\x09\x09\x4a\x05\x2e\xa7\x1d\x21\x9e\x95\x30\xbe\x26\x3a\x29\xa7\xb5\x1f\xca\x00\xb3\xb4\x2d\x4d\x0e\x5a\xa5\xf4\x97\x04\xb7\x02\x73\xd4\x47\x23\xfe\x25\x95\xf2\x49\x99\x11\x2b\x4d\x27\x9f\x55\x97\xdc\x32\x93\x5d\x2a\xfe\xa0\xca\x69\x3c\x0e\x3c\x77\xb4\x68\x30\xea\x74\x3c\x77\x1d\xc4\x6b\xdf\xf1\xd0\xd3\xff\xc1\x73\x0f\x75\xc8\x0c\x8d\xdc\x0b\x55\x5f\xb4\xa1\x68\xd4\xd6\x9b\xfd\xb7\xe0\x67\xce\xa8\xb4\xad\xcc\xf4\x63\x20\x19\x94\xaa\x15\x77\xf0\x63\xd5\x35\xed\xa4\x6b\x6d\x58\x57\x40\xc5\xa4\x67\x1e\xec\x93\xb9\xc7\xcf\x65\xb2\x66\x94\x7c\x61\xc7\x1a\x85\x1f\x97\x44\xb4\xb2\x54\xcb\x4a\xa7\x9d\xc7\x48\xa8\x3a\xaa\x7b\xa8\x35\x81\x5b\x87\xaf\xeb\xd3\xf9\x24\x51\x67\x7d\xe5\x40\x42\x9b\x04\xd2\xef\xcb\x3f\xd0\xff\x7e\xeb\x79\x60\xcc\x70\x0b\x52\x04\x20\xf8\xf0\xe1\xf0\x35\x70\x2f\x00\x9d\xb8\x04\xf0\x0b
\x14\xb8\x04\x78\xe8\x82\x02\x34\x9d\xd1\x45\xb7\x0c\xef\x5a\x42\x53\x6f\x66\x76\xcb\x95\x24\xaa\xbe\x6f\x32\x9a\x36\x9e\x93\x76\x7e\x3e\x75\x43\xff\x8b\x30\x23\x4b\x46\xe6\x17\x0d\xd6\x2e\x39\xc6\x2e\xa1\x32\xde\x37\xf1\xea\xd0\xbf\x82\x9e\xeb\x98\x52\x39\x27\xef\xdf\x54\x52\x97\x54\x56\x18\x09\xb2\x8c\xec\x66\x12\x0c\xfa\x0c\x62\xec\xc6\x7b\x78\x95\xaa\x3a\x58\xbd\xf8\xd2\x12\xd0\xcb\x02\x4c\xd5\x85\xce\x79\xd9\x95\x65\x17\x9a\xbe\x42\x32\x96\x86\x91\xe4\x19\xb3\xe0\x2e\x39\xd5\xac\xe9\xec\xcb\x2a\x13\xd9\xc4\x87\xd1\x35\xdf\x31\xd5\x1e\x4c\xa0\x76\x54\x22\xa4\x93\x46\x24\x7e\xe6\x59\x4f\x81\xbf\x98\x06\x73\x52\x2e\x37\xf8\xd3\x90\xaf\x8e\x57\x5a\x51\xf4\x25\x4a\xdc\x7d\x6a\xed\x61\x04\x16\xc1\x1c\x90\xb9\xfc\xe3\x1a\xfa\x3c\xe0\xdb\x41\x1e\xa2\x48\x10\x89\xbd\xfd\xb7\x80\x17\xb6\x7d\xd5\x40\xf8\x7b\x36\x8b\x66\xda\xa9\xd8\xb3\x18\x3d\x16\x13\xcc\xde\x44\xf6\xd7\x53\x99\x18\x4e\x7c\x1a\xdf\x4e\xf9\x6b\x06\xb1\x52\x33\x47\x59\x1e\xc2\xae\xf3\xf0\xed\x69\x4e\x30\xb8\x04\x79\x65\x10\xe4\x44\x85\x27\xee\x0d\x51\x02\x31\xf7\x3a\x30\x13\xe1\xb5\x00\x5e\xd5\xad\xd4\xb2\x83\x30\x7c\x04\x26\xc8\x36\x32\xa9\x51\x78\x22\xb3\x27\xb5\x93\x9c\x94\xf1\x14\x1b\xb2\x12\x64\xd7\x8d\x2c\x8e\xb1\xcf\xf0\xa7\xd4\x7f\x2d\xa9\xea\x89\x49\xff\x15\x35\x06\x8c\x0f\x5c\xbf\xc2\xe0\xf7\xbf\x9f\x5f\xff\x8b\xfc\x94\x51\x82\xa9\xa4\xc2\xa0\x4c\x56\x8f\xac\x32\xf2\x76\x54\xf3\x90\xd3\x2b\x0f\x48\x40\xc8\xbc\x99\x6f\xa3\x8a\x88\x25\xe8\x5d\x89\xd2\x96\xe9\x6b\x29\xc8\x11\x46\xad\x64\x40\x4f\xf9\x64\x6e\x19\x99\xdb\x3c\x0f\x0c\x19\x01\x27\xd9\x62\x42\x79\xfe\x2e\x05\x80\x7c\xf7\xba\xf4\xda\x27\xfd\x22\xfe\x2f\xce\x48\x95\xf7\xb8\xe9\xdb\x46\x46\xac\x84\xae\xae\xa0\xf3\x23\x74\x2d\xae\xc0\xd2\xac\x47\x4d\x58\x16\x14\x82\xcd\x2c\x69\x9c\x64\x2b\xb2\xdb\xdf\x9f\x54\x37\xcf\xca\xa6\xba\x79\x9e\x4e\x75\x13\xe7\xb0\x78\xb6\x1b\xbe\xbf\xe0\xf0\x75\xd5\x7c\x37\xea\xbf\x7d\xbd\x93\x4a\x51\xd0\xcf\xec\xe7\x8d\x64\xbe\x49\x9c\xb3\x12\xdc\x84\xcc\x84\xa3\x66\xfd\xcd\x52\x19\x07\x6b\xe7\xc4\xa9\x3e\xef\x92\x39
\x72\xaa\xae\xa1\xb4\xfb\x59\x21\x83\x92\x25\x59\x78\x81\x9f\x23\xe7\xec\xb3\xd7\x62\x37\x8c\xd2\xc5\xf2\x7c\xfd\x9c\xdc\x26\xa6\xbe\x90\x75\x9f\x93\x1c\x68\x7d\x20\x19\xb0\x7a\x60\xd5\x97\x67\xd5\x3f\xe4\xa5\xbf\xaf\x3d\xa3\x5b\xca\xa7\x57\xa0\x69\x05\x95\xa9\x0d\xb5\xb2\x15\xb3\x2d\xad\x73\x05\x95\xb2\x4b\xbb\xb0\x56\xaa\x57\xad\x33\xfc\x15\x65\x04\xe1\xd8\xbb\x76\x21\x01\xf7\xbd\xeb\xbf\x8f\xf0\x7e\x8e\x8b\xaa\x2d\x9d\x8e\xa3\x7f\xc3\x94\x16\xa1\xad\x24\x5f\x9c\xf0\xc3\xb8\x01\x51\x2f\x75\x29\x19\xc2\x73\x49\x6d\x19\x62\x59\x16\x5a\x3f\x98\x8c\xb2\x0b\x6e\xbf\x19\xf6\xbe\x80\x25\x4d\x4f\x2e\xd3\x88\x92\x16\x58\xba\x8a\x6f\x8d\xc7\x62\xc6\x69\x86\xfa\x24\x24\x08\xe1\x83\x9a\x2b\x2a\xa8\xc2\x9f\x95\xa0\x48\x65\x1a\xa9\x9b\x9e\xa8\xaf\x98\x54\x2f\x95\x8d\x45\x65\x24\x09\x0b\xa9\x85\xb4\x34\x5d\x35\x9d\xbd\xf8\x59\xcf\x60\xa2\xa7\x34\xe1\xa5\xce\xd2\x0f\xa9\xb2\x08\x66\x59\x5c\xe4\x84\xf2\xb3\x9c\xac\x98\x76\x66\xc1\x5d\xcc\x8d\x9f\xe6\xd2\xa0\x7f\xca\x03\x03\x53\x31\x72\x7e\xac\x5e\xc5\x59\x6a\x84\x86\x4f\x6b\xe2\x60\x50\xbd\x10\x5f\xbe\x1b\x40\x56\x2b\x65\x2e\xae\x89\xf9\x39\xe1\x33\x39\xd3\xce\x08\xc7\x33\x7c\x5a\x4e\x4d\xd1\x95\xea\x16\xe9\xeb\x2b\x4c\x5b\x69\xcd\x45\xd5\xcc\x64\x69\x3a\x57\x72\x41\x29\xdd\xbd\x50\xc5\xcb\xc1\x43\xa7\x85\x29\xf4\xe1\x18\x4d\x23\x0c\xae\x5f\xd3\x26\x2b\x97\x9b\x1a\x6b\x65\x15\x6b\xcc\x29\x1e\xaa\x0d\x1b\x17\x87\xcc\x90\x6f\x58\xe9\x91\x2b\xf8\x68\xc5\xc5\xed\xb0\x68\xdf\x29\x5f\x4b\xc4\x7d\x37\xa2\xcd\x10\x1f\x24\x13\x52\x29\xee\x43\x4b\xa9\x15\xf9\x59\x9c\x46\xfc\x7f\x9e\x8a\xe2\x69\x4a\x97\x61\xc0\xc6\x67\xe1\xe7\x4f\xf2\xbc\x3a\xca\xab\x2c\x12\xc8\x51\x26\xb5\xbf\xa1\x59\xd9\x34\xff\x86\xa6\x8d\xd2\x8b\x9e\x6d\x1d\x38\xae\x21\x75\x6c\xd1\x44\xaa\x7d\xdd\xd0\x8a\xe3\xb1\x92\x9f\xe6\x88\xbb\xfe\xe4\xa6\x76\xee\xe9\x42\x71\x95\x5c\xcf\x4f\x0d\x19\x38\xe2\x80\x2b\x14\xf7\x6e\x29\xdc\x8c\x91\xb5\xc8\x77\xf6\xa5\xa2\x25\x09\xa5\xf8\xaa\xc5\x57\x4b\xac\x3b\x75\x73\x44\x86\x5c\x1d\x59\x33\x6d\xb8\x55\x0f\xea\xba\x61\xa9\xa3\x5d\x68\x46
\xcd\x87\x69\x19\x1b\x5f\x11\x50\x2b\x97\xe9\xaa\xda\x51\x4a\x7b\xd0\x6b\x52\x45\x94\x00\xf7\xac\x2c\xb7\xf1\x32\x06\xe8\xa5\x11\xa5\xa2\x71\x7f\x89\xc2\xed\xd1\xdc\xe7\x64\x95\x13\x37\x68\x2f\x0b\xb0\xab\x5e\x2a\xd2\x62\xd4\x2a\xd8\xf2\xe6\xf5\x74\xcf\x13\x5a\xb1\x75\x22\x52\xdc\x7c\x7f\x7b\xf0\x44\xd3\x55\xde\x07\x34\x28\xa3\x1c\x7d\x51\x4b\x39\xfa\xdc\x7e\x61\x7f\x6f\xbf\xac\xaa\x06\x7d\x62\x3f\xb5\x9f\x95\x28\xd7\x50\x5e\x32\xc9\x57\x9e\x0f\x27\x08\x23\x00\x31\x02\x7e\xc0\x8e\x38\xe9\x66\x84\xc9\x18\x36\x6d\x19\x55\x6a\x3d\x87\x0b\x59\x5e\xa6\xac\x32\x35\xaa\x1b\xb6\xa9\x40\x0d\x44\x7f\x23\xd7\xc3\x7f\x67\x24\xec\x29\xe9\x77\x21\x96\xc1\x15\x55\xbc\x0e\x64\xe2\x2f\x91\xfb\xf7\xd6\xc6\x71\x88\x3c\xc4\x4d\x46\x73\xc8\xcc\xc6\xb2\x2a\xa6\x91\x3b\x9c\x05\xd7\x08\x77\x44\xc6\xab\xce\xb5\x4b\x27\x4a\xa5\x19\x4a\x90\xc1\x4c\xe6\xf6\x90\x10\xfc\x99\xc1\x23\x5e\x4e\x55\xbc\x38\xd6\x52\x34\x33\x52\xe2\x7a\xce\xfb\xf9\x78\x8c\x88\x20\x16\x16\x99\x04\xd7\x42\x3d\xfa\xc7\x04\xf9\x42\x26\x95\x63\x19\x34\x89\x2a\x27\xa0\x54\x46\x49\xad\x61\x02\x46\xc3\xc5\x0c\x01\xb9\xc5\x40\xe5\x1a\xcb\xf2\xdb\xe6\x63\x1d\xa1\xeb\xb7\x72\xaf\xf2\x45\x13\xcb\x25\x1f\x7c\xf7\xd3\xbc\x4c\xfe\x1b\x25\x59\xeb\xb3\x5b\xa2\x91\x59\x1d\x60\xb8\xaf\x5e\x68\xea\x27\xeb\xbb\xea\x77\xd6\x77\xa0\xb5\xe7\x79\x0a\x3f\x48\x7b\x09\x55\x4e\x64\x4f\x7c\x61\xd0\x89\x55\xa0\x8d\x2f\xec\xef\x73\xeb\x04\x26\x9d\xc8\xf7\x27\x41\x40\x10\x80\x40\x56\xf9\x92\x8b\xb1\xc1\x35\x76\x29\x02\xae\x0f\x20\xb8\x98\x53\xc6\xad\x25\xbf\x08\xb0\xfc\x08\xfa\x0b\xf5\x14\x88\x80\x89\xaa\x19\x15\x9a\x38\xff\x5a\xf6\xf1\x26\x89\x80\xd6\xed\x7d\xa2\x04\x26\x68\xdd\x32\x72\x90\x9a\xe2\xb2\x2d\x4b\x13\x86\xe7\xb7\x91\x30\x3c\x5f\x8e\x30\x3c\xbf\x83\x64\xa1\xb1\xe0\xc4\xbd\xb0\x4a\x40\xed\xf0\x44\xc1\x1c\xdd\xaf\x00\x45\x05\x96\x66\x43\x14\x23\x60\xdf\xf7\x20\xc5\xe5\x6e\xab\x11\x76\x67\x39\x68\x59\xe7\xa6\x8a\xba\x2c\x48\x11\x61\xc0\x0f\x2d\x03\x83\x44\x8f\x44\x87\x29\x1c\xd1\xd7\x15\x5f\x8f\x32\xf6\xcf\x62\x37\x99\xa1\xbf\x15\x6c\xfa\x2d
\x23\x67\xe6\x27\xf7\x29\x56\xee\x21\x04\xae\x60\xa1\xf7\x39\x04\x2e\xe5\x70\x7d\xfb\x63\xde\xc2\x8a\xed\x0f\x31\x6f\x0f\x8e\xb4\x0f\x31\x6f\xb7\x2f\xe6\x2d\xa9\x4e\xad\xa5\x88\xdd\x4c\xfc\xdb\xdb\xde\x4f\x57\x17\x47\xbf\xfc\xa7\x7e\xb9\xea\xff\xaa\xc0\xb6\x68\xbf\x72\xc2\xdb\x0e\xf5\x8f\xee\x50\x88\x9b\xe6\xcb\x52\x44\x33\x0f\x1c\x97\x46\xeb\x5c\x22\x90\xad\xa8\x8f\x87\x60\xb6\x3b\x56\xb7\xdd\x10\xcc\xc6\xb3\x7c\x2c\x13\xc8\xc6\x3a\xd8\x48\xf9\xf6\xc4\x49\x2a\x1f\xc4\xc6\x66\xfc\x10\xc0\x76\xbb\x02\x5f\xb4\xbb\x76\xc3\xe1\x2f\x89\x5b\xbf\x16\xbb\xb0\xa1\x50\x98\xcb\xf9\xf0\x88\xbe\xf0\x8f\x57\x1a\x0a\x53\x27\x06\xe6\x2c\x54\x79\xba\x81\x2f\xe2\x5f\xd6\x1d\xec\x12\xdd\xfb\xb7\x3b\xd8\x25\xc2\xa2\xff\xc6\x90\x97\x74\x1d\x65\xbf\x46\xe0\x8b\xd8\x9b\xaa\xa1\x2f\x7a\xf1\xdf\x87\xe0\x17\x06\x73\x1c\x08\x2a\x70\x89\x16\x61\xfd\xe4\xb3\x14\x9a\x0a\xc9\x41\x37\x9d\xdb\x9b\x0b\x96\x11\xb3\x28\x25\x44\xc7\xda\xfd\xbf\xff\x5b\xbd\x4d\xac\x42\x75\xd5\xc6\xc7\x18\x8d\x90\x83\xfc\xb4\x01\xf8\x4e\x45\xe9\xa4\xf0\xa0\x7c\xac\x4e\xdc\x7d\xa4\xd1\x88\x9d\x94\x29\x45\x4c\x53\xab\x15\x1b\x9a\x95\x0a\x72\x51\x83\xe2\x50\x9c\xc8\x42\xb5\xa2\xa8\xa3\xac\x39\x45\x53\x30\xd8\xdf\x1b\x50\x7d\x55\x9f\x87\x8e\xd4\xd5\xa6\xb0\xe1\x30\x20\x5b\x57\xfc\x26\x02\x82\xec\x72\x8a\x60\xb7\x49\x45\xf0\x43\x08\x51\x46\xb3\x95\x86\x10\x55\x26\x66\x77\x39\x90\x28\x03\x39\x5e\xae\x39\x86\xa3\xfc\x97\xe5\x99\xab\x97\xeb\x8b\xd1\x28\x9a\xbe\xa0\x8d\xdf\x97\xb0\x46\x54\x03\xcf\xca\xac\x13\xc9\xf0\x81\x2a\xd6\x89\xa5\x26\xb7\x62\xa7\xf6\xcd\xbb\xb4\x97\x43\xa2\x98\x6b\xbb\x46\x90\x2a\x49\xad\xeb\x75\x72\x4f\x2a\x3e\xca\xab\x4c\x2e\xaf\x36\xe5\xe2\x8e\x9f\x8e\x5e\x07\x2f\x9f\xbc\x5b\xc2\xc5\xbd\x21\xe7\xf5\xb4\xc6\x7f\x89\x64\xd4\x4b\xf8\x10\xfd\x86\x16\x4d\xf9\x0e\xfd\x86\x16\x20\xc0\xe0\x42\xb8\xea\x54\xf1\x1e\x8a\x2a\x10\x24\xea\x16\xa4\x3d\x8b\x62\x0e\x44\xcf\x98\xd0\x7b\x41\x3b\x14\xbb\xd3\xe4\x02\xe5\xc2\x62\x26\x6d\xf5\x34\x71\xab\x69\x35\x72\x1d\xc7\x65\x38\x08\xbd\xe8\xf6\x14\x39\xce\xab\xfa\x16\xa5\x1c
\x97\x52\xd0\xa9\xe6\x7d\x34\x0c\x80\x40\x10\x00\x65\x1f\x36\x40\xbe\x03\x20\xb8\x44\x0b\x70\xed\xd2\x89\x6a\x36\x0a\x1c\x5d\x35\xb4\xdd\x90\xc1\x2b\xe5\xe4\x13\xf9\xed\x2d\xbb\x0d\x56\xdc\x2d\x57\x73\xfc\x60\x3d\x43\x42\xcd\x9d\x86\xcd\xb7\xcb\x39\x81\xa4\x0f\x50\xca\x7d\xca\xb0\x0b\xd9\x19\xdf\x83\xf1\xd8\x43\xf9\x7c\x81\x01\xd7\x25\x3e\x73\x86\xfa\x3c\xb8\xb1\x62\xf8\xf7\x17\x89\xdc\xda\x32\x6b\xa3\x8b\x8f\x36\xe6\x08\x99\x3e\xf8\xfb\x12\xe3\x6a\x96\x9a\x6a\x82\x8c\x7d\x14\x49\xf1\xab\x12\xb2\xec\x35\x7d\x54\x59\xf6\xf3\xea\x07\xc8\x9d\xa8\xa6\x8b\xc9\x2b\xb4\x90\x55\x59\x01\xd2\xe0\x3c\xb9\x74\xb5\xe4\xb2\x2e\xed\xd5\xd4\x97\x15\x14\x4b\xe8\x86\x42\x8c\x42\x79\x43\x23\xe6\xc5\xd9\xcd\x53\x14\x38\xac\x6e\xb0\x0c\xf1\xd5\xca\x7a\x95\x81\x5c\xdd\xb4\x26\x66\xcc\x2e\x4f\x55\x33\x1d\x61\x6a\x31\x06\x39\xec\xf7\x5a\x2b\x1d\x34\x76\x1b\x34\xea\x13\x9a\xe3\xdb\xb6\x9c\x8f\xe8\x92\x60\x5f\xa9\x7f\x68\xa5\x45\x17\x2c\xa6\x96\x47\x68\xce\x04\x84\x10\x07\xc4\x01\x26\xc6\x33\xd0\xac\xcb\xe6\x25\x5a\xac\xc0\x59\x73\x65\x12\xf9\xdd\x70\xd4\x2c\xa7\x1e\xb1\x4a\x3b\x68\x16\x32\x0b\xb7\xc7\x31\x73\x65\xea\x8e\xb5\x3a\x65\x36\xec\x76\xa9\x44\xfb\x4a\xca\x80\xcd\xb8\x59\xf6\x7f\x9d\x92\x03\xff\xfc\x66\xa9\x70\xf7\xc8\x07\x52\xcb\x2d\x56\xd7\x21\xf3\xf2\xea\x3e\xf8\x63\x5e\x5e\x75\x15\x20\x4c\xba\x63\x26\x07\x6f\x03\xce\x82\x19\x7c\x31\xad\xec\x84\x49\x26\x9e\x21\x25\xfb\x99\xa0\x14\x86\xe0\x3d\xb3\x2d\xe2\xf1\x4c\x04\xa7\x3d\xbb\x23\x19\x44\x32\xf3\x5c\x5a\xba\xff\x0a\xc5\x06\x6a\x41\x4e\xaa\x07\xc4\xdb\xbf\x02\x97\x51\x07\x6b\x5b\x31\xc0\xb3\x19\x8a\xaa\x06\x45\x4b\x11\x2f\x1d\x27\x8a\x3c\x3d\x3b\xb3\xfb\x31\xb9\xa0\xe2\x2a\x53\x4c\x58\xa6\x49\x37\x1d\x57\x98\x1b\x28\x17\x65\xb2\x2f\x9d\x4b\xfd\xb6\xb9\xe4\xea\xdc\x6a\xd9\xa2\x13\xcb\xf2\xc4\xcd\x54\xa8\xd0\x8e\xde\xaa\x5d\x7b\x4b\x7b\xb3\xf4\x78\x31\x7f\x42\xdc\x0a\x92\xb4\xce\x74\x68\x17\xac\x1f\x50\x77\x84\xc0\x35\xc4\xbe\x1b\x59\xec\x53\x26\xfc\x9e\x6d\xfd\x21\x3e\xd1\x4b\xd9\x81\x21\xe3\x11\x7f\xfb\x08\x26\x90\x00\x08\xd8\x62
\x80\x9c\x57\x17\xfc\x3b\x98\x83\x11\xf4\x39\x75\x06\xbf\x7d\xfc\xdf\x6f\x89\xd0\xb8\xe9\x5f\x11\x1b\x9c\xcf\x29\xb8\x46\x00\xa3\x51\x30\x9d\x22\xdf\x01\x4e\xe0\xfa\x63\x40\x02\xf1\xf5\x08\x62\x11\x0b\xe8\x07\x34\x7a\x05\x29\x80\x9e\xd7\x05\x87\x14\x4c\xe1\x02\xf8\x68\x0c\xa9\x7b\x85\xbc\x05\x70\xa7\x33\x38\xa2\x80\x4e\x10\x60\x9c\xc2\x15\x02\x7e\xe0\x20\xe0\x52\x36\x3e\x24\x24\x18\xb9\x90\x22\x87\x77\xde\x05\xef\x11\x02\xe7\xc8\x0b\xae\xc1\x45\x80\xc1\x34\xc0\x08\x38\x88\x42\xd7\x23\x20\xf0\x79\x27\x6f\xd9\x6c\xdf\x8b\xd9\x02\xe8\x3b\x80\x20\x54\xe8\xa6\x56\xbe\xb0\xa5\xeb\x53\x84\x7d\xe8\x91\x6d\x05\x11\x5e\xe0\x32\xac\x66\x69\x28\x54\x19\xd6\xb8\xcc\x28\x6f\x29\xb7\x2b\x98\x63\xe0\x04\xa3\x39\xc3\x87\xa4\xdf\x4d\xb4\x5a\xd7\x17\x45\xb9\xd9\x8e\x55\x96\xc1\x41\xdc\x2f\xf5\xf2\xca\xe4\x8f\x9a\x24\x08\x35\xf1\x36\x57\xeb\xa8\x53\xa4\x9d\xbc\xdb\xa3\xb1\x0d\xfa\x1f\xf9\xab\xe3\x20\xe2\x8e\x1b\xab\x3e\xaa\x63\x5b\xb6\x0a\x3a\x09\x9a\xf2\x21\xee\x8e\x6e\x50\x29\x4e\x96\xac\xb7\x0c\x1d\x5c\x54\x3d\x20\xb5\x91\xe9\x92\x40\xe5\x3a\xd4\xa7\xb2\x37\x16\x74\xaf\xea\x5c\x4a\xd9\x56\x8b\x79\x08\x46\x22\x48\x97\x4c\x82\x6b\x2b\xbd\x38\xae\xa7\xcd\xb8\xcb\xcd\xdf\x56\x00\x44\x75\x50\xe5\xd5\x3f\xad\xb0\x67\x22\xae\xa1\xfe\x8e\xfd\x84\x26\xf0\xca\x0d\x72\x4c\x56\x15\x26\x13\x75\x66\xd6\x2b\x66\xd1\x10\x2e\xb9\xc3\xf2\x9c\x45\xde\x82\x64\x4f\x0d\xac\x86\x1d\x62\x35\xaf\xb2\x49\xa7\x53\x26\xa3\x78\x97\xc3\xe1\xdb\x46\x16\xc9\xfb\x69\x60\x89\x62\x3e\xb5\x17\x17\xf7\xc6\x8d\xba\xdd\x9f\xa0\xd1\x65\x5d\xd7\xe8\xa2\xb5\xbf\x41\xd0\xa3\x13\x20\xc7\x58\x05\xad\xe9\xc7\x25\x10\x1b\xa4\xa9\x89\x1a\x3d\x83\x49\xcd\xea\xb9\x76\x1e\xd7\x15\xa9\x08\x5d\xa1\x73\x85\x4a\x4d\x28\x17\xb8\xa2\xb8\xee\xc4\x96\xac\x4c\x6d\x18\xad\x4a\x5d\xc1\x76\x8a\x67\xd1\x08\x71\x04\x83\xc4\x8d\xbd\x96\x48\xef\x04\x50\x6a\xf9\x5a\x95\x89\xfc\x36\x63\xd2\xda\xb7\x28\x4b\xe1\x18\xee\x42\xa1\x67\xf9\x52\x53\xdc\xb0\x0a\xb2\xc0\x8a\xd6\x68\xec\x5a\xed\x68\x30\xa9\x94\xac\xa4\xc5\xdc\x50\xf4\x97\x3b\xfc\x18\x3c\x77\x4e
\x7a\x6b\x89\xfe\x6a\x5e\xe1\xb9\x4c\x55\xa4\x46\x14\x9e\x8d\x6b\x1c\x1b\x52\x05\xd6\x54\x9f\xe6\xf2\x2c\x19\x9a\xd0\x9d\xfb\xaa\x09\xed\xf7\xd7\xa9\x0a\x95\x89\xf0\xcb\xea\x42\xfb\x3d\xbb\xdf\xbf\xdd\x5a\xcf\x78\x6a\xb6\x65\x4f\x02\xfb\xaf\x58\x05\x59\xd1\x8d\x86\xc2\x4b\xee\xaf\x2e\x7e\x39\x38\x98\x45\xbf\x30\xba\x42\x98\x84\x65\x33\x6a\xe1\x77\x05\x15\x6c\x63\x9b\xb8\xf6\x60\x4c\x89\x0c\xca\x8d\x55\xe3\x18\x44\x30\x5f\x47\xcc\xc6\xc8\x94\x69\x7b\x71\x81\x11\xa2\xe8\x86\xa6\xa2\x38\xb5\xb0\x15\xe5\xe1\xa4\x3b\x47\xe6\xa4\x3e\x48\xc4\x5e\xc6\x42\x0c\x55\x44\xa7\xf5\x9e\x87\x86\x82\xf3\x85\x48\x38\x9a\xf6\x70\x6a\x86\xd1\x58\x3e\x83\x45\x53\xf7\x4a\x91\xd2\x36\x45\x4c\xe3\x2e\x31\x66\x23\x43\xc3\x91\xc6\x4d\x4e\x1f\x07\x01\xed\xac\x32\x2e\xfa\x6e\x06\xea\x6a\x55\xea\x18\x79\x0b\x30\xed\x9c\x2f\x2c\xce\x48\xfd\xcc\x4f\xd6\xae\x83\xc8\x48\xec\xf3\x2e\x64\x7f\xa5\x23\x74\x15\x34\x37\x56\xd5\x6e\x5d\x91\xaf\xe6\x44\xaf\x32\xd2\x4b\x01\x8c\x4f\x46\x71\x1e\x0c\x4e\x46\xd7\xc2\x25\x03\x68\x0b\x86\x8f\xb3\x3f\xf2\x27\x12\xf1\x66\xca\xc3\x57\x34\x33\x3a\xb3\x6b\xbe\x8f\xd8\x1d\x4f\x34\x9b\x63\xd2\x08\x19\xeb\xa5\xc0\x27\x2f\x24\x4e\x95\xa3\x5d\xef\x7e\x88\x67\x43\x8e\x63\x0f\xc1\x9d\x19\xcd\x56\x16\xdc\xb9\xb6\x83\x96\x3d\xc6\x47\x2e\x03\xab\x70\xd1\xb2\x27\x28\xff\x44\xad\x11\xac\x0f\xb1\xa3\x0f\xb1\xa3\x0f\xb1\xa3\xb7\x2d\x76\x34\x16\x2c\xaa\x49\xd4\x60\x06\x5d\x4c\x4a\x38\x3f\xac\x37\x50\x54\xe9\x46\xcb\x6b\x53\xb9\x11\x7b\xbb\x33\xe1\x66\xae\x91\xb4\xa4\xad\x5b\xb1\xfa\xfb\x47\xef\xaf\x17\x3f\xfd\xfe\xd2\xac\x58\xe5\xb3\x32\x06\x83\x26\x6f\x41\xcb\xa0\x7b\x0b\x59\xf5\xec\x22\x95\x01\xa6\xc2\xc2\xf6\xd3\xe2\x70\xca\xd6\x0d\x45\xd2\x18\x5d\x0e\x96\xda\xdd\xa4\x25\x2e\x8f\x52\x69\xe2\xba\x06\xde\x0e\xfb\x72\x4e\xb4\xfc\x88\x70\x2c\x72\xfa\x84\x35\xf0\xe5\xc1\x53\x1f\x32\x71\x16\xb1\x7f\x83\x39\x95\xc1\x6d\xec\xc6\x49\x95\x64\x0e\xd3\xbe\x88\x76\xb9\x4f\x8e\x78\x97\xda\x83\x77\xb2\xef\x52\x41\x4b\x9a\x2e\xad\x49\x8f\x66\x03\x2e\x56\x46\x64\x4c\x37\xe0\xdf\xfc\xe6\x3f\x4f\xf6
\xe0\xf4\xe9\x95\x11\x7f\x4d\x88\x6b\x72\xbe\x31\x59\x9f\x35\x32\xfe\xbb\xeb\xbb\xd3\xf9\xb4\x30\x4d\x7d\x86\xe5\x59\xd7\x1d\x71\xdf\xad\x8e\x3f\x67\x4b\x0a\x85\x66\x1a\x4c\x83\x31\x86\xb3\xc9\x82\x31\xf2\xae\x74\x4e\x3a\x95\x5f\x73\xa4\xeb\xd9\xbd\x5e\xaf\xdb\xeb\x25\xd5\x3e\x53\x52\x38\xab\xac\x45\x21\xc7\x85\xfe\x9a\xd6\xc4\xc7\x5a\xcf\xb2\xe0\xcd\x1a\xf7\x0a\xde\x2c\xbf\xa8\xf4\x2f\xa5\xf4\x0d\x87\xea\xf0\x7f\x34\xba\x15\x4d\xe2\xcc\x30\x31\x13\x25\x69\x8c\x44\x60\x5a\xc1\x60\x28\xdb\xc8\x4a\x49\x1b\xb8\xde\xfc\x67\xcf\xbd\x8b\x73\xef\xa8\x84\xdd\xd0\x64\xe0\x6b\x54\x03\x5d\x5f\xff\x7c\xeb\xb4\xcf\xdb\x6c\x0f\x33\x54\xd0\x95\x14\x23\x1b\x50\x22\xae\x3a\x7b\x9f\x49\x21\x68\x2c\x87\x56\xd4\xe8\x98\xc3\xb8\x4a\x8b\x21\x1c\xa7\x62\x0c\x57\xa2\x77\xb4\xca\x25\x51\x2b\xa5\xbf\xb0\x9c\x51\x57\x11\x08\xe5\xd2\xa9\x98\x27\x05\xb6\x82\x64\x7b\xe1\x67\x05\x5a\x87\x62\x68\x3a\x89\x93\x39\x93\x7b\x50\x26\xea\x8f\x27\x89\x13\xe7\x62\x99\xd8\xbf\x24\x50\x53\xc6\x4a\x71\x52\xe4\x80\x7c\xcb\x9b\xf1\xbf\x33\x79\xc3\xe9\xa3\x54\x0a\xd4\x68\xd6\x0b\x66\xd5\x65\x76\x43\xfc\x2b\x4d\xc3\x1a\xb9\x4b\xc3\x61\xab\x5f\xa8\xc2\xb5\x7d\x03\x69\x98\x0f\xde\x7c\x7c\x7d\x79\xf4\xb6\x94\x23\x4e\xed\x5c\xcb\x79\x6e\xa7\x95\xd0\xbd\xe0\x5a\x50\xc7\x3c\x4a\xff\xaa\x41\x36\x3e\xe8\x9a\x6e\x8a\x7c\x1f\x7f\x53\x8b\x3c\xff\x72\xd3\xf7\x05\x7e\xd3\xc6\x7b\x25\xcf\x0b\xd9\xd4\xa0\xd8\xd9\xdc\xd4\xaa\xc8\xd3\xd7\xd4\x26\x99\xdb\xf6\xf6\xa4\x98\xed\xa7\xb4\x04\x1b\x49\x5b\x1a\x8f\x1b\x58\xeb\xd0\x46\xbf\xfa\xb5\xce\x20\xe1\xf6\xbe\xd6\xb1\x4d\x31\x12\x0d\x4e\x20\x9f\x2d\x68\xd4\x31\x3f\x87\x31\x28\x70\x8f\x5f\xca\xf9\xbd\x1a\x20\xee\xaa\x8f\x3c\x58\xb9\xd1\xc2\xec\x27\x6f\x70\xd1\x8e\x1c\xe4\x0b\x2d\x07\xab\x30\x24\x81\x5a\xc6\x24\x10\x3b\x78\xcf\x2a\x18\x94\x8a\x31\x6e\x2d\x7b\xf4\xa4\xc8\x51\xbe\x82\x19\xb1\xd6\x24\xcb\x18\x98\x9e\xd6\x32\x30\xe5\xda\x8a\x2a\x85\xc1\xdc\x16\x41\x41\x8f\xf7\x24\x3c\x2a\x97\x93\x8a\xa3\xc0\x41\x5d\xf0\xb3\x21\x48\xd7\x06\x57\x2e\xba\x5e\x63\x00\xf2\x92\x01\xad\x79\x51\xc8
\xe5\x85\xa3\xd8\xcb\x06\xc4\x24\xb5\xd0\x6a\x62\xd2\x86\x62\x15\x8e\xff\xbe\x98\x1e\xbc\xb9\xca\xa8\x6c\x97\x8c\x55\xe0\xb9\x8e\x2a\x96\x9b\xe1\x11\xf2\x1b\xa9\x34\xc3\x10\xbd\xa1\x22\x33\x9b\x2d\xc6\x32\x82\x94\x5d\x53\x65\x4a\xb1\x84\xc6\xc0\x74\x31\x16\x61\x2f\xab\x5a\x8c\x85\x44\x36\xc1\xbb\x58\x8c\x65\xee\x8b\x65\x2f\x96\xd8\x85\x58\xc9\x7a\x75\x1b\x45\x1d\x17\x25\xeb\x89\x92\x06\xf4\x6c\xeb\x83\x6a\x26\xb0\xb3\x50\x7e\x34\x96\x93\xce\x96\xf7\x32\x42\x69\x62\x25\xef\xb5\x99\xd7\xe7\xaf\x35\xe3\x2b\x72\x3a\x18\x11\x51\xee\xc5\x60\xad\x16\xb7\x87\x96\xce\x17\x8b\xda\x7b\xa3\x50\x80\x16\x56\xea\x82\xc0\x79\xc1\x7c\x24\x03\xe6\x13\x8f\xc3\xdf\x7b\x72\x18\xed\x51\xc4\xfd\x97\x40\xd0\x27\x35\x59\x7f\xf3\x9b\x4a\x84\xc7\x88\xc6\x2b\x42\xe2\x5a\x28\xfc\x26\x1b\x81\x25\xba\x31\x6a\x9f\x63\xd9\xb0\xad\x11\xf2\xbc\x8e\x07\x17\x81\x70\x53\x88\xaf\x51\x91\x9a\x19\xc2\x23\x76\xb1\x8f\x11\xeb\x6b\x3e\xf5\x89\x6a\x92\x07\x15\x99\xa0\x53\x34\x60\x0f\xfa\xbd\x5e\x25\x2f\xca\x0a\x48\x9e\x85\xda\x8a\x66\x1a\x9d\xe7\x93\x58\xdd\x37\x63\x75\x3f\x81\xd5\xfd\x34\x56\xc7\x64\xe7\x9e\x65\x47\xfe\x1b\xa5\x9c\x33\xf4\xea\x8f\x75\xa3\x64\xd2\x95\xc3\xe3\x01\x4a\x79\x78\x6b\x27\x3f\x2e\x43\xab\xcb\x99\x2d\xcb\x70\xc7\x62\x1f\x2a\xda\x01\xd7\xea\xa5\xa5\x31\x86\x55\xb9\x49\x89\x5e\xeb\xf6\xcf\x3a\xd8\xef\xfb\x8b\xc7\x73\x33\x33\x39\x83\x3e\x67\xf7\x97\x29\x97\x1c\x32\x94\x7c\x81\x77\x28\x05\x9f\x2f\x89\xa5\xc9\xff\x76\xcf\xf3\xe2\xc4\x74\xc9\x1b\x64\x75\x1c\xb5\xa5\xe5\xeb\x90\xb2\x41\x32\x2b\x4d\xde\xbc\x0d\x24\x96\xc2\xf3\x8e\x0f\xaf\xd2\x57\x04\x41\xec\xf6\x90\x0e\xf7\x22\x3b\x37\x4f\xbe\x15\xd2\x1b\x8c\x21\x8f\xac\x89\x67\xde\x08\xcd\xd7\x24\x9d\xd4\x37\xe6\x75\x62\x5b\x27\xc1\xdc\x77\xc0\x10\xbb\x33\x30\x74\x39\x05\x8f\x24\xd7\x98\xe8\x9c\x0c\x3f\x4d\xf6\xab\xa6\x3a\x84\xe7\xd1\x0d\x94\x78\x28\x29\x61\x27\xe4\x7a\x2a\xf3\xca\xb5\x62\x33\xee\x4f\xb5\xe5\x9d\xb2\xd5\x96\x9f\x64\x56\x5b\xd6\xae\x4f\x51\x72\xf9\xf0\x18\xc8\x4b\xbb\x6e\xe1\x65\xc3\x81\xd0\x46\xa9\x2b\x06\x69\x39\x1b\x36\x53
\x98\xf9\xf0\x18\xc8\x65\x3c\x94\x67\x5e\x83\xe4\xaa\x3b\x22\x67\x90\x39\x71\xa5\x40\xa2\x39\xd6\xb9\x8e\xc5\x27\x41\x5d\x51\x1b\x45\x06\xde\xcf\xc7\xee\x85\x81\x30\x86\xa4\x2d\xe4\x11\x62\xbe\xdc\x67\x8a\xeb\xae\x36\x46\x48\x6c\xd3\xdd\x87\x7e\x74\x61\xd7\x99\xa4\xb8\xc6\xc0\x49\xe2\x9d\x1e\x1f\x53\xe1\xf4\x95\x28\x65\x52\x6d\x18\xe3\x3d\x10\x5b\xa4\x6e\x71\x2f\xc1\xa7\x0a\x1b\x7f\x87\xa4\x24\x23\x47\xbf\xf3\x12\xea\x1b\x15\xc6\x64\x4a\xec\xdf\xc8\x3d\x64\xc7\x86\x28\xa2\xb2\xa9\xf2\x15\xa5\xc5\xaa\x9d\x70\xa0\x10\xe6\xec\x71\x9d\x0a\xe3\xfd\x4d\x24\xa1\x89\x98\xeb\x88\x21\xdf\x2a\x60\xc8\xd5\x39\xd8\x94\x86\xf7\xc5\x1f\xcf\x16\x6f\x8f\xfd\xdf\x6b\x7a\x95\xe6\x71\xe2\x72\x69\x9b\xd1\xee\x2a\xd2\xf3\xa0\xe0\xfd\xaf\x56\xf0\xae\xb7\xda\x76\xc2\xf7\xd3\xda\x58\x95\x6d\x26\x6c\x12\xba\xf0\x78\x74\xe7\xf7\xb6\x85\xd1\x14\xba\xbe\xeb\x8f\xff\x70\x1d\xfe\x45\x5c\xc4\x2c\xf4\xe9\x2d\x1c\x80\x4e\xcc\x3d\x33\xf9\x0f\x08\x56\x63\x89\xde\x0b\xa6\xbf\x36\x5f\x61\x60\xca\x51\x50\x61\xa2\xcb\x24\x21\xc8\xf1\x2d\x3e\x52\x49\x54\x72\x1c\x8b\x57\x52\xbf\xbb\x5c\xde\xdf\x92\x41\xe6\x96\x1e\x0d\xf3\x52\xc9\x7d\xb6\x75\x0c\x09\x91\xf9\xc7\x52\xf5\xc6\x66\xf2\x5d\x32\x82\xde\x4b\x3b\x10\x85\xdd\x68\xe9\xa5\x2c\xf0\x37\xc2\x81\xa9\xec\xd8\x1b\x8d\xfb\x05\xda\x0c\x4a\x07\xc6\x3a\x4e\xc9\x65\xd4\x9a\xb7\x3e\xed\x90\x47\x4b\xe0\xa2\x84\x8d\x01\x13\x63\x51\x41\x7f\x46\x51\x41\x19\xe3\x56\x0f\x83\xcf\xda\xca\x3f\xa2\x4c\xef\xa9\xad\x54\x59\xe0\x4b\x80\x24\xec\xa6\xfa\x56\x6a\x33\xa8\xbf\x95\x59\xcb\xa8\x35\xef\x32\x5b\x29\x61\x53\x77\x2b\xa3\x71\x9b\xdb\xca\x7d\xec\x52\x77\x04\xcd\x65\x00\x47\xea\x65\x09\xa0\x44\x1d\x55\xdf\x4d\x7d\x12\xf5\xb7\x33\x73\x29\xf5\xa6\x5e\x66\x43\x15\x80\xea\xee\xa8\x36\x72\xb5\x2d\x2d\x26\xee\x25\x49\x7f\xf5\x9b\xcf\xba\x47\xc1\x1f\x69\x60\xad\xd8\xaf\x0b\x94\xd2\x76\xe5\x06\x82\x94\x9b\x73\x0d\x5e\xbd\x11\xff\xa8\xb8\x1c\x5c\xde\xa0\x15\xb6\xdb\x8c\x4d\xeb\x97\x43\xba\x7d\x79\x31\xbe\x5c\x9d\x83\x94\x92\xa2\xef\x9a\x49\x8b\x24\x75\x7f\x69\xab\x96\xa6\x1e\xbc\x73
\x86\x2d\x25\x32\xd9\xa6\x90\xbd\xc6\x16\xf1\xa0\x77\xb8\xa3\x7a\x87\x14\x96\x2c\x71\xbf\x95\x90\xa3\x2c\x53\xbd\x85\x94\x54\x6c\x6a\x51\x22\xec\x35\xb3\xb4\x80\x79\x91\x65\xea\x0b\xac\xca\x11\xea\xde\xf9\xf3\x99\xf6\xa3\x44\xca\x9a\xfb\xe1\xc8\x97\x2a\x80\x63\x7e\x9b\x7c\xfc\xe0\xdd\xf7\xe0\xdd\xb7\x39\xef\x3e\x95\xfd\xaa\xa6\x9b\x9f\x11\xe7\xd3\x6f\x93\x8f\xb3\x7d\xff\x6e\x85\x37\xe0\x5a\x1d\xe3\x62\x22\x41\x49\x49\x42\x3a\x9f\xac\x5b\x7c\xf8\x95\x1c\xfd\x71\xbd\x7f\xf4\xae\x5c\xca\xa7\xa7\xc2\x3e\xed\x8e\x02\x3c\xeb\x88\x55\x24\x4c\xc4\xce\x88\xf0\xff\x73\x72\x7b\x8d\xe1\x6c\x26\x32\xf2\xf7\x38\xd6\x69\x8c\x16\xfb\x24\xc3\x0e\x91\x27\x8c\x70\x30\x95\x17\x45\xea\x84\x90\x17\x97\xf5\x54\x8e\x42\x21\x8b\x58\x4d\x79\xaf\x18\xf9\x54\x37\xfa\x29\x00\x2d\xd3\xa7\x61\xec\x65\xec\xdb\x76\x03\x89\xf1\x8b\x47\x29\x95\xc3\xbe\xf2\x85\x53\x89\xd9\xad\x55\x4a\x7c\x9f\xe3\x29\xc0\x88\xce\xb1\x8f\x1c\x00\x7d\xc0\x17\xda\x35\x7c\xfb\xef\x60\xce\x6b\x6a\x4e\xe0\x15\x02\x57\x2e\x71\x29\x6b\x00\x3e\x9c\xbc\x05\x74\x02\x29\x70\x09\x90\xd5\x48\x58\x37\x73\xff\xd2\x0f\xae\x7d\xa0\x68\xb3\x0d\x48\xc0\xa3\x60\x47\xd0\x07\x14\x2f\xc0\x98\x97\xec\x3c\x87\xa3\x4b\x40\x03\x5e\x51\x13\x07\x01\x35\x0d\x7c\x78\xc1\x1a\x62\xb0\xb7\xff\x16\xd0\xe0\x12\xf9\xe0\x1a\x12\x5e\xf6\xf3\x82\x7b\x92\xa8\x5e\x79\x4d\x7d\xe0\x52\x9b\x97\xe3\xa4\x13\xe4\x8b\xb0\x5b\xd7\xf3\xc0\x39\x02\x18\x39\x2e\xe6\xfe\x14\x6a\x40\x82\x28\x75\xfd\x31\x01\x33\x38\x46\xec\x21\xf2\x29\xc2\x00\x02\x1f\x5d\x47\xa3\x99\xa6\x34\xc4\x0b\xe0\x05\xc1\x25\x5b\x82\xeb\x03\x36\xbb\x2c\x3b\x51\x9d\xc8\x3b\xae\x64\x29\xa8\xf6\x29\x37\xb5\x20\x86\xce\x8c\x77\x06\x44\x29\xd0\x19\x28\x50\x65\x28\x0c\x4e\x38\xe8\x43\x90\x95\x1b\x2d\x0a\x22\x0c\x44\x2e\x8a\xec\xe1\xf3\x4a\x60\xff\x12\x84\x58\xc4\x30\xe8\xf6\xdd\x7a\xe2\x44\x95\xbb\xea\x54\x3d\x9f\xb5\x5f\x76\xe3\xcb\x4f\xbd\x97\xaf\xaf\x51\xa9\xcb\xae\x3e\x2c\xe4\xfa\x4a\x42\xc3\x0f\x28\x3f\xe2\xeb\x07\xc7\xf6\xfe\xaf\xbd\x9b\xf1\xc8\x59\x31\x38\xd4\x02\x4b\xc2\x23\x3c\x85\x6b\x87\xc7\x3f\x7b
\x3d\xf8\xee\xef\xe1\xf1\x5d\xe1\x85\x42\xd2\xbe\x56\x76\x28\x79\x4d\xbc\x97\xb3\xc8\x25\xc6\x1b\x67\x02\x86\x13\x44\xb4\xcb\x10\x7a\x5e\x70\xcd\x6f\x4e\x1a\x00\x9e\xed\x62\x2c\x33\x58\x60\x70\x8e\x83\x6b\x82\xb0\x0c\x48\x47\x8a\x7f\xf8\x03\x9d\x83\x0f\x87\x5d\x70\x70\x85\xf0\x42\xb8\xf0\xba\x04\x10\x78\x25\xee\x5a\x2f\x18\x41\x8f\xd0\x00\xc3\x31\x12\x97\xf3\x0c\x61\xe2\x12\x4a\x00\x9d\xe0\x60\x3e\x9e\x08\x7e\x82\xf0\x77\x6a\x8c\x39\xe3\xb3\x92\x97\x6f\xe1\xcd\xa2\xb2\x1e\x16\x6a\x69\x2e\x5c\xe4\x39\x04\x15\x64\x5d\xd3\x1a\x78\xf0\x5c\x64\x22\x48\xe6\x35\xa0\xe8\xa6\x44\x2f\x5a\x4f\x64\x06\x7d\xad\x01\xbb\x36\x87\xf9\xd7\x66\xb2\x13\x95\x29\xdf\x9f\x31\xa1\xdb\xd7\x03\x03\x54\x5a\xc6\x33\xe9\x81\x71\x1d\x60\x27\xa1\x13\x94\x77\xb4\x41\xb8\x2c\x98\x38\xd2\x41\x3b\x9c\x20\xc9\x8e\xf1\x1c\x26\x3e\x15\x75\xd9\x31\xfa\x34\x47\x6c\x6b\x21\xe1\x28\x22\x9b\x8e\xb8\x68\x1c\x36\xfe\x57\x47\xa0\x4e\x27\xb9\x70\xf0\x66\x38\x3c\x06\xe2\x30\x82\xf0\x54\x74\x45\x41\x79\x97\x80\x39\x11\x38\xc5\x0e\x02\x0e\x3c\x3e\x04\x03\xa0\xc2\xc8\x6b\x81\x8a\xc5\xa0\x6c\xc0\xd8\x6a\x4a\x7f\x41\xe6\xe7\x53\x5e\x9d\x20\x23\xc9\x85\x35\x9f\x39\x5a\xf1\x20\x61\x75\xd2\x6d\xe8\xef\xe1\x55\x41\x62\xae\xdb\xc5\xdd\x28\xba\x91\x7b\x85\xcd\xa9\xeb\x91\x6d\x48\x46\x68\x45\x97\x79\xd4\x8d\x4d\xdb\x9f\xaf\x20\x06\xfe\x00\x75\x79\x8d\xb2\x96\xb5\x6d\xb5\xb7\x84\x88\x03\xfc\xae\x50\x48\xfd\x48\x5f\xf9\x5d\x5e\xe9\xae\xd5\xb3\x3b\xb4\xdd\x1d\x05\xfe\x08\xd2\x96\x65\xb5\xbb\x7f\x05\xae\xcf\x1b\xed\x5a\x6c\x41\xd9\xcb\xa1\xc1\xb9\xbe\x18\xdb\xf2\x67\xd3\x5d\x9e\x34\x15\xf9\x0c\xdf\x71\xc7\x73\xb9\x37\x22\x7b\x7e\x0e\x09\x7a\xfe\xb4\xf3\x57\x7c\xd1\x36\xb5\xfd\x86\x16\x2e\x96\x8d\x07\x10\x8f\xb9\x44\x40\xd4\x52\xfb\x8f\x1e\x5d\x05\xae\x03\x7a\xdf\x0c\xa2\x97\xa7\xfd\xb3\x57\xfa\x8f\x5d\x6b\x4e\x2f\x3a\xdf\x5b\xb6\x37\xf0\x55\xe7\x5d\x1a\xfc\xb4\xa0\x68\x0f\x63\xb8\x68\xa1\x08\x86\xe8\xba\x35\x44\x37\xf4\x35\xe2\x8b\xfc\xf2\x85\xaa\x06\xed\x16\x6e\x77\x1d\xfe\xb8\xe5\xb5\xf3\x60\x77\x4e\x03\x78\x2f\x61\x27\x61\x73
\xe0\x1b\x61\x23\xd6\xa6\xc3\x32\x84\xf5\x05\x0e\xa6\x11\xb4\xf3\xa1\x37\x82\x9e\x07\xcf\x3d\xd4\x11\x44\x67\xa5\xe7\xa9\xfd\x59\x4c\xd5\x52\x8f\xac\x6f\x06\x6c\xd8\xe0\x02\xa0\x57\xe1\x67\xea\x2b\x80\xbe\xee\xa2\xdc\xa9\xab\xda\xa7\x2b\x9e\x34\xdb\x4f\x5a\x77\x3f\xe5\x24\xd5\x26\x09\xce\xf8\xe4\xfd\xc7\xe3\xee\x31\x0e\xa6\x2e\x41\x5d\x8c\x48\xe0\x5d\xa1\x16\x6d\xa1\x76\xfe\x56\xf1\x9a\x69\x4c\xc0\x5f\x17\xd9\x4b\xad\x79\xc7\xb8\xe6\x1d\x7d\xcd\x3b\x67\xbb\x9f\xbf\xda\x06\xfc\x7f\x62\x6c\xfb\x44\x6f\xfb\xe4\x6c\x57\xa0\xf5\x87\x93\xc3\xfd\x60\x3a\x0b\x7c\xe4\x53\x09\xba\x53\xa4\x08\x2b\xd5\xe8\xaa\x2d\x57\x7c\x89\x16\xa4\xe5\xb7\xbb\x53\x38\x6b\xa5\x11\x0e\xb0\x8b\xe9\x9b\x81\x7f\x8a\xce\x5e\xe1\x16\x6a\x3f\xb6\x06\xd6\x63\xdc\x62\xbf\xdb\xbb\xe8\xab\xea\xef\x91\xd5\x3e\xeb\x0a\x6b\xb3\xa1\x13\xcb\xfa\x66\x30\x88\x3e\x7e\x65\xe5\x6e\x96\xe3\x12\x51\x18\x62\xf5\x7b\x85\x06\xa8\xbb\x1f\x04\xd8\xb1\xe9\x80\x8a\xbf\xb6\x2e\x02\xdc\x12\x7b\xd8\xb3\xf1\xa0\xf7\x03\xfe\x07\xea\x7e\x44\x23\xb9\x17\x3f\xe0\xc7\x8f\xc5\x1e\x7b\x03\xfe\xfc\x14\x9f\x75\xa8\xfc\x63\xcb\x7f\x3c\xf0\xbe\xf3\xbe\xb2\xd7\x64\xf0\x3b\xa4\x93\x2e\xf9\x84\x69\xcb\x6f\x3f\x46\xdd\x37\xc8\x1d\x4f\xe8\x63\x2a\xff\xb0\x83\x01\x79\x8c\xba\x7b\xce\x5f\x73\x42\xd9\x1e\x3e\xa6\xda\x0f\x85\xf4\xc1\x8f\xbd\x47\x8f\x5a\x64\x10\xb4\x6d\xde\x1d\x66\x72\x6a\xab\x8f\x9e\x7d\x47\xda\xdb\xfd\x5e\x2f\x0f\x8e\xab\x53\xd8\xeb\xdd\xb0\x51\x03\x0f\x75\xf9\x68\x2d\x94\xbb\xb3\x63\x44\x3b\x23\x85\x9c\x9d\x0b\x38\xa2\x01\x5e\xdc\x6e\x22\x64\x71\xe1\xb6\x83\xd1\xd8\x25\x14\x2f\x76\xa7\xd0\xf5\x2d\x9b\xb1\x35\x5e\x10\x5c\xce\x67\x2d\x1a\xde\x22\xe9\x31\x51\x77\x8c\xe8\x1e\xa5\xd8\x3d\x9f\x53\xd4\x62\x42\x77\x7b\xcb\xbd\x68\xd1\xb6\x3a\x5c\xa7\xf4\xec\x6b\x1e\xc4\x26\x90\x48\x2b\xd4\xba\x08\x57\x6f\x8b\x5c\xbb\x74\x34\x69\xd1\xf6\xe7\x11\x24\x28\x74\x20\xdf\xe5\xbf\x42\xbf\x55\xf1\x53\xb9\x24\xef\xfa\x52\x73\x31\x46\xb4\x85\x24\x25\xf8\x69\xd1\x52\x36\x34\x9b\xb6\x43\x2b\x64\x7b\xeb\x1c\x23\x78\xb9\xc5\x3b\x60\x2d\xfb\x5f\x15\x38
\x7e\xcc\x45\xe8\x09\xa5\xb3\xed\x29\xa2\x93\xa0\x41\xf6\x75\x0b\x75\x8f\x3f\x0c\x07\xd6\xf1\x87\xa1\x65\xa3\xee\xeb\x83\xb7\x07\xc3\x83\x81\x25\xfe\x65\x4f\x8e\xdf\xbd\x67\xaf\xdf\xbd\x1f\x5a\x45\x73\x23\x0d\x6f\xd5\x16\xea\xbe\xfb\x6d\xb0\xd3\xeb\xd9\xa8\xfb\xe1\x68\xef\xc3\xf0\xcd\xbb\x93\xc3\xff\x1c\xbc\x1e\x3c\xed\xf5\x6d\xd4\x3d\x3c\x1a\x1e\x9c\x1c\xed\xbd\xfd\xf3\xfd\xc1\xc9\xc7\x83\x93\x3f\x0f\x4e\x4e\xde\x9d\x0c\x9e\xf5\x7a\x39\xf3\x74\x7d\x36\x05\xc6\xb6\x9c\x08\x11\x71\x18\xfc\xfa\xcf\x39\xc2\x8b\xbd\xbf\xe0\xcd\x1b\x1e\xda\xb9\x16\x46\xe6\xf3\x9f\xd8\x3c\xfe\x6e\xf8\x6d\x88\x92\x9f\xbf\x6e\xf9\x5d\xc6\xed\x0c\x68\x57\x6c\xbf\xed\x77\xe7\xd8\x1b\x50\xf6\x7f\xdb\xef\x3a\x90\xc2\x21\x7b\x6f\xfd\x45\xb8\x3d\xbd\xcb\x35\x34\x37\x74\x40\x27\x2e\xb1\x29\xff\xe0\xd1\xa3\x96\xf5\xcb\xc1\x90\x5d\x45\xaa\x9b\x57\x2d\xf9\xa5\x4f\x45\x73\x38\x9b\x79\xee\x88\x6b\xf1\xb7\x59\x57\x3f\x80\xd1\x04\x62\x82\xe8\x40\xf2\x96\x62\x2c\x26\xd4\x50\xec\xfa\x63\xf7\x62\xd1\x12\xbd\xb7\xdb\xbb\xf2\x9d\xfc\xbd\x25\x78\x59\xda\x15\x52\x34\x51\x54\x22\xa4\x39\xf8\xd1\xa3\x96\xdf\x3d\x47\x17\x01\x46\xef\x91\xef\x0c\x0c\xb0\xe6\x77\x33\x6e\x77\x2f\x02\x7c\x00\x47\x93\x96\x0e\x1b\xc5\xea\x75\x09\xa2\x72\x2f\xdf\xf0\xa1\x5a\xd4\xc6\xa7\xf4\xac\xcd\xa5\x40\xff\xeb\xd7\x6e\x16\xa8\xf3\xce\x5a\x58\xf1\x6f\xa5\xe8\x10\x32\x08\xdb\xd6\x60\x30\x68\xa5\x68\x74\xcf\x48\xa3\x7b\x3a\x8d\xee\x9d\xed\x32\x71\x51\x48\x91\x9d\x7e\xee\xfd\x73\x89\x16\xc3\x60\x4f\xe5\xbd\xb8\xc5\x97\xce\xb6\x25\xd1\xa5\x85\x06\x83\x01\x7d\x65\x59\xbb\xa8\x2d\x05\x69\x9a\xbb\x44\xbd\x1a\xea\x4a\x37\x8e\x2d\x10\xa5\x17\x58\x76\xc7\xec\xfa\xc0\x51\xb0\x01\xbd\xc1\x60\x80\xba\xdc\x2a\xf5\x8e\x5d\xaa\xaf\x50\x97\xcc\xcf\x09\xc5\x2d\x2a\xbb\x6c\xe7\x8b\x41\x53\x78\x89\xd8\xf5\xcc\xc8\xe1\xba\xe8\x1e\xa4\x14\xef\x9a\xce\xb1\x60\x05\xf2\x66\x1b\x38\xc8\xdb\xbe\xc6\x2e\x5d\xc3\x7c\x99\x50\x9f\x21\x8d\x97\x95\x64\xb6\x42\x0a\x85\xe0\x68\x12\xb1\x41\x31\xa0\x9c\xa2\xb3\x01\x6d\x31\xb1\xe7\x14\x9d\xd9\x9f\x09\xc2\x2e\xf4\xdc\xbf\xd1
\x6e\xa7\xff\xcd\x60\xe0\x87\xbb\x8b\x04\x39\xc3\x5f\xb7\xc4\xc9\x92\x8b\x64\xbc\xc9\xd8\xff\xf2\x45\xef\x53\x31\xf0\x74\xd0\xff\x81\xfe\x23\x39\xfd\x1f\xa8\x62\xe0\x35\x21\xed\x94\x9e\x85\x7c\x3f\x06\xae\x0f\xfc\xb6\x1c\x60\x86\x03\x1a\xb0\xbb\xa7\x3b\x81\xe4\xdd\xb5\xaf\xe0\xd9\x65\xd2\x7f\xcb\xb7\x71\xfb\xd1\xa3\x16\x3a\xc5\x67\x03\xff\x14\x9f\xb5\xbf\x86\x02\x78\xce\x46\xce\x84\xf8\xea\x0c\xdd\x29\x12\xfe\x5a\xb7\xf7\xa4\x4a\x51\xbb\xfe\x71\x25\x88\xca\x75\xa6\xf8\x64\x06\xbd\xcf\x91\x3a\x0b\x24\x50\xa3\x45\x5b\xda\x2a\x50\xcb\x6f\x7f\xb5\xfd\x36\xc3\x83\x3c\xe0\x7e\x22\x70\x3d\x92\xc5\x92\x80\x55\x2e\x03\x29\xa8\x84\x24\x21\x7d\x1d\x96\x83\x38\x6a\x77\x3f\xb1\x6b\xfe\x3d\x4f\x5c\x11\xe0\x3d\xcf\xe3\x97\x46\x0e\xd0\x62\x55\xb5\x6f\x2f\xcc\x96\xbb\x36\x84\xd4\xc6\xdb\x74\xd4\x05\x11\xd1\x28\x0f\x12\x7a\x18\x5e\x25\x83\xc1\xc0\x8f\xae\x93\x9e\xed\x17\xdc\x24\x64\x3e\x7d\x77\xf1\x21\x72\xad\x5e\xcb\x5d\x02\x50\x17\x23\x67\x3e\xd2\xcf\x4d\x24\xc8\x45\xf2\x18\x0d\x5d\x19\x95\xac\x1a\x89\x71\x6c\xa1\x5f\xbe\x84\x72\x9c\x58\xf7\xe3\xfe\x2e\xfa\x6a\xf7\x72\x19\x0d\xbd\x34\xde\x7a\x56\x1b\x9d\x90\xe8\x72\x7a\xa9\xfe\xb3\xbd\x41\x27\xfa\x41\x06\xa7\x67\x5b\x7e\x9a\x6d\x0e\xda\x9f\x99\xf8\x3d\x18\x04\xdd\xa3\xc0\x91\x6c\x9a\x3b\x08\xba\xef\xd1\x98\x1f\x46\x43\x1b\xca\xdb\x88\x06\xdf\x0c\x28\xff\xf7\xd1\x23\xaa\x9a\x0c\x06\x6e\x68\xea\x68\x05\x36\x6d\x6f\x91\xee\x6c\x4e\x26\xad\xcf\x7e\xe0\xa0\x5d\xf1\xbd\xad\x74\x5a\xbb\xbe\x4d\x44\xbb\x5d\x97\x71\xe7\xff\x60\x52\x00\x1e\xf8\x6d\xdb\xff\xd1\x7b\xf4\xa8\xe5\x0d\x7c\x0e\x75\xd2\x25\x01\xa6\x89\x7d\x0d\x77\x5d\xf5\xd6\xa1\xe1\x9f\x8c\x2a\x0a\x99\x23\xb0\xdd\x81\x3a\x1b\x36\x14\x5a\xa8\x0b\x2f\x08\x70\xcb\xdd\xde\x09\x95\x15\xee\x8f\xbd\x57\xc1\xc0\xfd\x3f\x3b\xaf\xc8\x29\x3c\x0b\xbb\xd9\x6d\x91\x53\xd8\xe9\x47\x0f\x1e\xc7\x5e\xb7\xb7\x77\x76\x5b\x81\x50\x8c\xd9\xde\xa0\xd7\xb6\x3f\xab\x57\x64\x97\xd8\xfe\x6e\x38\xf0\xd4\xf5\x77\x67\x4c\x74\x3a\xf4\x69\xab\xdf\xeb\x7d\x87\xb9\xbe\xca\x16\xc5\x21\xe3\xaf\x02\xf9\x0a\xde\xc4\x9f
\x7b\x52\xc5\x95\x87\x86\xf3\xd1\x85\x8b\xc9\x8a\xef\x50\x4d\x91\xf9\x58\x23\x0a\xfd\x76\x97\x06\x1f\x66\x33\x84\xf7\x21\x41\xad\x76\xf4\x8e\x8b\x21\x86\xbc\x36\x32\xc7\xbf\x1b\xf8\x64\x1b\x8e\xbc\xb8\xed\x05\xb1\x03\xdb\x11\xb1\x41\x04\xd1\x8e\xfe\xb1\xfc\x3b\xc0\x49\xe3\xcb\x52\x0b\xfc\xcc\x0d\x8a\xa7\xad\x9e\x4d\xbb\xaa\xfc\xc0\x31\x46\x04\xb1\xad\x6e\x7d\xd3\x6b\xdb\xb1\x57\x6f\x05\x4f\xdd\xfa\xcc\x36\xb7\xff\xb5\x7d\x66\x33\xa9\x79\x37\xbb\xb9\x79\xe3\xf4\x75\xb9\x5c\xf8\x16\xe6\xd9\xcd\x82\xe2\x3d\x77\x90\x5c\x16\x20\xaf\x11\xa1\xae\xcf\x67\xba\x6c\x57\x7b\x7c\x61\x4b\x41\xf7\xf2\x6a\xe3\x60\xfd\x0d\x2d\x96\x01\x02\xaf\x01\x5e\x19\x06\xc2\x5f\x66\x1b\xf9\x57\x2e\x0e\x7c\x46\x6d\x2d\xfb\x54\x5b\x55\xfb\x33\xc5\x0b\xc9\x8a\x14\x34\xa3\x03\xc5\xa8\xc5\x99\xaa\xd6\xb7\x53\x44\xe1\xa9\x0f\xa7\x68\x60\x7d\xfb\x18\x3d\xfe\xd6\x3a\xfb\xb6\x9d\xd0\xf7\x2a\x3f\xa1\xb6\xed\x0f\x3e\x4b\x90\xec\xfe\xfa\xfe\xdd\x51\x97\xd3\xb9\xd6\xdc\x47\x64\x04\x67\xa8\x45\xdb\xed\x50\x5c\x32\x43\xdb\xcf\x86\xb6\xff\x75\x04\xa5\xc2\x96\x4e\x70\x70\xcd\x79\xe9\x03\xae\x8e\xff\x76\x3f\x98\x7b\x0e\xf7\xd2\xc5\x08\x3a\xd2\x91\x08\x5c\xe0\x60\x0a\xd8\xfc\x01\x85\x63\xe1\x30\xc2\x16\x02\xe4\x42\xba\xdf\x72\xb8\xe2\xb9\xcf\x78\x82\x21\x22\x94\x7c\xf9\x82\xd1\xa7\xb9\x8b\x63\x60\x86\xb3\x99\xd5\x0e\x4d\xa7\xc2\xc6\xd6\xfa\xec\xc7\x9d\x14\x2c\xfb\x0a\x61\xc2\x30\xd9\xda\xe9\xf6\xba\xbd\xc7\x10\xa1\xef\x7b\x4f\x5e\x3e\xb7\xbe\xb6\xb7\xfe\x7f\x00\x00\x00\xff\xff\x82\x03\xc4\xa1\x84\xa3\x03\x00") +var _web_uiV2AssetsConsulUi61975faed99637b13431ce396e4422b4Js = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xfd\xfb\x76\x1b\xb7\x92\x38\x8c\xfe\xaf\xa7\xa0\x70\xd6\xd1\xee\xde\x06\x5b\xa4\x24\xdf\xe8\xdd\xdb\xc7\xb1\x9d\xc4\x13\xc7\xf6\xc8\x76\xe6\xb7\x87\xc3\xe5\x5f\xab\x1b\x24\xb1\xd5\x04\x18\x00\x2d\x89\x91\xf8\x40\xe7\x35\xbe\x27\xfb\x16\x2e\x7d\xbf\x92\x92\x6c\x33\x93\x59\xb3\x63\xb1\x51\xb8\x54\xa1\x50\x28\x14\xaa\x0a\x20\xe2\xa8\xc7\x05\xc3\xbe\x00\x7b\x01\x9a\x62\x82\x2c\xe0\x53\xc2\xa3\xb0\x1f\xe1\x43\x2f\xf0\x96\x02\x31\x7e\xe8\xf9\x21\x80\x63\x80\xae\x96\x94\x09\x0e\x60\x25\xcc\x72\x19\x62\xdf\x13\x98\x92\x1c\xc0\x82\x06\x28\x34\x4d\x94\x3f\x07\x7e\xee\x6b\x24\x70\xc8\x0f\xe7\x42\x2c\x0f\x17\x48\xcc\x69\x50\x57\xca\x85\x27\x22\x5e\x51\xba\xf0\xce\xd1\x0b\x21\x98\x77\x16\x22\x30\x81\xd3\x88\xf8\x72\x48\x16\x82\x02\x12\xc8\x60\x08\x39\xa4\xf6\xf5\xfb\xb3\x7f\x23\x5f\x38\x1a\xe7\x0f\x8c\x2e\x11\x13\x2b\x0b\x41\xf0\xe5\x0b\xe2\xbf\xd2\x20\x0a\x11\x80\xd7\x17\x5e\x18\xa1\xd1\xfe\x60\x6d\xef\x5d\x78\xac\x87\x5d\x53\xcd\xe3\x1c\xcf\xc8\xcd\x4d\xda\xba\x7d\x3d\xa5\xcc\x92\x40\xc2\x1d\x3e\x13\xff\xf0\xd8\x2c\x5a\x20\x22\xb8\x13\x22\x32\x13\xf3\x67\xe2\xc1\x03\xfb\x5a\x96\x13\x37\x29\x1b\x8b\xc9\x5e\x5c\x8d\xf5\x30\xe9\x11\xdb\x74\xb0\x64\x54\x50\xb1\x5a\x22\x67\xee\xf1\xf7\x97\x24\x1e\xa1\xe3\x7b\x61\x68\x11\xc8\xec\x83\x03\x0b\x8d\xd9\xc4\x25\x63\x36\xb1\xd7\x0c\x89\x88\x91\x1e\x5a\xef\xc5\x43\xea\x79\x1a\x65\xfb\xda\x94\x09\xd9\x01\x7a\x5e\x87\xb8\x88\xb1\x25\x10\x91\x68\x81\x14\x01\x47\xfb\x03\xe8\x53\x32\xc5\xb3\x28\xf9\x7d\xc9\xb0\x30\x7f\xaf\xed\x11\x1a\x8b\x89\x4b\x20\x5a\x23\xd9\xa4\x17\x85\xc2\x15\xf1\x5f\x0e\xba\x12\x88\x04\xd6\x75\xc4\xc2\x1f\x29\xfb\xcf\x08\xb1\xd5\x28\x4b\xb2\x78\x64\x73\xcc\x1d\x6f\xb9\x44\x24\xf8\x7c\xfa\xd6\x02\x9e\x1f\x1e\x86\x98\x0b\x00\xc7\x13\xa8\x0a\xfd\x10\x79\x44\xd5\xb7\x90\x6d\xaf\x61\xa6\xc1\x53\xe4\x53\x16\x74\x6d\x16\x93\x29\x05\x70\x8c\x1c\x1c\x34\x36\xfd\x92\x21\x4f\xa0\x52\xdb\x59\x72\x56\xb4\xee\xab\x5a\x6a\xd8\x9e\x75\xbd\x86\xc2\x79\xf5\xe2\xd3\x8b\x97\xaf\xdf\x7d\x7a
\x7d\xfa\xe5\x97\xd7\xff\x82\xc4\xf1\x84\x60\x16\x73\x7e\x7c\x7f\xfa\xfa\xcd\x4f\xef\xe4\x47\x3b\xed\xf5\xf3\x32\xa8\xee\x15\x86\x8d\xfd\x46\xaa\x5e\x43\xbf\x61\x73\xbf\xaf\x50\x88\xaa\xfa\x0d\x21\x6f\xec\x37\x40\x5c\x30\xba\x02\x70\xcc\x75\x07\xc4\xf9\xf8\xf6\xf3\x4f\xaa\xf5\xba\xa1\xf0\xe6\xa1\xbc\x0c\x29\xa9\x1a\x49\x33\xdd\x65\x25\x00\xc7\x61\xe7\x51\xb4\x10\xe4\x14\xfd\x1e\x21\x2e\x72\x5c\xa5\xd7\x36\x72\xe4\xaa\x84\xc4\x45\x0e\x27\xde\x92\xcf\xa9\xd8\xe3\x97\x58\xf8\x73\x0b\x39\x4c\x57\xfb\xb4\x5a\x22\xfb\xda\xf7\x38\x02\x7e\x8a\x0e\x18\x65\x31\x28\x61\x6b\x09\x47\x89\xc4\x77\xde\x42\xb2\xd9\x3a\x0b\xfc\x85\x47\x4b\xc4\x24\xd6\xe1\xca\x92\x5f\x60\x22\x42\xec\x35\x54\x7d\x8c\x8a\x82\x4e\x0f\x38\x74\xaf\xb9\xa0\x0c\x8d\x10\x94\xe3\x1e\x09\x88\x83\x11\x81\xf1\xd0\x47\x0c\x66\xc6\x3c\xca\x0d\x77\x0d\xb9\x7b\xad\x85\xf0\x48\x8d\x42\xff\x9d\x52\xc7\x0a\x6d\x49\xae\x51\x06\x9f\x4c\xc9\x1c\x79\x01\x62\x5c\x97\x9a\x1f\x79\x88\xc0\x13\x9e\x2e\x96\x7f\xe5\xca\xd6\x7b\x39\xf4\xa5\x4c\x8f\x0b\x25\xc6\x79\xf8\xd2\x2c\x11\xb7\x95\x6a\x0d\x73\xd6\x13\xce\xe9\xeb\xff\xfc\xfc\xfa\xe3\xa7\x2f\x9f\x3f\xbc\x7a\xf1\xe9\xf5\xa8\xf0\xf5\xe5\xe9\x6b\xf9\xd5\x8c\x90\x38\x9e\x1f\xc6\xb3\x45\xd6\xb0\x48\xa5\xdc\xe8\x3a\xf5\xfa\xea\xf5\xdb\xd7\xb5\xbd\xd6\x72\x55\xe8\x7c\xf8\xfc\x69\x03\xae\xc1\xbc\x46\xc6\x25\x2b\x0d\x39\x4b\x4f\xcc\x89\xb7\x40\xae\xab\x29\xba\xf4\x18\x47\x72\xd5\x65\x59\x38\xd3\x8a\x5a\x8d\x00\x5a\x03\x48\xe3\x3d\xc0\xb6\xd4\x02\xcc\x2d\x34\x08\x80\x6d\xdb\x76\xd2\xbc\x1a\x4c\xe5\xba\x37\xab\x6e\x6f\xb3\x11\x65\x16\x55\xd5\x80\x2c\xe1\x5e\xaf\xa1\x67\x09\x98\x0a\x0a\xbd\x17\xf0\x30\x9a\xfd\xc8\xe8\x42\x36\x88\x6c\x5b\xc1\x94\x46\x0e\x45\x69\xf0\x35\x72\x7b\x43\x4a\x66\x5b\xb1\x48\x14\x86\x70\x5b\x72\xce\x3d\x12\x84\xe8\x14\xf1\x25\x25\xbc\x20\x1a\x98\x94\xa5\x92\xaa\x54\x8d\x04\x46\x2e\xdb\xc3\x53\x4b\x8e\x8c\x3b\xef\x7f\xd1\x65\x7e\x61\x94\xa1\x1c\x62\xb2\x68\xf6\x07\x9a\x67\xf7\x07\xae\xeb\x46\xa3\xc8\x55\x63\x22\xce\x87\xd3\x37\xbf\xbe\x38\xfd\x57\x4a\xcf\x08\xcb\x85
\x20\x1b\xf0\x6d\x7b\xef\x8c\x21\xef\x7c\x4f\xb3\xb5\x2c\xc5\x3c\xb3\x81\x5b\xbe\x3d\x8a\x5c\x2c\x1b\x8a\xc6\x03\x23\xb6\xdb\x9a\xac\x6a\x33\x47\x45\xdf\x1e\x65\xcb\x72\xbc\x5a\x2c\xcb\x70\x4d\x66\x2c\x5d\x06\x02\xa3\x71\xca\x49\x93\x74\x58\x66\xce\x46\x91\x1b\x39\x0b\x6f\x69\x55\x70\x86\xea\x04\x55\x75\x42\x73\x3d\xa0\x42\x0f\x6b\x7b\xbd\x46\x21\x47\xbd\x64\xea\x3e\xbf\x7b\xf1\xf9\xd3\xcf\xef\x4f\xdf\xfc\xf7\xeb\x57\x7a\x12\x03\x97\xa0\xcb\xde\xeb\xc5\x19\x62\xce\x6b\xc6\x28\xdb\x13\x73\x46\x2f\x7b\x81\xe3\xd3\x00\xb9\x08\x06\xce\x02\x71\xee\xcd\x90\xcb\x60\x50\x21\x3a\x14\xbf\x44\x30\xb4\xd7\x6b\x7b\x6d\xc3\xa6\xf3\x41\x56\xf7\xcf\x9e\x13\x90\xec\xbd\x2f\xe5\x75\x0a\xcc\x90\x54\xed\x40\xc4\xc2\x0a\xf5\x5d\xab\x50\x9f\x4f\xdf\x56\xe8\xee\x9b\x6b\xed\x10\x15\x36\x7e\x17\x15\xc4\x6c\xe6\x83\x96\xf6\x99\x0f\xa7\xaf\x5f\xbc\xca\xfc\xd4\x12\xd8\xbd\xa0\x38\xe8\x0d\xf6\xf4\xee\xfa\xdd\x9f\x07\x20\x77\xad\x12\x0a\xc0\xcf\x2c\x04\x00\x0b\x18\x83\xdf\xd3\x65\x99\x2d\x34\xf4\x01\x51\x66\x85\x65\xcb\x0d\x41\x41\x90\xd1\x26\x41\x79\x06\x40\xe0\x03\x7b\xaf\xe9\xb8\x20\x85\x18\x5f\x7a\x3e\x1a\x81\x8b\x21\x80\x0c\x2d\xe9\x48\xb3\x31\x26\x8a\x1a\x1c\xb1\x0b\xec\x23\x0b\x70\x24\x04\x26\x33\x0e\x12\x7d\xa3\x6a\xe3\x4d\x56\x5b\xa8\x74\x41\xc9\xe0\x33\x24\x2c\x20\x1b\x06\xb6\x33\xc5\x24\xf8\x59\xd7\xb6\x6c\xd8\xba\x75\x2a\x8d\x2b\x3e\x33\xe4\xa4\xbd\x46\xbc\x27\x0f\x17\x7b\x7a\xe6\x55\x87\xc8\x8e\xf7\xaf\x18\x60\xcc\x27\x50\xc8\x8d\xa3\xe5\x04\x93\xee\x1b\x0e\x5f\x86\x58\x58\xe0\x10\xd8\x0e\x0f\x25\xee\x03\xd8\x1f\xda\x8e\x4f\x89\xef\x09\x6b\x0c\xc0\xc4\x76\xfe\x4d\x31\x51\x20\x4d\xbb\x4c\x56\xe6\x5e\xe3\x60\x04\xc0\x3a\xb7\x77\xcc\x90\xf8\x99\x56\x52\x4f\x6b\x72\x94\x8b\x9b\x9b\x90\xea\xc5\xae\xf9\xd2\xa7\xe1\x03\x70\x78\x08\x1e\x24\x9f\x25\xd4\x1a\x66\xb6\xd3\x2a\xec\x02\x24\xe5\xd0\xe7\xd3\x37\x2f\xe9\x62\x49\x09\x22\xc2\xaa\x46\x78\x49\x97\x96\x24\x7b\x8c\x4e\x55\x63\x52\xd4\x91\x98\x93\xa4\xc0\x30\xb3\x2c\x71\x51\x95\x13\x69\x5a\xa1\xd4\x17\x17\xe7\x3f\x87\x07\x07\x7a\x99\xef\xbb
\x99\xd5\x39\x9c\x3c\xcf\xfe\x18\x01\xa0\xcf\x01\xc8\x63\xfe\xfc\x83\xc7\xbc\x85\x66\x2c\x6e\xdf\xdc\x00\x00\x99\x0b\x80\x9c\x89\xe7\x15\xba\xc5\x48\xc8\x4d\x97\x98\xfe\xfe\x31\xb4\xb5\x68\x56\x02\x5b\x8a\x6a\x0b\x7c\x26\xf2\x90\xdd\x13\xb4\xa7\x17\x6b\x2f\x22\xf8\xf7\x08\xf5\x70\x00\x7b\x0b\xcc\x39\x26\xb3\x9e\x94\xab\x3e\x22\x02\x31\x60\xcb\xf6\xd8\x2d\xdb\x93\x63\x04\x09\xb7\xfe\xc7\xc7\xf7\xef\x1c\x2e\x18\x26\x33\x3c\x5d\x59\x63\x02\xa5\x64\x81\xc9\xd9\xeb\xee\x08\x39\x9e\x40\x52\xae\x7b\x54\x59\xf7\x28\x5b\xf7\x68\x32\xba\x8e\xcf\x0a\xd6\x00\xb2\x54\x4d\x1a\x2b\xa2\x9f\x45\x38\x54\xc7\x44\x1b\xa2\x89\xb2\x86\xb4\x6e\x65\x3e\xa5\x2c\xc0\x44\x9f\xa7\x6f\x65\xf1\xca\xb6\xd4\x68\xc4\xba\x8b\x9d\x6e\x57\xf6\xa3\x46\xc1\xbf\xa1\x9d\x28\x25\xf0\x21\xa1\x01\xe2\xf5\xf6\xa2\x26\x85\x58\x99\x03\xb5\x75\x4f\x4b\x7a\x97\xc7\x0a\x31\x4b\x14\xe2\xa8\x20\x50\xa9\x56\x88\x3d\xd7\xab\xd3\xec\xb4\xe8\x87\xfa\xb0\xc1\xdc\xbc\x72\xc7\x5d\x9c\x51\xef\xa2\x82\x7a\x07\x15\xad\x45\x8d\xa9\x4e\xaa\xf0\x66\xe2\xf9\x46\xa6\x3a\x21\xa7\x83\xcb\xe3\x8b\xde\x9d\x20\x83\x7c\x5d\x65\x68\x50\x64\xf1\x20\x6d\x5f\x2c\x81\xbf\xd1\x22\xc9\x33\xfa\x56\xea\x5c\x1b\xf3\xfc\x88\x49\xf0\x22\x0c\xeb\xb6\xaf\x2c\xf7\x78\xc2\x0b\xe9\xec\x30\x15\xa2\x1c\xb4\x63\x8c\x89\x40\xa4\xac\xe7\x6e\x2e\x1d\x0a\x86\x6f\xf3\x35\xd3\xfc\x86\x76\xef\x66\x9b\xf9\x5f\x56\xf1\x9d\xb7\x8a\xfb\x94\x10\xe4\x8b\x94\x45\x1a\xe4\xdd\x56\xf6\xf1\xca\x0e\xb6\xb6\x94\xb3\x4d\x11\xa9\x36\xd4\xb2\xd8\xa4\xbb\x99\xc5\xbc\xc9\x72\x5d\xd9\x7f\x62\x96\xee\x6a\xc0\xae\x1b\xd1\x36\xb6\xf4\xfb\x1a\xd1\x3d\x5a\xa6\x32\x82\xaa\xc2\x3e\x45\x5a\xcd\x7d\x35\x5c\x23\x52\x1a\x39\x82\x7e\x5e\x2e\x11\x7b\xe9\x71\x64\xd9\xca\xc6\xf1\xe1\xfd\xc7\x4f\x07\x07\xdb\xd8\x25\x6f\x35\xdc\x26\xed\x81\xc8\x69\xcd\x99\xd3\x48\xac\x3d\x84\xb5\xe6\x34\x5e\x69\x4e\xab\xb1\x8a\x6d\x67\xdc\x82\xb1\xbd\xbe\x68\xca\x62\x1d\x6d\x6a\x1b\x1a\xaf\x58\xa3\xf1\x8a\x95\x8c\x57\xb5\xd6\xa6\xef\xcf\xbe\x9f\xf0\x4e\x6a\xe5\x6f\xd3\x12\xce\x2f\xb6\x56\x0f\xf4\x7e\x8d\xf9\x8f\x34
\x0c\x10\xab\x2a\x52\x36\x10\xb9\xd7\x18\xfa\x7c\xa2\xff\xa1\x58\xe6\xc5\xbf\xbd\xab\x9f\x3d\x3e\xaf\xa8\x73\x8e\x56\x9f\xe8\x0b\xc6\xbc\x55\x95\xd2\x21\x47\x7b\xe7\x37\xf3\x75\x4a\x06\xc4\xd0\xb3\xaf\x93\xfd\x38\xfa\xba\xfb\xf1\x56\x0a\x8e\xbf\x03\x0a\x4e\x93\xa2\xf1\x85\x55\x33\xca\x28\x7f\x64\xbe\x4e\xce\xfa\x15\x3b\x05\xd0\x85\xc0\x75\x5d\x6d\x8a\x93\xa3\x7c\x3f\xb5\x90\xfd\x1c\x8d\x0a\x96\x02\x24\xd5\x68\xa8\xcd\x3b\xac\xc6\x72\xe7\x09\x7a\x06\x6c\xb8\xa1\x0e\x24\x39\xd5\x1a\xc0\x30\x1d\xb4\xd4\x4f\x6c\xe8\x5b\x91\x14\x42\xe0\x1c\xad\x38\x80\x72\x87\xb2\xab\x74\x96\x6d\x35\xa3\xba\x6e\xef\xd4\x81\xa0\xdc\x89\xf1\x16\xe0\xa9\x06\x60\xc3\xa8\xc9\xaf\x80\x6e\xec\x57\x50\xaf\xa5\x95\x87\xc3\x3a\x0f\x87\x35\x0f\xa7\x4e\x45\x62\x10\xeb\xb5\xe2\xb9\xd5\xed\xe2\xca\x76\x53\xfb\x0f\x49\x07\x8b\xcb\x83\x3d\x38\xb0\x3c\x87\x21\x3f\x62\x1c\xb9\x19\x1e\x69\x44\xba\xa2\x1d\xe8\xd9\x77\x67\xdb\x94\x7f\xfa\xc8\x3a\xce\x58\x6f\xed\x76\xcb\xf0\x7e\xc1\xea\x38\xf7\xb8\xa5\x99\xbf\xc5\xd8\x91\x71\x0c\x50\x3b\x26\x4d\xd5\x15\x2f\x51\x57\x70\x41\x5d\x61\xf5\xb7\x7f\x74\x44\xf5\x5c\xf1\x26\xb5\x02\xb7\xde\xfe\x61\x7b\x44\x5d\x5f\x36\x44\xc7\x83\x09\xec\xd4\x64\x51\x53\xa1\x2e\x2d\x69\x2a\xb9\xeb\xe3\x48\x1b\x63\xf2\xed\x86\xd9\x46\x21\x52\x3c\x2d\x60\x3a\xdf\x10\xd9\x50\x34\x68\x2c\x14\xb2\x3b\xd1\x58\x94\xb9\xf8\xce\xd4\x96\x54\x5c\x4b\x39\x4d\xa7\x16\x73\x89\x73\x7e\xe1\xfc\x26\x77\x36\xfb\xb9\x96\xc9\x33\x24\xf4\x28\x80\x91\xd5\xc0\x76\xd0\x15\xf2\x23\x81\x2c\x66\x8f\xe4\x12\x59\xdf\xa1\x43\x43\xac\x35\x3a\xfa\xf7\x5e\xa3\xda\x85\x37\x73\x67\x68\x53\xc8\x08\x0d\x6e\x6d\xcf\xd5\x6d\x6c\x6c\xc9\x4d\x54\x9d\x70\x07\x54\x1d\xbe\xe3\xaa\xce\x86\xfa\x84\x54\xed\x19\xf1\xc2\xc3\x08\xb7\x99\x90\xb7\x52\x1c\x8a\xed\x37\x1b\x54\x9a\x04\x37\x4d\xf7\x46\x73\xce\xa4\x65\x2b\x75\xf1\x9c\x89\x37\x39\x67\x46\x2e\xd7\xc7\xc4\x70\x13\xd7\x8b\x0e\xc7\x44\xae\x8f\x89\x15\xcd\x7a\xad\x3e\x0e\xb5\xc7\x44\xdc\xbe\xe6\x8d\xae\x79\xdb\x65\x9f\x34\xf3\xd7\x1d\x4e\xc9\x9a\xca\x77\xd6\x9a\x9a\x5d\x99\x66\x86
\xef\x7a\xf1\xcf\x91\x17\x8a\x79\x86\x0d\x6f\xbd\xf4\xa1\xbe\x86\x82\x7e\xd5\xe2\x0f\xb6\x5e\xfc\x81\x3d\xe2\x96\xe7\xb6\xad\xfa\xc0\xb6\x21\xb7\x3c\x08\xde\x69\x49\xe9\xdb\xd0\x77\xbd\x82\x1c\xf0\x5d\xbf\xe5\x46\x8c\x97\xe5\x40\x94\xed\x05\x22\xe7\x9d\xb7\x40\xcd\x02\xc0\xef\x26\x00\x38\xbf\xb7\x9b\x9a\xa4\xf1\x3b\xb7\x91\x6c\x29\x38\xe8\x2e\x09\x0e\xbc\xb3\x82\xc3\xcc\x7b\x87\xed\x7c\x2b\xa1\x11\x37\xdf\x35\x50\xa1\xf1\xba\x41\xb3\x05\xde\xe0\xd2\x60\xaf\x65\x58\x25\x8f\xff\xdc\xf5\x04\xb5\xef\xfe\xb8\xb0\xa9\x5f\x73\xf3\xe9\x37\x2c\x28\x51\xe4\xd6\xbe\xaf\xb8\x8b\x51\xbd\x8b\xef\x2b\x4d\x7c\x5f\x3b\x35\xb9\x81\x02\x46\xb5\xe0\xad\x68\xb6\xa8\x80\x6d\x60\xa7\x6f\xf0\x0a\x5d\x2e\xeb\x44\x2e\x43\x9c\x86\x17\xca\xac\xad\x3d\x43\x43\xea\x05\x7d\x4c\xb0\xc0\x5e\x88\xff\x40\x2c\x0f\xae\x57\xf7\x21\x22\x17\x98\x51\x22\x67\xf8\x2e\xf5\x2c\x7d\x08\x7e\x91\x0a\xff\x44\x24\x2c\x54\x9d\x0f\x0c\x4d\xf1\xd5\x28\xb1\x90\x3a\xd9\xcf\x70\x49\x83\x5f\xab\xc1\x0a\x25\xf0\xd4\x20\x3d\x4a\x64\xcf\xda\x7e\x96\xb7\x56\x85\xb0\xba\x97\xac\xc7\x43\x58\x49\x6c\x3f\xb6\x2f\xa9\x08\xbb\xfe\x14\x87\x42\x92\x37\x25\x7f\x96\x60\xb7\xf2\xb8\xd0\xe4\x4a\xec\x59\x09\xb1\x84\x37\x93\xfb\xf5\x08\x4c\x29\x5b\x00\xe8\x87\x1e\xe7\xf2\x03\x1f\x8d\x81\x1e\x4e\xff\xcc\x63\x60\x02\x29\xf1\xe7\x1e\x99\xa1\xac\x6f\x46\x1d\x0f\xe5\xd0\x92\xb0\xfd\x19\xa3\xd1\xf2\xab\x22\x96\xc3\x24\x37\x8a\xed\x71\x59\x2e\xfb\x17\x18\x5d\x56\x39\x49\x9f\x85\xd4\x3f\xef\xf3\x90\x8a\xa2\x62\x90\xb1\x21\x28\x5c\xf1\xd4\x52\x77\x39\x0e\xe6\xea\x5f\xb9\x1f\x64\x76\xf8\x01\x24\xae\xf9\x6e\xf6\x77\xfb\x99\xf8\x07\xca\xee\xf5\x44\x6e\x8c\x72\x77\xdc\x4b\xae\xb4\xcc\x1f\xba\xe1\x29\xa3\x0b\x0b\xd9\xdb\xd9\x10\x98\x1b\x50\x5f\xc9\x63\x27\xfe\xe3\x75\x88\xe4\x3f\x30\x74\x4b\x7e\x39\x56\xc9\xef\x6f\x50\xe9\xf7\x37\xc8\xfa\xfd\x0d\x26\xa3\xf1\xc4\xae\x91\x77\x40\xa0\xc5\x32\xf4\x04\xea\x83\x07\x68\x6d\x67\x0d\x06\x35\x53\x9d\x2c\x4c\x78\x2d\x05\x12\x26\xb3\xd1\xfe\x30\xcf\xc9\xc9\xcc\x4d\x60\x80\x83\x53\xe4\x23\x7c\xa1\x3c\x6a\x78\x96\x05
\x24\xfa\xca\xa7\xcc\x2d\x5a\xd8\x54\x63\x40\x39\x86\xee\x15\xcb\x4c\x9f\xc0\x7e\xce\x1f\xb8\xa0\x17\xff\x1c\x59\xc8\x65\x8e\xaa\xf8\x16\x73\x61\x3b\x0c\x2d\xe8\x05\x32\x1b\x1f\x82\xc4\x0a\xad\x71\x52\x59\xc5\x1d\xf0\x83\x03\x4b\xe4\x2b\x79\x41\x10\x6f\x95\xaa\x06\x8f\xad\xcf\x3d\x75\xcd\xbd\x96\xe8\xbc\x21\x1c\xb1\x78\x92\xb2\xf8\xe8\xe8\xac\x3c\xbe\x96\xae\xf3\x4a\x6b\x02\x15\x95\x0c\x11\xea\x48\xf0\x20\xc5\x70\x4f\x48\x45\xb1\x15\x49\x51\x18\x72\xfb\x3a\x3b\xf3\x38\xf6\xfb\x01\xa3\xcb\x80\x5e\x56\x86\x24\xe4\x21\x1a\xea\xde\xda\x67\xad\xb6\x82\xe1\x39\x00\xaf\xf3\x3a\xed\x0c\x55\x7a\x5f\x27\x9b\xc7\xa6\xe8\xcb\x6d\x54\x20\x22\xfa\x48\xcf\xd5\x6d\xc8\x51\x6a\xeb\xcf\x43\x9e\xbb\x20\xcb\x9f\x81\x1c\x82\xe1\xd9\x2c\xaf\x43\x6c\x4c\x8e\xb8\x8d\x9d\x24\x47\xb2\x0f\xb7\xec\xd2\x35\x95\x76\x11\x67\xe3\x0b\xfb\x67\x54\x1f\x7d\x1a\xa0\x3e\x0a\xb0\xa0\x5f\x17\xaf\x05\x0d\xd0\x08\x64\x4c\x4b\x87\xff\xe6\x94\x00\x48\xc9\x39\x5a\x45\xcb\x8d\xb1\x20\x53\xcc\x16\xaa\xa1\x7e\x80\xe5\x64\x6d\xaa\x43\x6e\xa5\xcb\x11\xb7\xa8\x0d\x70\x44\x82\x17\xea\x4b\xf6\x04\x1e\x2b\xc8\x72\xe6\xc0\x24\x0e\x4b\xca\x92\xf8\xeb\x29\xad\x56\x51\xf1\x48\xf4\x46\xad\x3e\x6c\xa4\x0f\x26\xfc\xf8\x03\x26\x52\x67\x91\x6c\x69\x26\x43\x69\x5d\xd0\xfc\xa8\x71\x73\x31\xa5\xc0\x86\x88\x31\x5a\xe7\x0c\xa3\xca\x80\x9d\x67\xfe\x4b\x2c\xe6\xfd\xec\xbc\x83\x09\x34\xf1\x9a\x23\xf0\x82\xa1\xde\x8a\x46\x3d\x1e\x31\xf4\x1c\xc0\x74\x44\x52\x71\x5d\x22\xb6\xf0\x24\x3e\xf2\x87\x3c\x5c\x97\x54\xba\xc6\x8b\x68\x6d\x6d\xf2\x88\x8f\x72\xea\xba\x1e\x3a\x4f\xd5\xb9\x94\x0a\x70\x7f\x68\xaf\x9d\x33\x2c\x49\x37\xc7\x71\x13\xe6\x2a\xd9\x25\xa5\x12\x53\xd5\x2d\x2a\x8d\xda\x2a\x23\x34\x23\x64\xcc\x8c\x2a\xe2\x4d\x5b\x17\x33\xe3\x24\xae\x18\x0f\x26\x90\xb9\xc2\x84\xc4\x0d\xed\xbd\xe2\x20\x33\x5c\x09\x89\x0d\x4b\xc5\x09\x67\x40\x16\x87\xfe\x61\x7e\x8a\x66\x98\x0b\xc4\x50\x60\x01\xb3\xd6\xe2\x2b\xf3\x1a\xf4\x07\xf6\xa8\xa4\xed\xc6\x33\x9f\x5c\xa9\x17\x21\xcc\x64\x02\xdb\x76\xc4\x1c\x91\x74\xb5\x08\xfb\x5a\x1c\x1c\xa0\xa4\xa2\xbd\xb6\x1d
\xdf\x13\xfe\xdc\x2a\x12\x2c\x76\x7d\x41\x25\x65\xdb\xb0\x54\xd2\x7b\xa2\x50\x67\x6c\x65\xb9\x39\xeb\x24\x84\x96\xab\xfe\x59\x24\x44\x75\x94\xaf\x1f\x62\xf9\xbf\xe5\x19\xf5\x58\x50\x57\x6f\x17\x77\xc7\x34\x42\xa4\xbf\xc4\xfe\x79\x51\x37\xca\x5c\x0d\xe0\x2b\x4c\xf8\xa1\x1f\x62\xff\xbc\x4f\x23\xc1\x71\x50\x8c\xb6\xb8\x8f\xbd\x26\x23\xae\x92\xdd\x34\x0a\xc1\xb6\x3b\xa6\x99\x4d\x1a\x86\xc8\x2f\x47\xba\x14\x4b\x1b\x6b\xee\xe2\x6c\x6b\x24\x88\x27\xf0\x05\xea\x73\x9f\xd1\x30\x54\x51\x33\x1b\x52\xa1\xdc\xc0\x2e\x12\x63\x8a\x50\x70\xe6\xf9\xe7\x5f\x4b\xe7\xd8\x84\xd7\x2f\x3d\x2c\x6a\x36\x54\x81\x17\x88\x46\x02\xd8\x50\xdd\xf3\x5e\x78\xa1\xf2\x66\xaa\xd8\x60\x63\x04\xc1\x04\x72\xe1\x09\x34\x02\x0c\x79\xc1\x0a\x64\x37\xd1\xc1\xb6\x9b\x28\x8f\x7c\x1f\x71\x9e\x38\x7f\xa9\x5f\xe5\x7d\x52\x8a\x6a\x03\xa3\xfe\xce\x4a\x65\x18\x57\x2b\x59\x58\x54\x8d\xd2\x86\xa7\x90\x00\x10\x98\x5a\x20\xde\xf3\xd2\x9d\x41\x92\x2d\xb3\x2b\x1d\xa3\x63\x98\xdd\x79\xfc\x10\x79\xec\x8d\xa1\x5a\x66\xcf\x42\x3a\xe6\x43\x7e\x05\x76\x76\x27\xcd\x16\x40\x21\xf7\xaa\xfc\x6e\x96\xd5\x1c\x50\x3a\x40\x4d\x66\xb9\x09\x41\x8d\xf5\x86\x08\xc6\x3a\xd3\x77\x8f\x5e\x87\x45\x16\x7a\x7c\xde\x8f\xd5\x81\x9a\x9d\x55\x01\xd5\xd7\xda\x49\xe1\xc2\x10\x12\xe8\x4a\x7c\xdb\x63\x27\x46\x61\xc0\x91\x28\x1e\x3d\x0b\x63\xdb\xfa\xfc\x39\xf7\xf8\x1c\xfb\x94\x2d\xfb\xba\xf8\xee\xb0\x34\x39\x26\x6a\x8d\xed\x24\x2d\x3a\xa3\xc1\xaa\x5d\xb6\x5e\x63\xfe\xca\x58\x70\x7e\xc3\x1c\xab\x79\x1e\x36\xdb\x87\x53\xcb\xad\x31\xdc\x5a\xa9\xf5\x5d\xc9\xd7\x0b\xc4\x04\xf6\xbd\xb0\xbf\x40\x24\x02\xf6\x1a\x6a\x8d\x9c\x8f\xae\x63\x63\xd1\xa8\xb4\x94\x32\x7e\xa9\x7e\x7c\x2b\x00\xec\x7f\x0e\x0e\x0e\x8a\xc2\xa0\x34\x60\x00\xf7\x8b\x6d\x94\x61\x54\x0a\x8f\xc2\x5c\x22\xfb\x1a\x39\xc2\x63\x33\x24\x1c\x7f\x8e\xfc\x73\x14\x3c\xb7\xb2\xe8\x79\x41\xd0\x82\x1b\x24\x0e\x17\xab\x10\x39\x73\x84\x67\x73\x21\x8f\x25\xd9\x9f\x97\x98\x04\xf4\xd2\xc1\x84\x20\xf6\xb3\xfa\xf4\x00\x2c\xaf\x80\x3d\xb2\x36\xa6\x62\x4b\x4f\xca\xb7\x7d\xdd\x85\x37\x95\x87\x92\x42\xb7\x1f\x07\x16\x7f
\xab\x0b\xb6\x8a\xb1\x4c\x24\x06\x15\x69\x49\xab\x31\x40\x41\x9f\x21\x4e\x23\x56\x74\xf9\x8b\x75\xb5\xc5\x32\x12\x28\xe8\x2b\x52\x7d\x05\xad\xbc\x16\xbd\xec\x40\x27\xd0\x13\x82\xe1\xb3\x48\x64\x0d\x0c\xf1\x10\xd5\xbf\x23\x6b\x00\x45\x7a\x55\x0c\x66\x0c\x07\xa7\xf4\xf2\x35\x09\x80\x0d\x23\xa2\x1b\x5e\x19\x45\x28\xc6\xd2\xd1\x62\xcb\x02\xaa\x47\xee\xfc\xff\x90\xe7\xcf\x9d\x8f\x66\x8e\xcb\x89\x3b\x72\x1b\xa1\x01\x8b\x9d\x41\x80\xcf\xb0\xe2\x3e\x95\xcd\xe4\xe6\x06\x5c\x7a\x8c\x98\x58\x20\xb1\x56\x99\x77\x6e\x35\x02\xd3\xcb\xd2\x53\x49\x48\xd2\x08\xa3\xc2\x68\xd6\x36\x4c\x51\x2f\x74\x66\x81\xcf\x31\x21\x5e\xaa\xee\x32\x3d\xc4\xda\xc4\x71\x6c\x36\x2a\x0a\x08\x53\x31\x27\x68\xd0\x83\x07\xf0\xda\x74\x37\x02\x5e\x24\x68\xef\xb0\xc7\x97\x1e\xe9\x81\x07\x16\x7a\x50\x3a\xcb\x27\xf3\x90\xb4\x72\x73\x33\x30\x57\x55\x6d\xab\x30\x89\x2f\xfc\x33\x9a\x5f\xf1\xc5\xaa\xef\xd3\x00\x2d\xb0\xd2\xd7\x72\x0b\x33\x5f\xd6\x50\x6b\x17\x95\x9b\x10\x73\xf1\x9d\x1c\xa2\x9b\xbc\xdf\x92\xf9\x0f\xf0\x45\x61\xfa\x8b\x18\x4c\xda\x91\x5e\xd2\x4b\xc4\xfa\x1c\xc9\x2a\xfd\x45\x14\x0a\xbc\x0c\xcd\x16\x96\xe4\xc6\x2d\x91\x21\x57\x29\x03\xbb\x59\xc3\xbb\xc8\x23\x95\x58\xb5\x91\xa8\xbd\x81\x3f\x0d\x29\x9a\xee\x22\x37\x22\xc9\x4e\x5f\x48\xd6\xad\x8f\x3b\x59\x4b\x7f\xaa\x25\x94\x45\x98\x47\xb3\x19\xe2\x52\xe1\xa3\xcb\x3a\x01\x7c\x1b\x6a\x95\x3b\xd8\x75\xf2\x6d\xbb\xca\x76\x1e\xf1\xc3\x33\x34\xa5\x0c\x99\x79\xe4\xdb\xd2\xa1\xd8\xcc\xce\x93\xe5\xb6\xf4\xf8\xd3\x10\x62\x19\x7a\x3e\x9a\x9b\x84\x1c\x5b\x12\x23\xdb\xc6\xee\x13\x24\x2b\x14\x4b\x9e\xb9\x9b\xd1\xa5\xdc\xd4\xce\x93\x47\xc7\xe6\x37\x59\x74\x3b\x91\xa6\xd0\xcc\xce\x93\xe5\x96\xda\xdc\x4e\x2b\x71\xcc\x0b\x30\xfd\x06\x4e\xec\x65\x3b\x77\xfb\x58\x85\x77\xd6\x27\xde\xc5\x57\x1d\x27\x51\x83\x14\xde\x19\x80\xc9\x90\xe5\x18\xba\x8d\x96\x97\x0f\xd9\x5f\xd7\x7a\x99\x1d\xc4\x04\x66\x90\xd9\xd2\x6c\x22\xbc\xb3\x28\xf4\xee\xe8\x16\xbe\x02\x5c\xc7\x22\x12\x84\x02\xde\x67\xe8\xc2\x0b\x71\x60\xae\xd5\xaa\x21\x71\x80\x88\xc0\x62\x55\x05\x11\x7a\x2b\x1a\x09\x7e\x38\x63\x38
\xa8\xbc\x15\xae\x31\xc1\x96\x63\x23\x7f\xe7\x5e\x7f\xea\xf9\x82\xb2\x55\x43\xfe\xa8\x4c\x38\x84\xb7\x03\xe1\x10\x51\xd6\xcd\x29\x19\x39\x32\x81\x1d\x66\x98\xc4\x1d\x3c\x23\xff\x10\xf1\xa0\x48\x1c\x8f\xc9\x5c\x31\x26\x93\x3d\xe6\xa4\xf2\xc8\xcd\xfe\xb8\xb9\xd9\x1f\x42\xe6\x64\xc3\x20\xdd\xfd\x01\x04\x6a\x0c\x00\x93\x1e\x3b\x38\xb0\x98\x13\x87\x45\xba\xfb\x83\x7a\xe9\xc7\x9c\x73\xb4\x82\x2c\x0d\xfa\x4a\xef\x4d\x73\x99\x82\xc8\xc1\x01\xb2\x32\x51\xa0\x90\xd8\x90\xa9\x6f\xca\x89\x6a\xbd\xb6\xec\x34\xd0\xd3\xd7\x78\xe2\xa9\xb5\x6f\xa1\x1e\x26\x5c\x78\xc4\x47\x74\xda\x13\x76\x26\x47\xf5\xa7\xd5\x12\x99\x3c\xd5\x2f\x3d\x42\xa8\xe8\xf9\x5e\x18\xf6\xbc\x9e\x5a\x64\x3d\x8f\xf7\xbc\x64\x34\xc0\x5e\xeb\x70\x67\x6b\x00\x71\x6a\x17\xb7\xe1\xd4\xcd\x50\xd7\x70\x8c\x7d\xed\x6b\x0b\x2d\x32\x97\xed\xe7\x68\xe5\x0a\xfd\xe7\x1c\x07\x01\x22\xee\xfe\x50\xff\xc4\x02\x2d\x5c\x62\xfe\x26\x01\xba\x72\x99\xb9\xc5\x97\xdc\xea\x86\x6b\xb8\xac\x99\xc9\x84\x32\x69\x5f\xf9\x0d\x4a\x62\x8f\x32\xe8\x9e\xa2\x29\x62\x88\xf8\x31\xce\xb2\x56\x6f\xee\x71\xf2\x37\xd1\x3b\x43\x88\xf4\xe2\x88\x38\x8e\x82\x5e\xbf\xa7\xc3\xee\xec\x1c\x84\xa4\x0f\x0a\x12\x1b\xfd\xbe\xb8\xb9\x01\x54\x4d\x2c\xd8\x37\x39\x5f\x7a\xe2\xe0\x00\x24\x54\x4b\xbf\x3e\x47\x23\xb1\xd6\x23\xb5\x90\xf3\xe5\x8b\x9a\xc8\x2f\x5f\x6e\x6e\x0c\x63\xcc\x90\xf8\x10\xcf\xad\xca\xf0\x65\xd7\x25\xc2\x2f\xf2\x49\x8c\x6c\x55\xaf\x07\x07\x24\x0a\xc3\x7d\xd7\x15\xd5\xf3\xfe\x51\x22\xd9\x43\x57\x4b\xa6\x43\x4d\x7b\x8b\x88\x8b\x1e\xc2\x62\x8e\x58\xef\x0c\xf5\x64\xed\x1e\x65\x19\x46\x80\x3d\xc9\x28\xe0\x41\xdc\x83\xbd\x87\x52\xa6\x8c\x83\xa1\xf5\x61\xdd\x12\x07\x07\x59\x8e\xbd\x96\x92\x47\xb0\x48\x8a\x9a\x91\x59\xb0\x28\x17\x5a\x3c\xcc\x86\x12\x17\xc3\x8c\xd7\x6b\x1b\x8a\x83\x03\xcb\xf4\xc1\x73\x04\x7b\x5e\xf9\x55\xd1\x66\x94\x21\xb7\x2b\xec\xb5\x85\x32\xa9\xa6\x60\x64\x21\x38\xbe\x3e\x47\x2b\x6d\x8c\xf7\xc4\x1b\x81\x16\x1f\xb5\xa8\xd4\x43\xcc\x2f\xc8\x38\x98\x95\x97\xf8\xde\xbe\x96\xd4\x72\x5d\x57\xa8\x78\xa0\x1f\x4d\x71\x4a\x80\x42\xaa\x89\x19\x12\x99\x30
\xee\x57\x88\xfb\x0c\x2f\x05\x65\xb2\x35\x95\x58\x5e\xc7\x63\xb9\xae\x9b\x74\x59\xcd\x2b\x22\x09\x20\x36\x03\xe0\xcf\x75\xd5\x11\xb2\xb8\x1a\xd9\x5a\xb2\x47\x22\x9d\x42\x3b\x89\xf2\x55\xdf\x4c\x20\x7b\x28\xdb\x8d\x5b\x4a\x62\xc1\xe8\x73\xaa\x7d\x40\x99\x3d\xd2\x1f\xd7\x56\x66\xc6\x3b\x70\x72\x86\x00\xb0\x4c\x63\xe5\x70\xa3\x7b\xd0\x7e\xa8\x6a\xc0\x90\xba\x43\x74\xd2\x4f\x86\x13\x6a\xaa\x52\x77\x38\x18\x0c\x86\x36\xe4\x0f\x5c\xf0\x47\x5f\x09\x8c\x51\x0f\x3c\xa0\xeb\xf5\xc4\x86\x68\x6d\xd9\x30\xe3\xdf\xaa\x16\x86\x60\xab\x54\x73\xf4\x43\xca\x11\x17\x72\x57\xd1\xae\x9d\xc9\x45\xd8\xba\x31\xc3\x0d\xaf\xf0\xf5\x13\xda\x3f\xad\xe9\x26\xf1\x12\x07\x62\x3e\x1a\x0e\x1f\x0e\xa0\xbe\x24\x1e\x3d\x1c\x0c\xd2\xfb\x45\x9a\xbd\x5f\x44\x42\x13\xc4\x86\xe6\xba\x52\xfb\x4e\x15\x7d\xa0\x32\x3e\x3a\x5d\x7d\x8a\x95\x26\xe4\x16\xbc\xc2\x0b\xfb\xc2\xaf\x34\xe2\xe8\xf5\x05\x22\xc2\xb6\xf1\x34\x5f\xa6\x3f\x27\x2f\xb1\xf9\x11\x63\x88\x88\x4f\xfa\xd6\x5e\x33\x10\x9e\x5a\x62\xbf\x1c\x7a\xa6\x11\x01\xb6\xea\xb1\xe4\xca\x6b\x4a\xa1\xb0\x63\x8f\x61\xfe\x45\x4d\x10\xd8\x77\x4b\xfd\xcc\x90\x78\x11\x53\xda\x02\x58\x35\xaa\xb7\xf0\x85\x05\x04\x03\xb0\x50\xc1\x86\xac\xc0\x08\xe9\xa6\x8f\x9e\x11\x97\x38\x04\x5d\x89\x8f\xf8\x2c\xc4\x64\xf6\x4c\xe2\x3c\x74\x5d\xf9\x95\x06\x48\xca\xc7\x83\x03\xfd\xb7\x9c\x6b\x47\xd0\xb7\xf2\x44\x94\xe4\xc4\x15\x76\xa2\xb1\x58\x85\x7e\x21\x88\x42\x60\xc3\xd0\x1d\x27\xfe\xf8\x9e\x15\x58\x7f\x9b\x52\x2a\x10\x1b\x33\x1a\x22\x17\x98\xb8\x25\x95\x6d\x60\xf2\x37\xdb\xb6\xc7\x83\x09\xe4\xae\xa2\xdd\x0f\x34\x52\x7c\xf4\x32\xc4\x88\x88\x53\xe4\x0b\x4b\xae\x86\xb0\xae\x6c\x8f\x3b\x82\x2e\x1f\x30\xc7\x57\x1f\xb5\x8f\xc3\x3f\xa9\xfc\xf8\x9c\x15\x3d\x29\xbc\x33\x7a\x81\x80\x3d\x62\x15\xbe\x0f\xa6\x2c\xf1\x72\xf8\xe3\x8d\xda\x8e\xd5\x82\x33\xcf\x1e\xd5\x4e\xa1\xf6\x7d\x48\x9e\x46\x72\x02\x24\x3c\x1c\x1e\x1c\xc4\x7f\xe9\xad\xdd\xce\x94\x39\xa9\x27\xb7\x9d\xf5\x4e\xc7\x64\xe6\x2c\x23\x3e\xb7\x0a\x55\xf7\x64\xe3\xd9\x3c\x79\x19\x78\x05\xf1\x7e\x5a\xac\xf2\xac\x3f\xdc\x77\x5d\x7c\x70\x50\x84
\x37\xd9\xff\x30\x1c\xda\xeb\xb2\x03\x7d\x0a\xe8\x9a\xc4\x34\x63\xe0\xa3\x30\xec\x6b\x8d\x1b\x4c\xd4\x03\x4f\xcb\xd2\x65\xb4\x5a\xec\xc0\x86\x0f\x07\xa6\x21\x9d\x13\x81\x65\xd5\x17\xe4\x30\xc4\xf1\x1f\xc8\xba\xd6\x23\x1d\x5d\x6b\x11\x91\xf5\x52\xf9\x2f\xf9\x25\x96\x17\x65\xf7\x15\x79\x8c\x51\xef\xc5\x28\x71\x51\xba\x91\xd7\xf5\x72\x17\xf1\x9a\x57\xaf\x4d\x8b\xe5\x7b\x78\x55\x41\x25\x0f\xbd\xc4\x61\x78\x8a\x48\x80\xd8\x36\x6e\x97\x48\x58\x60\xee\x71\x1d\xa6\xc2\x41\xa5\x63\xbf\x59\xe7\xa0\x53\x08\x6c\x63\x9f\x86\x32\x5e\x10\x28\xf9\x24\x59\x19\x11\xc4\x2c\xa0\x29\x0c\x72\x73\x60\x86\x18\xe0\xe0\xc5\x72\x89\x3c\x66\xd9\x6b\x85\x6c\x7d\x38\xad\x69\x5e\x2f\x8e\x2e\x3d\x28\x7c\x74\xeb\x25\x44\x0c\x8c\xec\x55\xd7\xad\x78\xc4\x25\x2f\x2f\x80\x38\xa3\xc1\xca\x74\x61\xa2\x3e\x8d\x9c\x20\x45\xd0\x85\x87\x49\xef\x9f\xbd\x00\x5f\x00\x0d\xa2\x1e\xb9\x49\x4e\x32\xb5\x42\x65\x4b\x19\x25\xa5\x4d\x98\x93\x36\x90\xaa\x25\xf1\x2e\x92\xac\x95\xae\x42\xcd\x59\x7d\x6e\xef\x65\xf8\xc3\xb0\xe7\xaf\x9e\x98\x3b\x0b\xef\x4a\xee\x82\xb6\xdd\xb0\xca\x88\xe9\x49\xaf\x8a\x64\x6d\xe9\xd7\xb0\xa4\x1a\x91\xbc\x1e\xa5\x3f\x7d\x54\x9e\xdf\x1f\x28\xc7\x7a\x02\xd6\x6b\xf8\x45\x1d\xb8\x4f\x93\xf3\x76\x69\x7a\x30\x37\x7c\x80\x82\x9b\x9b\xfc\x17\x4c\x66\x37\x37\x56\xcc\xc9\x3f\x85\x78\xb1\x40\xec\xc8\xb2\xf5\xeb\x42\x4c\x9e\x27\x02\x39\xb1\xa3\x7c\x66\x09\x25\x4e\xec\x35\xd4\x63\x7a\x89\xc2\x30\xe7\x49\x2c\xb7\x4b\xdd\xa4\x44\x20\xde\xd4\xf3\x2b\xd3\x14\x42\x10\xbb\xb6\x98\xfd\x5e\x12\xe9\xad\xa2\x91\x39\xb6\xca\xcd\x52\x2a\x9c\xd5\xa5\x2e\x8a\xdf\x1d\x21\xf1\x49\xde\x4d\x41\x7f\xf5\x96\xd0\x2b\x28\xed\x3a\x47\x6b\xe4\x96\x1a\x54\x52\xf5\x45\x3c\x3a\xed\x62\xff\x16\x4d\x05\xcc\x7e\xf8\x44\x97\xe6\x77\x76\xde\xb2\x5f\x34\xcf\xd8\xd0\x2f\xf7\xe0\xd3\x88\xdc\x59\xfb\x26\x33\x97\x21\xe3\xd2\xd5\x2c\x87\x89\x15\x19\xf0\xb3\x68\x3a\x45\xcc\xde\x8b\xfa\xee\x12\xfa\x0f\xe4\x7f\x52\x20\xff\x41\x16\x28\xe3\xef\x1c\xa4\x33\xd2\x8f\x34\x6d\x17\xee\x58\x27\x6d\x12\xee\xe0\x99\xf8\x87\xaf\xac\x1b\xa1\x1b\x3d\x10\x90\xbb
\xf9\x6c\xcf\x81\xa3\x8f\x8b\x2f\x84\x15\xda\x36\xc4\x4a\xa7\xc5\x63\x3e\xb1\x21\x7d\x6e\x91\x32\x49\x0a\xfa\xb2\x15\x76\xc2\x3e\xd6\xfc\xd4\xf6\x9c\xf5\xab\xa6\xd0\x28\xa7\xf9\xb8\x2c\x0a\x81\x3e\x97\xab\xa0\xb2\x7c\xc1\x39\x5a\x01\xc8\x6d\xe8\x8d\xf9\xc4\xa5\xf6\x68\xa1\xf7\xe7\xd0\xce\xa0\x9c\x0e\x3b\x97\xdc\x4a\xea\x99\xde\xd8\xa2\x19\xb4\xf8\x58\x4c\x6c\xe7\x1c\xad\x26\xb2\x74\x11\xdb\x86\xae\x43\x77\xa1\x5f\x1c\x53\x14\x9d\xbb\x39\x42\xed\x15\xc9\x38\x57\x71\x68\xdf\x8c\x56\x31\x49\x72\x1f\xd5\xf2\x00\x30\x2c\x7e\x16\x68\x01\xe0\xbc\x81\xda\x9a\xb0\x45\x0d\x2b\x0b\x34\xa8\x1e\x96\x91\xa6\xa3\xde\xe0\x59\x2f\xc0\x7c\x19\x7a\xab\x51\x8f\x50\x82\x9e\x81\xec\xe4\x2c\x72\xf9\xc6\x42\x77\x31\x16\x13\x45\xe5\xb3\x16\x2a\x9f\xdd\x1f\x95\xf5\x8e\x31\xb5\x38\x3c\x83\xa1\xa4\xaf\x26\x02\xcc\x32\x92\x64\x33\x2d\x9a\x2c\x6a\xaf\x73\x42\xcb\xf5\xd6\x19\x9f\x66\x15\xf9\x55\x9d\xf1\xca\xf8\x16\x57\x6b\xf2\xe6\xb5\x68\x4c\x96\x91\x00\xfa\x8d\xdf\xd0\x3b\x43\xa1\xf9\xdb\x33\xff\x9a\x18\x3a\x93\x1a\x6b\xad\xb7\xeb\xe4\xd4\xa1\x7b\xb0\x1d\xf5\x7c\xe3\x47\x75\x65\x42\x99\x05\x3c\xfd\x36\x9c\x88\x4f\x29\x12\xdd\xf4\x90\x65\x01\x35\x66\x00\xaf\xcf\xa2\xb3\xb3\x10\x71\x65\xee\x50\xc1\x9f\xb1\xf1\xe3\x02\xa3\x4b\xa3\xfe\xad\xed\x3d\xe1\xc8\x09\x96\x47\x56\x5d\x9f\xd8\xeb\x2e\xae\xc6\x82\x2e\xe8\x8c\x79\xcb\xf9\xaa\xaf\xfe\xb9\x73\x37\xf8\xb2\x47\x69\x6f\xf8\x68\xf0\x77\xb4\x86\xa4\x70\xfc\x8a\xcf\xe0\xff\x1c\x3c\x57\x49\xbe\xde\x48\x49\xff\x77\x64\x1f\x0e\x07\x83\xd1\xa0\x43\x78\xf0\xb5\x52\x9c\x8e\x8f\x1f\xc1\x14\xab\xd1\x00\x2e\xbc\xab\x51\xff\x69\xfc\x7f\x5b\x07\xde\x62\xe6\x87\xc8\x1d\x0b\x6b\x68\x43\x61\x39\x47\x0f\xd5\x3f\xfa\xbf\x8f\xd5\x3f\x43\xdb\xa4\x84\x53\x3c\xc2\x25\x6c\xdf\xc0\xf5\x1d\xf3\x8f\x86\xec\x0f\xed\xc9\x1a\x2e\x70\x18\x62\x8e\x7c\x4a\x02\x5e\x52\xd3\x03\xac\x8f\xd7\x1c\x40\xb0\xf0\xae\x2a\x1c\x67\x4b\x71\xab\xde\x55\x62\x77\x1c\x13\xeb\xe8\x21\x44\x36\x24\xd6\xc3\x81\xfe\xf7\xb1\xf9\x3d\x1c\xc8\x0f\x13\xc9\x18\x71\x17\xa5\xce\x53\xfa\x55\xf4\x6b\x82\x81\x15\xa6
\xea\xbd\xce\x0c\xb4\x92\x07\x4e\xd2\xf0\xcd\xcd\x58\xeb\x9a\x52\x26\xbc\xf6\xb2\x91\xb2\xc2\xbe\x4e\x01\xff\x99\xf3\x2c\x56\x88\x64\x83\x0a\xcc\x37\x98\x56\x90\x7c\x2d\x1c\xf2\xcf\xe3\x47\x83\x58\x8d\x8d\x1f\x6d\x84\xa1\x7b\xfc\x68\x70\x28\x1c\xb2\x27\x0f\xf1\xc6\xdd\xb9\x92\xd5\x06\xca\x6f\x5a\xb8\x2e\xeb\x0f\x6f\x6e\xd4\xce\xce\x3c\x12\xd0\x85\x65\xff\x23\x4c\x9f\x60\x23\xf9\x84\x46\x22\xb5\xb9\x5f\x33\xaa\x82\xd3\x8e\x1f\x0d\xfe\xce\x0e\x93\x11\xac\x8e\x46\xe9\x50\x0f\xcb\xb8\xfd\xbd\x3f\x7c\x34\x80\x52\xe4\x8c\xb4\xe4\x49\xa6\x22\x53\x0f\x72\x34\x53\x67\x0e\xe1\x98\xbf\x8c\xd7\x72\xdb\x92\x5e\x61\x14\x06\x9b\x26\xca\xc8\x54\xda\xa1\x6b\x5c\x22\xa4\xc6\x57\x7e\xf1\xee\xce\x2f\x19\xe3\x8e\x12\x51\xd3\x69\x48\x87\x9e\x9f\x3c\x9d\x5c\x17\x9b\x5c\x55\x03\x05\xf8\x0e\xa6\xa1\xc1\x5a\xd9\x7d\xf0\x6a\x28\x55\x57\x9c\xca\x66\xa8\xe2\xb3\x32\x2d\x98\x03\x94\xdc\x74\x0f\xd5\x1b\xe9\x45\xe8\x7e\x06\xe2\x7f\x57\x6e\xe6\x0e\x3c\xa5\x6f\x26\x64\x7b\x18\xf1\x9c\xb6\x92\x31\xd3\x72\x24\x94\xaa\x90\xcc\xa9\x85\xd4\x15\x19\xcc\x6b\x66\xe9\x51\x33\x73\xea\xcc\x3e\xa2\x6c\x99\xb4\xbf\xd7\xb2\xf2\x28\xdf\xc1\xda\xce\x06\x84\x55\x84\x64\xc5\x96\x5e\xad\xda\xdc\xdc\x18\x37\x81\xd3\x28\x94\xfb\x95\xb9\xb7\x59\xc7\xb9\x39\x85\x23\x8b\x8d\x2e\xf5\x69\xb5\x44\x60\x94\xb7\x13\xa6\x3d\x43\x0d\x0b\x8d\xd9\x38\x9b\xa0\xd3\xb4\x3e\x2a\xc5\x9a\x09\xb4\x70\x4c\xcf\x71\xb5\x26\xd5\xa7\xcc\xe3\x46\x2f\x6f\xcc\x1d\xa0\x83\x92\xd5\x5e\xa2\x52\x4b\x94\xee\xc8\x23\x7f\x8a\x99\x7a\x40\x5d\x8b\xb6\xf0\x6e\xd8\xbb\x9d\x6d\x32\x91\xd7\x4a\xc3\xd4\x4f\x8f\x8c\xae\x25\x97\x8e\xae\x3d\x3e\x02\xf2\x2f\xb0\x86\x5c\xff\x8a\x83\x59\x18\x52\xae\x6f\xea\xda\x0c\x4a\x90\x1f\x55\x41\x59\x19\x50\xc7\xe3\x0e\xfa\x87\x86\x4b\x34\x10\xa0\x14\x17\xe2\xcd\x4c\x72\x33\xa0\x95\x7d\x30\x29\xec\xa6\xc9\x5e\xaa\xb4\xa6\x91\x65\x1e\x67\x06\x2f\xc2\x10\x14\x8d\x26\xb6\xfd\x00\xf4\x2c\xf0\x20\xcf\xeb\x55\x7b\xbb\x69\x25\xb7\xf7\xc6\xa7\xf2\x11\x32\x5a\xc1\x0f\x2b\x4b\xf3\x23\x14\xb6\x91\x07\x6b\xd5\x88\x6d\xce\x30\x96\xfd\x00\xd8
\x31\x43\x9b\x7d\x48\x57\x2d\x3c\x8e\x67\x1e\x13\x71\x38\x64\x6e\x72\x29\x47\x9e\x03\x30\x22\x30\x74\x85\x7a\x95\x09\xf2\xb4\x28\x94\x45\x21\xa4\xca\x5a\x96\x39\x70\xc4\xd7\xc6\x56\x7f\x58\x8c\xb7\x52\xc9\x52\xec\x3c\x78\x62\xd8\xa6\xf6\xc1\x41\xb9\xca\x9b\x57\x0d\x15\x6e\x6e\x00\xd8\x77\x5d\x1e\x2b\x5a\xa6\x8e\xa2\x87\x2d\x0b\xb2\x32\x80\x23\x12\xbc\x0c\x29\x41\x65\xb1\x24\x8b\xe4\x69\x85\x12\x04\x20\x6a\x8a\x6f\xcc\xad\xbc\xf4\x4d\xc5\xcd\x36\xc8\x4c\xbd\xef\x63\x9b\x2c\x0e\xa8\x65\xb3\x6c\xda\x10\xeb\xb6\xd2\xf4\x95\xc4\xbf\xb6\xcc\x3b\xde\x32\x33\xd2\x25\x95\x28\x19\xe1\x62\x5e\x5f\xc9\x2c\x11\x5d\x15\x7c\x54\x81\xa2\x7a\x51\x42\x5a\x01\xf0\x0a\x71\x81\x89\x9a\x41\x03\x85\x5d\x5d\xc6\x9d\x29\x26\x81\x14\x3e\x3a\x01\x12\xb7\xa1\x57\x53\x44\xed\x3d\x7c\x73\x63\x61\xf7\x5a\x5d\x29\xf3\x35\x34\x70\xee\x18\x27\xa6\x79\xf3\xc9\xb6\xa1\x77\x73\x63\x79\x06\x96\x66\x60\xbd\x0a\xd8\x8e\xba\x00\x4c\x11\x1d\x61\x58\x40\x6a\xe4\x15\x54\x05\xb5\x92\xdf\xa1\xcb\xb7\x4a\x96\x57\x44\x93\x7e\xe6\x48\xb9\x8a\x88\x88\xa1\xde\x4b\xc5\xed\xbd\x8f\x3a\x3d\x88\xf1\x9d\xe9\xfd\x0d\x3c\x40\x0f\xc0\xdf\x80\x7a\x27\x55\x3d\x86\x5f\xf5\xd2\x56\xcd\xee\x53\xa0\x1f\xaa\x0a\x29\xd7\x19\xee\xf5\xa9\x31\xd5\x61\xf4\x3b\x28\x50\xab\x32\x28\x91\xf9\x76\x22\xb6\x21\x35\x7f\xec\x71\x57\x0a\x6e\x7d\xb1\x6d\x74\x1c\x96\xd5\x71\x62\x4f\x0d\xb9\x62\xb5\x85\x48\x5f\x79\x35\xe8\x3d\xba\x3e\x64\x15\x7a\x4f\x86\xd3\x46\x71\xf3\x39\xce\x1a\xe1\xa9\x15\x3f\x2c\x95\x38\xf6\xf0\x83\x03\x2b\xcb\xb7\xa6\xe5\x78\x1f\xb1\xe1\x20\xb7\x5b\x14\x89\x18\x6f\x8f\x31\x87\x26\x66\x58\x7d\xc1\x3a\xa6\x09\x47\x26\xac\x95\x78\x6b\x71\x47\xe8\x47\x2d\x2d\xbb\x9c\xf9\xcb\x68\x13\xd8\x5e\xb7\xd1\x82\x97\xf2\x82\x99\x82\xd8\xd6\x97\xd4\x70\xe2\x8b\x1b\x6b\x9b\xed\xe7\x7b\x56\xff\x94\xa8\x86\xdc\xb5\x42\x37\xe6\x21\x78\x1b\x75\x27\xcc\x68\x3a\xb7\x51\x2e\xf5\x8a\xd7\x0a\xa5\xfe\xbb\x45\xc1\xd4\x40\xf7\xa0\x62\x7a\x61\x48\x2f\x01\x04\x01\x22\xab\x7b\xd0\x2d\xf9\xfd\xe8\x83\x9a\x1c\x95\x1a\x61\x3e\xc8\x3e\xb3\xcb\xe4\x35\x39\x88\xf3
\x90\xa5\xed\xa6\x00\xee\x15\x55\x4d\x18\xb9\x92\x0e\xb1\xf4\xe5\x3d\xeb\xef\x36\xa8\xd4\x46\x2d\xe5\x26\x41\x13\xdd\xd1\xb3\x6f\x6e\xb4\xe7\x44\xee\x13\xf8\xbb\xa4\x2e\x95\x8a\xe8\xbe\xeb\x46\x15\x65\xb8\xa2\x4c\xee\xfe\x6a\x5a\xf8\xcd\x4d\x0e\x21\xc3\xee\xea\x85\xe9\x86\x6c\x3d\xb9\x55\x7d\x7e\xb1\x99\x32\x79\x7e\xf1\x55\x94\xc8\xca\x5c\x19\x95\x03\xd9\xca\xd2\xa2\xde\x09\xfe\xcb\xd0\xd2\x5d\x6b\xfc\x37\xa7\x64\xb4\x3f\x84\x88\x34\x3d\x85\x7b\x26\xa8\x07\x6c\x58\xaf\x62\x16\xb7\x36\x9d\x48\x76\x7f\x98\x73\xab\xfb\x7e\xec\x35\x90\x94\x2d\x36\xda\xe3\xb3\x64\xb1\x21\x59\x6d\xc6\x0b\x02\xe5\x2a\xe1\x85\x60\x54\xce\x1e\xbf\xf4\x18\x22\xc2\xf9\x05\xad\x40\x71\xbf\xce\x6c\xe9\x40\x96\x43\x00\x1e\x58\xe0\x50\x9e\x3a\xc5\x73\x31\x02\xc0\x7e\x40\x2a\xf4\x1d\x45\xc6\x92\x99\xc7\x10\xb7\xd8\xbd\xfa\x9c\x7b\xc8\xc5\xe0\x54\x6d\x27\xfa\x4d\xe3\x5b\xca\xca\x49\x8a\xef\x6c\xc6\x03\xeb\x6e\x4a\x3a\xbf\x38\x9c\x56\x84\x22\x36\xc1\x6b\xcd\xe3\x9b\x9f\x60\x93\x91\xdc\x52\x07\x62\xca\x9b\x47\x30\xbc\x28\x49\x9c\x7b\x31\xc2\xd7\x29\x27\x4d\x7a\x48\xed\x6e\xad\xd2\xd5\xc2\x30\xdd\x90\x99\xdc\x90\x99\xd9\x02\xd5\xb6\x95\x57\x18\x72\x9b\x95\x66\xff\xe4\xe5\x60\xf3\x88\xb0\x72\x57\xa8\x31\xc0\x84\x85\x9b\xe6\xee\x5c\xc6\x28\x15\x95\xa9\x06\x3a\x6c\x8b\xdf\x9c\xd7\xd4\x6b\x9e\xdd\xd9\x4d\x67\xef\xc9\x70\xdd\x3d\xe4\x87\x6a\x60\xab\x6d\xef\x8a\x69\x18\x2d\x08\x77\xc7\x47\x0f\x61\xfc\xff\x93\x75\x6d\x52\x28\xcb\xb0\x2a\x0a\x2a\xbc\x32\x4b\x69\x91\x12\x58\xbb\x7c\xb7\x59\x51\x09\x41\x20\x8f\xd1\xa6\x67\xa0\xef\x0f\xbf\xd5\x38\x7e\xce\x8d\x62\x2b\xcd\x59\xe7\x20\x2b\x6b\xce\xd9\x85\x9a\xb7\x9b\xd2\xa0\xde\x6e\x5a\xd0\x89\xed\x83\x03\xa5\x70\xe8\xdc\x56\x56\x67\xbd\x53\x73\x35\x9f\xd3\xcb\xdb\xca\xd0\x6c\xa8\x5d\xb9\x74\x86\x44\x3f\xb9\xb4\x6d\x08\xc9\xdb\x5a\xbd\xcb\x8b\xb8\xdb\x9c\x10\x9b\x84\x70\x8d\x4a\xd5\x65\x7d\x95\xd2\x8e\x2a\x7f\x1a\x14\x7c\xf2\xce\x40\x9c\x32\xac\xaf\x53\x9b\x01\xbb\x41\xdc\xc7\x2c\x66\x15\x78\xcc\xae\x3c\x01\x95\xb9\xca\x9c\x9a\xea\x19\x2b\x3e\x23\xe5\x6a\x7d\xa0\x4c
\xa8\x2a\x1f\x95\xb5\xa6\x76\x73\x60\x2d\xfa\x5c\x23\x15\x12\x9f\x26\xa3\x5a\x69\x9d\x3c\xaf\x6e\x26\xa3\x7a\x7f\x49\x10\x33\xde\xa8\xcf\xc6\xdf\xfa\x85\x80\xd0\xfa\x9b\x93\x09\xd9\xed\x29\xc7\xab\xb1\x8a\x1b\x03\x2a\x52\x1b\x4c\x46\xc6\x43\xac\xf7\xa0\x17\xe0\x8b\x9e\x8a\xb1\xf9\x9b\xad\x4f\xe8\x79\x0f\x93\xf4\x8d\x77\x0b\xd9\x7b\x24\x1b\x74\x17\x3f\xbf\xde\x23\xa9\xc7\xf7\xc1\x81\x84\xcb\x79\x80\xdb\x6b\xc8\x29\x13\x3a\x75\xdd\x0f\xab\x37\x0b\xb9\xae\x95\x9b\x46\x15\x43\x55\x66\xc7\x83\x2c\xab\x31\x67\x72\xf8\xc5\x9a\xb6\x51\xb2\xe3\x2c\x7b\xa3\x72\xd6\x3d\xf6\x7c\x30\x1a\x6a\xd5\x36\xc9\xf8\x37\xaa\x48\x01\x28\xe1\xfa\x06\x30\x4e\x05\x38\x8a\xcd\x93\xd5\xdd\xf4\x6b\xda\xed\x0d\x93\xf7\x0b\x63\x57\x99\x24\xfa\x74\xd0\x55\x21\x8e\x9f\xef\xfd\x86\xbb\x7d\xf1\xad\x8c\xac\xf3\x0f\x72\x18\x0a\x22\x1f\x65\xaf\x0f\xd3\x87\x4f\x13\xf7\x73\x94\x39\x2c\x10\x28\xe4\x49\x6b\x60\xaf\x73\xb1\x43\x99\xfb\xf0\x64\x61\x2b\x3f\x28\xbd\x04\x62\xa6\x3f\xf9\xbb\x55\x17\x6f\xa4\x7a\xf3\x11\x0e\x93\x35\x74\xa8\xe2\x93\xe4\xc1\x51\xaf\x2d\x62\xc3\xd0\x1d\x40\xee\x0e\x9e\x85\xff\x20\xcf\x1e\x3c\x08\x21\x7f\xe0\x0a\x9b\x8d\xc3\x89\x8b\x1c\x1e\x9d\x71\xc1\x2c\x0e\xd3\x48\x3f\xb6\xb6\x04\x3c\x8e\xad\x7d\xfd\xa1\xfd\x60\x38\xf8\x3b\x59\x67\xdf\x7a\x2b\xee\xd3\x7a\xf0\xce\x5c\x2c\xc2\x8f\xde\x14\x59\x3a\x62\x65\xd4\x53\x06\xf9\xe5\x15\xa8\x7c\xa3\xa3\x61\x43\xf8\x4a\x1b\x7c\xd3\xc5\x68\xeb\x06\x0f\x05\x15\x5e\xa8\x9c\x52\x4b\xea\xd0\xf5\xc2\xbb\xfa\xa0\x57\x0c\x5c\x78\x57\xff\xa5\x17\x95\xfc\xf3\xa5\x59\x30\xeb\x1a\xff\xb8\xc4\x42\x99\xb6\xa0\xfd\xf8\x4c\x1b\xfa\x47\xdc\x0a\x98\x94\x78\x31\xf7\x08\xef\x03\x96\x3b\x6d\x10\xc9\x84\xc3\xc1\x53\xe5\x04\x37\xaf\x1e\x7a\x8a\x55\x95\x2e\x17\x96\x62\x93\x32\xf0\xb6\x6c\x97\xa1\x85\x87\xe5\x48\xb7\x69\xbe\x99\x9b\x7c\x2f\xf4\xad\x87\x83\xff\x6f\xaf\xdf\x03\x0f\xb4\xd3\x1d\x8d\x48\xd0\x38\xa4\xc3\x23\x5b\x72\xa0\xad\xd2\x8d\xa6\x34\xed\x60\x48\x8e\x77\x9c\x52\xf3\xc6\x96\x0c\x81\x16\xef\xf1\x2c\x99\x0e\xcc\x3c\xdd\x61\x07\xf1\xcc\x9b\x0e\xe2\xb9\xbf\xc3
\x1e\x12\x76\x92\x5d\x18\x49\x5f\x3d\x7d\x59\xae\xac\xe2\x0e\x56\x7e\x14\x25\xad\xa1\x5e\xa0\x85\x66\x87\xa9\x6d\x3f\x61\xf4\xce\xed\x27\x04\x52\xed\xc7\x5b\x52\x6d\x07\x09\xb6\xdd\x7b\x48\x09\x64\x37\x79\x52\x56\xef\x62\x4d\xda\xbd\xd6\xce\x79\xb4\x78\x3f\x4d\x8f\x5a\x15\x6f\x8c\xc7\x82\x67\xdb\x1d\x70\x3b\xdd\xbe\xc3\x99\x97\xed\xfe\x99\x37\x9f\x8e\x19\x39\x7a\x4d\xd8\xff\x1c\x7c\xdd\x13\xaf\xdc\xb0\x6a\x86\xd2\x7c\xe8\xad\x31\x40\x41\x9e\xee\x89\x34\x2d\xe4\xb2\x90\x37\x1e\x7a\x9d\xc6\x93\x6f\xd1\xfe\x74\x70\xd0\x60\xde\x32\xc9\x9b\x6d\x48\x6b\x4f\xc4\xfa\x1d\x04\x93\xea\xa2\xf1\xad\x84\x1c\xe4\xce\x38\x1b\xcf\x51\xb8\x94\x22\xc1\x3b\xab\x4c\xbc\xb7\xf0\xd4\xea\xd5\x30\x59\xd8\xef\x11\xc1\xfa\x36\x15\x76\x1d\xdb\xf3\xce\x78\x2b\xb1\x7c\xba\x01\xb5\x24\xf0\x8e\x91\x4b\xe1\xd7\x95\x5e\x3e\xed\x44\xb0\xf9\x46\x14\x9b\xef\x20\xc9\xe6\x1b\xd1\x6c\xde\x4a\xb4\x20\xe8\x4e\xb2\x20\xd8\x35\x82\x49\xec\xba\x92\x2b\x08\x5a\x89\x45\x2a\x89\x25\x58\x54\x45\x2d\xb2\x73\xd4\x22\x1b\x50\x8b\xb4\x53\x6b\xb9\xec\x5f\x20\xc6\x4b\x89\xde\x72\xfa\xe2\x14\xcf\x0e\x11\xb9\xc0\x8c\x12\xe3\xeb\x9c\xee\x79\x99\x16\xe2\x6b\x32\x34\x43\x57\xc5\x6c\x9a\x5b\x5e\x91\x79\xcb\xe5\x6f\xba\x71\x37\xdc\x8b\xb5\x88\xf8\x5e\xe6\xc5\x87\x0f\x8e\xe9\x3a\x4d\xfc\x15\x26\xe7\xd4\xd2\x03\xef\xc3\xca\x07\xde\x87\xd9\x07\xde\x87\x93\xd1\xf5\x7a\x2f\xb1\xa0\xcc\x71\x80\x3e\xce\xbd\xe7\xcc\x59\x78\xfa\xde\xd8\xf4\x77\x8a\x66\xaf\xaf\x96\xf6\x78\x30\x19\x69\x28\x33\xca\x0c\x24\x9f\x7b\x19\x28\xb6\x2e\x2a\xa9\x3f\xab\x29\x70\xf4\x4c\x58\x61\xeb\x3c\xa1\x6a\xc6\x56\xe6\x72\x2e\x59\xa1\xcc\xdd\xba\xd2\x8e\x31\xb8\xc1\xb4\x2b\x8f\x2b\xf0\x56\x36\x67\xcc\x5b\x6d\x4a\x3d\x55\x67\xc7\x88\xa7\xf1\xec\x4a\x3b\x09\xdd\x4a\x3a\x8e\x2b\x73\x40\x56\xef\x3e\x12\x78\xc7\x48\xa6\xf0\xeb\x4a\x31\x8e\x49\x17\x82\x6d\xa0\xe2\x28\xe8\xdd\x23\xd9\x06\x2a\x8e\x84\x6e\x25\x9a\xf0\x36\xe0\x32\x09\xbc\x63\x24\x53\xf8\x75\xa5\x98\xf0\xda\xb9\x4c\x78\xe4\x68\x23\x8a\x1d\xed\x20\xc9\x8e\x36\xa2\xd9\x51\x17\xa2\x6d\xb0\x34\x15\xf4\xee\x11
\x6d\x83\xa5\x29\xa1\xdb\x89\x46\xcf\x9a\xed\x82\x0a\xe2\xee\x9f\x7d\xaf\xcb\xf2\x59\xba\xac\x8d\xd5\xb5\x3d\x3c\xb5\x3e\xae\x16\x67\x34\x74\xb0\x40\xcc\x13\x54\xf9\x5c\x9a\x0c\x1a\x29\x60\xd5\x7d\xcd\x78\x02\x99\xbb\x3f\x80\xa1\xbb\x3f\x4c\xee\x66\xf6\x04\x5b\x25\xb7\x58\x14\x62\x17\x8d\x0b\xed\x4f\x2c\xfb\xd9\xbe\xc5\x5c\x8b\xba\x58\xe5\xd3\xb3\x6c\xdb\x09\x28\x41\xf6\xc1\x81\x45\x74\x9e\x18\x6a\x2e\xaa\xe1\xbe\xb8\xb9\x21\x69\xce\x22\x61\x3f\x93\x5d\xda\xcf\xd2\x4c\x8c\xa1\x1c\x02\x77\xd1\x7a\x8a\x89\x17\x86\x2b\x95\xbb\x71\x9f\x1d\x1c\x60\x47\x8f\x3d\xfd\xcb\xb2\x13\x20\x3c\xb5\x42\x93\xdc\x94\x27\xd7\x98\x44\x47\xfa\xed\x55\x26\x3d\x7d\x43\x94\xfb\x6c\xcf\x13\x02\x2d\x96\xa2\x27\x68\x2f\x40\x3a\x31\x69\xc4\x50\x8f\x50\xd2\x57\x18\x9e\x85\x69\x1e\x44\x60\xab\x0c\xbb\xcd\x6a\x6c\xf1\xd2\x90\xb9\x92\xca\x43\x95\x3a\xab\xce\xf7\x2c\x6f\x4d\x0c\xeb\xec\xd6\x31\x3f\xea\x00\xff\xa5\x72\xc3\x68\x4b\x00\x50\x59\x67\xb7\x56\xb4\x1a\xfa\x07\x83\x6d\xc7\x76\x33\x75\xda\x56\xb7\x4e\xb9\xd2\xf7\xc2\xb0\x5a\x3d\x26\x3a\xb7\xa3\xbf\xaa\xaa\xb1\x5b\x94\xd4\x03\x7f\x21\x31\xed\xd8\x6a\x52\xa3\x95\x8a\x67\xac\xd2\x28\x5c\xb9\xaf\x28\xe0\x1d\x23\x9d\xc2\xaf\x2b\xd5\xce\x58\xab\x95\xd9\x47\xb8\x92\xe1\xaa\x09\x26\x81\x77\x8c\x60\x0a\xbf\xae\x04\x43\xb8\x9d\xc3\xd2\x28\x89\x86\x10\x8a\x6c\x54\x44\x45\xd5\x1d\xa3\x61\x8a\x72\x57\x42\x26\x01\x03\xad\xd4\x8c\xc8\xf9\x86\xf6\x00\x5d\x67\xd7\x48\xa8\xf0\xec\x4c\xbe\x88\x9c\xb7\x92\x2e\xfc\xe3\xb8\xfb\xc1\x43\x43\xef\x18\xd1\x34\x86\x5d\x89\x26\xa1\xd7\x35\x2f\x06\x27\x64\xa0\x8b\xa5\x57\x73\x6f\xd8\xc0\x71\xa6\xd6\x8e\x91\x2f\xc6\xb5\x2b\x01\x35\x7c\x2b\xdf\xe9\x2b\xed\x2d\x48\x18\x7d\xa7\x4f\x0c\x36\x93\x50\xe1\xba\x01\x09\x23\x81\xda\x49\x48\x84\x87\xab\x1f\x56\x6b\xa4\xa1\xa9\xb6\x6b\x44\x8c\xb1\xed\x4c\x45\x5d\xa1\x9d\x8c\xdd\xef\x7c\x77\xef\xca\x77\x93\x1b\xdf\x0e\x17\xbe\x1b\xdd\xf7\xee\xe0\x75\xef\x46\xb7\xbd\x5d\x2e\x7b\x03\xe4\x6f\xb8\x3e\x65\x8d\xdd\x22\x9a\xc2\xb1\x73\x7b\x7e\x2b\xc9\xf0\x45\x67\x16\x93\xb0\x3b\x46\x2c\x89\x5d
\xd7\xf6\xf0\x45\x2b\xb1\x18\xad\x7c\x34\xb1\x89\xc1\x64\x95\x1d\x23\x9a\xc2\xb2\x6b\x83\x8c\x2e\xdb\xc8\x56\xf1\x70\x2f\xe6\x1d\x9f\xa0\xec\xd2\xc6\x6e\x91\x57\x61\xf2\x41\x22\xa2\xf3\xfd\xbe\xe1\x3f\x69\x4a\x74\xec\xa3\xa6\xfe\x76\x93\x10\x07\x07\xdd\x72\x1e\x92\x66\x76\x7d\x2a\x3e\x26\xf4\xd8\x7a\x36\xe2\x26\xb6\x98\x10\xc1\x22\xd4\xd7\xc9\x7a\xfa\x78\xda\x5f\x32\xc4\x95\xcb\xc8\xf6\x73\x53\xd3\xe2\x6e\x4f\xd3\x27\x16\x21\x1d\x40\xf0\x66\xfa\x21\xa6\xd1\x96\xf3\x55\xd1\x56\xeb\xc4\x91\x8b\x0d\xfc\x7d\xee\x20\x28\x19\x91\x0b\x97\xfd\x75\xc7\xb3\x1b\x77\x3c\x89\x33\x15\xcb\xdf\xe3\x1c\xd9\x30\x74\x99\x79\x06\x65\x3c\x9c\xd4\x3a\x6c\xf7\xf4\x9b\x7b\xc9\xd2\x18\x87\x93\xe7\xd9\x1f\x23\xda\xe2\x0e\xc5\x5a\xd8\xf7\xf7\xee\x3e\x7e\xe8\xf7\xa8\x94\xad\xea\xbb\x97\x16\x6a\xcc\x9d\xe5\x81\x84\x6e\x5d\xf1\x57\x95\x8a\x4a\xa5\x92\x5c\x76\xdf\xfb\xee\x09\x76\xb5\x81\xf2\x71\xd5\xae\x68\x5c\x2d\x17\xc3\x4d\xc8\xb5\x18\xee\x1e\xc1\x24\x86\xdd\x49\xb6\x18\xb6\x11\x4d\x47\x7e\xf4\xcf\x36\x75\xb4\x4b\xeb\xed\x16\x09\xe3\xa4\x73\x9d\x1b\x8d\x2b\x74\x23\xe4\x56\x54\xdc\x49\x12\x6e\x48\xc0\x76\xf2\x91\x60\x1b\x2e\xd4\xb5\x76\x8d\x80\x24\xd8\x88\x03\x25\x78\x3b\x01\xaf\x50\xd0\x9f\x31\x1c\xc4\x6f\x91\x35\xbf\x01\x9e\x7b\x88\xfb\xce\x13\xd8\xd4\x3b\x98\xc4\xba\x46\x3e\xfb\x98\x54\x4f\xd0\x78\x38\x69\xf3\x23\x99\x86\x52\x33\xaa\x79\xdf\xbc\x9e\x4d\x4c\xad\x1d\x63\x93\x18\xd7\xae\x7c\xa2\xe1\x5b\x19\x25\xa4\xb4\x52\x4e\x55\x6e\x93\x1a\x7a\xd7\x08\xa7\x30\xec\x4c\x36\x4a\xdb\xc5\x93\xca\xb4\xd9\x27\xea\xad\xc0\xda\x48\xc2\x2c\x50\x4d\xd5\x1d\xa3\xa4\x1a\xfb\x3b\x83\x75\x57\x82\x66\x2a\xb5\xd2\x55\x45\xd9\x77\xe7\x46\x0d\xbe\x63\x44\x34\x38\x76\x25\x9f\x02\x6f\x23\x9c\x32\x40\x6e\xbe\x61\x26\xd5\x76\x8b\x84\x6a\xd8\x1b\x6c\x99\x06\xbe\x95\x88\x95\x9b\x64\xf5\x89\x74\xb6\x6b\xc6\xab\x59\xf7\xe6\x66\xad\xa6\xa7\x59\xf5\x25\x7e\x1d\xa9\x76\xed\xee\x7e\xb6\xc1\xbd\xfd\xac\xfd\xce\x7e\xee\xf1\x3e\x41\x57\x9b\xba\x8e\x24\xd5\x76\x8b\x78\x73\x8f\xbf\x53\xc8\x76\x6c\xd3\xc0\x77\x21\xe2\x92\xa1\x0b
\x4c\xa3\x4d\x9d\x1f\x72\x55\x77\x8e\x98\x1f\x12\xa4\xbb\x13\x34\xae\xd3\x4a\x54\x86\xa6\x7d\x41\xab\xe8\x69\x8a\x4a\xa0\x3b\x46\x3f\x86\xa6\x9f\x68\x77\xd2\x29\xf0\x56\xaa\xad\x96\xd5\xef\xd6\x55\x2a\x29\x1a\x7a\xc7\xc8\xa6\x31\xec\x4a\x35\x09\xdd\xe6\x3e\x87\x17\x51\x77\x6f\x61\x05\xbc\x5b\x24\xd3\xf8\x75\x6c\x50\x02\xb7\x71\x19\x26\x9b\x3a\x91\xc8\x1a\x3b\x46\x34\xd2\xdd\x89\x04\x93\x56\x27\x12\x4c\x04\x62\xbc\x2e\xb9\x4b\x13\xe1\xe2\x7a\xbb\x46\xbe\x04\xdf\xce\x44\x34\x35\xda\x49\x79\x41\xcf\x37\xf5\xd4\x34\x95\x76\x8d\x88\x1a\xd3\xce\x14\x94\xe0\xad\xe4\xe3\xfd\xda\x78\xf5\x6a\x2d\x39\xa9\xb1\x63\xd4\xd3\xd7\xbb\xdd\xc9\xa7\xe1\x3b\xd0\xcf\x0f\xf1\xf2\x8c\x7a\x2c\xe8\xf3\x68\x29\x29\x58\xed\x1d\xe2\x87\x38\x05\x6d\xab\xbf\x6b\xb4\x7d\x19\xe3\xf0\x31\x25\x41\x67\x42\x97\x2b\x77\xa0\xba\xb9\xb5\xdc\x80\x6b\x77\xf1\x6e\x16\xf3\xd7\x1b\xdd\xce\x1a\xf8\x0e\xf4\x93\x1a\xe4\x3d\xbe\xf2\x6b\x0c\xe9\xf1\x23\x21\x8c\x46\xa2\xf6\x6d\x10\x5d\x08\x6c\x68\x5c\xcb\xcb\xaf\x6d\xc4\xc1\x9b\xd9\x87\xb2\xe3\x5a\xb6\x5c\xaa\xbe\xc0\x17\x28\xce\xa8\x97\xc3\xe4\x2b\x26\x5c\x46\xb6\xbd\x86\x94\x7c\x3e\x7d\xfb\x52\x67\x99\xd6\xe8\xd2\x33\x89\x29\x62\xf1\x90\x1d\x1d\xe5\x28\x3e\x9f\xbe\xcd\x25\xca\x53\xa8\x31\x64\x88\x60\xd5\x27\x55\x8c\x67\xf1\xdf\xb4\x3a\x5b\x46\xc3\xc6\xa7\xaa\xec\xd6\x12\xd0\x58\x76\x6c\x50\x02\xb7\x31\x7f\xe8\x71\x71\xa7\x9c\x2f\x1b\x74\x49\xf1\x89\xfe\xbf\xfc\x8d\xbe\x73\x7f\xa3\x7c\x52\x73\x13\x37\x5e\x4a\xa3\x1c\xe7\x16\xdf\x1f\x98\xac\xdf\xe6\x6d\xc0\x24\xeb\x39\x8b\xb3\x7a\xb3\x38\x71\x74\x7f\x68\xaf\x5b\x5c\x8e\x48\x0b\x8b\xa2\xa9\x79\xfb\xa5\x31\x17\x42\x0a\xf6\x57\x42\x84\xff\x1d\xcc\xbb\x4d\x42\x84\xac\x23\x5d\xc5\x9b\xc9\xe3\xe1\x04\xe2\xb4\x80\xca\x02\x5a\x99\x2d\x81\x43\xdc\xb6\x25\x85\x74\xd6\xaf\x3c\x8c\x55\x5a\x4f\x34\xf4\x6e\x6d\x47\x21\x9d\xbd\xee\xdc\xa0\x04\x6e\xdd\x8e\xe8\x6c\x38\xd8\x84\x64\xc3\xc1\xee\x91\x4c\x62\xd8\x9d\x66\xc3\x41\x17\xa2\x75\x77\x31\xd4\xd0\xbb\x47\xb4\xee\x6e\x86\x0a\xba\x03\xd1\xba\x07\x53\x2b\xe0\x9d\x23\x59\xf7\x50\x6a
\x09\xdc\x4a\xb0\x0d\xee\x59\xc3\x5d\xb3\xc9\x6d\xd0\x5c\x7b\x22\xe7\x70\x93\x7b\xd6\x70\xe7\xee\x59\xc3\x0d\xee\x59\xc3\xf6\x7b\xd6\x85\xb7\x85\x0b\x84\xa9\xb4\x5b\x84\x5b\x78\x9b\xb8\x3f\x28\xe8\x0e\xc4\xdb\x9c\x72\xbb\x47\xb6\x4d\x88\xd6\x4e\xb2\xab\xce\x72\x5f\xc2\xee\x1a\xb1\xae\x36\x20\xd6\x55\x2b\xb1\x36\x48\x41\xba\xd8\x39\x6b\xca\x62\x03\x63\xca\xa2\xdd\x96\xb2\xd8\xd2\x7b\x37\xad\xf7\x5d\xf8\xf0\xb6\x1d\x6b\x16\xb4\xbb\xaf\x9f\x84\xdd\x31\x9e\xa0\xdd\xad\xf5\x0b\xda\x6a\x9c\x5f\x44\xd5\x7a\x53\x35\xb5\xa2\x9d\x53\x9c\x16\x9b\x34\xb8\xe8\xf0\x0a\xc6\x16\x2e\x57\x3b\xe8\x6e\x45\x36\xf1\xb5\x22\x1d\x1c\xad\x08\x15\xfd\x4d\x62\xf3\x34\xfc\xee\xdd\x01\x11\x2a\x5e\xff\xde\x9d\x72\x12\xba\x03\xe9\x36\xa2\xdb\xee\x51\x6c\x13\x7a\xb5\x51\x4b\xbf\xae\xd3\xf7\x36\x5d\xa4\x69\xbd\xdd\x22\x9f\x1e\xf7\x8b\xee\x8d\xc6\x15\x5a\x09\xb9\x14\xfa\xe9\xfb\x0d\xe9\x18\x57\xdb\x31\x32\x26\xd8\x76\x25\xa3\xa9\xd0\x4a\xc6\xca\x48\x8e\xea\xc5\xbb\x73\x41\x30\x1b\x44\xc0\x98\xf0\x97\x06\x5f\xbe\x25\x62\x3e\x22\xc2\x9b\xa1\xbe\x79\x52\x6d\x33\x35\xb5\x5c\xff\xbb\x50\x57\x55\xc8\x19\x44\xe3\xa3\x56\xc5\x75\x89\xd8\x94\xb2\x45\xd7\x74\xbb\x31\xf8\x6e\xb1\x4c\x82\x64\xc7\x36\x0d\x7c\xdb\x32\x5b\xe2\x25\xea\xeb\x87\xa3\x37\x14\x58\xd9\x9a\xdf\x23\x29\x5b\xd1\xde\x02\xdf\xef\x12\xd1\x06\x9e\x51\x58\x76\x65\x18\xbc\x6c\xb5\xe9\x2d\xc3\x88\x79\x21\xfe\x03\xf5\x2f\x29\xab\x4e\x0f\x14\x43\x94\xeb\xdc\xb7\x54\x49\x2f\xf1\xba\xe1\x50\x35\x7c\x4c\xa6\x52\x3c\x52\x76\x18\xe2\xb3\xfb\x45\x21\x9d\xc2\xc6\xd1\x16\x9f\x10\x6d\x38\x66\x4a\xd8\x1d\x63\x50\x89\x5d\x57\xfe\xa4\x97\xad\xec\xb9\x5d\x44\xca\x8e\x46\xa3\x2c\x37\x0d\x45\x59\x76\x8c\x43\xf9\x3d\x42\xd1\xa6\xd2\x51\xd7\xd9\x2d\x02\x1a\x3c\x3b\xb6\xa8\xa0\xdb\x48\xc7\x3c\x12\xd0\x4a\x4d\xa4\x72\xc5\x1a\xf0\xdd\x22\x5b\x8c\x63\xc7\x26\x35\x78\x07\xc2\xcd\x36\xe5\x39\x5d\x67\xe7\x88\x37\xeb\xce\x73\x0a\xba\x95\x74\xea\x21\xf8\x4d\x69\xa7\x2b\xed\x18\xf1\x0c\xa6\x5d\xa9\xa7\xc0\xdb\xc9\xa7\x0c\x18\x1b\x5f\x55\xa6\xf5\x76\x8d\x88\xb2\x60\x83\x0b\xcb
\xb8\x42\x3b\x21\x97\x68\x63\xf3\x91\xa9\xb4\x6b\x24\xd4\x98\x76\x26\xa0\x04\x6f\x27\xdf\x05\x62\x7c\xf3\x75\xac\x6b\xed\x1a\x01\x0d\xae\x9d\x29\xa8\xe0\x5b\x49\x88\x67\xf3\x2e\x2e\xa5\x19\xb8\xbf\x7c\x4a\xff\xf2\x29\xfd\x1e\x7c\x4a\x37\x4b\xb5\xb2\x8b\x99\x56\x36\x4b\xb4\xd2\x29\xcf\x0a\x9f\x47\xd3\x69\xb8\xa9\xc8\x8c\x6b\xed\x16\xf9\x12\x5c\x3b\xb6\x69\xe0\x5b\x49\x88\x67\xdd\x3d\x41\x14\xf0\x8e\x91\x4d\xe1\xd7\x95\x66\x78\xd6\xea\x0c\xb2\xc9\xe3\xbd\xbb\xf7\x76\xef\x26\x4f\xf7\x76\x78\xb9\x97\x63\x32\x8b\x42\x8f\x6d\x64\x6d\xcb\x56\xfa\xea\xf6\x36\x1e\xe2\x8d\xcf\x52\xba\xce\x8e\x4d\xb4\xc6\xb3\xeb\x54\x4b\xe8\xd6\xc9\x0e\xa3\x19\x9e\xae\xee\x34\xea\xcc\xb4\xf9\x57\xe0\xd9\xae\xe8\x59\x8d\x81\x67\xb1\x72\x54\x88\x3e\xb3\xe5\x09\x29\xf4\x7c\x64\x1d\xf6\x0e\x67\x10\xf4\x81\xed\x08\xfa\x96\x5e\x22\xf6\xd2\xe3\xc8\xb2\x6f\x17\x67\xc6\x29\xdb\xe2\x78\x1f\xd7\xda\xb1\x65\x4d\xd9\x26\x47\x7b\x0d\xde\xba\xb0\x97\x21\xbe\xdb\x60\x52\xd5\xe2\x5f\x8b\x7a\xd7\x17\xf5\x91\x0d\x99\x4b\x4a\x2f\x11\x8f\x27\x23\x06\xb9\x4b\xca\xf9\xec\x61\x26\xa1\x7d\xa8\xb9\xc0\xa2\xb7\x5d\xde\xbf\x6f\xf0\x52\xac\x02\xde\xb1\x25\xfd\xfb\x06\x2f\xc5\x4a\xe0\xd6\xe5\x1c\x9d\x75\xa7\x57\x74\xfb\x77\xc8\xbf\x32\xb9\x24\x76\x5d\xa9\x15\x9d\xb5\x11\x4b\x78\x1b\xe7\xdd\x51\x55\x76\x8b\x68\x1a\xcb\x8e\x0d\x4a\xe0\x76\xb2\x75\x3f\x25\x49\xd8\x5d\x23\x57\xf7\x53\x92\xf0\x5a\x4f\x49\xc2\x23\xdd\x5f\xf4\x53\xc0\x3b\x47\xae\xee\x2f\xfa\x49\xe0\x36\x8f\x37\xe1\xf1\x9a\xb7\x86\xcb\xde\x5e\x0a\x76\xd7\xe8\xc5\xbb\x3f\x31\x2c\x81\x5b\x19\x8c\xce\x66\xe1\x96\x6e\x5e\xf9\xba\xdf\x23\x21\x3b\xa0\xbe\x15\xce\xdf\x25\xb2\x0d\x5c\x63\x30\xed\xca\x37\x0a\xbc\x95\x7c\x2c\xaa\x4e\x7c\x58\x2d\x9b\x14\xf4\x8e\x91\x4d\x63\xd8\x95\x6a\x12\xba\x8d\x68\x11\xd9\x7c\x99\xe9\x3a\xbb\x45\x3a\x83\x67\xc7\x16\x15\x74\x1b\xe9\x2e\xb1\x98\xd7\x7a\x2e\xd7\x12\x2f\xae\xb5\x5b\xe4\x4b\x70\xed\xd8\xa6\x81\x6f\x23\xe1\xd5\x26\x9e\xf3\x57\x3b\xe7\x3a\x7f\xb5\x81\xef\xfc\x55\xfd\xdb\x11\x98\x60\x81\x95\x4f\x27\xe3\x87\xde\x72\xd9\xbf\x40\x8c
\xd7\x2d\xdb\x10\xf7\x33\x20\xd9\xba\xfd\xa9\xe7\x0b\xca\x56\x9b\xbd\xf7\x07\xc9\x96\x17\xcd\x71\xe2\xa6\xe4\xbc\xbd\x47\x62\xa2\x39\x2f\x3e\x7c\x38\x38\xb0\x98\x9b\xfb\xe2\x10\x6f\x81\x60\x58\xf8\x68\x10\xc9\x5a\xc7\xaf\x25\xe0\x08\xbc\x58\x2e\x7b\xbf\xc5\x94\x48\xf1\x1c\xe5\xef\x33\x19\x0c\xed\x75\x3b\x59\xd5\x8a\x25\x88\x88\xfe\x92\xd1\x65\x5f\xac\x96\xa8\xd2\x35\x33\x2d\xed\x50\x7f\xb7\xb8\x35\xc5\x67\x83\x94\xa6\x71\x95\x4e\xbc\x6b\xde\x58\x47\xac\x1f\xa0\xb3\x68\xd6\xf7\x02\x6f\x59\xf3\xd6\x15\x43\x9c\x86\x17\x88\x1d\xc6\x7f\xf0\x43\x3f\xf4\x38\xc7\x7e\x6d\x2b\x77\x78\xff\x62\x38\xac\x76\xbc\x19\x6e\xcb\x90\x45\x72\x3d\x72\x3d\x36\x8b\xe4\x3a\xe2\xe3\xe1\xe4\xe6\x26\xfd\x35\x98\xec\x21\x87\xa1\x19\xe6\x02\x31\xab\xae\xed\xd1\xc2\xc3\x04\x64\x18\x18\x22\x93\x9e\xb1\xad\x0a\x90\x63\xe6\x4b\xcf\x47\x00\x02\x6f\xb9\x0c\xb1\xef\xc9\x81\xe9\x62\x7b\xdd\x61\x11\x94\x4e\x26\x6d\x07\x97\x96\xda\x7f\xf1\x7f\x15\x85\x02\x4f\x78\x55\x84\x95\xdf\x0f\x39\x12\xd1\xb2\x9f\xcc\x74\xae\xf0\x1e\x38\x3c\x3b\xa2\x0c\x53\xe7\x08\xd6\x86\x94\x42\xa3\x9f\xe1\xb8\xfe\x2c\xa4\x67\xc5\xa0\xc2\x8d\x9e\x98\xcd\x18\x71\x3b\x2c\x2b\x3c\xb5\xf6\x87\xfb\x6e\x7a\x6b\xea\xe8\x6e\x5f\xa4\x43\xfa\x49\x8d\xc8\xd8\x83\x65\x05\x10\x11\x8d\x57\x00\xf6\xe3\xc4\x81\x97\x98\x04\xf4\xd2\x26\xae\xfe\x63\x0f\x85\x1c\xf5\x6a\x60\x35\x8a\x36\x71\xf5\x1f\x0a\xf6\x3a\x0f\x9b\x24\x24\xe4\x28\x9c\x1a\xdb\xfe\x1e\x71\xe5\xaf\xb5\xda\x20\x61\xd8\x3e\xe4\x3d\xe6\x96\x12\x1c\x86\xcf\x43\x93\xc9\x54\xbf\xfb\xeb\x68\xc9\x38\x5d\xa5\x61\x25\xce\x42\xcd\xfe\x07\x86\xa6\xf8\xca\x86\x64\xcc\x26\x37\x37\x96\xfc\xc7\x45\x50\x8a\x21\xba\x44\xc4\xba\xbe\xc4\x61\xf8\x0a\x71\xc1\xe8\x2a\xcb\xdf\x2a\xf1\xe9\x17\x1e\x2d\x11\x8b\x53\xb8\xce\x31\x87\x09\xd1\x25\x4f\x84\x48\xa0\x9e\x6c\x70\xbd\xb6\xed\xf5\x7a\x0b\x56\x4c\xb9\xc8\x25\x15\x9c\x59\xcf\x56\x19\x46\x25\x1d\x18\x74\x1a\x7a\x7c\xde\x5f\x20\xce\xbd\x59\x71\x53\xbf\xef\x87\x8f\x33\x38\x52\xe3\x7e\xa7\x67\x2e\x40\x4b\x86\x7c\x4f\x20\xc8\xcc\x17\x39\x85\x33\x72\x73\xa3\x7f\x2d
\x10\x9b\x49\x65\x08\x8c\x33\xcf\x82\x49\x44\x26\xbd\x1f\x23\x75\x09\x62\x14\x23\xde\xa3\xd3\x5e\x01\xa6\x27\xe7\xb5\x47\x68\x2f\xa4\x64\x86\x58\x4f\x6f\x20\x3d\x31\x47\x3d\x93\xe3\xb7\xe7\x45\x82\x2e\x3c\x81\x7d\x2f\x0c\x57\x4e\xef\x0d\xe1\x02\x79\x01\xec\xad\x68\xd4\xe3\x73\x1a\x85\x41\x0f\x5d\x49\xd2\x63\x11\xae\xe2\x06\xb0\xe8\x61\x22\xa8\x04\x62\xbd\x53\x1a\x09\x04\x7b\x2f\x29\x11\x8c\x86\x21\x62\x3d\xca\x7a\x2f\x63\x35\xa8\x27\x4f\x00\xbd\xff\x5b\x95\x5f\xf8\xff\x3a\x00\x72\xf7\x5a\xe0\x05\xa2\x91\x18\x1d\xa3\x63\xa8\xb3\x12\xa3\xe0\x93\xf9\x36\x80\x4b\x86\x29\xc3\x62\x35\x1a\x0e\x06\x90\x0b\xec\x9f\xaf\x46\xfb\x43\xc8\xe7\xf4\xf2\x03\xa3\x33\x86\x38\x97\xbf\xe5\x8a\x18\x01\x4c\xa6\x14\xa8\xbf\xf9\x68\x0c\x78\xe4\xfb\x88\xcb\xd9\xd5\xdf\xc1\xa5\xc7\x88\x5c\x40\x10\x04\x9e\xa4\x87\xdc\x1f\x43\xc4\x04\x80\x80\x23\x9f\x92\xc0\x63\x72\xaf\xd2\xa3\xc4\x94\xfc\xa8\xf4\x64\xac\x1a\x53\x19\x82\x35\xa3\x18\x34\x01\x04\x17\x18\x5d\xaa\x6f\x06\x59\x30\x81\x4b\x86\x2e\x10\x11\xaf\x22\xcd\xae\x48\x8e\x6e\x9d\x5e\x46\xd1\x0e\x72\x0c\x52\x37\x5d\xc0\x37\x37\xd7\x6b\xdb\x51\xb3\xf9\xab\xe6\xdc\x57\xba\x80\x43\xec\x5a\xf4\xe6\x66\x3c\xb1\x9d\xf2\x88\xa1\xe7\x32\x8b\x43\x6a\xc3\xc8\xdd\xb7\xf0\xc1\x01\x8e\x93\x28\x17\x95\x8f\x29\x9e\x8d\x8a\x2b\xc3\x83\xd7\xfa\x36\x4d\x60\x4f\x20\x89\x41\x56\xfd\x30\xd3\x57\xaa\x05\xaa\x46\xa9\x49\x56\xd1\x89\x0d\x89\x15\xc2\x08\x5e\xe3\x60\x04\x0a\x7c\x9b\xae\x8a\x7e\x82\x9a\x39\xb5\x60\xd9\x53\x44\x04\x0e\x47\xe0\xc8\x19\x38\x03\xb0\xb6\xa1\x57\x41\x01\x67\x4a\xd9\x6b\xcf\x9f\xa7\x7e\x92\xc2\xbe\x4e\x90\x10\xf9\xd1\x72\xc5\x02\x95\x68\xd9\xeb\xec\x5d\x9f\x11\x4a\x45\xd4\x33\x92\x88\xd6\x49\x22\x7d\x3b\xd9\xdf\x54\x11\x48\xe1\xfb\x5c\x50\x86\xfa\x66\x9c\xdf\x91\x12\x50\x8f\x59\x97\xd7\x81\xee\xea\x55\xa0\x1a\x74\x92\x21\x54\xe9\xea\x3a\x0d\x79\xd5\x5e\xfd\xa3\xc7\xc5\x0f\x94\x26\xf7\xee\x49\x15\x12\x7b\xdb\x82\x17\xc0\x75\x5d\xe2\x08\x8f\xcd\x90\x70\x84\x37\x7b\xe7\x2d\xd0\xf3\xf8\x43\xb1\x13\xe4\xf8\x21\xe5\x88\x8b\xc4\x0d\x20\xfe\x60\x01\x0f
\xd8\x7b\x53\xca\x2c\xe4\x22\x67\xe9\x31\x44\xc4\xeb\x10\x49\x71\xf0\x0c\x1d\x1c\x80\x17\x60\xdf\x75\x51\xdc\xc1\x33\xbb\x04\x15\xdf\x4e\xa3\xb5\x15\xf7\x6e\xef\xb1\x83\x83\x42\x7c\x3a\x24\x90\xd9\xce\xc2\x5b\x9d\xa1\x9f\x3d\x12\x84\xc8\xb2\xd7\x7b\x01\xf5\x95\xe0\x71\xce\x68\xb0\x72\xbc\x20\x78\x2d\xc5\xd7\x5b\x29\x1f\x88\x92\x10\x21\xf6\xcf\x01\x54\xe7\xee\x16\x6d\xc1\x8c\x22\xdf\x22\x43\x0b\x7a\x81\x6a\x1b\x6d\xd5\x30\xa4\x3e\x5d\x73\x64\x59\xe0\x2b\x4c\xf8\xa1\xe7\x87\xca\x98\x66\xcc\xfd\xb5\x1b\xbb\x01\x57\xa0\x53\x84\x82\x33\xcf\xbf\x83\x4b\x96\xa2\x1b\xc0\xaf\xb2\x17\xc7\x67\xc8\x13\x28\x95\xe2\xf0\x9a\x23\x21\x30\x99\xf1\x9a\x4c\xfb\x71\x31\xb0\xa1\x41\x63\x74\xad\x1b\x29\xe7\xdc\x57\xa9\xf6\xf7\x74\x3b\x33\x24\x34\xc5\x40\x82\x92\xed\xa0\x2b\xe4\x47\x02\x59\xe5\xb9\xc9\x54\x52\x11\x21\x14\xd8\xce\x52\xea\x0f\x5c\x58\xc8\x76\xc4\x1c\x91\x8a\x5a\xc2\x11\xcc\x23\x1c\xcb\xaf\x9f\xa8\x05\x02\xdf\xf1\xfc\x50\x8b\x46\x08\xfe\x25\x95\x80\x17\x2f\xdf\xf6\x04\x3d\x47\xa4\x37\xf7\x78\xef\x0c\x21\xd2\xf3\x82\x00\x05\x0e\x80\xe0\xd3\x1c\x31\xd4\xbb\xf4\x78\xcf\x23\x3d\xc4\x18\x65\xb2\x0c\x93\x99\xd6\x1f\x92\xaa\x0e\xb0\xd7\x30\x5a\x06\x3b\x88\xb4\x44\x8e\x7b\x17\xb5\xf8\x72\xef\xa2\x06\x5f\xad\x40\xdf\x3b\xbe\x7a\x19\x56\xa1\x6b\xb2\xa8\x2b\xef\x75\x81\xa4\x84\x31\xe9\xd4\x0d\xc2\x0e\x26\x01\xba\x02\xa3\x4c\x5c\xcb\x94\x21\x3e\xb7\xd4\x65\xa7\x64\xee\x51\x2b\xc5\xea\x48\xa6\x91\xaf\x23\x9a\x2a\xad\x21\x9b\x2f\x77\x9c\xb0\x74\x56\xa9\x1b\x00\x8c\xf8\x3d\xd2\x38\x5d\xbc\x09\x5f\x5d\xab\xa1\x8e\x52\x30\x04\xc1\x9b\x57\x92\x79\x4a\x13\x50\x41\x36\x23\x16\x62\x66\x7b\x47\x2f\x7b\x11\x97\x94\x90\x12\x3d\x21\x44\x35\xd5\x34\xa0\x98\x7b\x22\x03\x29\x29\x16\x52\x72\xff\x7c\xa6\x7a\xf9\xde\xd8\x4c\x0d\xaa\x8e\xcb\x64\x61\x35\x93\xd5\x99\x72\xcc\x46\xa2\x36\xb1\x3e\x8d\x04\xc7\x01\xba\x3b\x4f\xc3\xa2\x7f\x21\xb2\xaf\xad\x92\x6b\x61\xe2\xd1\xb6\x2f\xcc\x86\x7f\x73\xb3\x9f\x6c\xbc\xc6\x6a\xc4\xad\xb8\xd0\x86\xcc\x45\xae\xeb\xa6\xc0\xa8\x02\xc8\xa8\x11\xfb\xe4\xe0\x60\x9f\x19\xf7\x3c
\xa9\xfa\x5b\x45\xee\x40\x5a\xed\x00\x36\x44\xf6\xc1\x81\x5a\x78\x94\x9c\x85\x11\xb3\x90\xbd\x2e\x45\x2d\xe5\x76\xc4\x6b\xa9\x85\x6d\x66\x62\x50\x10\x73\xa5\xb0\x30\x57\x38\x67\x98\x04\x0a\x46\x3d\xe9\xe2\xeb\xf7\x5c\x32\x0d\xca\xaf\x72\x2c\xf9\x6f\x01\x0e\xde\x10\x8e\x58\xac\x32\x95\xcd\xb5\x6a\x1d\xb4\x8f\x46\xe3\xc4\x22\xa2\xbd\x28\x55\x71\xa6\xad\x64\x0e\x6a\x35\x29\x14\xe3\xa2\xd6\x76\x46\x95\xaa\x18\x59\x07\xfb\x4b\xdc\x5f\x93\x9e\x95\xa5\xa0\xdd\xc2\xd5\x98\x08\x44\x64\xf7\xdb\xeb\x54\xe5\xd8\xc5\xb9\x10\xcb\x43\x2e\x3c\x51\xca\x57\xb1\xcd\xad\x52\x67\x9d\x6b\x27\x34\xa9\x84\xe0\x39\xd5\x22\xf9\x5a\xd6\xa7\x4a\xe7\x0a\x25\xc7\xf8\xc1\x41\xfc\xd7\x78\x30\x89\xf1\xcb\x7c\xda\xc3\x53\x6b\xe9\x31\x8e\xde\x10\x61\x09\x47\xcf\x86\xad\x4e\x30\x6f\xde\x7d\x7a\x7d\xfa\xee\xc5\xdb\x2f\x1f\x5f\x9f\xfe\xf6\xfa\xf4\xcb\xeb\xd3\xd3\xf7\xa7\x07\x07\x03\x57\x9b\x52\x85\x87\x43\x2d\x9e\xdf\x4f\x2d\x10\xc4\x96\x8d\xcc\x18\xa7\x34\x22\xc1\x08\xc4\x8e\xcb\xe0\x05\xc9\x14\x7a\x21\x43\x5e\xb0\xea\xa1\x2b\xcc\x05\xef\x4d\x29\xeb\x49\x3a\xf7\x3e\xd2\x88\xf9\xa8\x2f\xb9\x1f\x13\x65\xd8\xeb\x2d\x3d\xcc\x9c\xde\x87\x10\x79\x1c\xf5\x10\x11\x88\xf5\xbc\x5e\x80\xa7\x53\x24\x4f\x3b\x3d\x9f\x2e\xce\x62\x50\x3a\xed\x7d\x34\xbb\x24\xec\x49\x0d\x0b\x79\xcc\x9f\x2b\xd3\x56\x4a\xd1\x9e\xa0\x3d\x14\x60\xa1\xa4\xbd\xec\x5e\x0a\xfa\xa4\xd8\x01\x46\xc8\xb5\x69\xa8\xd9\x1a\xdf\xb9\x8a\xda\xc6\x4d\x9b\x28\xaa\x19\xb4\xbf\x77\x45\x35\x45\xfb\x56\x7a\x44\x8e\x7a\x75\xe4\xdb\x44\x69\xcd\x93\xb0\xab\xd2\x9a\x1f\x46\xb3\xbc\x3e\xbf\xf8\x76\x87\xdf\x72\x1c\x79\xc1\xb4\x69\x6c\x8e\xff\x1c\x1c\x1c\xe8\xab\xff\x7d\xd7\xcd\x5a\x3a\x9f\x67\x7f\x8c\xc0\x21\x80\x22\x67\x17\x35\x1a\x09\x38\x04\xae\xeb\xa2\xe7\x42\x51\xe7\xfc\xc2\xcc\xb1\x3d\x4a\x3e\x4c\x69\x18\x20\x06\x60\xab\xfe\xd1\x69\x77\x88\xa3\x2d\xd8\x9d\xf0\x36\xdb\x60\x45\x13\x2b\xb7\x26\x7e\x41\x2b\x60\x43\x96\xe3\x10\xad\x00\x31\x3b\xb3\xbc\xcf\xd1\x6a\xf3\x63\xf7\x39\x5a\x55\x1f\xb8\x77\x1a\xfb\xae\xb2\x2d\xc6\xbe\x24\xd5\xee\x0b\xfb\x56\xb9\xc6\xaa\xe4
\x5a\xca\xda\xa3\xcc\xa7\xbc\x88\x63\xf5\x22\x6e\x23\x82\xe6\x29\xba\xa9\xa4\x8b\x09\x5a\x94\x71\x8a\xa0\x4d\xc3\x29\x89\xc0\x8c\x86\x6f\xaf\x21\xd6\x11\x40\x9e\x40\x1f\x11\xe7\x98\x92\x9a\x1d\xc8\x49\x2f\x66\x60\x7c\xb7\x96\x4e\x1b\xd7\x75\x4f\xd5\x5c\x6c\x37\xa9\xa4\x61\x0a\xb5\xd8\xcb\x21\x87\x05\x5a\x24\x5d\x71\x6d\x00\x30\x18\x00\x48\xa2\x30\x4c\xee\x4e\x91\x63\xbe\xc3\x14\x58\x24\x43\x36\xc0\x6a\x72\x3e\xa9\x7b\x3b\xf5\x59\x4d\x45\x4a\x9b\xba\x39\x4a\x20\xb4\x4d\x20\xa9\xde\xe1\x78\x5b\xd0\xe9\xef\xed\x3d\xd2\xfc\xf1\xf0\x4b\xdc\x63\x8d\x9d\x34\x9d\x2a\xb8\xf1\x49\x72\xaf\x34\x4d\x6a\xf6\xbf\x64\xda\xcc\x69\x33\xbc\xcc\x20\xf0\xda\x30\x48\xb6\x63\x14\x73\x8d\xe9\x16\xc1\xf1\x44\xb2\xa3\xef\x09\x13\x03\xb8\x64\x54\x50\xb1\x5a\x22\x1d\x94\xec\xf8\x5e\x18\x5a\x99\x53\xdc\x58\x64\xd8\x77\xa2\xd7\x62\x97\xd9\xc1\xa1\x40\xca\x25\xe0\xae\xad\x0f\xa6\x5a\x7c\x15\x9d\x6d\x31\x7d\xa1\x75\xf8\x4c\xfc\xa3\xb8\xd5\xab\x77\x59\x8d\x6d\x22\xdd\xc9\xc5\x64\x2f\xae\xa6\x22\x1d\x89\x6d\x3a\x48\x09\x33\xf7\xf8\xfb\x4b\x12\x8f\x50\x53\x88\x40\x66\x1f\x1c\x58\x68\xcc\x26\x2e\x19\xb3\x89\xbd\x4e\x2e\x3a\x8a\xaa\x07\x4a\x4f\x3c\xca\x7b\x90\xb8\x00\xc4\x0f\x44\x22\x47\xf6\x60\x84\x2a\xf3\x02\x4c\x8d\x34\xd5\x67\x06\xf3\x43\xa0\x2b\x01\x46\xc4\x45\x3a\x0a\x72\x5d\x11\x83\x29\x0f\xab\xb1\xf2\x28\xd1\x40\xcf\xeb\xc8\x2b\x62\x9a\x12\x98\x77\x20\xd2\x77\xa1\x51\xf2\xfb\x92\x61\x61\xfe\x5e\xdb\x23\x34\x16\x13\x97\x40\xb4\xb6\xae\xd7\xaa\xbb\x36\x6b\x8a\xe6\x00\x3e\xba\x5e\x43\xfd\x27\x0a\xcc\xc2\x31\x8f\xd5\x06\x96\x92\x44\xea\x76\x56\xc3\xe6\x5e\xb6\x4d\x4d\x1f\x50\x94\x16\x46\x5c\x21\x36\x0c\xf5\x8a\x00\xba\x65\xdb\xd1\x80\x56\xf6\x8a\x2c\xb9\xe3\x32\x65\x04\x0a\x9d\x4d\x86\x23\x61\xe8\x84\x11\xbf\xa5\x15\x26\x1d\xc9\xef\x11\x62\xab\x0f\x1e\xf3\xf4\x78\x2a\x6e\x7e\xb3\x3e\x17\xc6\x1e\x9b\xe2\x67\xa6\xf1\x1c\xad\xb8\x25\xaa\xeb\xa7\x22\x81\x40\x01\xb3\xed\x08\xa9\x8c\xc0\x9c\xa0\x4f\x48\x4d\x94\xe0\x4e\x34\x4d\xfd\xbd\xb4\x83\x69\x5d\x03\x86\x2e\x91\xbc\x6a\x8c\x71\xd9\x31\x85\x15\x63\x42
\xd9\x31\x31\x88\x20\x00\xfb\x6e\x38\x46\x93\xe7\xf2\x3f\x23\xb3\x6b\xc0\x92\x28\x8b\x87\x26\x14\x8f\x25\x8f\x36\x27\xc4\x80\xa1\xdd\x6d\x6f\x98\x23\x2f\xac\x13\x42\xb5\x47\x8f\x14\xb8\x64\x24\x8a\xfc\x29\x66\x5c\x00\x08\xf4\x8a\x29\x66\x95\x57\xd7\x97\x9b\x0b\xb4\xae\xc7\x81\x0c\x0b\x8d\xae\xb5\x75\x64\x74\xed\xf1\x11\x30\x76\xab\x35\x34\xbf\x35\x0e\x60\xbd\x86\x9a\x02\x3f\x9a\x35\x58\xb3\xee\x9a\xd4\x84\xec\x2a\x82\xc2\x1d\x03\x00\xc1\x52\x0a\x5d\x45\xa0\xd4\x5d\xc5\x67\x58\x79\xe8\x80\x89\xb3\xf0\x96\x25\xbe\x0e\xdd\x92\x81\x18\xc8\x13\x53\x72\xd5\x9c\x63\x7b\x2d\xa8\x41\xec\x9c\x0d\x5e\xce\x91\x7f\xce\xc1\x03\x6b\x00\x49\xea\x2e\x9d\x98\x83\x7b\xc8\xe4\x24\xcc\x5a\xa1\x49\xdc\x71\xda\xb2\x9c\x9d\xa4\xca\x03\x2b\x39\xef\xa5\x4e\x6a\x1a\x2a\xee\xaf\x2c\x36\x50\x46\x6c\x7c\x54\x44\x97\x38\xac\xed\xd8\x73\xe5\xe6\x66\xb0\x86\x03\xdb\xc4\x8b\x6b\xd0\x6b\x9f\x46\x44\x8c\x42\x18\x7a\x67\x28\x1c\x19\xbc\x9f\x83\x17\x61\x08\x46\x45\x8c\xec\x07\xa0\x67\x29\x44\x59\xfa\x59\xaf\xad\x85\x27\x2c\xfb\x01\xb0\x01\xd4\xac\x23\xd6\xeb\x04\x1b\x31\x1e\x4c\x1c\xd5\xbe\x2b\xdb\x2d\x37\x61\x72\x8d\x58\x43\xbb\x82\x54\xa9\xfa\x8b\x1e\xc8\x6d\x3e\x22\x42\x62\x51\xe8\x55\xd4\xbe\x29\xbe\xa0\x01\x0a\xd5\x2d\x77\xad\x8b\x88\x02\xc9\x7f\xf2\x84\x28\x3a\x42\xc7\x93\xc6\x52\x6f\xa4\xf0\xeb\xee\x6a\x5b\xe8\x8a\x1f\xdf\x7e\xfe\xe9\xcb\x2f\xaf\xff\xe5\x22\xe7\xc3\xe9\x9b\x5f\x5f\x9c\xfe\x4b\xfd\x32\x81\x04\x12\x1d\x5e\x28\x02\x11\x0e\x00\xa4\x6e\xa6\x2e\x78\xf3\x0a\xec\x55\x24\xd5\x89\x1f\xc1\xb7\x42\x8b\xb9\xd7\x6b\xc8\x61\x9e\x63\x62\x87\x4b\xdb\x86\xa1\xc5\x20\x6d\x2e\x06\xf2\xe8\x06\x6a\x60\xe0\xb5\xf9\xf4\x9b\xc2\x0e\x80\x75\x5c\xeb\xd3\x6a\x59\x5b\x2b\x86\x39\x8d\x42\xc4\xdb\x80\x5e\x2a\x91\xf6\x46\x9d\x0d\x8b\xa0\x24\x92\xac\x91\x80\xfe\x4a\x03\x3c\x5d\x75\x02\x7d\xe5\x09\xcf\x57\xa6\xd8\x86\xfe\xd5\x79\xbc\x9e\x77\x7d\x4a\x59\x80\x89\x57\xfd\x14\xe5\x5f\x2c\xdc\x85\x85\xdf\xd1\x00\x7d\x0d\x26\x7e\x49\x55\x92\xfc\x3c\x50\x5c\xf8\x11\xcd\xd4\xc5\xdf\xd6\x9c\x10\x54\xc6\x65\x36\x72\x40\xee
\x0b\x43\xa1\x32\xff\xf3\x39\x5e\x96\xaf\x92\xa4\x7e\xa0\xb6\xa5\x94\x3d\xf8\x2e\xb1\xc7\x8f\xef\x4f\x5f\xbf\xf9\xe9\x5d\x33\xb3\xd0\x4a\x66\xc1\xae\x95\xaf\x9f\x5b\xb8\x39\x46\x92\x52\xca\x6e\xe4\x24\x6e\x85\x92\x93\x1a\x58\x85\x5b\x21\xc4\xcd\xc5\x20\xbe\x9e\x51\xdc\xc2\xe4\x01\xef\x57\x8f\xac\xec\xc4\x7f\x34\x01\x94\xac\x5d\x86\x22\x92\xe1\x6d\xad\x8e\x36\x70\x54\x62\x2b\xdf\x82\xb1\x8a\x1a\xa8\x82\x3c\x8c\xa7\xf6\xcf\xc1\x5e\x2f\x3e\x7d\x3a\xfd\xe8\x76\x91\x42\x75\x8c\x55\xd8\x48\xa1\x77\x6f\x1c\xf3\x0a\x71\x9f\xe1\xa5\x9e\xcd\x16\xe6\x52\x97\x87\xef\x3e\x76\x84\x6b\xd8\x99\x33\x9d\xc7\xf7\x90\x5d\xc0\x3f\x30\xe4\xa3\x00\x11\xbf\x0c\x99\xee\xa0\x99\x21\x34\x6d\xf3\x45\xe5\x40\xf3\xa5\x52\x11\x54\x0b\x2f\xfc\x26\x92\x14\x6b\x07\x88\xac\xd2\xba\xc6\x0f\xfb\x45\x10\x34\x6d\xe0\x59\xd0\x0f\x94\x95\x25\x7c\x01\xa7\x5f\x91\xf0\x2a\x36\x09\xdd\x4c\x17\x8d\x41\x41\x6a\x8d\x25\x78\x51\xee\x2e\x90\xba\x42\x0c\xf6\x59\xdd\x50\xb4\x82\x75\xd3\x7f\xf4\xf0\xbb\xe9\x3f\xb6\x14\x93\x7a\x01\xe5\x15\x7e\x0f\x8e\x93\x49\xc9\xb1\x58\x99\x8b\x72\xd3\x9f\xe3\xf0\x49\x56\x08\x7b\x4d\x32\xee\xfc\xe2\x0e\x84\x1b\xe6\x3f\xea\x9b\x84\x3f\x85\x58\xbb\x1b\x71\xf6\x0b\x5a\x35\xeb\x54\x77\x23\xd7\xde\x52\xff\xbc\x13\x5f\xfe\x18\x7a\xb3\xb2\x9e\x5f\x00\x52\xcb\xbc\xdb\xd2\xaa\xee\xb4\x00\xda\xb4\x18\x4a\x5b\xba\xb9\x93\x68\x11\x24\x9d\x25\x40\xc2\x94\xb0\x68\x3d\x91\x53\x03\x4b\xb7\x30\xf9\x65\x58\xb4\xa4\xa8\x0b\xa5\x9b\x1b\x00\xec\x75\xbb\xe2\xa0\xf4\x8b\xdb\x2f\x2b\x1e\x2d\xde\x4f\x3f\x13\x6d\x0a\x5a\x55\xf9\x3e\x79\xfc\x63\xb5\xe7\x13\x64\xd0\x44\x22\xf2\x6c\x80\xd0\xee\x2f\x3d\x5c\xb9\xf4\xbc\x8d\x8e\xe4\xd4\xe2\x72\xe5\x35\x2c\x2d\x6a\x71\xe8\x35\x17\x03\xb9\xed\xa9\x10\xb0\x66\x30\x75\xb8\x6a\x81\xa9\xd9\xf2\x54\x59\x4e\xd5\xad\x28\x37\x26\xaf\xba\xd2\x4e\x1b\x97\x1e\x44\xa7\x8d\x4b\x81\x7e\xf2\x66\x33\x14\x18\x02\xd4\x0f\xad\xd3\x5a\x35\x48\x56\x1e\xfe\x12\x2c\x6a\x0e\x8e\xaa\x30\x5d\x04\xb0\x6c\xee\xb3\x06\x30\xac\x5f\xd4\xb1\xb9\x10\x22\xb9\x9c\x55\x6b\x98\xff\x1c\x2f\xb7\xa2\xd8\x88
\x29\x5d\xbe\xbf\x1d\xb8\xae\xdb\x2c\x3e\xe2\x9e\x94\xf4\x30\x1d\x65\x56\x76\xe7\xae\x3a\xf6\xf2\xcf\x81\x52\xd2\x9a\xa5\x54\x7c\x56\xfa\xcb\x6e\xb2\xad\xdd\x44\x6a\x61\x5f\xc1\x6e\xf2\xa9\x62\xe7\xce\x2b\xe6\x25\x3e\x19\xab\x90\x69\x5d\xfd\x17\x4c\x4a\x8b\xa7\xd8\x45\xbb\x30\x53\x60\x2d\xfa\xbb\x82\x79\x4d\xe4\x1c\x7d\xf2\x66\xef\x2f\x10\x63\xb8\x42\xfa\x9d\x51\x1a\x22\x8f\xdc\xb3\x75\x51\x2f\x86\x0f\xf1\x95\x47\xb5\xd5\x49\x03\xbd\x8c\xef\x40\x1a\xa1\xfe\x2b\xbe\x33\xa9\x06\x4a\x4d\x0d\x15\x85\x9d\x44\x61\xd2\x4c\xbd\x8d\xcc\xac\xd8\xa6\x61\xd6\x95\x26\x97\x3f\xd5\xc2\x26\x21\x54\xbd\xf0\xb9\xd6\x97\x25\xa3\xe4\xfe\xa5\x5a\xfe\xc4\x2d\xd9\xcf\x5b\xca\x33\x77\x37\x75\x92\xcc\x5c\xe4\xfc\xb0\xb2\x40\x2c\xe4\x13\x44\xec\xf4\xbe\x29\x65\xf6\x9a\xdd\x40\x5f\xe0\x0f\x92\x6b\x7b\x73\x63\x1f\x37\x35\x2a\xdf\x50\x17\xc6\xba\x77\xc6\x90\x77\xbe\xa7\x6a\x25\x57\x66\xb5\xd5\x12\x86\xca\xd5\x8b\x2f\xdd\x6a\xab\xc5\x1c\x96\xab\x25\xc1\x87\xb1\xcb\x80\x90\x92\x5d\x5b\x42\x2b\xf2\x41\x26\x82\x9d\xd7\xa5\x6e\xfa\x4b\xb0\xef\xc6\x9d\x4e\x93\x34\xf8\x3a\x97\x33\xf2\x60\xf9\x0a\x85\xde\xaa\x0d\xf0\x07\x34\xf7\x2e\x30\x6d\x15\x6c\x9f\x3e\xbd\x6d\xc5\xa4\x52\x80\x6d\xb0\xd5\xdd\xea\x46\x29\xce\x02\xd5\x94\x2a\xea\xeb\x3d\xc0\xa1\x3c\x36\xd9\x57\xcc\x24\x92\x4b\x1d\xa2\x12\x6f\xb0\x98\xe1\xaf\x43\x6a\xf2\x3c\xa5\x6b\x21\xfe\xf4\x69\xb5\x44\x90\x51\x2a\x3e\x9f\xbe\xcd\x14\x9b\x2f\x6b\x7b\x8f\xe4\xfd\x0a\x8c\x33\x9d\x42\xcf\x02\x81\x0f\xe0\xf5\xd2\x13\xf3\x11\x38\x1c\x05\x3e\x58\xc3\x1a\xc0\x24\x6c\x31\x01\x4f\xbe\xd4\xd7\x99\xab\xe7\x74\x0d\xfc\xdf\x89\x5c\x6f\xea\x26\x3c\x0b\x44\xf4\xb6\x1d\x43\xe9\x9f\x1d\x9b\x1c\x55\x37\x99\xf1\xb1\x4f\x40\x33\xdf\x6a\x1b\x47\x01\x16\x99\xc6\x71\x00\x0a\x0d\x6b\xcf\x92\x14\xc4\xfc\x2e\x0d\xe0\xfc\x22\x85\x39\xbf\xa8\xef\x30\x76\x6d\x4f\x48\x74\x8e\x56\xc5\x3e\xf3\x83\x92\x10\x87\xea\x53\xcb\xd0\x14\x60\x32\xbe\x1c\xa8\x64\x8d\x7e\x57\x54\x54\xf8\x65\x02\xa5\x7e\xdd\x0b\xfd\x4a\x53\xa8\x44\x64\x0c\x58\x6c\x28\x89\xca\xcd\x70\xa3\xf9\x52\x62\x2f\x31\x35
\x0f\x7d\xc5\x94\x91\xff\x6a\x44\x53\x59\x40\xea\x65\x80\x4a\xc7\x18\xe7\x27\xba\x73\x6f\xcc\x24\xc8\x2f\xfe\xc3\x84\x0a\xc2\xaa\x37\xfc\x84\xce\x02\xf5\x16\xf3\x38\x28\xd0\x32\xf2\x31\xa4\x5e\xa0\xc4\x6b\xd9\xab\x50\x89\x92\x44\x92\x6c\xec\x59\xbb\x86\x0c\x2d\x69\x8d\xcb\x6e\xe0\x67\x93\x1a\x98\x41\x64\x1d\xc4\x63\xfb\xb3\xf6\x82\xe3\x2e\x89\xc2\x50\xa5\xe6\x0a\x7c\xb0\xef\xba\x39\xc7\x78\x6d\xd9\x55\x03\x52\x1a\xd2\x8f\x94\xe9\x1e\x6e\x6e\xae\x03\x9f\x2b\xb7\x37\x18\xf8\xa3\x6b\x09\xae\x7e\xad\xd7\x7b\xbc\xa4\xca\x19\xa7\xfc\x19\x12\x2f\x7c\x81\x2f\x90\x45\x9d\xc0\x77\x64\x1d\x28\xff\xe2\x89\xa7\xa9\x21\xcf\xc7\xdf\x3e\x38\x73\x8f\xcf\xad\x64\xfc\xfb\x59\x3a\x27\xb1\xb4\x45\x4a\xcb\xa1\xf0\x52\xcc\x77\xe2\x4e\x98\x84\x91\x16\x9c\xc4\x98\x1b\x66\x1c\x82\x15\x8e\x59\xfe\xb2\xf7\x94\x47\x5f\xea\x47\x69\xa9\xf4\x80\x4b\x46\x17\x98\x23\xc7\x3c\xb7\x91\x6d\x94\x58\x36\x2c\xd6\x49\x51\x19\xca\x61\x0a\x27\xf0\xf5\x22\x5b\xdb\x70\x7f\xb0\x86\xca\x83\xbc\xc6\xdb\x1e\x32\x37\xf6\x91\x43\x8e\x4f\x03\x74\x73\x03\x00\x34\xf9\x70\x46\xc8\x31\x7f\xdd\xdc\x20\x13\x3f\x78\x73\x03\xd4\x93\x1e\x40\xf2\x5e\x45\xc0\xa2\xca\x02\x9a\xf9\x0d\x59\xdc\x86\xcb\x1c\x81\x45\x88\x6e\x6e\x58\xb1\x2d\x1b\x2a\x77\x2f\x66\x82\x19\x65\x1b\x49\xad\x18\x64\x2f\x65\xad\x02\xc7\xec\xd5\x4d\xb1\x46\x9c\x49\xa2\x0c\xf6\xd3\xe6\x1d\x41\x75\x9a\x35\xcb\x4e\xa3\x21\x1f\x56\x9c\xa0\x4a\xcc\x65\x8f\xc2\x83\x83\xd0\x09\xfc\xe7\xf2\x3f\x86\x37\x63\x7a\x40\xc9\xb6\xa6\x9c\x2b\x00\x3e\x1a\x4f\x4a\x3c\x83\xf4\x24\xd6\xf3\x8d\x28\x72\x0c\xd2\x04\x28\xcc\x3a\x52\x13\xec\xe8\x97\x5c\x0a\x1c\x72\xeb\xc6\x63\xd2\xa5\x5c\x54\xe7\x48\x6a\xa4\x66\xde\x1f\xe4\x7f\x89\xeb\x7a\xb3\xf0\x6d\x93\xa4\xdd\x13\xc9\x28\x76\x2f\x2d\xe0\x72\x70\x8c\xe6\xd7\xda\xe5\xa0\xc4\xaa\x14\x2a\xc1\x8b\x30\xb4\xca\x19\x2c\x58\x21\xee\x29\xad\xaa\x5c\x8c\x19\xbc\x0e\x7c\xd3\xc0\x0f\xab\x8f\x61\x34\xb3\x90\x13\xf8\x90\x69\x39\xab\x23\xb9\x54\xb6\xcb\x34\x83\x5c\x21\x7c\xa8\xdd\x1f\x1c\x95\xe4\x61\x2b\xe3\x29\x3d\x25\xd6\x2e\x9a\x5d\x97\x4b\x19\x86
\xee\x3c\x6b\x50\x8e\x05\x32\x4e\xc9\x02\x2d\x96\xa1\xa7\x37\xbf\x11\x88\x07\xad\x75\xa8\x06\x46\xd1\xf9\x30\xe0\x19\x9a\x52\x86\x7e\xcd\xb3\x41\xbc\xf7\x94\xe4\x55\x1a\x4d\x64\xd9\xeb\x22\xf3\xa4\xea\x85\x9c\x0b\x2c\xd0\xa2\x6e\x4f\x35\x9e\xd5\x45\xcf\x73\x55\x27\x7f\xf8\xab\x90\xc9\xf1\x2e\x1c\xd7\xce\xb0\xa1\x89\xd2\xdc\x1f\x40\xcc\xdf\xa6\x3b\x97\x6c\x76\x94\x76\x10\xe7\xfe\x5b\x78\xc4\x9b\xe9\x2c\x15\x10\xf8\x21\x56\x87\xaf\x7b\xe2\x34\x18\x20\xc9\x18\x17\x5e\x3e\x2c\x28\x4f\x1f\x83\x3f\xe6\xef\xd0\x25\x88\xb3\x66\xc8\x6f\x4e\xa0\xd3\x3f\x9c\x22\x9f\xb2\xc0\xea\xca\xb8\x9a\x07\xbe\x53\xb6\x6d\x67\xcd\xdb\x48\xb1\x3a\x41\x55\xe6\x8b\x1a\x16\xcd\x89\x22\x1c\x34\x33\xe2\xb7\xe0\xa8\x6e\x2c\x60\xce\x3f\x3b\xcb\x03\xf9\x88\x8b\x5c\x70\x05\x34\x8f\x2b\xca\x2e\xeb\x65\x51\x3b\x07\xf0\x26\x16\x78\x11\x86\x3f\xac\x52\x81\x64\x35\xb1\xc1\xb7\x9a\xe8\xd2\x1c\xdf\x71\xf4\x65\x5e\xf7\xa8\xd9\x30\xaa\xb3\x14\x64\x92\x54\x75\x40\x23\xb6\xac\x74\xdb\x71\x6b\xb2\xd0\xdc\x91\xf9\x6c\xc7\x55\xc4\x26\xfd\x20\x43\xe8\x56\x2d\x21\x9b\x66\x02\xc6\xb3\x79\x5a\x0f\x9f\x4e\xf8\xff\x46\xcd\xa2\x5a\x9c\x64\xc9\xb6\x85\x58\x81\x7a\x0e\xe4\xfe\xe2\x85\x21\xbd\x04\x50\xbb\x6f\x56\x9e\xff\x92\x28\xfa\xeb\x35\x44\xf0\x5a\x0f\x68\x6c\x4e\x93\x7f\x07\xeb\x24\xe8\x18\xa9\x11\xcb\x03\xab\x7e\x7a\xb4\x21\xd4\x4a\x2e\x3d\x82\x7c\xf5\x5a\xc5\xd5\x0a\xe4\xee\x0b\x91\xb9\x96\x36\x4a\xfa\x9f\x43\x79\x2a\x2d\x90\xbf\xe4\xd0\x1d\xc8\xa1\xfb\x91\x32\xdf\x5a\xf5\xfb\x6b\xc9\x7f\x2b\xa5\x27\x59\xa3\x5d\x74\xdc\x7b\x5a\xa4\x77\xa1\xe9\xe6\x98\xff\x1e\xf4\xdd\x3f\x83\x8a\x7b\x7e\xd1\x4d\x27\x2c\x66\xba\xfa\x76\x46\x98\xf3\x8b\x76\xe5\xea\xfc\xe2\x6e\xd5\xa4\x4c\x82\x8b\x73\xb4\xba\xb9\x01\x87\xa0\x22\xcf\x8d\x6e\x09\xb2\x2a\x7b\x77\x3c\xd1\x7b\x25\x7d\x8b\x6c\xa0\x5a\xb1\xad\x15\x28\x9d\xb2\x3c\x6f\x09\x14\x90\xfd\x59\x14\x8b\x84\x29\x36\x62\xe2\xb2\xbf\xb8\xc7\x7d\x44\x82\xbb\x7b\x7c\xed\x7f\x85\x92\xa1\x56\x5b\x26\xc9\x53\xad\x6e\xa1\x9d\xae\xea\x6c\xe3\x22\x4e\xc4\xa1\x16\x19\xe4\x4d\xcb\x08\xd2\x4d\x0d\xe9
\xb9\xc5\x61\x56\x03\xcd\xae\x86\xbc\x37\x4c\x08\x87\xb6\x5e\xe7\x5c\xab\x22\x79\xe0\x10\xf2\xb2\x2d\xbe\xc2\xc6\x8f\x0c\x8f\xc7\xb1\x1b\xc9\xf8\xc8\xf3\xe2\x08\x99\x51\x33\x0c\x99\x46\xb9\x84\x56\xb9\x14\x5a\x66\x20\xbf\xa0\x95\x45\xd4\x38\xec\x11\xfa\x86\xfb\x47\xec\x23\x51\xbd\xf4\x72\xa0\x5a\xa1\xb8\x0f\x0f\xa1\xc4\x8a\x52\xb5\x5f\x18\x3d\xa6\xc9\xc0\xd2\x21\x75\x95\x82\x5b\x2a\x0d\xc2\xf0\x63\x26\x43\x9c\xad\xee\xcc\x75\xaa\x44\xbd\x47\x90\x28\x0c\xcd\x0f\x3b\x2b\xf4\x4b\xa6\x9c\x34\xa5\x62\x27\x72\x77\x51\xca\xee\x7a\xb7\xfe\xd3\x1d\x99\x3a\x6b\x83\x6d\x62\xaf\x51\x94\xb1\xac\xbe\x50\x79\x0d\x9e\xc8\xb3\x3a\x2f\x89\x8d\xe4\x19\xcf\x8a\x28\xf5\xe4\x66\xfd\xa9\xa6\xd8\x60\xfe\x94\xc3\x53\x05\x56\x35\x96\x95\x68\xba\xb3\x38\x83\x60\x58\x71\x9d\x5d\x9f\xe3\x35\xc3\xeb\xf1\xed\x63\xe2\x9c\x52\x76\x79\xa8\x4b\xb0\x7c\x70\x00\x4e\x06\x27\xc0\xcd\xfa\x2c\xc4\x39\x95\x3b\x2e\xb5\x3d\x31\x67\xf4\xb2\x87\xd6\xdf\x4c\x6c\xe6\x9c\xbc\x3a\xc8\x4e\x03\x7a\x9f\xc2\xb3\x6d\xd4\xca\xf5\xef\x2b\x5b\xc5\x1b\x16\xa0\xf6\x44\xfc\xeb\x68\xd7\x38\x5b\xda\x1f\xb3\x9a\xbf\xb4\xce\x1b\x60\xfd\x84\x54\x45\x91\xa0\x0b\x3a\x63\xde\xb2\x10\x5b\xd9\x21\x9f\xf1\x96\xf9\xc2\xb4\xbf\xd0\x77\xbf\xcf\x40\xee\x16\xd2\x5b\x25\x7f\x36\x6f\x41\xac\x93\x42\x1d\x33\xf6\x26\x3a\xf5\xb6\x8b\x20\x9f\xc7\xb5\x66\x77\x6a\x38\xeb\x36\x27\x7b\x6d\x5a\x60\x45\xb7\x14\x95\x3a\x52\x94\xf7\x2c\x12\x3b\x1e\x66\x12\x99\x19\xb5\xfa\x65\x92\xdb\xa8\x94\x28\x31\xed\x2c\x94\x3b\x1b\x81\xd7\x29\x33\x67\x56\x36\x4d\x43\x64\xfe\x39\x7c\xce\xe3\x61\xd0\xbc\x4b\x76\x71\xdb\x34\xd6\x3a\xf5\x58\x9b\xd2\xbf\x95\xe7\x63\x51\x6a\x90\x44\xf9\x4f\x8c\xaa\x86\x3a\x7c\xc4\x0c\xee\xef\x68\x80\xac\x72\x15\x15\xd5\x60\x2b\x6a\xdc\xdb\x11\x3d\xd9\x78\x3b\x67\xf9\xad\xe5\x0e\x51\xc8\x01\xdc\xc6\x16\xa5\x64\x97\x8d\x39\x80\x8b\xc9\xee\x50\x4c\x9f\xbd\x4c\x12\xe6\xba\xec\xc0\x09\x48\x86\xde\x21\x14\x15\x8a\x51\x36\xc9\x64\x3c\x5e\xae\xb2\x9a\x7f\xcd\x04\xc0\xa9\xfc\x8e\x4d\xdd\xdf\xcf\x86\x9b\xb9\x1d\xd8\x7e\xcf\xad\xce\xc3\xd8\xa6\xed\xaa
\xa5\x85\x76\x66\xef\x4d\xe6\xae\xb8\xfd\xfe\xe5\x65\x79\xef\x2c\xb6\xad\xab\x65\xfd\x9e\xd4\xc8\x60\xb5\x27\x2c\x91\x3b\x51\x69\x4b\x90\xa3\x62\x60\xbf\xd1\xe5\xce\x77\x23\x47\x94\x2b\xed\x46\xca\x78\x9d\x2e\x9e\x71\xf3\x56\x67\xc9\xa9\x40\xec\xd7\x5a\xdb\xbd\x6c\x69\xaf\xc5\x9f\x06\xe6\xec\x6e\x3a\x5f\x5b\x1b\x61\xd3\x10\x96\x6f\xeb\x2a\xa4\x45\x2b\x41\x97\x86\x88\xca\xc9\xdd\x02\x1f\xbc\x19\xea\x11\x2a\xf4\x4b\x41\xe9\xd9\x57\xc5\x0e\xb8\x27\x83\x13\x58\xfb\x70\xbe\x41\x30\x8d\xe6\xf9\xfa\x0f\x9c\xec\xb8\xb0\xcb\x18\x9d\x02\xbf\x41\x93\x57\x6b\xa2\x51\x30\xd6\x39\x65\xe6\xb2\x77\x6f\xb3\x88\x12\xef\x72\x15\x0d\x51\x04\xd2\xa3\xce\x82\x55\x85\xd4\x34\x59\x95\x14\xea\xf9\xd7\x18\x93\x46\xd3\x05\xac\x54\x68\x71\xcf\x7e\xe9\xa9\xd2\xfb\xd5\xdf\x91\x8a\xdf\xf7\x88\x27\xb2\x77\x29\xf5\xc5\xae\x4f\xa6\xc4\xb5\xbe\xc9\x6b\x50\x7a\xe8\xe9\xc8\xe7\xde\x05\xd2\x2f\xbe\x30\xc4\x91\xa8\x19\xbe\x2a\x13\x95\x18\xd4\x89\x1b\x8e\x58\xf2\x86\x75\x29\xe7\x72\x0d\x58\x36\xf4\xaf\x3a\x73\xf3\x9d\x3e\x7a\x57\xbe\x71\x58\x32\xbc\xf0\xd8\xea\x17\xb4\x1a\x91\x6c\xe0\x7c\x17\x1c\xab\xe3\x16\x73\x89\x07\xb2\xf0\x0c\xf1\xdb\x87\x31\x37\x22\x43\x28\x5b\xa8\xde\x4e\x11\x5f\x52\xc2\xf3\xaf\xf3\xc4\x69\xb3\xb2\x46\x56\xbd\x04\x55\xa9\xce\x1a\x45\x5d\xa1\xb5\x25\x75\x38\xc4\xfa\x70\x98\xb4\xfb\xc1\x5b\x85\xd4\x0b\x2c\xdd\x12\xa4\x52\x06\xf3\x9a\x04\x09\x1c\xd2\x78\xe0\x78\xa3\x04\x09\x7c\x4c\x27\x2e\x86\xdc\x56\x9d\xe8\xac\x06\x90\x42\xbc\x86\xc5\x71\x54\x39\x38\xd5\xee\x86\xd9\xa9\xa8\x4b\xab\xbc\x15\x97\x66\x1a\xfb\x7e\x99\xb5\x98\x3e\xb8\x1d\xd3\x7b\xe5\xd4\x0c\x26\x26\x51\x44\xc3\xdc\x66\x92\x75\x68\x21\x69\xd2\x9c\x10\x93\xe6\xc4\xec\x6d\xc9\xb3\x4e\xa8\xce\xfe\x23\x24\x87\x67\x8f\x16\x0c\x82\x74\x24\xc0\x86\xa1\x8b\x20\x91\x4c\x2d\x6a\x98\x5a\x2e\x22\x83\x63\xb8\x11\x53\x8b\x31\x99\xb8\x21\x14\xfa\x10\x08\x09\x0c\xd7\x19\x2d\xa4\xc3\x04\xd6\x64\xeb\xdd\x8a\x63\xd3\xb6\xbe\x11\xc3\x42\x4f\x08\xc6\x47\x44\xa7\x08\xed\x80\x7d\x31\x8f\xe7\x56\x68\x9f\x5f\x7c\xcb\x05\x5a\x91\xd0\x26\x3b\xf6\x72
\x52\xc5\xad\x70\x54\xcd\x7c\xbf\x62\xa8\x32\x29\xdb\x56\x88\xc6\x2d\x7d\xcf\xb8\x56\xe4\x29\xda\x12\x57\xdd\xd2\x77\x87\xab\x36\x93\xe9\x24\x16\x35\x87\xcb\x3a\x4d\xee\x0e\x8e\xd5\xc6\x34\x9f\x8c\x9f\x0b\xca\x50\xdd\x09\x4c\x96\x01\xbb\xe2\x2d\xf7\x9a\x17\x9a\x4c\x05\xf3\x2c\x3b\x50\x0a\x6d\xf6\x14\x24\xb2\x14\xb2\xed\x35\xac\xb0\x6e\x6e\xd2\x8f\x32\x98\x99\x7e\xe4\x89\x0b\x35\x1c\xd4\x44\xcb\x3b\x4b\x22\xef\x78\x18\x1b\xc1\x53\xe3\x58\xf5\x23\x83\x8d\x43\x33\xee\x7c\x66\x80\x38\x18\x21\x95\xfa\xa0\xd5\x49\xc1\xbc\xeb\x94\x8b\x31\xb1\xa1\x72\x79\x2a\xbe\x1e\xda\x3e\x17\xaa\x42\x76\x24\xf6\x1a\x9a\x43\x5a\xa5\x2e\xe8\xc8\x13\x9a\xa5\x92\x6c\xc8\xd3\x50\x85\x99\x31\x76\xf5\x88\xcf\xde\x3a\x95\x96\x72\x04\xca\xf9\x31\x9a\x2c\x0b\xea\xc1\x27\x7d\x73\x64\x43\x40\x15\x07\x03\x37\xf6\x03\x17\xab\x25\x7a\x3f\xb5\x94\xb1\xa1\xe2\x16\xc5\x60\xb1\x44\xe8\x3c\x47\x4d\x36\xce\x31\xd3\xc4\xb6\x55\x16\xcc\x9c\x17\x65\x0b\x95\xcd\x03\x41\x71\x17\x11\x91\x2a\x94\xa9\xab\x0d\xe9\xe9\x85\x47\x93\x3f\x6d\xbe\x81\x17\x61\x18\x93\xb9\x55\x06\xfc\xdb\xbb\xaa\x3a\x05\xc9\xef\x05\xa8\x5b\xcb\x81\xda\x0a\x46\x3c\x00\x78\x9d\x57\xcd\x66\x48\x54\x45\x4d\x25\x09\xaa\x5a\x91\x13\xf4\xac\xf9\x1a\x5e\x41\xdc\xbb\x80\xab\x78\x41\xb1\x88\x8b\xb1\xa6\x68\x36\xce\xe6\x99\x69\xc3\xf1\x4c\x50\xaf\x19\x47\x05\xb1\xd3\x38\xfa\x34\x40\xfd\x05\x56\x09\x38\x72\xa8\xe2\x8b\x55\x5f\x16\xea\xb2\xea\x0a\xbb\xc7\xb6\xea\x44\xc2\x16\xcd\xb3\x1a\x03\x7d\xfd\x89\xcd\x66\x31\x4e\x5d\x30\xa4\xb8\x6a\x47\x2c\xf5\x1c\xb8\x3f\xab\xfd\x36\xea\xc5\xdd\x28\x01\x59\xf3\x84\xd1\x05\x5a\x69\x72\x0f\xf9\x60\xca\xbc\x97\x67\x11\x3c\x35\x8f\xb4\x62\xae\x43\xa7\x90\x1d\x7b\xf0\xa1\x3d\x3c\xb5\x3e\xae\x16\x67\x34\x74\xb0\x40\xcc\x13\xea\x46\xbd\xa7\x47\x92\x01\xcc\xb7\xa8\xf7\xe4\xf1\x04\x32\x77\x7f\x00\x43\x77\x7f\x08\x79\x9c\xdd\x52\xb0\x55\x72\x67\x40\x21\x76\xd1\xb8\xd0\xfe\xc4\xb2\x9f\xed\x5b\xcc\xb5\xa8\x8b\x75\x1a\x1e\xdb\x76\x02\x4a\x90\xdc\x8e\x89\xb3\x8c\xf8\xdc\xa2\xfa\x89\x52\x1b\xee\x8b\x9b\x1b\x62\xee\x1a\xf6\x5d\x57
\xd8\xcf\x64\x97\xf6\xb3\xb5\xf6\x87\x44\xf6\x75\x28\x87\xc0\x5d\xb4\x36\x49\xa1\xae\xe5\x00\xf6\xd9\xc1\x01\x76\xf4\xd8\xd3\xbf\x2c\x3b\x01\xc2\x53\x2b\xb4\xf5\x15\x0d\x5f\xc7\x07\x7c\x62\x1e\xde\xd3\xdf\x09\xba\xec\x7d\x5a\x2d\x91\xb9\xe0\x79\xa3\x37\xe6\x9e\x27\x04\x5a\x2c\x45\x4f\xd0\x9e\xda\xfb\x23\x5f\x44\x0c\xf5\x08\x25\x7d\x85\xe1\x59\x88\x7a\x98\x18\x6f\x2f\x7b\xbd\xb6\xca\xde\x4a\xdb\x70\x6c\xf7\x74\x12\xb5\xea\x23\x9e\x2a\xfb\xff\xbe\x9b\x6a\x55\xe6\x22\xd6\x32\xc6\x1d\xa4\x3c\xac\x49\xf9\x51\xc5\xd8\xbf\x24\xee\x24\x31\xb9\xe7\x2f\x1d\x48\x7c\x99\x57\x52\x37\x93\xe4\xbd\xca\x65\xbc\xaa\xc6\xda\x5e\x6b\xd3\x51\xe1\x6a\x2d\xf5\x3f\xd1\xce\xa7\xae\x72\x4b\x85\x71\xaa\x2c\xb7\x78\xf3\x06\x3f\x98\x24\x61\x0c\x29\x1e\xd6\x2e\xaf\x7c\x34\x66\x2a\x97\x45\x72\x17\x92\x25\x0e\x29\xbc\x52\x5a\x8f\x78\x3c\x9a\xb8\x13\x2f\x0c\xad\x31\xba\xb9\x09\xb3\x57\xda\xfa\x9e\x89\xdc\xdc\x28\xab\x6c\x72\xa5\x33\xa9\x8d\x65\x90\xaa\xf7\x91\x0d\xb9\x4b\xc6\x83\x09\xa4\x2e\x19\x0f\x27\x7b\x05\xa7\x1a\xd5\x32\x87\xb4\xc2\x15\xb8\xe8\xed\x41\x21\x50\x0f\x8d\xea\x55\x9c\x0e\x3a\xac\x99\x36\x94\x4e\x9b\xd2\xf8\xd3\x53\xd2\x06\x4a\x7f\x8c\xa6\xbe\xba\xaf\x53\x83\x91\xc3\x29\x13\x09\xc7\xd9\x1d\x24\x26\x2a\xeb\x01\xc5\xcd\x11\xdd\xcd\xce\xff\x15\xb7\xc6\xaa\x67\xc7\x2b\x36\x7d\x2f\x0c\xa5\x40\xe9\xcb\x13\xcb\xfd\xe3\x47\xa8\xc0\xd3\x55\xdd\xbb\xe4\xa1\xc7\xe7\xbf\xea\xdc\x73\xea\x11\x5d\x3a\x9b\x21\x56\x03\xac\x0b\x81\x0d\xcb\x24\xcb\xbd\x0a\x52\x7c\x8c\x5c\x3d\x98\x62\x7c\xee\x01\xdc\x1f\xd8\xe6\xa1\x9b\x3c\x7d\x89\x0d\x71\xe1\x13\xb3\xa1\x57\x5a\xb6\x1a\x9f\x94\xff\x51\xe9\x7c\x66\x5f\x7b\x8e\x17\x04\xd6\xb5\x24\xf0\x08\xf0\xc8\xf7\x55\xfe\xf9\x38\xcf\x1f\xb5\x2a\x93\xc9\xa1\xec\x89\x8c\xc3\x04\xdd\xe4\xee\x0f\xd9\x10\x7c\x4a\xdc\x1f\x5e\x9c\x51\x26\x50\xa0\xe3\x56\x88\xb7\x40\xcf\x3b\xf4\x3a\xca\xc1\x98\x45\x10\x43\xe0\x38\xcb\x5d\x45\x26\xc4\x5a\x6a\x0e\xbb\xac\x36\x35\xcd\xfd\x45\x3c\xcf\x15\x47\x45\x3f\xc4\x7d\x05\x55\x5b\x69\xf7\x54\xf0\x6c\xae\xdc\x46\x03\x59\x9d\x49\x7e\xa7\xf5
\xd8\xcc\x9d\xc5\xf7\x6c\xd2\xca\x0e\xf3\x7b\x30\x6c\xa5\xe3\xd9\x75\xf3\x56\x86\xb2\xdf\xd0\xc8\xd5\x42\xfc\x8c\xad\x2b\x4b\xf9\xb6\xa5\x5d\x7f\x25\x55\x78\x35\xee\x2b\xdc\x44\xdd\x6e\xa1\x97\x17\x10\x51\x4a\x7d\x51\xf9\x88\x95\xd9\xff\xf8\xf8\xfe\x9d\xa3\x13\xce\xe3\xe9\xca\x1a\x13\x88\x26\x76\x95\x76\x5b\xc1\x11\x92\x6a\xe9\x6b\xe7\xe1\xcd\x8d\x65\x42\xeb\x2a\xe2\xe8\x43\x1d\xa6\x06\x51\xe1\x63\x76\xc9\x11\x3b\x17\x55\x9f\xea\xe8\x2a\xa1\xbd\x15\x16\xd2\x11\xb7\x48\x02\x95\x59\x3c\x16\x01\x64\x1b\x11\x40\x8c\x08\xc8\x45\xe0\x8d\xaa\x4e\xb9\x19\x8f\x2f\x13\x7a\x7a\x70\x60\x21\x17\x00\x93\x61\x5b\xad\xc5\x82\x40\xcd\x8d\x4f\x40\x8e\x96\x9e\x3a\xfa\xea\x1c\xde\xe5\xd0\x8b\xf8\xfc\x59\x4a\x69\x92\x79\x5a\x3d\xff\xd0\x89\x89\x0c\x5c\xdb\xcd\x31\x14\xf5\xc2\xaf\xa4\xd3\x30\xc5\x4b\x2c\x09\x05\x64\x15\xa1\x80\xac\x1c\x0a\x68\x02\x07\x8a\xbc\x26\x14\xaf\x71\xb7\x52\x22\x14\x19\x2d\xb4\xf7\xf8\xc1\x01\x2f\x65\x24\xd0\xe7\x70\x76\x07\xa2\xfa\xfc\xe2\xb6\x32\x5a\x7c\x6b\x19\xad\x56\xe4\x98\xdc\x85\x70\x16\xf7\x2a\x9c\x15\xad\xdb\xa4\xb2\xd1\x98\xbf\x9e\x55\xb0\xc2\x5e\xdd\x3a\x48\xf3\x28\xc3\x77\x65\xb9\xcc\xd8\x54\x6b\x20\xb3\x56\xd7\xbb\xd2\x10\xb5\xd7\x43\x6c\xe3\xac\x57\xe7\x4a\x22\xb3\x9b\x3c\x37\xcd\x77\x57\xea\x8c\x40\xc9\xa1\x5a\x1d\xb7\x52\xaf\xca\x22\x27\x13\xd9\xe6\x0a\x63\xf9\x68\xe3\x89\xd4\x87\xfd\xbb\x62\x8b\xbb\x99\xe6\xc4\xed\xe3\x7b\x3e\x06\xa4\x83\x6c\xe4\x17\x2d\xb4\x4b\x01\x6d\x72\x4d\x90\xfc\x4e\x9a\xb3\x95\x41\xe6\x0a\x87\xa1\x20\xf2\x33\x1e\xbc\xd9\x31\xa2\x38\x21\x59\xae\x8d\x78\xce\xd4\x23\x79\xf6\xcd\xcd\x78\x62\xaf\xe1\x78\x22\x05\x23\xfe\xdd\x2a\xc4\xc3\x98\xf0\x37\xfd\xa0\x1e\xcb\x6a\x4d\x24\x79\xc0\x4d\xd8\x90\x74\x62\xc7\x92\xf3\xca\x77\xc2\x8d\x3a\x0c\x70\x8b\x79\x4e\x03\x6e\xbb\x49\x84\x5a\x37\xc2\x56\x15\x28\x66\xc8\x5f\xd0\xea\x56\xfc\xb8\xc9\x68\x5b\x0f\xa5\x8d\xfa\x47\x62\xc9\xba\xeb\x5d\xbf\x9d\xcf\xca\xc1\x29\x5f\x81\xd1\xbc\x19\x1a\x5d\x62\x12\xd0\x4b\xf5\xd2\x53\xf8\x51\x7f\x53\xf3\xf6\x33\xf2\x02\xc4\x78\x7b\xd8\xa3\x69\x48\x07\x42
\xbc\x11\x68\x61\x01\x41\xcf\x51\x9a\x28\xe8\x1a\xfc\x9f\xfe\x4b\x8d\xf3\x27\x55\x30\xd2\x79\x65\x5c\xf4\x1c\x80\x11\x5a\x57\x9a\xc3\xb7\xec\xab\xd7\x70\x0a\xba\x56\xa0\x85\xce\xeb\xc4\x66\x55\x38\x48\xb1\xc1\xf6\xd1\x21\xbb\x46\x2f\x2e\x8a\xcf\x42\x03\x31\x36\x66\xd6\xcf\xd1\x8a\x5b\xc8\x2e\xef\x0a\xc4\xbe\x96\xf2\x41\x77\x46\x20\x1a\x93\x89\xe4\xb4\x86\x31\xa3\x8a\xd8\x8b\xdb\xe1\xaa\x57\x54\x6e\x32\x3a\x30\xbc\x5a\x2a\xb5\xf1\x02\xaa\xf4\x5e\x5d\xaf\x8b\x1e\x70\x19\xb1\xa4\xce\x9d\x5e\xe0\x2d\x85\x7e\xab\x04\xc5\xee\x6f\x0a\xf3\xeb\x24\x2e\x60\x84\xd6\x50\xe8\x53\xea\x17\x2c\xa5\x0c\xf1\xc2\x5f\x4d\x88\xe5\x9b\x40\x35\x69\xce\x49\x1f\x89\xb7\xe4\x73\x2a\xac\xeb\x75\x07\xda\x08\x74\x25\xfa\x0b\xe4\xf1\x88\x55\xbf\x53\x97\x03\xa8\xab\xb7\x7b\x56\x62\x81\x17\x88\x46\xb5\x09\xf0\xb4\x3d\xc9\xbc\x51\x14\x7c\x32\xc0\x77\x14\xa1\x97\x37\xf2\x94\xd7\x41\xfb\x2d\x73\xc5\x55\x8c\xc8\xe4\x5f\x55\x69\x21\x6a\x28\x10\xe7\xfb\xe2\x87\x7d\xf3\x31\x7e\xff\xe9\xfe\xb6\x83\x9f\x3f\xfd\xfa\xf6\x07\x8f\x71\x27\xee\xdc\x92\xfb\x2b\xf8\xef\xe1\x4f\xf3\xf0\x68\xf0\x0b\x80\x67\x21\xf5\xcf\x47\x7f\xbb\x06\x5c\xf9\x17\x70\x30\x1a\x4f\xe4\xb2\xf7\x84\x7a\xc3\x40\xfe\x1e\x3f\x82\x80\x5f\xcc\xc0\x04\x8e\x9f\x42\x70\xb5\x08\x55\x3e\xc2\xb9\x10\xcb\xd1\xe1\xe1\xe5\xe5\xa5\x73\x79\xec\x50\x36\x3b\x3c\x1a\x0c\x06\x87\x12\xb0\xb6\x50\xd5\x3d\xcc\x36\x34\xba\x0a\x31\x39\xaf\xac\x31\x7c\xfa\xf4\xe9\x61\x7d\x71\xb9\xc1\x4b\x1c\x88\x39\x80\xe0\xe4\x64\x79\x65\x3e\xcd\x11\x9e\xcd\x45\xfe\xdb\x05\x46\x97\x3f\xd0\x2b\x00\xc1\xa0\x37\xe8\x9d\x9c\xf4\x4e\x4e\xe2\x12\x29\xc1\x95\x53\xf2\xd0\x19\xca\x6f\x8f\x27\x70\x3c\x80\xe0\x7f\xfe\x87\xf4\x7a\xbd\x9e\xfc\xf2\x08\x82\x59\xb9\x24\x53\xea\x63\xe6\x87\xc8\x34\xc8\x64\x53\xe6\x6f\x5f\xf6\x78\xf4\x38\xfe\xb5\x92\xbf\xcc\x0f\x2e\x56\x72\x1e\x81\x0a\xf5\x9d\x52\xb6\xe8\x53\x86\x67\x98\x8c\x7a\x47\x8f\x97\x57\xbd\x23\x3d\x74\xd9\xe5\x93\xed\xba\x1d\x6e\xd8\xed\xf0\x6e\xba\xcd\x63\x7b\xd2\x11\xdd\x93\xbb\xc6\xb7\x43\xc7\xc3\x3b\xea\xf8\x28\xdb\x6f\x32\x8a\x06\x84
\x97\x57\xaa\xef\xbb\xec\xf6\xa8\x5b\xb7\x47\xb7\xef\xf6\x64\x53\x74\x4f\xee\x08\xdf\x93\x4d\x11\x3e\xb9\x23\x8c\x8f\x8f\x73\x8c\xd5\xda\xef\xf1\xb1\xe4\xab\xdb\xf3\xf3\x70\xb3\x6e\x87\xc3\x3b\xe9\xb6\x80\xed\xa0\x23\xba\x83\xbb\xc6\xb7\xbd\x63\x8d\xf0\xed\x3b\x4e\xba\xd2\x1c\x3d\x6c\x67\xac\x81\xe4\xe8\xe1\xed\x3b\xce\xf6\x9b\xd0\xbd\xa1\xdf\xe5\x95\xa2\xf6\x9d\xe2\xdb\xa5\xdf\xc1\x1d\x75\xbc\x29\x9d\x1b\xc9\x0c\x2a\xbf\x6c\xbe\x57\x1f\xe5\x84\xea\xd1\x66\x8b\xee\xe8\x68\xdb\x45\xd7\xd4\x6d\x3b\xef\xeb\x7e\xb7\xe2\xfd\xa3\xda\x29\x39\x6a\xdf\x33\xe5\x94\x1c\x6d\xb5\x65\x1e\xd5\xb3\x60\x97\x7e\x07\x77\xd3\xf1\xd3\x6c\xbf\x4f\x5b\xbb\x7d\xba\xbc\x92\xff\xbb\x65\xa7\xc7\x0f\x73\x0b\xee\x61\xbb\x48\x7d\x28\x17\xdc\xc3\x3b\xee\xb8\x1d\x5d\xd5\xef\xed\xf1\x7d\xba\x21\xba\x4f\x1b\xb1\xbd\x9f\x75\xfe\x24\x3b\xc6\x27\xed\xeb\xed\xc9\xf2\xaa\xf7\xe4\xd6\x94\x19\x3e\xda\xac\xdb\xe1\xa3\x3b\xe9\x36\x8f\xed\xf1\xa3\x6e\xe8\x1e\x3f\xba\x63\x7c\x3b\x74\xac\x10\xbe\x83\x8e\x73\x08\x1f\xb5\x13\x5a\xe2\x7b\x74\x7b\x42\xe7\xba\x1d\xb6\xa3\x2b\xbb\x1d\xde\x1e\xdb\xe3\x47\x1b\xa2\x2b\x29\x7c\x17\xf8\xe6\x3b\xee\x80\xb0\xea\xb8\x1e\xe3\xbb\x5a\xeb\x0f\xeb\x37\xd7\x61\x87\x13\xa9\x3a\xb1\x6c\xb5\xd9\x34\x74\x7c\xdc\xb1\xe3\xe3\xdb\x77\x3c\x3c\xda\x70\x7b\x1d\x1e\x6d\xbd\xbd\xe6\x3a\x3e\xde\xb4\xe3\xe3\x3b\xea\x78\x98\xdb\xe9\x86\xed\x7b\xce\x50\x6e\x75\xc3\xad\xb6\xd8\xfc\x1c\xe7\x36\xbb\xa3\xf6\x3d\xf6\x48\xee\x76\x47\x5b\x6d\xb2\x0d\x1d\x77\xc0\x58\x75\x7c\x07\x18\xe7\x49\xdd\x01\x63\x45\xea\x7a\x8c\xef\x6a\xcd\x3f\xad\x5f\x7a\x1d\x18\xf1\xa8\x99\x11\xf3\x83\xcc\xff\x52\x1f\x26\x10\xcc\x3d\xfe\xfa\xc2\x0b\xc1\x68\xea\x85\x1c\xad\xff\x06\x17\x48\x78\xa3\xeb\x85\xb2\xd8\xea\xf7\x1e\x3a\xd8\x85\x9d\xf9\x19\x07\xed\xd6\xe4\x9a\xd4\x4c\x5f\xc7\x92\x8c\x3f\xff\x78\x32\x3f\x3d\xf9\xb1\x9b\x25\xf9\x04\x02\x3c\x05\x70\x3c\x3e\x7a\xa8\x02\x0b\xd4\x9f\x03\x38\x06\xb1\x25\x7c\x32\x99\x40\x12\x85\xa1\xf9\x07\x5e\x17\x1a\x18\x40\x10\xcf\xc1\x10\x8e\x87\x4f\x20\xa0\x91\x08\x91\xba\x20\x90\xa4\xce\xcc
\xcb\x04\x02\xf5\x04\x06\x12\x88\xa9\xc1\xac\x4b\xad\x9d\xa8\x99\x9a\x63\x9f\xb2\xa5\xa1\x3d\xd0\xfd\x8e\xc7\x00\x07\x00\x82\xc0\x97\x5d\x81\x4b\xe6\x2d\x97\xea\x96\x46\x8d\x56\x7e\x95\x23\xad\x68\xcf\x5b\x2e\xfb\x17\x18\x5d\xa6\xed\xf8\xa1\xc7\xb9\x6a\xc5\x20\xd9\x53\x99\x56\xab\xab\x2b\x22\xf6\x79\xa8\x48\x23\x67\x5b\xa8\xf7\x83\x1b\x89\x91\x5d\x0d\xc3\xa3\xe4\x2e\x25\xbd\x5c\x98\x34\x53\xc5\xd0\xfb\xb6\xdf\x73\x7c\x2f\x58\xd4\x9d\xed\x33\x0c\xdc\x91\xe5\x7d\xba\x58\x52\x22\xe9\x70\xe8\xf9\x61\x3f\xce\xb5\xfa\xb5\x99\xff\xff\x9c\x5d\x1d\x2f\x10\x9b\x76\x63\xfe\x3c\xef\xca\x15\x30\x65\x08\xa9\xeb\xbc\x18\x81\x98\x67\x28\xf1\xe7\x1e\x99\x49\xf9\xa4\x46\x05\x20\x50\x59\x64\xe7\xda\xc9\x7a\x62\x56\x90\x4e\x11\x28\x17\xd1\xf0\x29\x1c\xe8\x89\x56\x0c\x9a\xd4\x4f\xd6\x93\x29\xe0\xc8\x63\xfe\x5c\xb1\xc1\x47\xf5\x67\xef\x6c\xd5\x23\xde\x02\x1d\xea\x3b\x5d\x09\x9f\x5f\x48\x85\x21\x33\x2f\xc0\xb4\x3f\x63\x34\x5a\xa6\xc3\x25\x2a\xee\x31\x19\xaa\x4a\x23\x0b\x60\x66\x14\x70\x0c\x54\xf0\x95\x19\x85\x0e\xc4\x8a\xc7\xa4\x91\xe7\xfa\xc3\x66\x58\x95\x87\x7b\x1b\xf1\x5b\xc9\x55\xdb\x70\xa4\x1c\x7f\x4c\xa3\xaf\xcd\x93\x33\xfc\xfb\xf2\xf7\xd3\x57\x8b\x4a\x9e\x04\x07\xf1\xad\x6e\xd5\x25\x1f\x26\x4b\x75\xd5\x2a\xb7\x47\x3d\x5d\x7a\xba\xcd\x27\x33\xcb\xe9\x4b\x3d\xe3\xe1\x00\x2a\x31\x39\x3e\x7a\x04\xc7\x71\xc1\x17\xa0\x05\xb3\x79\x41\x49\x4e\x91\x02\xf4\xe7\xc8\x3f\x47\x0a\xfa\x61\x76\x1b\x40\xbf\x27\xbb\x40\x0c\x22\xe7\x30\xf9\xdb\xf0\x6f\xe9\x83\x61\x6b\xd9\x74\xba\x5c\x36\x5e\x16\x99\x56\x0c\xfb\x66\x07\x1f\x17\x3f\x2e\xed\xf9\x8f\x20\x08\xbd\x33\x14\xc6\x64\x98\xaa\xe8\xca\x36\x3a\x54\x5e\x28\xf2\xa5\x47\x32\xfa\xcd\xfb\x25\x22\x4d\x3a\x46\xb1\xf7\xa7\xa6\xf3\xa4\x5f\x3f\xa4\x1c\x95\x15\xa6\xea\xde\x5e\xc6\xc0\x4d\xdd\x0d\x87\x70\x78\xf7\x2b\x2c\x5d\x25\x5b\xac\xb1\x64\x9b\xfd\xda\xeb\xeb\xc5\x7f\xa3\xa7\xbf\xfd\xc7\xaf\x1f\xaa\xd7\x97\x8a\xe1\x53\xfe\x0c\x66\xa8\x00\x26\xdf\xea\xd7\xde\x70\x08\x4f\x72\x04\xbf\xb5\x9e\xf4\x08\x82\xb9\xf2\xec\xca\xf3\x81\x69\x1b\x79\xfe\x3c\x69\x32\x1f
\x92\x0a\xc1\xef\x11\x8a\xd2\x15\x52\xa5\xa4\xe4\x02\x15\xd3\x5d\x40\xe3\x39\xd1\xcb\x6e\x28\x97\x5d\x85\x8e\x93\xd1\x59\x0c\x43\x2e\xe3\x25\xa4\x15\xa5\x6a\xf1\x30\x7c\x0a\x8f\xe2\xb1\x7e\x32\xbb\x07\xf8\x68\x42\x3e\x13\x11\x91\xc4\x80\x82\x38\x96\x39\xb7\xc8\x1f\x9b\x15\x20\x18\x25\xc9\x89\x62\x78\x9b\x0e\xcd\x87\x7d\x00\x81\x0a\xb3\xdf\x4f\x8a\xe2\x5d\xc9\x2c\xa4\x44\x63\x7d\x0a\x8f\xe1\x18\xc4\xb4\x9b\x94\x00\x2b\x95\xb4\x23\x78\x5c\xa7\x77\x0d\xe3\x02\x98\xd5\x2f\x1e\x41\x10\xe0\x8b\xc6\x43\x93\x29\x7f\x9a\xd0\x3d\x2b\xd8\xcb\xb5\xe2\x9a\x27\x10\xac\x30\x0a\x83\x44\x47\x4d\x6b\x55\xb2\x8b\x62\xed\x3a\x25\xb2\x6a\x68\xb5\x47\xd1\x5a\x84\x32\x30\xc4\x8b\x91\xf2\x18\xf6\xfa\x5a\x46\x42\xf0\x03\x43\x5e\xe0\xb3\x68\x71\x56\xdf\x42\x2d\x82\x67\x49\xe5\xbb\x41\xb2\x1e\xd1\xda\x21\x24\x4b\xf9\x3e\x48\x5c\xfd\xa5\x34\x06\x41\x69\x78\xe6\xdd\x6e\x10\xc5\x9d\xa5\x1e\xbe\x6a\xbe\xf3\x82\xb1\x4a\x1a\xb6\x49\x9b\x6d\x4e\x47\x2d\x4d\x96\xe8\xd4\x72\x66\xeb\x48\xa7\x8a\xe3\x55\x89\x74\xdb\x1d\xb6\x2a\x36\xd1\xcd\xb7\x5f\xdf\x13\x5e\x48\x67\xdf\xec\xe0\xf5\x94\x79\xff\xfd\x74\x38\xfc\xcf\x3b\x3f\x78\x55\x9d\xb6\xf2\x07\x99\x96\x73\xd4\xe6\x2a\xe8\xfd\x1d\xb9\x74\x70\x61\x7c\xe8\x32\xbf\xbe\xeb\x63\x57\x9e\xaf\x52\xbe\xac\x48\x27\x5b\x5d\x9f\x06\xa8\x8f\x02\x2c\xe8\x37\x60\xca\x73\xf4\xfa\x8f\x47\x17\xe4\xb2\x1b\x53\xc6\x7a\x47\x2e\x19\x5b\x99\x11\xe5\xe6\x43\x49\xb8\x02\xc9\xf9\x2b\xde\xae\xe9\xd2\xa4\x5a\xd0\xb0\x9f\xd5\x2b\x01\x41\xca\xa2\xba\x85\x64\x16\x93\x86\x92\x2f\xaa\xbd\xe4\x97\x31\x53\x19\x66\x98\x2b\x95\x35\x1e\x4c\x88\x09\x7a\x17\x49\xdc\x65\x77\x0b\x15\x67\x06\xc4\x1c\x2d\x14\x9f\x49\xf1\x63\x1a\x51\x45\x86\x15\xe6\x58\x31\x4c\x1b\x6f\x9d\xa3\x55\xb4\xfc\x1a\xac\x95\xb2\xc6\x16\xf2\x4e\xe7\x8d\x53\x26\xaa\x7e\x80\x25\x8f\x7e\x7d\xfe\x7a\xff\x69\xfa\xd3\xd9\xdb\x47\x74\xd3\x93\x7d\xe1\xf4\x56\x38\x5d\xa8\x95\xa2\x27\x63\x89\xd8\xc2\x53\x87\x96\x78\xde\xb2\x47\x0f\x43\x83\x8a\xd3\x47\xc3\x51\xa1\x4a\x57\x34\x22\x52\x9b\x3a\xf5\x2b\xc5\xc5\x4e\x32\x6c\xe9\x11\x5f\x9e
\x73\x37\xb0\x0b\x97\x0e\xab\x1b\x19\x36\x8b\x3a\x46\x05\xda\xed\x88\x26\x2c\x52\x8b\xa8\xf1\x23\xaf\x40\x34\x5e\x4a\xe9\x11\x41\x7f\xd0\xc4\x7b\xa7\x17\xed\xd7\xa2\xc6\x1d\x2d\xbd\xd2\xea\xd9\x7c\x09\x06\x49\x94\x57\x7f\x89\xfd\xf3\x6f\xa1\x75\x20\xfc\xfb\xff\x19\x9c\xff\x84\xaa\x17\xa0\xdc\x82\xcb\x8b\xaf\x78\xda\xd6\x1b\x75\x57\x85\xf5\x11\x04\x21\xee\x72\x3e\x36\x4a\x49\x88\x7c\xb9\x07\x98\x4c\x64\xca\xf2\xa6\x8e\xe2\x20\xcf\x36\x10\x60\xae\x5e\xdc\xbe\x40\x55\x67\xe4\xda\xe3\x95\x07\x52\x83\x5b\x88\x55\xde\xad\x5b\xd8\xdb\xe6\x0c\x4d\x4d\x0b\xf2\xcf\xbe\xa0\x6a\xe9\x64\x5f\xb8\xaa\x19\x7e\x66\xac\xc3\x22\x4c\xc5\x81\xba\xfa\xec\x53\x5e\x0c\xc3\x3b\xe5\xfa\x12\xc3\x6e\xce\xf3\x71\x7e\xb3\x6f\xb7\xe5\xa0\xf7\xef\x06\x87\x6f\xcf\xef\x76\xcb\xc9\xb2\xac\xf0\xb4\x10\x4c\x6c\x37\x9d\x25\x5b\x49\xe8\xe6\x5b\xa8\x3a\x79\x0d\xb7\x3a\x79\x55\x89\xfb\x16\x64\x72\xe6\xa7\x2d\x50\xc9\xd6\xbf\x43\x44\xea\x6e\xee\x36\x51\x09\x2a\xf1\x95\xca\xe5\x0a\x74\x52\x0a\x9a\xd0\x6e\xd7\x0f\x92\x39\x8e\x45\x8c\xa1\x54\x4b\xa7\x86\x1f\xcd\x95\x71\xb2\xb5\x1a\x59\x71\x2b\x5a\xde\x5e\x50\x14\x56\xf9\x16\x62\xa2\x78\x9a\xfd\xea\xe1\x64\xff\xfa\xef\xf9\xfb\x23\xfe\x74\xe3\xe3\x78\xe1\x1e\x23\x3e\xda\x88\xd5\x12\xf5\xe3\x23\x76\x83\x0d\xb3\x70\x97\xf1\x31\xa9\x51\x6b\x44\xac\xb8\xe2\x4a\xfb\xd1\x3b\x9b\xf9\x7d\xab\xad\x2d\x3e\xd6\x6c\xdd\x48\x7a\xdd\x96\x5c\xb4\xc5\xd7\x53\x47\x8f\x64\x43\x4f\x60\x72\xc0\x8b\x2f\xd8\x72\x36\x8b\x14\x2c\x77\x71\xac\x80\x9f\x42\xe0\x45\x82\x4e\xa9\x1f\x29\xbb\x6f\xf2\x77\x27\x7f\x97\xbb\xe1\xf9\x3c\xcb\x6e\xce\xf3\x65\xbf\x8d\xaf\xcd\xf4\xe1\x74\xfa\xf8\x28\xfc\x71\xe3\x8b\xd6\x3c\xfb\xa7\xb7\x34\x4f\x21\x60\x54\xb9\x24\x9d\x79\x84\x14\x6f\x6e\xea\x34\xb1\x6a\x25\x2a\x7f\x7d\x59\xb8\xfe\x48\x42\x39\xe3\x80\xc9\xc4\x4b\x34\x09\x97\x4c\x42\xaa\xee\x26\xda\xd3\xf4\xbc\xf4\xc4\xdc\xb4\x2b\x75\xd4\x5f\x87\xc7\xce\xd1\x93\x93\xde\xf0\x91\x33\x7c\xfc\xc4\x3b\x72\x9e\x3c\x7e\xd4\xd3\xff\x1d\xf4\x86\xbd\x61\xdf\x19\x0c\x9e\xf4\x1f\x3a\x8f\x1f\x0e\x7b\xf9\x42\x59\x2c\x0b
\x7b\xb2\xf0\x8f\xc5\x43\xe7\xe1\xd3\x47\xfd\xa1\xf3\xf0\xe4\xb1\x37\x74\x8e\x8f\x8f\x7b\xfa\xbf\xaa\x95\xde\xa0\x7f\xe4\x3c\x7a\xf4\xb8\x97\x2f\x91\x65\x83\x9e\x2a\xf9\x63\x71\xe2\x3c\x79\x28\x8b\x8e\x4e\x9e\x7a\x43\xe7\xe8\xf1\xb0\xa7\xff\xab\x1b\x70\x06\x47\x8f\xfb\xce\x70\xf0\xd8\x1f\xf4\x9c\xc1\xb1\xfc\xec\x0c\x1e\x3d\xee\xcb\xef\xf2\xf3\x1f\x8b\xbe\xf3\xf4\xf8\x71\xff\xd8\x39\x39\x7e\x54\x31\x00\xe7\xe9\x13\x35\xba\xa7\x0f\x7d\x67\x70\x7c\xec\x0c\x1f\x1f\xa9\x7f\x8f\x4f\x9e\xc8\xa6\x1e\x1e\xf5\x9d\xc1\x63\xe7\xe1\x71\xdf\x39\x79\xf4\xd0\x79\xfa\xa8\x2f\x2b\xf4\x86\xce\x40\xe2\x76\xe2\x3c\x3e\xea\x1d\x3b\x47\x4f\xcb\x0d\xf7\x25\x88\x6a\xf9\xf8\x49\x05\x76\x43\x67\x38\x94\xcd\x9c\x0c\x1f\x3b\xc7\x27\x47\xf2\x7f\xaa\x64\x60\x06\x7e\x34\xef\x3b\x83\xe1\xf1\x1f\x0b\xd9\xce\x93\xfe\xb1\x73\x7c\x5c\x31\x78\x59\xf6\x44\xf6\x71\x74\xe2\x3b\x83\xe1\x89\x33\x1c\x9e\xa8\x7f\x8f\x8e\x9e\xca\xd1\x1f\x9f\x1c\x79\xa5\xbe\xfb\x43\x67\x38\x38\x92\x23\x78\x72\x34\x97\xd0\x8a\x44\x47\x0f\x7b\x8f\xe5\x7f\xab\x48\x34\x7c\xf4\xb0\x2f\xe7\xcf\x97\x63\x72\x86\x4f\x8f\xfb\x92\xc4\xce\xf1\x93\xbe\x2c\x92\x25\xb2\x89\x93\x27\xfd\xe1\x91\x33\x7c\x3a\xac\x6a\xe2\xe1\xe0\x71\x7f\xe8\x3c\x19\x9e\xf8\x8e\x1c\xde\xf1\x63\x67\xf8\xf4\x89\xf3\x50\x0e\xf9\xd1\x89\xf3\xf8\x89\x9c\xb0\xe3\x27\xce\x89\x6c\xf3\xe8\xc9\x53\xe7\xc9\xd1\x93\xbe\xa3\xf8\x62\x70\xf2\xf0\x42\xf6\xfb\xe4\x0f\xc9\x92\xb2\xc9\xa3\x47\x92\x31\x5e\x3e\x74\x9e\x3e\x36\x7f\x4b\x66\x19\x38\x8f\x9e\xca\x3f\x0c\xd0\xa0\xa7\xca\xd5\x7f\xd2\x8f\xfe\x91\xf3\xf4\xe8\xa9\x6c\x4e\x72\xe7\xe3\x27\xce\xd3\x87\x0f\x7b\x4f\x9c\xc1\xd3\x27\xbd\x23\xe7\xf1\xc9\xd1\xdb\xe1\x53\xe7\x49\xef\xc4\x79\xf2\xd4\x1b\x0e\x34\x63\x0e\x4c\x07\x72\x6e\x86\x8f\x9d\xe1\xf1\x71\xef\x89\x73\x72\x72\xd2\xab\x00\xe8\x29\x80\xc7\x12\xe0\xf1\x30\x1c\x3a\x8f\x8e\x1e\xf7\x8e\x9c\xe1\xb1\x27\x17\xd3\xf0\x49\xcf\xfc\xa3\xa7\x21\xe9\xf6\xf8\xf8\x8f\xd8\x55\x00\x87\x21\x80\xe0\xff\xf3\xe3\x8f\x3f\x66\xc5\xfd\x93\x0d\x37\x4d\xe5\x8c\x71\x46\xaf\xf2\xae\x21\x0b\x44\x22\xf3
\x45\x79\xcf\x2d\x3c\x4c\xfa\xc4\xbb\xe8\x0b\x3a\x9b\x69\x0f\xcd\x6e\xee\x1a\x20\xb1\x62\x97\x24\x58\xed\x28\xcb\x0e\x11\x15\xdd\x37\x1e\x6f\x0b\xea\xc4\x27\x55\xab\xf7\xab\x41\xaa\xed\x3c\x59\x68\xac\xcb\x75\xe5\x2d\x86\x5c\x3f\xec\x0a\x8f\x8e\x62\xa5\xfa\x92\xf4\x0e\xb5\xe9\xde\x4d\xfb\x3f\x76\x72\x4a\xcc\x34\x1c\x85\xcd\xe8\xf4\x3a\x1a\x3d\x94\x19\x85\xbf\x62\x74\x19\xd0\x4b\xf2\x1b\xe6\xf8\x2c\xd4\xc7\x90\x0e\x56\x8d\xd2\x61\x74\x26\x32\x68\xc9\xad\x4e\x3f\x93\x20\xdb\x1b\x74\x3a\xc9\x34\xa0\xa1\x76\xe9\xe3\x6a\x0e\x0f\xcc\xf8\xc1\x24\xe3\x88\x60\x88\x0b\x1b\x8c\x19\x9b\x5e\x52\x36\x0d\xad\xab\x15\xa6\x34\xae\x6a\x43\xcc\xb6\xc3\x2f\xf1\x57\xd5\xec\x6e\x37\x09\xe6\x96\xa5\xc2\x78\x98\xb8\xf9\x9a\x9b\xb3\x8c\xf1\x2c\xe3\x7d\x49\xc9\x59\x18\xb1\xf4\x52\x45\xb2\x48\xa2\xbe\xab\x65\x50\x2f\xc3\xd2\x19\x4e\x5c\x30\x3b\x42\xd6\x5c\x82\xd4\x1f\x52\xeb\xd0\xaf\x5e\xe8\x9b\x2c\xb6\x87\x6a\x55\x19\x26\xe9\xca\x16\x9b\x9a\x17\xbf\x06\x7f\x0e\x92\xf4\x72\xbc\x1b\x65\xee\x81\x76\x71\x2a\xcc\x6f\x4b\xb8\xd6\x51\xe4\xbb\xd4\xa9\xf4\xbe\x15\xc9\x54\xe2\xe9\x6f\x4b\xaf\xe6\x21\xe4\xfb\xfb\x05\xad\x0e\x7f\xd3\xe7\xf2\xaf\x46\x30\x63\xae\x2b\x91\xce\xbc\x54\x59\x3b\xf2\x9a\x2a\x01\xe6\xde\x59\x98\x71\xb4\xad\x80\x8b\x88\x17\x89\x39\x65\xf8\x8f\x14\xee\xab\xce\x49\x1b\x6e\xf9\x1e\x5f\xbc\x7c\xfb\xcd\x18\x38\xf7\x28\xc2\xb7\x65\xe4\x6e\x43\xc9\xf7\xfb\x26\xad\x73\x4b\x0a\x76\x77\xfe\xaa\x88\x6b\xd9\x4c\x63\xad\xa1\xd0\xc6\x0a\xe8\xf6\xb4\x7f\x14\xdf\x0e\x10\x95\x38\xff\xe5\xfb\x77\x1f\x3f\xbf\xfd\xf2\xea\xfd\xcb\xcf\xbf\xbe\x7e\xf7\xe9\xc5\xa7\x37\xef\xdf\x7d\xf9\x7c\xfa\x36\xf5\x24\x3d\x54\x26\x1b\x67\x2e\x16\xfa\xb2\x5b\x99\x83\x94\xf3\xe2\x1c\x85\xcb\x1e\xa1\x74\x89\x08\x62\x3d\x42\x19\x9a\x22\xc6\x12\x9b\x91\xf0\xd8\x0c\x09\x00\xc1\x97\xb3\xd0\x23\xe7\x99\x61\xbf\xa2\xbe\x7a\x2b\x30\x7e\xe1\xfc\x1b\xf0\x7f\x92\xf0\xf2\xeb\xf1\x79\xa9\xcb\xa2\x0e\x90\x14\xdf\x21\x37\xb7\x97\x94\xbf\x56\x7f\x79\xa4\xcf\x80\xe5\x13\x58\xee\x4a\xad\xb1\xfe\x94\x52\x51\x34\x29\x1a\xaf\x44\x4c\xa6\x74
\x33\xbb\x62\x35\x23\xbf\x7c\xff\xe1\x5f\xa7\x6f\x7e\xfa\xf9\x53\x81\x89\x0b\xac\xbb\x0d\xd7\xfe\x3f\xff\xff\xac\xe2\x5e\xd7\xed\xbf\x5e\xbf\x38\x2d\xf9\x5a\x4b\xb9\xf1\xb3\xc7\xe7\xf8\x25\x65\xcb\xc6\xc3\xf9\x32\x7b\x5e\x56\xf6\xe3\xfa\x4e\x7f\x7b\x7d\xfa\xf1\xcd\xfb\x77\x75\x9e\xdd\x5b\xd3\xf0\xbb\x13\x06\x19\x0a\xe8\xe4\xcc\x72\xb0\xff\x73\x75\xec\xef\xf7\xfb\x3d\x50\x45\x9a\x9f\xde\x7c\xfa\xf2\xf1\xe7\x17\xe9\xc0\xfb\xfd\xff\xb9\x3a\x4e\x97\xb6\x60\x51\xd9\xab\xf1\xc9\xdd\xdc\x20\x14\x2f\x00\xb6\xb8\x42\x40\x5e\x28\xe6\xca\x92\xd5\x4f\x5c\x25\xbf\xb6\x23\xeb\xdb\xd3\xff\x7c\xfa\x1f\x42\x54\x5f\x22\x94\xe3\x19\x2a\x82\x31\x8a\x3e\x02\xf1\xc1\xd6\xbc\x76\x36\x06\x98\x84\x98\xa0\x4e\x01\xa7\xe6\x70\x5a\xef\xe6\xe4\xd3\xe5\xaa\x7f\x16\x09\x21\x8f\xb0\x71\x4f\xc5\x61\xca\x8d\x01\x2f\xcf\xa8\xc7\x82\x4f\xe8\x4a\xb2\xa3\xc0\x22\x34\xde\xb3\x35\xf7\x61\x49\xc8\x48\xcb\x59\xd9\x04\x66\x14\xe3\x1a\x69\x24\x94\x8d\x72\xa2\x87\xd8\xd3\xbf\x7b\x82\xf6\x92\xa1\x54\x11\xa0\xa0\x69\xbc\x94\x55\xdf\xab\xaa\x1b\x5d\xa1\x0f\xe1\x51\xce\x7d\x2c\x4f\xd4\x16\xbf\x84\x9a\x88\x98\xea\xfd\xf0\x25\x5d\x62\x14\x18\xf4\xf6\x37\xf0\x6d\x69\x18\x5f\xa3\xb3\xc1\x46\xa3\xfb\x48\x19\x5b\xc1\x1e\xa7\x0b\x24\xe6\x98\xcc\x7a\x97\x88\x88\xde\x25\xa3\x64\xb6\xcd\x58\x9b\xc3\x03\x2a\x94\xb9\xd8\xf4\x2a\x40\xc6\x3b\xe8\x89\x31\x4f\xd7\x49\xee\xb8\x52\x50\xac\x44\x85\x7e\x85\xad\xb1\x96\xc8\x8c\xe2\xbd\x61\xc2\xd6\x1e\xaa\xf7\x24\x96\x98\x7b\x1f\x49\x2e\x0e\x50\x61\x40\x31\x8f\xe7\x46\xd4\x9e\x9b\xe0\x6e\xa4\x6d\x49\x56\xde\x4a\xde\xa2\xa0\xcf\x10\xa7\x11\xf3\xd1\xd7\x97\xb9\x83\xcb\xa3\x33\xf6\x73\xf8\xb9\x5a\xe6\xaa\xf1\x75\xbf\xb5\xad\xf7\x4d\x28\x46\x99\xc9\x29\xf4\x82\x80\x69\x51\xb0\xb1\x16\x21\xab\xab\x3f\xbb\xb9\xea\x65\x2f\x05\xba\xac\x82\x0d\x75\xd4\xe2\x51\xaa\x7c\x47\x90\x84\x1b\xdc\xc2\xd3\x51\x8e\x3a\x6e\x67\x33\x07\xc5\x0d\x89\xd6\x36\x6f\xc9\x20\xea\x89\x57\x4d\xae\x8e\xe2\xae\x62\xa7\xcd\x79\x8d\x46\x44\x2f\x9e\xd5\x2d\xe9\xa9\x3d\x25\x3f\x26\x91\x20\xdf\x8c\xa6\xc5\x81\x34\xd3\xb5\x81
\xaf\xbb\xfa\x7e\x6e\x3f\x3d\xc3\x2a\xcf\xf0\xfc\xc5\x51\x3c\x39\xdb\x5d\x1e\xe5\xa6\x2b\xe3\x05\xb5\xf4\x38\xc7\xa4\x26\xf5\xcd\x3d\x4f\x50\x2d\x5a\x5b\x4d\xd4\x00\x02\x2a\xe6\x88\xf5\x0c\x4a\xd9\xa3\xc6\x32\x8c\x98\x17\xe2\x3f\x50\xff\x92\xb2\xa0\x91\xa4\x89\x6c\x1e\x8f\x01\x5d\x60\xf1\x92\x46\x2a\xd0\x4e\x45\xa0\x4c\xee\x69\xfa\x1b\xdd\x26\x33\xbb\xc2\xdd\x6f\xb4\x99\x4d\x72\xf3\xcd\x36\xb1\xf5\x7d\x33\xa7\xc0\x1f\xa3\x4f\x4f\x57\x9f\xce\x3a\x66\x06\xfa\x6e\x93\xa3\x7c\x54\x13\xd0\xa3\xac\xf7\x0a\x71\x81\x89\x39\x40\xdf\x5b\xd0\x5e\xea\x03\x3b\xc8\x1c\xc8\xbe\xe7\xa0\xbd\x22\xab\x6d\xce\xac\x21\xe6\xa2\xef\xd3\x30\x44\xfe\xb7\xc9\x62\xf5\xdb\x2f\x17\xff\xf5\x8a\x9c\xfe\x56\xa3\x12\x22\xe5\x33\x53\xef\xd2\x77\x12\x3f\xd9\x21\xb9\xe3\x02\xf5\xb9\xcf\xa8\x7e\x73\x3c\x73\x1c\xf7\x66\xef\x4c\x00\x9f\xb6\xc5\xf5\x39\xfe\x43\x39\x78\x2a\xe0\x7e\x88\xa6\x22\xfd\x25\xe8\x32\xf9\xf1\x32\xe6\x76\x3f\xc4\x88\x88\x8f\xf8\x0f\xf4\x32\xe5\x17\xe5\xfa\xa8\xa6\xfc\x8b\x69\x58\x02\xa4\x1c\xf3\x45\x37\xf2\x56\x36\x5f\xfc\xf8\x89\x2e\x1b\x6f\xb0\x73\xfd\xb7\xde\x62\x57\x0c\xcf\x30\x5e\x93\x13\x78\xce\xd8\xfe\xa4\x22\x3f\xc5\x17\x49\xfe\x06\x45\x32\xd5\x74\x1e\x26\x59\xdd\x62\xbd\x40\xff\x4c\x52\xaf\x0c\x87\xf0\x28\x49\x4f\x61\x02\x76\xd2\x18\x99\x6c\xa2\x96\x27\x8d\xc1\x21\xf7\xe7\x0b\x5e\x58\x09\x9b\x2f\xa5\x9c\xdc\xf9\xea\xb9\x51\x5e\xfd\xf1\xd3\xc9\xbb\x17\xaf\x36\x09\x90\xaa\xcc\x5a\x91\x1a\xae\x15\x3e\x1a\x9d\x52\xc2\xa1\xb4\x2c\xce\xb5\x63\x42\x5a\x27\xad\x49\x4f\x36\x0f\xc3\xca\x26\xfb\x49\xee\x3d\x1e\x99\x64\x56\x7d\x35\x94\x9e\xa2\x58\x3f\x65\xbf\xd4\x43\x3b\x1d\x90\x6a\xb0\x56\x97\x6b\x4a\xc1\x24\x7b\x36\x21\xe6\x99\xe3\x1c\xa9\x48\x60\x94\xef\x3d\x0b\xd2\x92\x84\xe9\xff\x65\xef\x6b\xb7\xda\x36\xba\x46\xff\xe7\x2a\xa6\x7a\xd7\x49\xed\x22\x1b\x9b\x90\xb4\xa1\xaf\x9b\x45\x08\x6d\x68\x53\xc2\x0b\x4e\xfa\xf4\xe1\x65\x75\x0d\xd6\x60\x2b\xc8\x92\x33\x33\x06\xdc\x94\x7b\x39\xd7\x72\xae\xec\x2c\xcd\x87\x34\x92\x46\xd2\x48\x96\x0d\xe4\xa1\x3f\x1a\x2c\x69\xbe\xf6\xec\xd9\xb3\xbf\xb7
\xaa\xfa\x4d\x46\x11\xc7\xbd\xa4\x96\x26\x55\xbf\x2b\x4d\xcf\xa4\x77\xfc\x2b\x15\x0b\xc4\xa6\x35\x12\x13\x96\x62\xf6\x1a\xbe\x43\x95\x63\x5b\xfd\xcc\x53\x78\xde\xf1\xe1\xd5\xfa\xcf\xfb\x9f\x9f\xde\xb8\x37\x07\xc1\xdf\xf5\xcf\x7b\x8e\x1e\x61\xfd\x21\x93\xf2\x1a\x24\xde\x7c\xec\x5e\x2c\x2c\x35\x8b\x51\xca\xbf\x21\x6e\x63\x62\xc1\xd4\xd1\x0f\x25\x59\x58\x1e\x0d\xb3\xad\xbf\xac\xd2\x19\x9d\x25\x48\x25\x85\xe7\x72\x00\x96\xfd\x26\x64\x07\x70\xe0\x91\x26\x86\xb2\xad\xbf\x66\xd0\x17\x81\xd8\xe5\xf2\x24\xcc\x9c\xc5\xd5\x0b\xe7\xab\x3c\x9e\xe2\x84\xd5\x3b\x9a\xe4\xae\xb8\x5a\xd4\x9f\x8e\xfe\xfc\xf0\xfa\x5f\xcb\xa4\x02\x6c\x86\x7e\x17\xdf\x66\xe5\x97\xb9\xd9\xbd\xaf\x45\x64\x4e\x42\x9c\xa4\x1c\xa4\x0f\xa8\xe2\x5f\x15\xdd\x94\x69\x92\x91\x77\xeb\x69\xf3\x05\x66\x78\x1b\x0a\xcf\xc5\xa1\xaa\xb6\x42\x39\xd7\xa2\x43\x69\x65\x63\xff\x1b\xd3\x51\x28\x48\x5d\xeb\x44\xcc\x3d\x88\xef\x54\xdc\x1b\x5e\xfc\x72\xf4\x3f\x2f\xdf\x6f\xd6\x14\xf7\x42\x31\x42\xa7\x14\xa7\x13\x04\x8b\x6d\x3c\x34\xc7\x66\x00\x6a\x67\xfe\xda\x32\x4a\xba\x95\x54\xcd\x4f\x20\xd9\x95\x59\xdb\x0c\xfd\xaa\xa3\x15\x5a\x65\x39\xe3\x76\xe3\x27\x75\x52\x8a\xca\xd0\x91\x4c\x32\xcb\x2a\x84\xa8\x80\x0c\x49\xdd\x8b\x95\x0a\x7e\xa9\xe4\x95\x56\x66\x28\x91\x5c\xcc\x5d\xe9\x04\xe8\x79\xe0\x2c\x1e\xae\x5a\x40\x7b\x60\x6a\xaa\x06\x78\x07\xc5\xaa\x01\xa3\x8c\x16\x16\x7f\x6f\xcc\xf3\x65\x8e\x32\x0e\xae\x8b\xce\xb1\x89\x5e\x62\xad\x47\xdd\x29\x3d\xea\xa6\x19\x14\x65\xcb\x9c\xb8\xfe\x84\xd0\x99\x5e\x73\x42\x41\x1b\x9d\x6b\x3b\x9d\x2f\xb8\x34\xfc\xbf\x11\xf8\xe6\xf3\xab\x95\x08\xc7\xdd\xa8\x96\xb2\x17\x6f\x8d\xcb\x3b\x98\x06\x63\x0c\x67\x93\x45\x87\xfd\xb3\xfe\xab\xfb\x86\xf6\xfb\xfd\xf9\xf1\x71\x15\x91\x33\x0e\x7a\x0e\x4f\xba\x88\x7a\x8e\xd9\x3e\xc2\x09\xa3\xa4\x04\x32\x08\x5a\xfb\x81\xd6\xdf\x64\x9c\x3a\x29\x31\x94\xe4\x98\x51\x91\x89\x48\x69\x14\x3e\x60\xab\x12\xa7\x22\x64\x0e\x23\x0e\x53\x50\xea\xad\x48\xf8\xb2\x81\xd1\x67\xed\x3c\xf1\xcc\x2a\x2c\xa3\xa1\x7c\x91\x28\xa5\x21\xd7\x73\x0e\x47\x97\x21\x33\xea\x3b\x72\x3d\x51\x82\x01\x7e
\x0c\x79\x23\xdb\xea\xa9\xba\xae\x42\x7d\x8d\x76\x1c\x78\xe3\x92\x92\x11\xfa\x2b\x1f\x61\x6b\xe5\x23\x3c\x5b\x6e\x84\xf3\x00\x0b\x33\x57\xc1\x18\xdb\xc5\x63\xe4\x0b\xdb\x1a\x74\xf6\x5c\x1f\x91\x72\x35\x8d\xe3\x12\x0a\x7d\x16\x5e\x55\x45\x55\xe3\xa3\xdc\x43\x82\x03\x2a\x4e\x88\xa0\xd4\xfc\x01\xa3\xb5\xed\xf8\xea\x5e\x6c\x45\x87\x95\x7f\xb6\x88\xf6\x30\x7c\xed\x40\x0a\x3b\x3e\xcb\x59\x98\xf8\xca\xc9\x7c\x24\x57\x90\xfe\x50\x3e\x4e\x7f\x4f\xd0\x78\xca\x52\x7c\x27\x3e\x97\x4f\xb5\x1b\x60\xa2\xc1\xa8\xbe\x41\xf0\x1c\x79\x25\x79\x9b\xf3\xb0\x69\x16\xb8\xbe\x64\xd0\xa3\xfa\x3e\xa5\x88\x99\xa1\x78\x9c\x35\x2a\xa3\x75\x3d\x4e\xc7\x7a\x52\x35\x4b\x04\xcd\x28\xa0\x5c\x39\xf8\xf2\xd2\xb6\x6e\xb6\x2c\xdb\xfa\xbe\x57\x3c\x5f\xa5\x29\x45\x37\x72\xa1\x37\x61\x4b\x59\xbb\x68\xc1\x27\xc1\xd3\x52\x84\x3f\xba\xcf\xb6\xd8\x2d\xa2\xa6\x07\x0f\xd7\x03\x69\xc7\x67\x49\x30\x23\x94\x9f\xba\x9e\xe7\x12\x34\x0a\x7c\x47\x2e\x85\x39\x27\xf0\xaf\x19\x2b\xde\xb3\x7b\xbd\x5e\xb7\xd7\x4b\x5b\x8c\xa7\x05\x11\x08\xc5\x6f\x9a\x05\x7f\xff\xeb\x01\x7f\xff\x01\x82\x7f\xeb\xeb\x01\xff\xd6\x03\x04\xff\xb3\xaf\x07\xfc\xcf\x56\x0a\x7e\x13\x8d\x47\xf3\x5a\xf7\x94\xb4\x61\x28\xaf\x38\xa3\xf5\x8b\x25\xf4\xe2\xe0\xfc\xe3\x1b\xfa\xdc\xb8\x0c\x5a\x69\xdd\x31\x52\x50\x7d\x2c\x9d\x83\xa0\xd8\xc3\xa2\x46\xa1\xb4\x25\xa5\x4e\x67\x64\xbc\x55\x9b\x70\xe4\x91\xcd\x8e\x38\xb2\x6b\xde\xb5\xef\x7f\xfd\xbc\x8d\xff\xb5\x7d\xa8\x17\x26\x65\x8a\x5e\x5b\xe6\xe6\x8d\x33\x07\xda\x51\xa2\x62\x5b\xc6\xd9\x64\x65\x4e\xb6\xa6\x3c\x99\xf1\xc2\x45\x9e\x43\x10\x2d\x54\x17\x97\x38\x1d\x30\xa2\x92\x4d\x8d\x12\xca\xc0\x71\x28\x8e\x74\x8d\xb5\xc0\x04\x92\x4e\x32\x31\x65\x3e\xd5\xb3\xb4\x4e\x9c\xbc\xaf\x22\xca\x21\xb3\x9a\x33\xb5\x6f\x84\xd2\x52\xe9\x2a\xd5\xbd\x6a\xc2\xb9\xc4\xa4\xa3\xc9\x66\xbf\xd4\x3a\xdc\x15\x13\x2c\x33\x7f\x92\xbc\x0c\x33\x49\x30\x46\x35\x60\xb2\x60\xcc\xea\x02\x75\xe2\x51\xb8\x63\xe6\x4a\x38\x1d\x12\x18\x05\x2e\x67\x1d\x21\x9e\x1b\x18\x5f\x53\x9d\x98\x69\xed\x87\x22\xc0\x2c\x6b\x4b\x13\x83\x56\x29\xfd\x25\xc0\x2d\xc1\x1c
\xf7\xd1\x88\x7f\x49\xa5\x7c\x52\x7a\xc4\xca\xd2\xc9\xe7\xd5\x25\xb7\xdc\x64\x97\x92\x3f\xa8\x72\x1a\x8f\x02\xcf\x1d\x2d\x1a\x8c\x3a\x1d\xcf\x5d\x07\xb1\xda\x77\x2c\xf4\xf4\xbf\xf0\xdc\x43\x1d\x32\x43\x23\xf7\x42\xd6\x17\x6d\x28\x1a\xb5\xf5\x76\xef\x1d\xf8\x99\x31\x2a\x6d\x2b\x37\xfd\x18\x48\x07\xa5\x2a\xc5\x1d\xfc\x44\x75\x4d\x3b\xed\x5a\x1b\xd5\x15\x90\x31\xe9\xb9\x07\xfb\x78\xee\xb1\x73\x99\xae\x19\x25\x5e\xd8\x89\x46\xd1\xc7\x86\x88\x66\x4a\xb5\xac\x6c\xda\x79\x8c\xb8\xaa\xa3\xba\x87\x5a\x13\xb8\x75\xf0\xa6\x3e\x9d\x4f\x13\xf5\xb0\xaf\x02\x48\x28\x93\x40\xea\x7d\xf9\x07\xfa\xdf\x6f\x3d\x0f\x8c\x43\xdc\x82\x14\x01\x08\x3e\x7c\x38\x78\x03\xdc\x0b\x40\x27\x2e\x01\xec\x02\x05\x2e\x01\x1e\xba\xa0\x00\x4d\x67\x74\xd1\x35\xe1\x5d\x0d\x34\xf5\x7a\x66\xd7\xac\x24\x51\xf5\x7d\x13\xd1\xb4\xc9\x9c\xb4\xf3\xf3\xa9\x1b\xf9\x5f\x44\x19\x59\x72\x32\xbf\x28\xb0\x76\xc9\x11\x76\x09\x15\xf1\xbe\xa9\x57\x07\xfe\x15\xf4\x5c\x47\x97\xca\x39\x7d\xff\x66\x92\xba\x64\xb2\xc2\x08\x90\xe5\x64\x37\x13\x60\x50\x67\x90\x60\x37\x4e\xe0\x55\xa6\xea\x60\xf5\xe2\x4b\x4b\x40\x2f\x0f\x30\x55\x17\x3a\x67\x65\x57\x96\x5d\x68\xf6\x0a\xc9\x59\x1a\x46\x82\x67\xcc\x83\xbb\xe0\x54\xf3\xa6\xb3\x27\xaa\x4c\xe4\x13\x9f\x90\xae\xf9\x8e\xae\xf6\x60\x0a\xb5\xe3\x12\x21\x9d\x2c\x22\xb1\x33\x1f\xf6\x14\xf8\x8b\x69\x30\x27\x66\xb9\xc1\xb7\x23\xbe\x3a\x59\x69\x45\xd2\x97\x38\x71\xf7\xa9\xb5\x8b\x11\x58\x04\x73\x40\xe6\xe2\x8f\x6b\xe8\xb3\x80\x6f\x07\x79\x88\x22\x4e\x24\x76\xf7\xde\x01\x56\xd8\xf6\x55\x03\xe1\xef\xf9\x2c\x9a\x6e\xa7\x12\xcf\x12\xf4\x98\x4f\x30\x7f\x13\xc3\xbf\xb6\x45\x62\x38\xfe\x69\x72\x3b\xc5\xaf\x19\xc4\x52\xcd\x1c\x67\x79\x88\xba\x2e\xc2\xb7\xed\x82\x60\x70\x01\xf2\xca\x20\x28\x88\x0a\x4f\xdd\x1b\xbc\x04\x62\xe1\x75\xa0\x27\xc2\x6b\x01\xbc\xac\x5b\xa9\x64\x07\x09\xf1\x11\xe8\x20\xdb\xc8\xa4\x46\xd1\x89\xcc\x9f\xd4\x56\x7a\x52\xda\x53\xac\xc9\x4a\x90\x5f\x37\xb2\x3c\xc6\x3e\xc7\x9f\x52\xfd\xb5\xa4\xaa\x27\x21\xfd\x57\xd4\x18\x84\x7c\xe0\xfa\x15\x06\xbf\xff\xfd\xe2\xfa\x5f\xe4\x75\x4e\x09\x26\x43
\x85\x81\x49\x56\x8f\xbc\x32\xf2\x76\x5c\xf3\x90\xd1\x2b\x0f\x08\x40\x88\xbc\x99\xef\xe2\x8a\x88\x06\xf4\xce\xa0\xb4\x65\xf6\x5a\x0a\x0a\x84\x51\x2b\x1d\xd0\x63\x9e\xcc\x2d\x27\x73\x9b\xe7\x81\x61\x48\xc0\x49\xbe\x98\x60\xce\xdf\x65\x00\x50\xec\x5e\x97\x5d\xfb\xa4\x5f\xc6\xff\x25\x19\x29\x73\x8f\x9b\xbe\xad\x65\xc4\x0c\x74\x75\x25\x9d\x1f\xa2\x6b\x7e\x05\x1a\xb3\x1e\x35\x61\x59\x52\x08\x36\xb7\xa4\x71\x9a\xad\xc8\x6f\xff\xf5\xa4\xba\x79\x6e\x9a\xea\xe6\x45\x36\xd5\x4d\x92\xc3\x62\xd9\x6e\xd8\xfe\x82\x83\x37\x55\xf3\xdd\xc8\xff\xf6\xd4\x4e\x2a\x45\x41\x3f\xb7\x5f\x34\x92\xf9\x26\x75\xce\x0c\xb8\x09\x91\x09\x47\xce\xfa\x9b\xa5\x32\x0e\xd6\xce\x89\x53\x7d\xde\x86\x39\x72\xaa\xae\xc1\xd8\xfd\xac\x94\x41\xc9\x93\x2c\xbc\xc0\x2f\x90\x73\xf6\xc2\xd7\x7c\x37\xb4\xd2\xc5\xf2\x7c\xfd\x9c\xdc\x27\xa6\xbe\x94\x75\x9f\x93\x02\x68\x7d\x20\x39\xb0\x7a\x64\xd5\x97\x67\xd5\x3f\x14\xa5\xbf\xaf\x3d\xa3\x7b\xca\xa7\x57\xa0\x69\x25\x95\xa9\x35\xb5\xb2\x25\xb3\x2d\xac\x73\x25\x95\xb2\x8d\x5d\x58\x2b\xd5\xab\x56\x19\xfe\x8a\x32\x02\x77\xec\x5d\x7f\xd8\xd5\x8b\xbd\x4f\x33\xf7\x53\x4e\x32\x01\xa9\x74\xe2\x73\x8b\xfe\x8d\x52\x5a\x44\xb6\x92\x62\x71\xc2\x8f\xe2\x06\x78\xbd\xd4\xa5\x64\x08\xcf\x25\xb5\x65\x88\x65\x59\x68\xf5\x60\x86\x94\x9d\x73\xfb\xcd\xb0\xf7\x25\x2c\x69\x76\x72\xb9\x46\x94\xac\xc0\xd2\x95\x7c\x6b\x32\x16\x33\x49\x33\xe4\x27\x11\x41\x88\x1e\xd4\x5c\x51\x49\x15\xfe\xbc\x04\x45\x32\xd3\x48\xdd\xf4\x44\x7d\xc9\xa4\x7a\x99\x6c\x2c\x32\x23\x49\x54\x48\x2d\xa2\xa5\xd9\xaa\xe9\xe1\x8b\x9f\xd5\x0c\x26\x6a\x4a\x13\x56\xea\x2c\xfb\x90\x4a\x8b\x60\x9e\xc5\x45\x4c\xa8\x38\xcb\xc9\x8a\x69\x67\x1e\xdc\xf9\xdc\xd8\x69\x36\x06\xfd\x36\x0b\x0c\xcc\xc4\xc8\xf9\x89\x7a\x15\x52\xbe\x20\x01\xa6\x9d\xf3\x85\x25\x72\x60\xed\x40\x32\xb2\xec\xd4\xd8\x4a\xe4\x63\x83\xc7\x39\x75\x72\xa8\x5a\xa9\xaf\xd8\x4f\x20\xaf\x95\xb4\x27\xd7\x3c\x1a\x05\xf1\x35\x05\xd3\xce\x89\xd7\xd3\x7c\x6a\xa6\xc7\xe8\x0a\x7d\x8c\x70\x06\xe6\xb6\xaf\xac\x6a\xa3\x6a\xea\xb2\x2c\x21\x34\x5c\x50\x46\xb9\xcf\x75\xf5\x62\xf0\xc8\xab
\x61\x0a\x7d\x38\x46\xd3\x18\xc5\xeb\x17\xbd\xc9\x4b\xf6\x26\xc7\x5a\x59\x49\x1b\x7d\x0e\x88\x6a\xc3\x26\xe5\x25\x3d\xe4\x1b\xd6\x8a\x14\x4a\x46\x4a\xf5\x71\x3b\xaa\xea\x77\xca\xd6\x12\xb3\xe7\x8d\xa8\x3b\xf8\x07\xe9\x8c\x55\x92\x3d\x51\x72\x6e\xc5\x8e\x18\xa7\xb1\x80\x50\xa4\xc3\xd8\xce\x28\x3b\x34\xd8\xf8\x3c\xfa\xfc\x59\x91\xdb\x87\xb9\x4e\x23\x85\x1c\x26\xb9\xff\x35\xcd\x4c\xeb\x00\x68\x9a\x36\x4a\x2f\x7a\xb6\xb5\xef\xb8\x9a\xdc\xb2\x65\x13\xa9\xf6\x75\x43\x2b\x4e\x06\x53\x7e\x9e\x23\xe6\x1b\x54\x98\xfb\xb9\xa7\x4a\xcd\x55\x92\x41\x6f\x6b\x52\x74\x24\x01\x57\x2a\x0f\xde\x53\xb8\x69\x43\x6f\x91\xef\xec\x09\x4d\x4c\x1a\x4a\xc9\x55\xf3\xaf\x96\x58\x77\xe6\xe6\x88\x2d\xbd\x2a\xb2\xe6\x1a\x79\xab\x1e\xd4\x75\xc3\x52\x45\xbb\xc8\xce\x5a\x0c\x53\x13\x23\x60\x19\x50\x2b\xd7\xf1\xaa\xda\x51\x46\xbd\xd0\x6b\x52\x87\x94\x02\xf7\xcc\x94\xdb\x78\x99\x00\xf4\xd2\x88\x52\xd1\xfa\xbf\x44\x65\xf7\x78\xee\x73\xb2\xca\x89\x6b\xd4\x9b\x25\xd8\x55\x2f\x57\x69\x39\x6a\x95\x6c\x79\xf3\x8a\xbc\x17\x29\xb5\xd9\x3a\x11\x29\x69\xdf\xbf\x3f\x78\xa2\x28\x33\xbf\x06\x34\x30\xd1\x9e\x7e\x5f\x4b\x7b\xfa\xc2\xfe\xde\xfe\xc1\x7e\x59\x55\x4f\xfa\xcc\xde\xb6\x9f\x1b\xd4\x73\x30\x97\x4c\x8a\xb5\xeb\xc3\x09\xc2\x08\x40\x8c\x80\x1f\x84\x47\x9c\x74\x73\xe2\x68\x34\x9b\xb6\x8c\xae\xb5\x9e\x47\x86\xa8\x3f\x63\xaa\x6d\x8d\x0b\x8b\xdd\x55\x24\x07\xa2\xbf\x91\xeb\xe1\x9f\x39\x19\x7d\x0c\x1d\x33\xf8\x32\x98\x26\x8b\x15\x8a\x4c\xfd\xc5\x93\x03\xdf\xdb\x40\x0f\x9e\xa8\xb8\xc9\x70\x0f\x91\xfa\x58\x94\xcd\xd4\x72\x87\xb3\xe0\x1a\xe1\x0e\x4f\x89\xd5\xb9\x76\xe9\x44\xea\x3c\x23\x09\x32\x98\x89\xe4\x1f\x02\x82\x3f\x87\xf0\x48\xd6\x5b\xe5\x2f\x8e\x94\x1c\xce\x21\x29\x71\x3d\xe7\x64\x3e\x1e\x23\xc2\x89\x85\x45\x26\xc1\x35\xd7\x9f\xfe\x31\x41\x3e\x97\x49\xc5\x58\x1a\x55\xa3\x4c\x1a\x28\x94\x51\x42\x27\x96\x82\xd1\x70\x31\x43\x40\x6c\x31\x90\xc9\xc8\xf2\x1c\xbb\xd9\x58\x87\xe8\xfa\x9d\xd8\xab\x62\xd1\xc4\x72\xc9\x07\xdf\xfd\x3c\x37\x49\x90\x23\x25\x6b\x75\x76\x4b\x34\xd2\xab\x03\x34\xf7\xd5\xf7\x8a\xfa\xc9\xfa\xae\xfa
\x9d\xf5\x1d\x68\xed\x7a\x9e\xc4\x0f\xd2\x5e\x42\x95\x13\x1b\x1c\xbf\xd7\xe8\xc4\x2a\xd0\xc6\xef\xed\x1f\x0a\x0b\x09\xa6\xbd\xcc\xf7\x26\x41\x40\x10\x80\x40\x94\x01\x13\x8b\xb1\xc1\x35\x76\x29\x02\xae\x0f\x20\xb8\x98\xd3\x90\x5b\x4b\x7f\x11\x60\xf1\x11\xf4\x17\xf2\x29\xe0\x11\x15\x55\x53\x2e\x34\x71\xfe\x95\xf4\xe4\x4d\x12\x01\xa5\xdb\xaf\x89\x12\xe8\xa0\x75\xcf\xc8\x41\x66\x8a\xcb\xb6\x34\x26\x0c\x2f\xee\x23\x61\x78\xb1\x1c\x61\x78\xf1\x00\xc9\x42\x63\xd1\x8b\xbb\x51\x19\x81\xda\xf1\x8b\x9c\x39\xfa\xba\x22\x18\x25\x58\x9a\x8d\x61\x8c\x81\xfd\xb5\x47\x31\x2e\x77\x5b\x8d\xb0\x3b\x2b\x40\xcb\x3a\x37\x55\xdc\x65\x49\x0e\x09\x0d\x7e\x28\x29\x1a\x04\x7a\xa4\x3a\xcc\xe0\x88\xba\xae\xe4\x7a\xa4\x37\xc0\x2c\x71\x93\x69\xfa\x5b\xc1\xa6\xdf\x33\x72\xa6\x7f\xf2\x35\x05\xd3\x3d\xc6\xc8\x95\x2c\xf4\x6b\x8e\x91\xcb\x78\x64\xdf\xff\xa0\xb8\xa8\xa4\xfb\x63\x50\xdc\xa3\xa7\xed\x63\x50\xdc\xfd\x0b\x8a\x4b\xab\x53\x6b\x29\x62\xef\x26\x40\xee\x5d\xef\xf5\xd5\xc5\xe1\x2f\xff\xae\x5f\xcf\xfa\x3f\x2a\xf2\x2d\xde\xaf\x82\xf8\xb7\x03\xf5\xa3\x07\x14\x03\xa7\xf8\xb2\x94\xd1\xcc\x7d\xc7\xa5\xf1\x3a\x97\x88\x74\x2b\xeb\xe3\x31\xda\xed\x81\x15\x76\xd7\x44\xbb\xb1\x34\x20\xcb\x44\xba\x85\x1d\xdc\x49\x7d\xf7\xd4\x49\x32\x8f\x72\x0b\x67\xfc\x18\xe1\x76\xbf\x22\x63\x94\xbb\xf6\x8e\xe3\x63\x52\xb7\x7e\x2d\x76\xe1\x8e\x62\x65\x7e\x7e\x71\x1e\x5c\x6d\x91\xf7\x2b\x8d\x95\xa9\x13\x24\x73\x16\xa9\x3c\xdd\xc0\xe7\x01\x32\xeb\x8e\x86\x89\xef\xfd\xfb\x1d\x0d\x13\x63\xd1\x7f\x62\x4c\x4c\xb6\xd0\xb2\x5f\x23\x32\x86\xef\x4d\xd5\xd8\x18\xb5\x3a\xf0\x63\x74\x4c\x08\x73\x1c\x70\x2a\x70\x89\x16\x51\x81\xe5\xb3\x0c\x9a\x72\xc9\x41\x35\x9d\x6b\x62\x66\xd6\x15\x2c\xc3\x67\x61\x24\x44\x27\xda\xfd\xbf\xff\x5b\xbd\x4d\xa2\x84\x75\xd5\xc6\x47\x18\x8d\x90\x83\xfc\xac\x01\x78\x3d\x51\x3a\x0a\x25\x21\x11\xcc\x8a\x58\x91\x3c\x32\x56\x44\xc1\x8c\xdd\xf1\x4b\x63\x6b\x52\xbe\x26\x35\xcc\xa8\xfc\x3f\xd5\x92\x0a\x5a\xdf\x2d\x63\x4c\x4d\x10\x2f\xed\x34\x0d\x0c\xab\x39\x2e\x84\xe5\xf8\x64\x18\xbb\x94\x31\x2a\xf1\xdd\x51\xca\xea\x46
\x06\xb6\x92\xb4\xdd\xa0\x3c\x28\x29\xb6\xd5\x35\x1a\x7f\xa5\xa0\xaa\x93\x3c\x72\xa6\xd8\xa3\xf3\x4f\xb8\xaf\x28\xa4\x99\x6b\x5d\x3c\xaa\x07\xe3\x59\x82\x32\x99\x2a\x5c\xc3\xa9\xab\x34\xad\x9a\xf2\xf5\x8e\xa3\xc0\x6c\x55\xef\x9f\x8a\x07\xb3\xcd\xec\x00\x6e\x93\x76\x80\xc7\x08\xb2\x9c\x66\x2b\x8d\x20\xab\x73\x71\x3d\xd8\x38\xb2\x1c\xe4\x78\xb9\xe6\x10\x1e\xf3\x2f\xcd\x79\xeb\x97\xeb\x0b\xd1\x29\x9b\x3e\xa7\x8d\x3f\x18\x18\xa3\xaa\x81\x67\x65\xc6\xa9\x74\xf4\x48\x15\xe3\xd4\x52\x93\x5b\x71\x4c\xc3\xdd\x47\x34\x98\x21\x51\x22\xb2\x41\x21\x48\x95\x94\x16\xeb\x8d\x71\x48\xeb\xbd\xcc\x35\x66\x97\x57\x77\x15\xe1\x80\xb7\x47\x6f\x82\x97\xcf\x72\x34\x65\xf5\xf4\x5f\xf5\x62\x17\xb2\x06\x9f\x25\x92\x95\x2f\xe1\x42\xf6\x1b\x5a\x34\xe5\x3a\xf6\x1b\x5a\x80\x00\x83\x0b\xee\xa9\x55\xc5\x79\x2c\xae\x50\x91\xaa\x6b\x91\x75\x2c\x4b\xf8\x8f\x3d\xb7\x2d\x0f\x5d\xd0\x0e\xc5\xee\x34\xbd\x40\xb1\xb0\x84\x47\x83\x7c\x9a\xba\xd5\x94\x1a\xca\x8e\xe3\x86\x38\x08\xbd\xf8\xf6\xe4\x39\xf0\xab\xba\x96\x65\xfc\xd6\x32\xd0\xa9\xe6\x7c\x36\x0c\x00\x47\x10\x00\x45\x1f\x36\x40\xbe\x03\x20\xb8\x44\x0b\x70\xed\xd2\x89\x6c\x36\x0a\x1c\x95\x83\xdf\x6c\xc8\xde\x99\xf1\xf1\x8a\xdd\x36\x97\xdd\x06\x2b\xe9\x95\xad\xf8\xfd\x84\x3d\x43\x42\xf5\x9d\x46\xcd\x37\xcd\x7c\x80\xb2\x07\x28\xe3\x3d\xa7\xd9\x85\xfc\x8a\x00\xc1\x78\xec\x95\xc8\x4a\x1a\x5c\x17\xf8\xcc\x18\xea\xf3\xe0\xc6\x4a\xe0\xdf\x27\x12\x7b\x35\xe6\xd6\xce\xe7\x1f\xdd\x99\x1f\x6c\xf6\xe0\xef\x09\x8c\xab\x59\x8a\xac\x09\x32\xf6\x91\x17\x4d\xa8\x4a\xc8\xf2\xd7\xf4\x51\x56\x61\x28\xaa\x2f\x21\x76\xa2\x9a\x2a\xae\xa8\x10\x47\x5e\xe5\x0d\x48\x83\xf3\xf4\xd2\xe5\x92\x4d\x23\x1a\xaa\x69\xaf\x2b\xe8\x15\xd1\x0d\x85\x18\x45\xf2\x86\x42\xcc\xcb\xb3\xdf\x67\x28\x70\x54\xfd\x62\x19\xe2\xab\x94\x7d\x33\x81\x5c\xdd\xac\x36\x7a\xcc\x36\xa7\xaa\xb9\x7e\x50\xb5\x18\x83\x02\xf6\x7b\xad\x95\x30\x1a\xbb\x0d\x1a\x75\x09\x2e\x70\x6d\x5c\xce\x45\x78\x49\xb0\xaf\xd4\x3d\xb8\xd2\xa2\x4b\x16\x53\xcb\x21\xb8\x60\x02\x5c\x88\x03\xfc\x00\x13\xed\x19\x68\xd6\x63\xf7\x12\x2d\x56\xe0
\xab\xbb\x32\x89\xfc\x61\xf8\xe9\x9a\xa9\x47\x2c\x63\xff\xdc\x52\x66\xe1\xfe\xf8\xe5\xae\x4c\xdd\xb1\x56\x9f\xdc\x86\xbd\x6e\xa5\x68\x5f\x49\x19\x70\x37\x5e\xb6\xfd\x5f\xa7\x64\xdf\x3f\xbf\x59\x2a\xdb\x41\xec\x02\xab\xa4\x96\xab\xeb\x8f\x7b\x79\xf5\x35\xb8\xe3\x5e\x5e\x75\x25\x20\x74\xba\xe3\x50\x0e\xde\x04\x8c\x05\xd3\xb8\xe2\x5a\xf9\xf9\xb2\x74\x3c\x43\x46\xf6\xd3\x41\x29\x8a\xc0\x7c\x6e\x5b\xc4\x63\x89\x28\x4e\x7b\x76\x47\x30\x88\x64\xe6\xb9\xd4\xb8\xff\x0a\xc5\x28\x6a\x41\x4e\xa8\x07\xf8\xdb\x4f\x81\x1b\x52\x07\x6b\x53\x32\xc0\xb3\x19\x8a\xab\x4a\xc5\x4b\xe1\x2f\x1d\x27\x0e\x3c\x3e\x3b\xb3\xfb\x09\xb9\xa0\xe2\x2a\x33\x4c\x98\x9e\xc1\xd6\x86\x95\x16\xc6\x49\xc6\x95\x0e\x8c\x73\xed\xdf\x37\x8f\x6c\x95\x5b\x35\x2d\x4a\xb2\x2c\x4f\xdc\x4c\x05\x13\xe5\xe8\xad\xda\xb3\xdb\xd8\x99\x29\x5c\x34\x41\x84\xb8\x15\x24\x69\x95\xe9\x50\x2e\x58\x3f\xa0\xee\x08\x81\x6b\x88\x7d\x37\x76\x53\xc8\xf8\x2d\xf4\x6c\xeb\x0f\xfe\x89\x5a\xea\x10\x0c\x43\x1e\xf1\xb7\x8f\x60\x02\x09\x80\x20\x5c\x0c\x10\xf3\xea\x82\x3f\x83\x39\x18\x41\x9f\x51\x67\xf0\xdb\xc7\xff\xfd\x96\x70\x8d\x9b\xfa\x15\xb1\xc1\xf9\x9c\x82\x6b\x04\x30\x1a\x05\xd3\x29\xf2\x1d\xe0\x04\xae\x3f\x06\x24\xe0\x5f\x8f\x20\xe6\xa1\xa0\x7e\x40\xe3\x57\x90\x02\xe8\x79\x5d\x70\x40\xc1\x14\x2e\x80\x8f\xc6\x90\xba\x57\xc8\x5b\x00\x77\x3a\x83\x23\x0a\xe8\x04\x81\x90\x53\xb8\x42\xc0\x0f\x1c\x04\x5c\x1a\x8e\x0f\x09\x09\x46\x2e\xa4\xc8\x61\x9d\x77\xc1\x09\x42\xe0\x1c\x79\xc1\x35\xb8\x08\x30\x98\x06\x18\x01\x07\x51\xe8\x7a\x04\x04\x3e\xeb\xe4\x5d\x38\xdb\x13\x3e\x5b\x00\x7d\x07\x10\x84\x4a\xbd\x14\xcd\x0b\x9f\xba\x3e\x45\xd8\x87\x1e\xd9\x94\x10\x61\x05\x50\xa3\x6a\xa7\x9a\x42\xa6\x51\x0d\xd4\x9c\xf2\xa7\x62\xbb\x82\x39\x06\x4e\x30\x9a\x87\xf8\x90\x76\xbb\x8a\x57\xeb\xfa\xbc\x68\x7b\xb8\x63\x95\x65\x70\x90\x74\x4b\xbe\xbc\xd2\xb9\x23\xa7\x09\x42\x4d\xbc\x2d\xd4\x3a\xaa\x14\x69\xab\xe8\xf6\x68\x6c\x83\xfe\x4b\xfc\xea\x38\x88\xb8\xe3\xc6\xaa\xd3\xaa\xd8\x96\xaf\x82\x4e\x83\xc6\x3c\xc3\x81\xa3\x1a\x54\xca\x73\x65\xab\x2d\xa3\xec\xce\xb2\x5e\x94
\xdc\xc8\x6c\xc9\x28\xb3\x0e\xd5\xa9\xec\x8e\x39\xdd\xab\x3a\x17\x23\xdb\x6a\x39\x0f\x11\x92\x08\xd2\x25\x93\xe0\xda\xca\x2e\x8e\xe9\x69\x73\xee\x72\xfd\xb7\x15\x00\x51\x1d\x54\x45\xf5\x71\x2b\xec\x19\x0f\x6b\xa9\xbf\x63\xaf\xd1\x04\x5e\xb9\x41\x81\xc9\xaa\xc2\x64\xe2\xce\xf4\x7a\xc5\x3c\x1a\xc2\x24\x77\x68\xce\x59\x14\x2d\x48\xf4\xd4\xc0\x6a\xc2\x43\x2c\xe7\x65\x9a\x73\x3c\x63\x32\x4a\x76\x39\x1c\xbe\x6b\x64\x91\xac\x9f\x06\x96\xc8\xe7\x53\x7b\x71\x49\x67\xec\xb8\xdb\xbd\x09\x1a\x5d\xd6\xf5\x8c\x2f\x5b\xfb\x5b\x04\x3d\x3a\x01\x62\x8c\x55\xd0\x9a\x7e\x52\x02\xb1\x41\x96\x9a\xc8\xd1\x73\x98\xd4\xbc\x9e\x6b\xa7\xf1\x5d\x91\x8a\xd0\xe5\x3a\x57\x28\xd5\x84\x62\x81\x2b\x0a\xeb\x4f\x6d\xc9\xca\xd4\x86\xf1\xaa\xe4\x15\x6c\x67\x78\x16\x85\x10\xc7\x30\x48\xdd\xd8\x6b\x09\xf4\x4f\x01\xa5\x96\xaf\x95\x49\xe0\xbf\x1e\x93\xd6\xbe\x45\x79\x0a\xc7\x68\x17\x4a\x03\x0b\x96\x9a\xe2\x1d\xab\x20\x4b\xac\x68\x8d\x86\x2e\xd6\x0e\x06\x14\x4a\xc9\x4a\x5a\xcc\x3b\x0a\xfe\x73\x87\x1f\x83\x17\xce\x71\x6f\x2d\xc1\x7f\xcd\x2b\x3c\x97\xa9\x9a\xd5\x88\xc2\xb3\x71\x8d\x63\x43\xaa\xc0\x9a\xea\xd3\x42\x9e\x25\x47\x13\xba\xf5\xb5\x6a\x42\xfb\xfd\x75\xaa\x42\x45\x1d\x04\x53\x5d\x68\xbf\x67\xf7\xfb\xf7\x5b\xeb\x99\xcc\xcc\xb7\xec\x49\x08\xff\x2b\x57\x41\x56\x74\xa3\xa1\xf0\x92\xf9\xab\xf3\x5f\x0e\x0e\x66\xf1\x2f\x8c\xae\x10\x26\x51\xd5\x94\x5a\xf8\x5d\x41\x05\xdb\xd8\x26\xae\x3d\x16\x57\x20\x83\x74\x63\x55\x38\x06\x1e\xcb\xd9\xe1\xb3\xd1\x32\x65\xca\x5e\x5c\x60\x84\x28\xba\xa1\x99\x20\x5e\x25\x6c\x45\x7a\x38\xa9\xce\x91\x05\x99\x2f\x52\xa1\xb7\x89\x08\x53\x19\xd0\x6b\x9d\xb0\xc8\x60\x70\xbe\xe0\xf9\x66\xb3\x1e\x4e\xcd\x30\x1a\xcb\x27\x30\x69\xea\x5e\x29\x53\xda\x66\x88\x69\xd2\x25\x46\x6f\x64\x68\x38\xd0\xbc\xc9\xe9\xe3\x20\xa0\x9d\x55\x86\xc5\x3f\xcc\x38\xed\xdc\x2a\x86\x2e\xf9\x99\x9d\xac\x1d\x07\x91\x11\xdf\xe7\x7b\x5b\xd4\x70\x5d\x81\xcf\xfa\x3c\xbf\x22\xd2\x4b\x02\x8c\x4d\x46\x72\x1e\x21\x9c\xb4\xae\x85\xa5\x9a\x96\x62\x06\xa7\x64\xf8\x24\xfb\x23\x7e\x22\x1e\x6f\x26\x3d\x7c\x79\x33\xad\x33\xbb\xe2
\xfb\x88\xdd\xf1\x44\xb1\x39\xa6\x8d\x90\x89\x5e\x4a\x7c\xf2\x22\xe2\x54\x39\xc4\xf7\xe1\x87\x78\x36\xe4\x38\xf6\x18\xdc\x99\xd3\x6c\x65\xc1\x9d\x6b\x3b\x68\xf9\x63\x7c\x64\x32\xb0\x0c\x17\x35\x3d\x41\xc5\x27\x6a\x8d\x60\x7d\x8c\x1d\x7d\x8c\x1d\x7d\x8c\x1d\xbd\x6f\xb1\xa3\x89\x60\x51\x45\xa2\x06\x33\xe8\x62\x62\xe0\xfc\xb0\xde\x40\x51\xa9\x1b\x35\xd7\xa6\x32\x23\xf6\x66\x67\xc2\xcc\x5c\x23\x61\x49\x5b\xb7\x62\xf5\xf7\x8f\xde\xa7\xef\x5f\xff\xfe\x52\xaf\x58\x65\xb3\xd2\x06\x83\xa6\x6f\x41\x4b\xa3\x7b\x8b\x58\xf5\xfc\x1a\xa5\x01\xa6\xdc\xc2\xf6\x7a\x71\x30\x0d\xd7\x0d\x79\x66\x0e\x55\x0e\x16\xda\xdd\xb4\x25\xae\x88\x52\x29\xe2\xba\x02\xde\x4e\xf8\xe5\x9c\x28\xe9\x31\xe1\x98\xa7\x74\x92\x71\x39\x51\xd2\x20\xf1\x61\x28\xce\xa2\xf0\xdf\x60\x4e\x45\x70\x5b\x78\xe3\x64\x2a\x72\x47\x19\x72\x78\xbb\xc2\x27\x87\xac\x4b\xe5\xc1\x7b\xd1\xb7\x51\xd0\x92\xa2\x4b\x6b\xd2\xa3\x59\x83\x8b\x95\x11\x19\xd3\x3b\xf0\x6f\x7e\xfb\xef\x67\xbb\x70\xba\x7d\xa5\xc5\x5f\x1d\xe2\xea\x9c\x6f\x74\xd6\x67\x85\x8c\xff\xee\xfa\xee\x74\x3e\x2d\xad\x52\x90\x63\x79\x56\x75\x47\xcc\x77\xab\xe3\xcf\xc3\x25\x45\x42\x33\x0d\xa6\xc1\x18\xc3\xd9\x64\x11\x32\xf2\xae\x70\x4e\x3a\x15\x5f\x33\xa4\xeb\xd9\xbd\x5e\xaf\xdb\xeb\xa5\xd5\x3e\x53\x52\x3a\xab\xbc\x45\x21\xc7\x85\xfe\x9a\xd6\xc4\xc6\x5a\xcf\xb2\xe0\xcd\x1a\xf7\x0a\xde\x2c\xbf\xa8\xec\x2f\xa9\xf4\x8d\x86\xea\xb0\x7f\x14\xba\x15\x4f\xe2\x4c\x33\x31\x1d\x25\x69\x8c\x44\x60\x5a\xc1\x60\x28\xda\x88\x42\x59\x77\x70\xbd\xbd\x98\xce\x16\x97\xe8\x70\xcb\xcc\x6e\xc8\xe2\x8e\x34\x76\xbe\x46\x15\xd1\xf5\xd5\xd0\xf7\x4e\x09\xbd\x19\x6e\x65\x8e\x26\xba\x92\x7e\xe4\x0e\x74\x89\xab\xce\xe1\xa8\xd3\x0b\x6a\x8b\xe2\x95\x35\x3a\x62\x30\xae\xd2\x62\x08\xc7\x99\x50\xc3\x95\xa8\x1f\x13\xaa\xc7\x52\x4f\xdf\x72\x83\xa8\xa4\x13\xd2\xb3\x53\xf2\x50\x12\x6c\x05\x56\xc9\xc4\x67\x25\xca\x87\x72\x68\x66\xb2\xc6\x89\x3d\x30\x09\xfe\x63\xb9\xe2\xf8\xb9\x58\x26\x04\x30\x33\x07\x2a\x76\xb5\xc0\x8c\xc9\x0f\x8f\x98\x03\xc3\x82\x6a\x47\x47\x72\xef\x6a\x0f\x35\xbd\x18\xb3\xa5\xd9\x9e\xe5\x94
\x66\xcb\xe2\xe6\xfa\xbc\x6b\x56\x5d\xbd\x39\x42\x68\x63\xa2\xd8\xc8\x1d\x1d\x0d\x5b\xfd\xa2\xe6\x2e\xf3\xeb\xbf\xa8\x2f\xf7\xdf\x7e\x7c\x73\x79\xf8\xce\xe8\xa2\xae\x9d\xc2\xbb\xc8\x9d\xb5\xea\x61\x29\xba\x67\xe4\x99\x8d\xb3\x0a\x2b\x90\x4d\x0e\xba\xa6\xab\xa7\x38\x76\x40\xd7\xa2\xc8\x6f\x5d\xf7\x7d\x89\x3f\xb6\xf6\xa2\x2a\xf2\x6e\xd6\x35\x28\x77\x62\xd7\xb5\x2a\xf3\x20\xd6\xb5\x49\xa7\x4c\x5e\x97\x01\xaf\x4a\x46\x52\x7d\x06\x55\x5d\x1b\xc3\x3c\xa9\x55\x06\x4f\xc6\x23\xac\x75\x68\xad\xbf\xfe\x5a\x67\x90\x72\xa7\x5f\xeb\xd8\xba\xd8\x8b\x06\x27\x50\xcc\x54\x34\xea\xf0\xaf\x73\xbf\x4f\x8e\x53\x29\x36\xd4\xd4\xa9\xbe\x1a\x20\x1e\xaa\xef\x3d\x58\xb9\x31\x44\xef\x7f\xaf\x71\xfd\x8e\x1d\xef\x4b\x2d\x12\xab\x30\x50\x81\x5a\x46\x2a\x90\x38\x78\xcf\x2b\x18\xaa\xca\x31\x6e\x2d\x7b\xf4\xac\xcc\x01\xbf\x82\x79\xb2\xd6\x24\x4d\x0c\x57\xdb\xb5\x0c\x57\x85\x36\xa8\x4a\xe1\x35\xf7\x45\x50\x50\xe3\x48\x09\x8b\xf6\x65\xa4\xe2\x30\x70\x50\x17\xfc\xac\x09\xfe\xb5\xc1\x95\x8b\xae\xd7\x18\xd8\xbc\x64\xa0\x6c\x51\x74\xb3\xb9\x70\x94\x78\xd9\x80\x98\x24\x17\x5a\x4d\x4c\xba\xa3\x18\x88\xa3\xbf\x2f\xa6\xfb\x6f\xaf\x72\x0a\x26\x9a\xea\x32\x8b\xaa\x18\xb1\xc8\xfb\x3b\x29\x60\x14\x22\x7a\x43\xb5\x8b\xee\xb6\xc6\xcf\x08\xd2\xf0\x9a\x32\xa9\xf0\x13\x19\x19\xb3\x35\x7e\xb8\x1d\xae\x6a\x8d\x1f\x12\xdb\x1a\x1f\x62\x8d\x9f\xb9\xcf\x97\xbd\x58\x62\x17\xe2\xc4\x03\xca\x6d\x14\x77\x5c\x96\x04\x28\x4e\x46\xd0\xb3\xad\x0f\xb2\x19\xc7\xce\x52\xf9\x51\x5b\xa5\x3c\x5f\xde\xcb\x09\xd1\x89\x2c\xe8\x49\x90\xd4\x51\xc3\x65\x2d\xe0\xc8\xe9\x60\x24\x2a\xe2\x68\xac\xe0\xfc\xf6\x50\xd2\x04\x63\x5e\xd2\x71\x14\x09\xd0\xdc\xfa\x5d\x12\x90\xcf\x99\x8f\x74\x20\x7e\xea\x71\xf4\x7b\x57\x0c\xa3\x3c\x8a\xb9\x7f\x03\x04\x7d\x56\x93\xf5\xd7\xbf\xa9\x44\x78\xb4\x68\xbc\x22\x24\xae\x85\xc2\x6f\xf3\x11\x58\xa0\x5b\x48\xed\x0b\x4c\x25\xb6\x35\x42\x9e\xd7\xf1\xe0\x22\xe0\xee\x0f\xc9\x35\x4a\x52\x33\x43\x78\x14\x5e\xec\x63\x14\xf6\x35\x9f\xfa\x44\x36\x29\x82\x8a\x48\xfc\xc9\x1b\x84\x0f\xfa\xbd\x5e\x25\xef\xcc\x0a\x48\x9e\x87\xda\x92
\x66\x6a\x9d\xf2\xd3\x58\xdd\xd7\x63\x75\x3f\x85\xd5\xfd\x2c\x56\x27\x64\xe7\x9e\x65\xc7\x7e\x21\x46\x4e\x1f\x6a\x51\xd1\xba\xd1\x37\xd9\x82\xf4\xc9\xc0\xa7\x22\xbc\xb5\xd3\x1f\x9b\xd0\x6a\x33\x3b\xa8\x09\x77\xcc\xf7\xa1\xa2\x61\x71\xad\xde\x5f\x0a\x63\x18\x73\x93\x4f\x8c\xb8\x49\x81\x5e\xeb\xf6\xfb\xda\xdf\xeb\xfb\x8b\x8d\xb9\x9e\x99\x9c\x41\x9f\xb1\xfb\xcb\x54\xe1\x8e\x18\x4a\xb6\xc0\x07\x94\xda\xcf\x17\xc4\x52\xe7\xd7\xbb\xeb\x79\x49\x62\xba\xe4\x0d\xb2\x3a\x8e\xda\x52\xf2\x80\x08\xd9\x20\x9d\xed\xa6\x68\xde\x1a\x12\x4b\xe1\x79\xc7\x87\x57\xd9\x2b\x82\xa0\xf0\xf6\x10\x8e\xfc\x3c\xeb\x37\x4b\xea\x15\xd1\x1b\x8c\x21\x8b\xd8\x49\x66\xf4\x88\xec\xe1\x24\x9b\x2c\x38\xe1\xcd\x62\x5b\xc7\xc1\xdc\x77\xc0\x10\xbb\x33\x30\x74\x19\x05\x8f\x25\xd7\x84\xe8\x9c\x0e\x6b\x4d\xf7\x2b\xa7\x3a\x84\xe7\xf1\x0d\x94\x7a\x28\x28\x61\x27\xe2\x7a\x2a\xf3\xca\xb5\x62\x3e\xbe\x9e\x22\xde\x5b\xa6\x45\xbc\x9f\xe5\x16\xf1\x56\xae\x4f\x5e\xc9\xfb\xe0\x08\x88\x4b\xbb\x6e\x3d\x6f\xcd\x81\x50\x46\xa9\x2b\x06\x29\xb9\x20\xee\xa6\xde\xf7\xc1\x11\x10\xcb\x78\xac\xfa\xbd\x06\xc9\x55\x75\x70\xce\x21\x73\xfc\x4a\x81\x44\x71\xd8\x73\x1d\x8b\x4d\x82\xba\xbc\xe6\x8a\x08\xe8\x9f\x8f\xdd\x0b\x0d\x61\x8c\x48\x5b\xc4\x23\x24\x7c\xc4\xcf\x24\xd7\x5d\x6d\x8c\x88\xd8\x66\xbb\x8f\xfc\xf3\xa2\xae\x73\x49\x71\x8d\x81\xd3\xc4\x3b\x3b\x3e\xa6\xdc\x8b\x2c\x55\x22\xa5\xda\x30\xda\x7b\x20\xb1\x48\xd5\xe2\x6e\xc0\xa7\x72\x1b\x7f\x87\x64\x24\x23\x47\xbd\xf3\x52\xea\x1b\x19\x1e\xa5\x2b\x18\xd0\xc8\x3d\x64\x27\x86\x28\xa3\xb2\x99\xb2\x18\xc6\x62\xd5\x56\x34\x50\x04\xf3\xf0\x71\x9d\xc2\xf5\xfd\xbb\x48\x6e\x13\x33\xd7\xe6\xea\x5d\x79\x0e\xee\xac\xc4\x3d\xfd\xf3\x97\xa3\xe7\x07\x87\xab\xd3\xf0\x8a\x15\xde\x8d\x92\x57\x52\xa0\x47\x3d\xef\x7f\xb4\x9e\x77\xbd\xb5\xdc\x53\x3e\xa5\xd6\x9d\xd5\x70\x0f\x65\x4e\x42\x17\x1e\x0b\x1e\xfd\xc1\xb6\x30\x9a\x42\xd7\x77\xfd\xf1\x1f\xae\xc3\xbe\x48\x4a\x9a\xa5\xbe\xc2\xa5\x03\xd0\x89\xbe\xe7\x50\x0c\x04\x9c\xe3\x58\xa2\xf7\x92\xe9\xaf\xcd\x07\x19\xe8\x52\x20\x54\x98\xe8\x32\x39\x0e\x0a\x7c\x96
\x0f\x65\x8e\x96\x8c\x16\x21\xdf\x7f\xcb\x34\xd9\x40\xe1\xf2\xcd\xd2\x0a\x1b\xc6\xb0\x5b\x6a\xb0\xcd\x4b\x29\xfe\xd9\xd6\x11\x24\x44\xa4\x37\xcb\x94\x33\x9b\x89\x77\xe9\x00\x7d\x2f\xeb\x47\x14\x75\xa3\x64\xaf\xb2\xc0\xdf\x08\x07\xba\xaa\x66\x6f\x15\x26\x18\x28\x33\x30\x8e\xbb\x75\x1c\xc3\x65\xd4\x9a\xb7\x3a\xed\x88\x55\x4b\xe1\xa2\x80\x8d\x06\x13\x13\x41\x47\x7f\xc5\x41\x47\x39\xe3\x56\x8f\xb2\xcf\xdb\xca\x3f\xe2\x44\xf2\x99\xad\x94\x49\xe6\x0d\x40\x12\x75\x53\x7d\x2b\x95\x19\xd4\xdf\xca\xbc\x65\xd4\x9a\xb7\xc9\x56\x0a\xd8\xd4\xdd\xca\x78\xdc\xe6\xb6\x72\x0f\xbb\xd4\x1d\x41\x7d\x95\xc1\x91\x7c\x69\x00\x94\xb8\xa3\xea\xbb\xa9\x4e\xa2\xfe\x76\xe6\x2e\xa5\xde\xd4\x4d\x36\x54\x02\xa8\xee\x8e\x2a\x23\x57\xdb\xd2\x72\xe2\x6e\x48\xfa\x35\xc1\x22\xd5\x2f\x43\xeb\x31\xa8\x24\x17\xfa\x2b\xf6\x17\x03\x46\x5a\xb4\xc2\x00\x13\xb3\x39\xd7\x60\xfe\x1b\xf1\xbb\x4a\xca\xd7\x35\xe4\xf2\xbb\xb1\x95\xbd\xde\xdd\x78\xff\xf7\xe6\x2c\x30\x17\xcb\x97\x94\xce\x1f\x9a\xc5\x8c\xa4\x55\x8b\x59\xa3\x99\xa2\x7d\x7c\x70\x76\x33\x29\x8a\xd9\xba\x10\xc3\xc6\x16\xf1\xa8\xcf\x78\xa0\xfa\x0c\x81\x25\x55\xaf\x46\x8d\xd3\x4f\xf9\xc1\x4c\x16\x87\xc8\xc8\xd8\xba\x16\xfa\xb0\x86\x84\x9b\x99\xba\x82\xaa\x85\x9e\xb5\xd7\xf2\xb6\xf1\xb5\xbc\x6d\x18\xd3\x59\xeb\x80\xfd\x67\xb8\x19\xe6\xec\x4a\x59\x86\x9e\xaf\xc3\xbf\x30\x53\xef\x47\xff\x56\x56\x23\xf7\x47\x50\xb2\xb3\xca\x47\xaa\xb5\x79\x27\x1e\x20\xa6\xfb\x47\x32\xcf\x41\x72\x8c\x47\x3f\xc5\x47\x3f\xc5\x35\xf9\x29\xca\xfc\x60\x35\x1d\x16\xb5\xc7\x24\xfb\x56\x73\x4c\xfa\x85\xc7\xa4\x5f\x76\x4c\x12\xc1\x7c\xf7\xc0\x15\x72\xad\x5e\x81\x09\xb9\xc5\x50\xdc\x11\x9e\x37\xeb\x96\x71\x7e\x25\x87\x7f\x5c\xef\x1d\xbe\x37\xcb\xa3\xb5\xcd\x8d\xf3\xee\x28\xc0\xb3\x0e\x5f\x45\xca\x3e\xee\x8c\x08\xfb\x3f\x23\xea\xd7\x18\xce\x66\xbc\xcc\x41\x8f\x21\xaa\xc2\x06\x86\x9f\xe4\x58\x5f\x8a\x44\x25\x06\x26\x73\x41\xa9\x4e\xfc\x7c\x79\xad\x54\xe9\x25\x15\x31\xb0\xd5\x58\x27\x29\x66\x64\xba\x51\x4f\x01\x68\xe9\x3e\x8d\x02\x4f\x13\xdf\xb6\x1b\xa8\x36\x50\x3e\x8a\x51\x61\x80\xca\x77\x54
\x25\x56\xbc\x56\x7d\xf6\x3d\x86\xa7\x00\x23\x3a\xc7\x3e\x72\x00\xf4\x01\x5b\x68\x57\xf3\xed\x9f\xc1\x9c\x15\x2a\x9d\xc0\x2b\x04\xae\x5c\xe2\xd2\xb0\x01\xf8\x70\xfc\x0e\xd0\x09\xa4\xc0\x25\x40\x94\x78\x09\xbb\x99\xfb\x97\x7e\x70\xed\x03\x49\xce\x6d\x40\x02\x16\x02\x3c\x82\x3e\xa0\x78\x01\xc6\xac\x0e\xea\x39\x1c\x5d\x02\x1a\xb0\x32\xa5\x38\x08\xa8\x6e\xe0\x83\x8b\xb0\x21\x06\xbb\x7b\xef\x00\x0d\x2e\x91\x0f\xae\x21\x61\xb5\x54\x2f\x98\x1b\x8d\xec\x15\x23\x82\x28\x70\xa9\xcd\x6a\x9c\xd2\x09\xf2\x79\xcc\xb1\xeb\x79\xe0\x1c\x01\x8c\x1c\x17\x33\x67\x12\x39\x20\x41\x94\xba\xfe\x98\x80\x19\x1c\xa3\xf0\x21\xf2\x29\xc2\x00\x02\x1f\x5d\xc7\xa3\xe9\xa6\x34\xc4\x0b\xe0\x05\xc1\x65\xb8\x04\xd7\x07\xe1\xec\xf2\xac\x63\x75\xc2\x0e\x99\x26\xa8\xa4\x84\xaa\xd8\xd4\x92\x00\x42\x3d\xde\x69\x10\xa5\x44\xa3\x21\x41\x95\xa3\xce\x38\x66\xa0\x8f\x40\x66\x36\x5a\x1c\x41\x19\xf0\x44\x1c\xf9\xc3\x17\xd5\x15\xff\x25\x88\xb0\x28\xc4\xa0\xfb\x77\xeb\xf1\x13\x65\x76\xd5\xc9\x22\x49\x6b\xbf\xec\xc6\x97\x9f\x7b\x2f\xdf\x5c\x23\xa3\xcb\xae\x3e\x2c\xc4\xfa\x0c\xa1\xe1\x07\x94\x1d\xf1\xf5\x83\x63\x73\xef\xd7\xde\xcd\x78\xe4\xac\x18\x1c\x72\x81\x86\xf0\x88\x4e\xe1\xda\xe1\x81\x27\xc7\xf8\x64\xe3\x78\xf4\x50\x78\xa1\x88\xb4\xaf\x95\x1d\x4a\x5f\x13\x27\x62\x16\x85\xc4\xf8\xce\x99\x80\xe1\x04\x11\xe5\x32\x84\x9e\x17\x5c\xb3\x9b\x93\x06\x80\xa5\xfa\x18\x8b\xf4\x1d\x18\x9c\xe3\xe0\x9a\x20\x2c\xa2\xf1\x91\xe4\x1f\xfe\x40\xe7\xe0\xc3\x41\x17\xec\x5f\x21\xbc\xe0\xfe\xcb\x2e\x01\x04\x5e\xf1\xbb\xd6\x0b\x46\xd0\x23\x34\xc0\x70\x8c\xf8\xe5\x3c\x43\x98\xb8\x84\x12\x40\x27\x38\x98\x8f\x27\x9c\x9f\x20\xec\x9d\x1c\x63\x1e\xf2\x59\xe9\xcb\xb7\xf4\x66\x91\x39\x24\x4b\x75\x41\x17\x2e\xf2\x1c\x82\x4a\x72\xd8\x29\x0d\x3c\x78\xce\xd3\x30\xa4\x93\x3a\x50\x74\x63\xd0\x0b\xd0\x2b\x07\x7b\xb6\x15\x5e\x9b\xc3\xe2\x6b\x33\xdd\x89\x2c\x3f\xe0\xcf\x42\x39\xdd\x57\xa3\x22\xe2\x24\x97\xbe\xc8\x6f\xc5\x5c\x2c\xae\x03\xec\x58\x49\x1d\xbe\xb8\xaa\xcf\xe2\xbf\xaa\xe4\x9d\x7f\x61\x5b\x48\x85\xf4\x70\x82\x04\x77\xc6\xf2\xb9\xf8\x94\xd7\xbe
\xc7\xe8\xf3\x1c\x85\x3b\x0d\x09\xc3\x18\xd1\x74\xc4\x84\xeb\xa8\xf1\xbf\x3a\x1c\x93\x3a\x69\x38\x80\xb7\xc3\xe1\x11\xe0\x67\x13\x44\x87\xa4\xcb\x8b\xf6\xbb\x04\xcc\x09\x47\xb1\xf0\x5c\xe0\xc0\x63\x43\x84\xf0\x94\x08\x7a\xcd\x31\xb3\x1c\xb2\x0d\x58\x9c\x75\xa9\x40\xc8\xfc\x7c\xca\x2a\x40\xe4\x24\xfc\xb0\xe6\x33\x47\x29\xd0\xc4\x4d\x64\xaa\x23\xc1\x09\xbc\x2a\x49\x52\x76\xbf\x98\x1d\x49\x46\x0a\x6f\xb4\x39\x75\x3d\xb2\x09\xc9\x08\xad\xe8\x6e\x8f\xbb\xb1\x69\xfb\xcb\x15\xc4\xc0\x1f\xa0\x2e\xab\x03\xd7\xb2\x36\xad\xf6\x13\x2e\xf1\x00\xbf\xcb\x55\x5a\x3f\xd1\x57\x7e\x97\x55\x13\x6c\xf5\xec\x0e\x6d\x77\xb9\xbe\xa7\x65\x59\xed\xee\xa7\xc0\xf5\x59\xa3\x1d\x2b\x5c\x50\xfe\x72\x68\x70\xae\x2e\xc6\xb6\xfc\xd9\x74\x87\x65\xa4\x45\x7e\x88\xef\xb8\xe3\xb9\xcc\x25\x33\x7c\x7e\x0e\x09\x7a\xb1\xdd\xf9\x94\x5c\xb4\x4d\x6d\xbf\xa1\x85\xf3\x65\xe3\x01\xc4\x63\x26\x20\x10\xb9\xd4\xfe\xd3\xa7\x57\x81\xeb\x80\xde\x37\x83\xf8\xe5\x69\xff\xec\x95\xfa\x63\xc7\x9a\xd3\x8b\xce\x0f\x96\xed\x0d\x7c\xd9\x79\x97\x06\xaf\x17\x14\xed\x62\x0c\x17\x2d\x14\xc3\x10\x5d\xb7\x86\xe8\x86\xbe\x41\x6c\x91\xff\xfc\x43\x65\x83\x76\x0b\xb7\xbb\x0e\x7b\xdc\xf2\xda\x45\xb0\x3b\xa7\x01\xfc\x2a\x61\x27\x60\xb3\xef\x6b\x61\xc3\xd7\xa6\xc2\x32\x82\xf5\x05\x0e\xa6\x31\xb4\x8b\xa1\x37\x82\x9e\x07\xcf\x3d\xd4\xe1\x44\x67\xa5\xe7\xa9\xfd\x85\x4f\xd5\x92\x8f\xac\x6f\x06\xe1\xb0\xc1\x05\x40\xaf\xa2\xcf\xe4\x57\x00\xdd\xee\xa0\xc2\xa9\xcb\xfa\xb2\x2b\x9e\x74\xb8\x9f\xb4\xee\x7e\x8a\x49\xca\x4d\xe2\x8c\xf2\xf1\xc9\xc7\xa3\xee\x11\x0e\xa6\x2e\x41\x5d\x8c\x48\xe0\x5d\xa1\x16\x6d\xa1\x76\xf1\x56\xb1\xba\x74\xa1\xbc\xbf\x2e\xb2\x97\x59\xf3\x96\x76\xcd\x5b\xea\x9a\xb7\xce\x76\xbe\xdc\xda\x1a\xfc\x7f\xa6\x6d\xfb\x4c\x6d\xfb\xec\x6c\x87\xa3\xf5\x87\xe3\x83\xbd\x60\x3a\x0b\x7c\xe4\x53\x01\xba\x53\x24\x09\x2b\x55\xe8\xaa\x2d\x56\x7c\x89\x16\xa4\xe5\xb7\xbb\x53\x38\x6b\x65\x11\x0e\x84\x17\xd3\x37\x03\xff\x14\x9d\xbd\xc2\x2d\xd4\xde\xb0\x06\xd6\x06\x6e\x85\xbf\xdb\x3b\xe8\x56\xf6\xf7\xd4\x6a\x9f\x75\xb9\x69\x5c\xd3\x89\x65\x7d\x33\x18
\xc4\x1f\xbf\xb2\x0a\x37\xcb\x71\x09\x2f\xbe\xb1\xfa\xbd\x42\x03\xd4\xdd\x0b\x02\xec\xd8\x74\x40\xf9\x5f\x4f\x2e\x02\xdc\xe2\x7b\xd8\xb3\xf1\xa0\xf7\x23\xfe\x6f\xd4\xfd\x88\x46\x62\x2f\x7e\xc4\x1b\x1b\x7c\x8f\xbd\x01\x7b\x7e\x8a\xcf\x3a\x54\xfc\xf1\xc4\xdf\x18\x78\xdf\x79\xb7\xe1\x6b\x32\xf8\x1d\xd2\x49\x97\x7c\xc6\xb4\xe5\xb7\x37\x50\xf7\x2d\x72\xc7\x13\xba\x41\xc5\x1f\x76\x30\x20\x1b\xa8\xbb\xeb\x7c\x9a\x13\x1a\xee\xe1\x06\x55\x7e\x48\xa4\x0f\x7e\xea\x3d\x7d\xda\x22\x83\xa0\x6d\xb3\xee\x70\x28\xb6\xb6\xfa\xe8\xf9\x77\xa4\xbd\xd9\xef\xf5\x8a\xe0\xb8\x3a\xfd\xbd\xda\x4d\x38\x6a\xe0\xa1\x2e\x1b\xad\x85\x0a\x77\x76\x8c\x68\x67\x24\x91\xb3\x73\x01\x47\x34\xc0\x8b\xfb\x4d\x84\x2c\x26\xeb\x76\x30\x1a\xbb\x84\xe2\xc5\xce\x14\xba\xbe\x65\x87\x6c\x8d\x17\x04\x97\xf3\x59\x8b\x46\xb7\x48\x76\x4c\xd4\x1d\x23\xba\x4b\x29\x76\xcf\xe7\x14\xb5\x42\x19\xbc\xfd\xc4\xbd\x68\xd1\xb6\x3c\x5c\xa7\xf4\xec\xb6\x08\x62\x13\x48\x84\x51\x6a\x5d\x84\xab\xf7\x84\x5c\xbb\x74\x34\x69\xd1\xf6\x97\x11\x24\x28\xf2\xa2\xdf\x61\xbf\x22\xe7\x5d\xfe\x53\xfa\x65\xef\xf8\x42\x91\x31\x46\xb4\x85\x04\x25\x78\xbd\x68\x49\x93\x9a\x4d\xdb\x91\x1d\xb3\xfd\xe4\x1c\x23\x78\xf9\x84\x75\x10\xb6\xec\xdf\x4a\x70\xfc\x54\x88\xd0\x13\x4a\x67\x9b\x53\x44\x27\x41\x83\xec\xeb\x13\xd4\x3d\xfa\x30\x1c\x58\x47\x1f\x86\x96\x8d\xba\x6f\xf6\xdf\xed\x0f\xf7\x07\x16\xff\x37\x7c\x72\xf4\xfe\x24\x7c\xfd\xfe\x64\x68\x95\xcd\x8d\x34\xbc\x55\x4f\x50\xf7\xfd\x6f\x83\xad\x5e\xcf\x46\xdd\x0f\x87\xbb\x1f\x86\x6f\xdf\x1f\x1f\xfc\x7b\xff\xcd\x60\xbb\xd7\xb7\x51\xf7\xe0\x70\xb8\x7f\x7c\xb8\xfb\xee\xaf\x93\xfd\xe3\x8f\xfb\xc7\x7f\xed\x1f\x1f\xbf\x3f\x1e\x3c\xef\xf5\x0a\xe6\xe9\xfa\xe1\x14\x42\xb6\xe5\x98\x8b\x88\xc3\xe0\xd7\xff\x99\x23\xbc\xd8\xfd\x04\x6f\xde\xb2\x30\xd7\xb5\x30\x32\x5f\xfe\xc2\xfa\xf1\x77\xa2\x6f\x23\x94\xfc\x72\xfb\xc4\xef\x86\xdc\xce\x80\x76\xf9\xf6\xdb\x7e\x77\x8e\xbd\x01\x0d\xff\x6f\xfb\x5d\x07\x52\x38\x0c\xdf\x5b\x9f\x08\xb3\xc8\x77\x99\xc2\xe6\x86\x0e\xe8\xc4\x25\x36\x65\x1f\x3c\x7d\xda\xb2\x7e\xd9\x1f\x86\x57\x91\xec
\xe6\x55\x4b\x7c\xe9\x53\xde\x1c\xce\x66\x9e\x3b\x62\x4a\xfd\xcd\xb0\xab\x1f\xc1\x68\x02\x31\x41\x74\x20\x78\x4b\x3e\x56\x28\xd4\x50\xec\xfa\x63\xf7\x62\xd1\xe2\xbd\xb7\xdb\x3b\xe2\x9d\xf8\xfd\x84\xf3\xb2\xb4\xcb\xa5\x68\x22\xa9\x44\x44\x73\xf0\xd3\xa7\x2d\xbf\x7b\x8e\x2e\x02\x8c\x4e\x90\xef\x0c\x34\xb0\x66\x77\x33\x6e\x77\x2f\x02\xbc\x0f\x47\x93\x96\x0a\x1b\xc9\xea\x75\x09\xa2\x62\x2f\xdf\xb2\xa1\x5a\xd4\xc6\xa7\xf4\xac\xcd\xa4\x40\xff\xf6\xb6\x9b\x07\xea\xa2\xb3\x16\x55\x55\x5c\x29\x3a\x44\x0c\xc2\xa6\x35\x18\x0c\x5a\x19\x1a\xdd\xd3\xd2\xe8\x9e\x4a\xa3\x7b\x67\x3b\xa1\xb8\xc8\xa5\xc8\x4e\xbf\xf0\xfe\xb9\x44\x8b\x61\xb0\x2b\x73\x80\xdc\xe3\x4b\x67\xd3\x12\xe8\xd2\x42\x83\xc1\x80\xbe\xb2\xac\x1d\xd4\x16\x82\x34\x2d\x5c\xa2\x5a\x71\x76\xa5\x1b\x17\x2e\x10\x65\x17\x68\xba\x63\x76\x7d\xe0\x48\xd8\x80\xde\x60\x30\x40\x5d\x66\xa4\x7a\x1f\x5e\xaa\xaf\x50\x97\xcc\xcf\x09\xc5\x2d\x2a\xba\x6c\x17\x8b\x41\x53\x78\x89\xc2\xeb\x39\x24\x87\xeb\xa2\x7b\x90\x52\xbc\xa3\x3b\xc7\x9c\x15\x28\x9a\x6d\xe0\x20\x6f\xf3\x1a\xbb\x74\x0d\xf3\x0d\x85\xfa\x1c\x69\xdc\x54\x92\x79\x12\x51\x28\x04\x47\x93\x98\x0d\x4a\x00\xe5\x14\x9d\x0d\x68\x2b\x14\x7b\x4e\xd1\x99\xfd\x85\x20\xec\x42\xcf\xfd\x1b\xed\x74\xfa\xdf\x0c\x06\x7e\xb4\xbb\x88\x93\x33\x7c\xfb\x84\x9f\x2c\xb1\xc8\x90\x37\x19\xfb\xff\xfc\xa3\xf6\x29\x19\x78\x3a\xe8\xff\x48\xff\x3b\x3d\xfd\x1f\xa9\x64\xe0\x15\x21\xed\x94\x9e\x45\x7c\x3f\x06\xae\x0f\xfc\xb6\x18\x60\x86\x03\x1a\x84\x77\x4f\x77\x02\xc9\xfb\x6b\x5f\xc2\xb3\x1b\x4a\xff\x2d\xdf\xc6\xed\xa7\x4f\x5b\xe8\x14\x9f\x0d\xfc\x53\x7c\xd6\xbe\x8d\x04\xf0\x82\x8d\x9c\x71\xf1\xd5\x19\xba\x53\xc4\x3d\xbe\xee\xef\x49\x15\xa2\x76\xfd\xe3\x4a\x10\x15\xeb\xcc\xf0\xc9\x21\xf4\xbe\xc4\xea\x2c\x90\x42\x8d\x16\x6d\x29\xab\x40\x2d\xbf\x7d\x6b\xfb\xed\x10\x0f\x8a\x80\xfb\x99\xc0\xf5\x48\x16\x4b\x02\x56\x7a\x10\x64\xa0\x12\x91\x84\xec\x75\x68\x06\x71\xd4\xee\x7e\x0e\xaf\xf9\x13\x96\xc4\x23\xc0\xbb\x9e\xc7\x2e\x8d\x02\xa0\x25\x2a\x97\xdf\x5f\x98\x2d\x77\x6d\x70\xa9\x8d\xb5\xe9\xc8\x0b\x22\xa6\x51\x1e\x24
\xf4\x20\xba\x4a\x06\x83\x81\x1f\x5f\x27\x3d\xdb\x2f\xb9\x49\xc8\x7c\xfa\xfe\xe2\x43\xec\xcf\xbd\x96\xbb\x04\xa0\x2e\x46\xce\x7c\xa4\x9e\x9b\x58\x90\x8b\xe5\x31\x1a\x79\x36\x4a\x59\x35\x16\xe3\xc2\x85\xfe\xf3\x4f\x24\xc7\xf1\x75\x6f\xf4\x77\xd0\xad\xdd\x2b\x64\x34\xd4\xf2\x83\xeb\x59\x6d\x7c\x42\xe2\xcb\xe9\xa5\xfc\xcf\xf6\x06\x9d\xf8\x07\x19\x9c\x9e\x3d\xf1\xb3\x6c\x73\xd0\xfe\x12\x8a\xdf\x83\x41\xd0\x3d\x0c\x1c\xc1\xa6\xb9\x83\xa0\x7b\x82\xc6\xec\x30\x6a\xda\x50\xd6\x86\x37\xf8\x66\x40\xd9\xbf\x4f\x9f\x52\xd9\x64\x30\x70\x23\x53\x47\x2b\xb0\x69\xfb\x09\xe9\xce\xe6\x64\xd2\xfa\xe2\x07\x0e\xda\xe1\xdf\xdb\x52\xa7\xb5\xe3\xdb\x84\xb7\xdb\x71\x43\xee\xfc\xbf\x43\x29\x00\x0f\xfc\xb6\xed\xff\xe4\x3d\x7d\xda\xf2\x06\x3e\x83\x3a\xe9\x92\x00\xd3\xd4\xbe\x46\xbb\x2e\x7b\xeb\xd0\xe8\xcf\x90\x2a\x72\x99\x23\xb0\xdd\x81\x3c\x1b\x36\xe4\x5a\xa8\x0b\x2f\x08\x70\xcb\xdd\xdc\x8a\x94\x15\xee\x4f\xbd\x57\xc1\xc0\xfd\x3f\x5b\xaf\xc8\x29\x3c\x8b\xba\xd9\x69\x91\x53\xd8\xe9\xc7\x0f\x36\x12\xaf\xdb\x9b\x5b\x3b\xad\x80\x2b\xc6\x6c\x6f\xd0\x6b\xdb\x5f\xe4\x2b\xb2\x43\x6c\x7f\x27\x1a\x78\xea\xfa\x3b\xb3\x50\x74\x3a\xf0\x69\xab\xdf\xeb\x7d\x87\x99\xbe\xca\xe6\x05\x38\x93\xaf\x02\xf1\x0a\xde\x24\x9f\x7b\x42\xc5\x75\xab\x4d\x60\xc9\xd1\x70\x3e\xba\x70\x31\x59\xf1\x1d\xaa\x28\x32\x37\x14\xa2\xd0\x6f\x77\x69\xf0\x61\x36\x43\x78\x0f\x12\xd4\x6a\xc7\xef\xf2\xc4\x10\x51\xef\xc0\x0d\x7c\xb2\x09\x47\x5e\xd2\xf6\x82\xc2\x03\xdb\xe1\x81\x4c\x04\xd1\x8e\xfa\xb1\xf8\x3b\xc0\x69\xe3\xcb\x52\x0b\xfc\xc2\x0c\x8a\xa7\xad\x9e\x4d\xbb\xb2\x14\xc3\x11\x46\x04\x85\x5b\xdd\xfa\xa6\xd7\xb6\x13\xaf\xde\x71\x9e\xba\xf5\x25\xdc\xdc\xfe\x6d\xfb\xcc\x0e\xa5\xe6\x9d\xfc\xe6\xe5\x40\x70\x99\xf0\xcd\xcd\xb3\x77\x0b\x8a\x13\xe6\x2f\xb9\x2c\x40\xde\x20\x42\x5d\x9f\xcd\x74\xd9\xae\x76\xd9\xc2\x96\x82\xee\xe5\xd5\x9d\x83\xf5\x37\xb4\x58\x06\x08\xac\xce\x7a\x65\x18\x70\xf7\x99\x4d\xe4\x5f\xb9\x38\xf0\x43\x6a\x6b\xd9\xa7\xca\xaa\xda\x5f\x28\x5e\x08\x56\xa4\xa4\x19\x1d\x48\x46\x2d\xc9\x54\xb5\xbe\x9d\x22\x0a\x4f\x7d\x38\x45\x03
\xeb\xdb\x0d\xb4\xf1\xad\x75\xf6\x6d\x3b\xa5\xef\x95\x6e\x43\x6d\xdb\x1f\x7c\x11\x20\xd9\xf9\xf5\xe4\xfd\x61\x97\xd1\xb9\xd6\xdc\x47\x64\x04\x67\xa8\x45\xdb\xed\x48\x5c\xd2\x43\xdb\xcf\x87\xb6\x7f\x3b\x82\x42\x61\x4b\x27\x38\xb8\x66\xbc\xf4\x3e\x53\xc7\x7f\xbb\x17\xcc\x3d\x87\x39\xed\x62\x04\x1d\xe1\x57\x04\x2e\x70\x30\x05\xe1\xfc\x01\x85\x63\xee\x30\x12\x2e\x04\x88\x85\x74\xbf\x65\x70\xc5\x73\x3f\xe4\x09\x86\x88\x50\xf2\xcf\x3f\x18\x7d\x9e\xbb\x38\x01\x66\x38\x9b\x59\xed\xc8\x74\xca\x6d\x6c\xad\x2f\x7e\xd2\x49\xc1\xb2\xaf\x10\x26\x21\x26\x5b\x5b\xdd\x5e\xb7\x67\xdd\xb6\x9f\xfc\xff\x00\x00\x00\xff\xff\xff\xa0\xf8\x0a\x1c\xa9\x03\x00") -func web_uiV2AssetsConsulUiE51248f3d8659994e198565dbadc4fcfJsBytes() ([]byte, error) { +func web_uiV2AssetsConsulUi61975faed99637b13431ce396e4422b4JsBytes() ([]byte, error) { return bindataRead( - _web_uiV2AssetsConsulUiE51248f3d8659994e198565dbadc4fcfJs, - "web_ui/v2/assets/consul-ui-e51248f3d8659994e198565dbadc4fcf.js", + _web_uiV2AssetsConsulUi61975faed99637b13431ce396e4422b4Js, + "web_ui/v2/assets/consul-ui-61975faed99637b13431ce396e4422b4.js", ) } -func web_uiV2AssetsConsulUiE51248f3d8659994e198565dbadc4fcfJs() (*asset, error) { - bytes, err := web_uiV2AssetsConsulUiE51248f3d8659994e198565dbadc4fcfJsBytes() +func web_uiV2AssetsConsulUi61975faed99637b13431ce396e4422b4Js() (*asset, error) { + bytes, err := web_uiV2AssetsConsulUi61975faed99637b13431ce396e4422b4JsBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/consul-ui-e51248f3d8659994e198565dbadc4fcf.js", size: 238468, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/consul-ui-61975faed99637b13431ce396e4422b4.js", size: 239900, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1067,7 +1067,7 @@ func web_uiV2AssetsFavicon12808e1368e84f412f6ad30279d849b1df9Png() (*asset, erro return nil, err } - info := 
bindataFileInfo{name: "web_ui/v2/assets/favicon-128-08e1368e84f412f6ad30279d849b1df9.png", size: 11154, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/favicon-128-08e1368e84f412f6ad30279d849b1df9.png", size: 11154, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1087,7 +1087,7 @@ func web_uiV2AssetsFavicon16x16672c31374646b24b235b9511857cdadePng() (*asset, er return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/favicon-16x16-672c31374646b24b235b9511857cdade.png", size: 821, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/favicon-16x16-672c31374646b24b235b9511857cdade.png", size: 821, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1107,7 +1107,7 @@ func web_uiV2AssetsFavicon196x19657be5a82d3da06c261f9e4eb972a8a3aPng() (*asset, return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/favicon-196x196-57be5a82d3da06c261f9e4eb972a8a3a.png", size: 37174, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/favicon-196x196-57be5a82d3da06c261f9e4eb972a8a3a.png", size: 37174, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1127,7 +1127,7 @@ func web_uiV2AssetsFavicon32x32646753a205c6a6db7f93d0d1ba30bd93Png() (*asset, er return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/favicon-32x32-646753a205c6a6db7f93d0d1ba30bd93.png", size: 2075, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/favicon-32x32-646753a205c6a6db7f93d0d1ba30bd93.png", size: 2075, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1147,7 +1147,7 @@ func 
web_uiV2AssetsFavicon672c31374646b24b235b9511857cdadePng() (*asset, error) return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/favicon-672c31374646b24b235b9511857cdade.png", size: 821, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/favicon-672c31374646b24b235b9511857cdade.png", size: 821, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1167,7 +1167,7 @@ func web_uiV2AssetsFavicon96x966f8f8393df02b51582417746da41b274Png() (*asset, er return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/favicon-96x96-6f8f8393df02b51582417746da41b274.png", size: 10171, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/favicon-96x96-6f8f8393df02b51582417746da41b274.png", size: 10171, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1187,7 +1187,7 @@ func web_uiV2AssetsFaviconIco() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/favicon.ico", size: 34494, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/favicon.ico", size: 34494, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1207,7 +1207,7 @@ func web_uiV2AssetsLoadingCylonPinkSvg() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/loading-cylon-pink.svg", size: 983, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/loading-cylon-pink.svg", size: 983, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1227,7 +1227,7 @@ func web_uiV2AssetsMstile144x144Ac561ffa84c7e8ce1fe68d70f1c16d1dPng() (*asset, e return nil, err } - info := 
bindataFileInfo{name: "web_ui/v2/assets/mstile-144x144-ac561ffa84c7e8ce1fe68d70f1c16d1d.png", size: 20027, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/mstile-144x144-ac561ffa84c7e8ce1fe68d70f1c16d1d.png", size: 20027, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1247,7 +1247,7 @@ func web_uiV2AssetsMstile150x1506b13ab220a09a9e72328a3b05d5b9eecPng() (*asset, e return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/mstile-150x150-6b13ab220a09a9e72328a3b05d5b9eec.png", size: 64646, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/mstile-150x150-6b13ab220a09a9e72328a3b05d5b9eec.png", size: 64646, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1267,7 +1267,7 @@ func web_uiV2AssetsMstile310x150Ccc673174b188a92f1e78bc25aa6f3f8Png() (*asset, e return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/mstile-310x150-ccc673174b188a92f1e78bc25aa6f3f8.png", size: 112362, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/mstile-310x150-ccc673174b188a92f1e78bc25aa6f3f8.png", size: 112362, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1287,7 +1287,7 @@ func web_uiV2AssetsMstile310x31049242d1935854126c10457d1cdb1762bPng() (*asset, e return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/mstile-310x310-49242d1935854126c10457d1cdb1762b.png", size: 201893, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/mstile-310x310-49242d1935854126c10457d1cdb1762b.png", size: 201893, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1307,7 +1307,7 
@@ func web_uiV2AssetsMstile70x7008e1368e84f412f6ad30279d849b1df9Png() (*asset, err return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/mstile-70x70-08e1368e84f412f6ad30279d849b1df9.png", size: 11154, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/mstile-70x70-08e1368e84f412f6ad30279d849b1df9.png", size: 11154, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1327,7 +1327,7 @@ func web_uiV2AssetsSafariPinnedTabSvg() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/safari-pinned-tab.svg", size: 3798, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/safari-pinned-tab.svg", size: 3798, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1347,7 +1347,7 @@ func web_uiV2AssetsVendorC3a9380433ef2f2efb4ed437d3b54b31Css() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/vendor-c3a9380433ef2f2efb4ed437d3b54b31.css", size: 5357, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/vendor-c3a9380433ef2f2efb4ed437d3b54b31.css", size: 5357, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1367,12 +1367,12 @@ func web_uiV2AssetsVendorFde82111a09b6c6f4c6d2c876b3dc13bJs() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v2/assets/vendor-fde82111a09b6c6f4c6d2c876b3dc13b.js", size: 1364694, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/assets/vendor-fde82111a09b6c6f4c6d2c876b3dc13b.js", size: 1364694, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _web_uiV2IndexHtml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x58\xdd\x72\xdb\x38\xb2\xbe\xcf\x53\xe0\xf0\x94\xaf\xce\x41\x0b\x68\xfc\xcf\x4a\xae\xf2\x3a\x9e\x99\x6c\x8d\xe3\x94\xe3\xa4\x66\xae\x52\x14\x09\x59\x9c\xa5\x44\x2d\x49\xcb\xb1\x9f\x7e\xab\x01\x4a\x96\x33\x99\xa9\xda\xda\x0b\x83\x4d\x74\xe3\xeb\xff\x06\xad\xf9\xff\xbc\xbd\xb9\xbc\xfb\xed\xc3\x15\x5b\x8f\x9b\xf6\xfc\xcd\x9c\x1e\xac\x6a\xcb\x61\x58\x14\x71\xb3\x8c\x3d\x6f\xbb\xb2\x6e\xb6\xf7\xc5\xf9\x1b\xc6\xe6\xeb\x58\xd6\x44\x30\x36\xdf\xc4\xb1\x64\xd5\xba\xec\x87\x38\x2e\x8a\x87\x71\xc5\x7d\x71\xca\x5a\x8f\xe3\x8e\xc7\x7f\x3d\x34\xfb\x45\xf1\x2b\xff\x74\xc1\x2f\xbb\xcd\xae\x1c\x9b\x65\x1b\x0b\x56\x75\xdb\x31\x6e\xc7\x45\xf1\xee\x6a\x11\xeb\xfb\x78\x38\x39\x36\x63\x1b\xcf\x2f\xbb\xed\xf0\xd0\xb2\xe5\x13\xfb\xb9\x1c\xd6\xcd\x65\xd7\xef\xe6\xb3\xcc\x3a\x51\xb0\x2d\x37\x71\x51\xd4\x71\xa8\xfa\x66\x37\x36\xdd\xf6\x04\xb6\xf8\xa3\xe0\xbe\x89\x8f\xbb\xae\x1f\x4f\xa4\x1e\x9b\x7a\x5c\x2f\xea\xb8\x6f\xaa\xc8\xd3\xcb\xff\xb3\x66\xdb\x8c\x4d\xd9\xf2\xa1\x2a\xdb\xb8\x90\xc5\xf9\x9b\x84\xf4\xe6\x14\xaa\x4a\xf6\xf1\x87\x66\x56\x75\xdb\x55\x73\x3f\x8b\xdb\x7d\xd3\x77\xdb\x4d\xdc\x9e\xc2\x9f\xb9\xbf\x9f\x21\x6e\xba\xfa\xa1\x8d\x1f\xfa\xb8\x6a\xbe\x9e\x21\x9e\xa9\x8b\x33\xc4\x23\x02\xed\xe0\xe5\x19\xe2\x09\xc4\x51\x6a\xd7\x77\xf5\x43\x45\xae\x1d\xc5\xfa\xae\x1b\x3f\xdd\xfe\x72\x14\x99\x3d\x34\xb3\x23\xb3\xed\xaa\x92\xa4\xef\x9e\x76\xf1\x28\x51\x3e\x8c\xdd\x51\xe2\x8a\x52\x7a\xf5\xfe\xf3\xc4\x4d\x06\xfe\x78\x75\x71\xf7\xe9\xf6\xea\xe3\xe9\x5e\x3d\xf0\x66\xb3\xeb\xbb\x7d\xac\x79\xf9\x7b\x39\x19\x3e\xf6\x0f\xf1\xcc\xbd\x9d\xa0\x7e\xbd\xbb\x7a\xff\xf6\xcb\x87\xdb\x9b\xbb\x1b\xaa\xa0\x57\xe7\xdf\x96\xe3\x64\xc1\xaa\x6c\x87\x74\xe8\x70\xee\xe2\xc3\x87\x53\x49\x8a\xe8\x5f\x84\x65\x1f\xfb\x61\xf2\x3f\x49\x20\x08\x10\xff\x57\xc6\xe8\x85\x0a\x96\xb6\x0f\xb8\x7d\x1c\x9a\xe7\xf8\x31\xf6\x94\xcd\xb7\x71\x55\x3e\xb4\xe3\x70\xaa\xa9\xd9\xfe\x1e\x53\x30\x7f\x2c\xab\xb1\xeb\x9b\x78\xe0\x1a\xe2\x52\x79\x1c\xb5\x52\x0a\xfb\xae\x6d\x63\x7f\xb2\xb5\xd9
\x75\xdb\x29\x3b\xe6\xc5\x9d\xcb\x9b\xf7\x1f\x3f\xfd\xf2\xe5\xa7\x77\x77\x5f\x3e\xfe\x7c\xf1\x12\xf5\xc9\x42\x73\x04\x98\x04\x3f\x5f\xdd\x7e\x7c\x77\xf3\xfe\x28\x28\x01\x41\x7c\x2b\xf4\xf6\xe6\xf2\xd3\xf5\xd5\xfb\xbb\x8b\xbb\x77\x37\xef\xbf\x9c\xe6\x9b\xba\x6a\x38\x53\x17\xb3\xd9\xe3\xe3\x23\xe4\x78\x41\xd3\xcd\xea\xae\x1a\xbe\x45\xb9\xbc\xf9\xf0\xdb\xed\xbb\x9f\x7e\xbe\xfb\x0b\x84\x35\x35\x58\xd5\xf5\x3b\xa8\xba\xcd\x9f\x03\xfc\x76\x75\x71\xfb\x92\x03\x21\xfd\x51\x74\xd9\x77\x8f\x43\xec\x9b\xd5\xd3\x69\xac\xc7\x38\x1c\x82\xff\xaa\x68\xe2\x57\x6a\xc1\x8b\xdd\xae\x6d\x72\xa9\xfe\xd4\x76\xcb\xb2\x7d\x5d\x2a\x05\x9b\x4d\xed\xdb\x36\xdb\x7f\xb2\x3e\xb6\x8b\xa2\xa9\xa8\xbf\xc7\xa7\x5d\x5c\x14\xcd\xa6\xbc\x8f\xb3\xdd\xf6\xbe\x60\xeb\x3e\xae\x16\x05\x35\x41\x39\x0c\x71\x1c\x66\xab\x72\x4f\xa2\x5c\xe1\x57\x85\xdc\x6a\xeb\x8c\x2a\x51\x98\xca\x96\xb6\x5e\xba\x55\x50\xb5\xa8\xe5\xb2\x54\x62\x59\x07\x05\x09\x84\xea\x66\x58\x14\xe9\x48\xf1\x5f\x6b\x96\xf6\xab\xb4\xdc\x3a\xac\x94\x54\x4e\x5b\x6d\x97\xa8\x97\xa8\xcc\x32\x18\x29\xbd\x71\x55\x5d\xd6\xf1\x95\xe6\x74\xe4\x30\x69\xb2\xea\x66\x3b\xc6\xfb\xbe\x19\x9f\x16\x45\x91\xed\x18\xc6\xa7\x36\x0e\xeb\x18\xc7\xef\x28\xdf\xc7\x6d\xdd\xf5\xbc\x52\x65\x50\x5e\x68\xa5\xe2\x0a\x57\x18\x57\x4b\x1d\x6b\xad\x5c\xad\x96\x46\x2f\x95\x84\x6a\x18\x5e\x79\xf8\x1f\xaa\x39\xf6\x27\x17\xae\xae\x4b\x25\xeb\xe8\xb4\x58\xc9\xd2\xac\x7c\x6d\x6d\x25\xad\xad\x9d\x37\xa5\x0f\x93\xa6\x3c\x3b\x19\x9b\xcf\x0e\x37\xc7\x7c\xd9\xd5\x4f\x93\x09\xdb\x2e\xcf\xee\xfc\x9a\xb6\xea\x66\xcf\x92\x09\x8b\x62\x53\xf6\xf7\xcd\xf6\x07\x26\x18\x8d\xb0\xbf\x15\x2f\x52\x49\x72\x8d\xe7\xff\x28\xf7\xe5\xc7\x84\xc0\x6e\xe9\xa6\xe9\x63\x3d\x9f\xad\xf1\x1b\xc1\xdd\xf9\x87\x36\x96\x43\x64\x71\x5b\x2e\xdb\xc8\x4e\x4e\x35\x5b\xf6\xd4\x3d\xf4\xec\x31\x2e\xd9\x54\xc8\x6c\xec\xd8\xc3\x10\xd9\x74\x07\x7d\x7a\x07\xf3\xd9\xee\xc4\xc0\x59\xdd\xec\x27\xf3\x67\xaf\xed\xcf\x9b\xc3\xfe\x9e\xe5\x7b\xa5\x90\xd6\x17\x6c\x1d\x9b\xfb\xf5\xb8\x28\x8c\x2a\xd8\xd7\x4d\xbb\x1d\x16\x05\x75\xe0\x0f\xb9\xfd
\x1e\x15\x74\xfd\xfd\x0c\x85\x10\xb3\x61\x7f\x5f\x9c\xcf\xef\xd9\xaa\x69\xdb\x45\xf1\xbf\x41\x86\x1f\x2f\x7c\x91\x5e\x79\xff\x40\x11\x89\xfb\xb8\xed\xea\xba\x38\x9f\xef\xca\x71\xcd\xea\x45\x71\x8d\x16\x84\xf3\x4c\x21\x48\x2c\x0d\x18\x6f\x59\x5e\x05\x93\x4c\x12\xed\x1c\x37\x60\x42\xc8\xf4\xb4\x0a\xe2\xf3\xc3\x8e\xbd\x56\x0e\x84\x08\x0c\x03\x28\xf4\x25\x82\xb1\x2c\x2d\x19\x85\x48\x7e\xd8\x34\x72\x5a\x33\xc6\x51\xf6\x5a\x5b\x08\xd2\x32\x25\xc1\xda\xf0\x07\x0c\x10\x46\x72\x40\x59\x71\x10\xe8\x41\x58\x9b\x09\xa9\x38\xb1\x00\xe5\xb5\xd6\x60\xbc\x67\x68\x40\xd8\x6c\x84\x61\x79\x3d\xa8\xb2\x0e\x39\x84\x80\xc9\x02\x3f\xad\x99\x09\x52\x20\x47\xf0\x3a\x9f\xd1\xd3\x9a\x98\x4c\x83\x75\x16\x9c\xd5\x15\x08\x87\xe4\x23\x08\x4f\x76\x3a\x10\xe8\x98\x2c\x11\xb4\x55\x2c\xaf\x19\x4f\x42\x40\x82\x12\x4e\x5d\x1b\x05\x41\x21\x79\xa6\x05\x92\x61\xda\xb1\xbc\x1e\x0c\x0b\x59\x34\x07\x28\x4c\xeb\x81\x29\xac\x26\x91\xec\xcd\xeb\xa3\x0c\x21\x68\x4f\x32\x2e\xed\x4f\xcb\x81\x27\x91\x9c\x74\xd2\x82\xb5\x9a\xfe\x12\x47\x50\xc4\x00\xd1\x5f\x1b\x09\x9e\xce\x18\x90\x42\x7d\x27\x6d\x52\x10\x76\xd0\xa6\x02\xa1\x34\xa0\xf4\x20\xb4\x02\xad\x02\xf9\x0d\xd6\xf8\xef\x7a\x23\x15\x99\x8a\xde\x5d\xeb\x00\xc1\x68\xa6\x05\x48\xf9\x47\x05\xa0\xa4\xe6\x12\x84\x72\x29\xab\xa0\x28\xa9\x12\x3d\x38\x94\x9c\x98\x2c\x31\xaf\xb5\x87\xe0\x34\x93\x16\x7c\xf8\x0e\x4a\x70\x5c\x81\xf6\xae\x02\xb4\x1a\xb4\xb6\xa0\x9c\xa1\x78\x81\x92\x8e\x49\xd0\xee\x75\x39\xe5\xf4\xa0\x27\xb3\x85\xf0\xc5\xec\x75\x3f\x18\xb4\xcc\x20\x58\xa1\x2a\x2e\x35\xa8\xa0\x98\xe0\xd4\x27\x96\x4b\x09\xc6\xba\xe9\x05\x0d\x78\x65\x2f\x41\x5b\xcb\x24\x82\x0e\x9e\x1e\x52\x51\xb2\x25\xcb\x40\x44\x96\x68\x20\x28\xcb\xa6\x47\xce\x8d\x4c\x87\x99\x01\x25\x5c\xcb\x15\x48\xeb\x98\x06\x29\xdd\x05\x0a\x08\x16\xd9\xf4\x10\x59\xde\x81\x12\x9a\x79\x40\x75\x29\x05\xc8\xa0\x99\x94\xe0\xa4\x62\x06\x1c\x93\x1e\xa8\x3b\x9d\xd4\xa4\xd5\x59\x45\xd1\x94\x9a\x39\xf0\x16\x99\x06\xed\x49\x9d\x30\x74\xc6\x04\x12\x37\x4a\x97\xdf\xd3\x83\x12\x7c\xa0\x04\xa2\x6b\xc9\x26\x4f\x36\xa1\x4a\x1e
\x78\xc9\xa6\xc7\x14\xc1\xa3\x07\xf2\xf9\xda\x4a\xa6\x04\x48\xf3\x59\x3a\x2a\xc8\x4a\x70\x9d\xd1\xa9\xa1\xb8\x03\x6f\x58\x00\x1d\x26\x12\x41\x7b\x2a\x45\x03\x42\x7b\x50\x68\x99\x03\x34\x08\x3e\x98\x96\x83\xb1\xd4\x70\x5a\xa9\x8a\x2c\xb1\x9a\x83\x16\x96\x6b\xb0\xde\x73\xb0\x41\x72\x0b\x46\x65\x8a\xd2\x6e\x19\x29\xb3\xc2\x33\x09\x88\x13\xa9\xa9\x74\xf7\x52\x80\x96\x58\x09\x32\xc3\x13\x5f\x22\x66\x16\x75\xb4\x70\x13\x2d\xc1\x6b\x6a\x5b\x0d\xca\x3a\x0e\xe8\x35\x3b\xaa\x68\xc1\xe4\x18\x66\x73\x50\x68\x30\x8e\x6b\x70\xd6\x93\xb9\xfc\x68\xf8\xa5\x55\x69\x78\x28\xcf\x28\x18\x06\xa4\x44\x76\x08\xcb\xf3\x46\x59\xf0\x82\xba\xa7\x22\x3d\xc2\x7a\x2e\xc1\x0b\xa4\x1c\x49\xee\x41\x07\x95\x49\x0b\x36\x30\x91\x76\x34\x57\xe0\x34\x4e\x34\x71\xf7\xdc\x80\x10\x98\xa2\x2b\x2c\x95\xb6\x17\x2a\x71\xd8\x8b\x10\xcb\x10\x2c\x83\x12\x84\x62\x2f\x0a\xf6\x84\xa0\x9e\x37\x5c\x83\xf7\x34\xd9\xa5\x35\x95\x20\xcf\x94\xa3\x56\x14\x48\x41\x95\x96\x2b\xb0\xc2\x66\x7a\xc8\x2f\x14\x3f\x3f\x91\x69\x7f\x6f\x68\x12\xa6\xf0\xa2\x22\x6b\x84\xc0\xc4\x71\xec\x45\xca\x0d\x19\x89\x0e\xb3\x17\x50\x47\xbe\x28\xf4\xcf\x1b\x34\xe0\x02\x75\x8e\xb1\x7e\x9d\xcd\xfa\x8c\x2a\x0d\x5b\x41\x16\x19\xc7\x41\xd3\x98\x30\x9e\x50\x8c\x55\x47\x1a\x85\x64\x82\x2b\x50\x84\xa1\x90\xa2\x93\xd2\x6c\xad\xdf\x4b\xf2\xdc\x1d\x00\xa5\x03\xe7\xcd\x5a\x81\x43\xdb\xa6\x86\x20\xa1\x0a\x41\x27\x68\x44\xba\xd7\x54\x9a\xa3\xa9\x6f\xe4\x81\x56\x80\xda\xa4\xda\xd0\x82\xe6\x85\x92\x7e\xa2\x0d\x8d\xce\x3d\x0d\x08\xe3\x9e\x37\xd2\x03\x5a\x6e\xc0\x39\x53\x09\xa6\xc0\x23\x99\x29\x2d\x32\x0b\xd2\x23\x77\x20\xcc\x81\x46\x10\x5e\xa5\xaa\x0d\x48\xce\x79\xcf\x2d\xb8\x24\xaf\x42\x0b\x96\x82\xec\x3c\x56\x12\x2c\x0d\x5b\x4f\x71\xf3\x82\x3a\x47\x93\x56\x99\x88\xd4\x1a\xd4\xb2\xd4\x4b\x09\x64\x22\x25\x84\x34\xb1\x52\x37\x00\xd2\x86\x57\x14\x73\x61\x78\x9a\x6a\xb9\x7a\x53\x42\x52\x0e\xa8\xe1\xf5\x44\x59\x40\x54\x29\xa6\xc1\x53\xc2\x1d\x2a\x72\x4a\x19\xea\xd0\x80\x13\x9d\xea\x2e\x05\x45\x5a\x0b\xa8\x93\x59\x26\xb5\x0b\x59\x42\x45\x40\x77\x06\x97\xf4\x92\x1a\x8a\x53
\x73\x58\xea\xed\x40\x9a\xac\xcf\x24\x4d\x1a\x4d\xd3\x95\x2c\x4f\x81\xc8\xa3\x41\x82\x13\x74\x31\x53\xa2\x1d\x48\x4c\x1b\x74\x2d\xba\x74\x31\x93\x3e\xad\x03\x8d\x76\x4b\x74\xa0\xcc\xc8\x60\x33\x69\xe9\x1a\x78\xde\xa4\xc4\xd0\x00\x17\x4a\xad\x53\x15\x50\x4d\x78\x55\x11\xac\x30\x0e\xb4\x96\x49\x01\xf9\x69\xac\x3a\xd2\x28\x68\x1c\xa6\xaa\xe2\xe0\x14\xcd\x58\xe1\x29\x5e\xd6\xfa\x43\x21\x11\x9e\xdf\xcb\x40\x4d\xbd\xe6\xb9\xac\xc8\x75\x99\xc4\x1c\x8d\x0a\xad\x75\x9e\x48\x06\x0c\x86\x74\xbd\xe7\x06\xc5\x89\xa6\xd2\xca\xc3\x4b\x0b\xc7\x53\x69\x4d\x74\x2a\xad\x49\xd5\xf3\x35\x8d\x61\xe5\xc0\x04\x95\xab\xd9\x7f\x0e\x14\xea\x5f\x68\x3f\xec\xd1\x13\xe7\x99\x2e\xb1\xd9\xfd\xf9\x9c\xbe\x02\xa7\xcf\xcb\xfc\x6d\xc9\x86\xbe\xfa\xde\xa7\xfe\xaa\x8e\x1e\xa5\x94\xa5\x08\x4b\x5b\xd9\x95\xae\x6c\x8d\x95\x77\x76\xa9\xea\x4a\xaa\x25\xfc\x3e\x14\x04\x77\xf2\x81\xfa\x67\x88\x2f\x5f\xf5\xd1\x48\xd4\x7e\xa5\x6a\x6f\x4d\x08\x41\x47\x19\xbc\xb1\xa6\x5e\x96\x75\xa5\x57\xd5\xea\x1b\xd0\x37\xc7\x4f\xf6\xa6\x3e\xfc\x5c\xb4\x2c\x87\xa6\xe2\x75\xdf\xed\xea\xee\x71\xcb\x1f\xbb\x7e\xb3\xee\xda\x48\xc7\xa6\x2f\xe7\xf9\x2c\xff\x0b\x30\x9f\xe5\xdf\x9c\xfe\x1d\x00\x00\xff\xff\xf4\x8e\x19\x4e\x84\x12\x00\x00") +var _web_uiV2IndexHtml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x58\xdb\x6e\x23\x37\xd2\xbe\x9f\xa7\xe0\xdf\x3f\x7c\xb7\x2c\x91\xc5\x73\x56\x32\xe0\xf5\x38\xc9\x2c\xe2\xb1\xe1\xf1\x0c\x92\xab\x41\x1f\x28\xa9\xb3\x2d\xb5\xb6\xbb\x2d\x8f\xfd\xf4\x8b\x62\xb7\x64\x39\xe3\x04\x58\xec\x85\xa9\x6a\x56\xf1\xab\x73\x91\xf0\xfc\xff\xde\xdf\x5c\xde\xff\x76\x7b\xc5\xd6\xc3\xa6\x39\x7f\x37\xa7\x1f\x56\x36\x79\xdf\x2f\xb2\xb8\x29\x62\xc7\x9b\x36\xaf\xea\xed\x2a\x3b\x7f\xc7\xd8\x7c\x1d\xf3\x8a\x08\xc6\xe6\x9b\x38\xe4\xac\x5c\xe7\x5d\x1f\x87\x45\xf6\x30\x2c\xb9\xcf\x4e\x59\xeb\x61\xd8\xf1\xf8\xef\x87\x7a\xbf\xc8\x7e\xe5\x9f\x2f\xf8\x65\xbb\xd9\xe5\x43\x5d\x34\x31\x63\x65\xbb\x1d\xe2\x76\x58\x64\x1f\xae\x16\xb1\x5a\xc5\xc3\xc9\xa1\x1e\x9a\x78\x7e\xd9\x6e\xfb\x87\x86\x15\x4f\xec\xe7\xbc\x5f\xd7\x97\x6d\xb7\x9b\xcf\x46\xd6\x89\x82\x6d\xbe\x89\x8b\xac\x8a\x7d\xd9\xd5\xbb\xa1\x6e\xb7\x27\xb0\xd9\xf7\x82\xfb\x3a\x3e\xee\xda\x6e\x38\x91\x7a\xac\xab\x61\xbd\xa8\xe2\xbe\x2e\x23\x4f\x1f\x7f\x63\xf5\xb6\x1e\xea\xbc\xe1\x7d\x99\x37\x71\x21\xb3\xf3\x77\x09\xe9\xdd\x29\x54\x99\xec\xe3\x0f\xf5\xac\x6c\xb7\xcb\x7a\x35\x8b\xdb\x7d\xdd\xb5\xdb\x4d\xdc\x9e\xc2\x9f\xb9\x7f\x9c\x21\x6e\xda\xea\xa1\x89\xb7\x5d\x5c\xd6\xdf\xce\x10\xcf\xd4\xc5\x19\xe2\x11\x81\x76\xf0\xf2\x0c\xf1\x04\xe2\x28\xb5\xeb\xda\xea\xa1\x24\xd7\x8e\x62\x5d\xdb\x0e\x9f\xef\x7e\x39\x8a\xcc\x1e\xea\xd9\x91\xd9\xb4\x65\x4e\xd2\xf7\x4f\xbb\x78\x94\xc8\x1f\x86\xf6\x28\x71\x45\x29\xbd\xfa\xf8\x65\xe2\x26\x03\x7f\xbc\xba\xb8\xff\x7c\x77\xf5\xe9\x74\xaf\xea\x79\xbd\xd9\x75\xed\x3e\x56\x3c\xff\x3d\x9f\x0c\x1f\xba\x87\x78\xe6\xde\x4f\x50\xbf\xde\x5f\x7d\x7c\xff\xf5\xf6\xee\xe6\xfe\x86\x2a\xe8\xd5\xf9\xf7\xf9\x30\x59\xb0\xcc\x9b\x3e\x1d\x3a\x9c\xbb\xb8\xbd\x3d\x95\xa4\x88\xfe\x45\x58\xf6\xb1\xeb\x27\xff\x93\x04\x82\x00\x41\x5f\x07\xb8\x2e\xf6\xf5\x73\xfc\x14\x3b\x4a\xe2\xfb\xb8\xcc\x1f\x9a\xa1\x3f\x55\x50\x6f\x7f\x8f\x29\x86\x3f\xe6\xe5\xd0\x76\x75\x3c\x70\x0d\x71\xa9\x2a\x8e\xca\x28\x73\x5d\xdb\x34\xb1\x3b\xd9\xda\xec\xda\xed\x94\x14\xf3\xe2\xc5\xe5\xcd
\xc7\x4f\x9f\x7f\xf9\xfa\xd3\x87\xfb\xaf\x9f\x7e\xbe\x38\x9a\x67\xbd\x54\x79\x08\xe2\x78\x7e\x92\xfb\x72\x75\xf7\xe9\xc3\xcd\xc7\xa3\xdc\x5e\x82\x04\xc1\x9d\xb5\x7c\xf5\x67\x67\xde\xdf\x5c\x7e\xbe\xbe\xfa\x78\x7f\x71\xff\xe1\xe6\xe3\xd7\xd3\xa4\x53\x6b\xf5\x67\xea\x62\x36\x7b\x7c\x7c\x84\x31\x68\x50\xb7\xb3\xaa\x2d\xfb\x3f\xa2\x5c\xde\xdc\xfe\x76\xf7\xe1\xa7\x9f\xef\xff\x02\x61\x4d\x5d\x56\xb6\xdd\x0e\xca\x76\xf3\xe7\x00\xbf\x5d\x5d\xdc\xbd\x24\x42\x48\x7f\x14\x2d\xba\xf6\xb1\x8f\x5d\xbd\x7c\x3a\x8d\xfc\x10\xfb\x43\x2a\x5e\x55\x4e\xfc\x46\x7d\x78\xb1\xdb\x35\xf5\x58\xaf\x3f\x35\x6d\x91\x37\xaf\xeb\x25\x63\xb3\xa9\x87\x9b\x7a\xfb\x2f\xd6\xc5\x66\x91\xd5\x25\x35\xf9\xf0\xb4\x8b\x8b\xac\xde\xe4\xab\x38\xdb\x6d\x57\x19\x5b\x77\x71\xb9\xc8\xa8\x13\xf2\xbe\x8f\x43\x3f\x5b\xe6\x7b\x12\xe5\x0a\xbf\x29\xe4\x56\x5b\x67\x54\x8e\xc2\x94\x36\xb7\x55\xe1\x96\x41\x55\xa2\x92\x45\xae\x44\x51\x05\x05\x09\x84\xaa\xa8\x5f\x64\xe9\x48\xf6\x3f\x6b\x96\xf6\x9b\xb4\xdc\x3a\x2c\x95\x54\x4e\x5b\x6d\x0b\xd4\x05\x2a\x53\x04\x23\xa5\x37\xae\xac\xf2\x2a\xbe\xd2\x9c\x8e\x1c\xc6\xcd\xa8\xba\xde\x0e\x71\xd5\xd5\xc3\xd3\x22\xcb\x46\x3b\xfa\xe1\xa9\x89\xfd\x3a\xc6\xe1\x0d\xe5\xfb\xb8\xad\xda\x8e\x97\x2a\x0f\xca\x0b\xad\x54\x5c\xe2\x12\xe3\xb2\xd0\xb1\xd2\xca\x55\xaa\x30\xba\x50\x12\xca\xbe\x7f\xe5\xe1\x7f\xa9\xe6\xd8\xa4\x5c\x89\xc2\x96\x79\xb9\x0c\xde\x46\xa3\x9d\x40\x1f\x84\x29\x4a\x53\x18\xef\x4b\x74\x7e\xd2\x34\x0e\x50\xc6\xe6\xb3\xc3\xf5\x31\x2f\xda\xea\x69\x32\x61\xdb\x8e\x03\x7c\xfc\x4c\x5b\x55\xbd\x67\xc9\x84\x45\xb6\xc9\xbb\x55\xbd\xfd\x81\x09\x46\x73\xec\xef\xd9\x8b\x54\x92\x5c\xe3\xf9\x3f\xf3\x7d\xfe\x29\x21\xb0\x3b\xba\x6e\xba\x58\xcd\x67\x6b\xfc\x83\xe0\xee\xfc\xb6\x89\x79\x1f\x59\xdc\xe6\x45\x13\xd9\xc9\xa9\x7a\xcb\x9e\xda\x87\x8e\x3d\xc6\x82\x4d\x85\xcc\x86\x96\x3d\xf4\x91\x4d\x17\xd1\xe7\x0f\x30\x9f\xed\x4e\x0c\x9c\x55\xf5\x7e\x32\x7f\xf6\xda\xfe\x79\xbf\x5f\xb1\xf1\x5e\xc9\xa4\xf5\x19\x5b\xc7\x7a\xb5\x1e\x16\x99\x51\x19\xfb\xb6\x69\xb6\xfd\x22\xa3\xe6\xfb\x61\xec\xbc\x47\x05\x6d\xb7
\x9a\xa1\x10\x62\xd6\xef\x57\xd9\xf9\x7c\xc5\x96\x75\xd3\x2c\xb2\xff\x0f\x32\xfc\x78\xe1\xb3\xf4\xc9\xbb\x07\x0a\x46\xdc\xc7\x6d\x5b\x55\xd9\xf9\x7c\x97\x0f\x6b\x56\x2d\xb2\x6b\xb4\x20\x9c\x67\x0a\x41\x62\x6e\xc0\x78\xcb\xc6\x55\x30\xc9\x24\xd1\xce\x71\x03\x26\x84\x91\x9e\x56\x41\x7c\x7e\xd8\xb1\xd7\xca\x81\x10\x81\x61\x00\x85\x3e\x47\x30\x96\xa5\x65\x44\x21\x92\x1f\x36\x8d\x9c\xd6\x11\xe3\x28\x7b\xad\x2d\x04\x69\x99\x92\x60\x6d\xf8\x0e\x03\x84\x91\x1c\x50\x96\x1c\x04\x7a\x10\xd6\x8e\x84\x54\x9c\x58\x80\xf2\x5a\x6b\x30\xde\x33\x34\x20\xec\x68\x84\x61\xe3\x7a\x50\x65\x1d\x72\x08\x01\x93\x05\x7e\x5a\x47\x26\x48\x81\x1c\xc1\xeb\xf1\x8c\x9e\xd6\xc4\x64\x1a\xac\xb3\xe0\xac\x2e\x41\x38\x24\x1f\x41\x78\xb2\xd3\x81\x40\xc7\x64\x8e\xa0\xad\x62\xe3\x3a\xe2\x49\x08\x48\x50\xc2\xa9\x6b\xa3\x20\x28\x24\xcf\xb4\x40\x32\x4c\x3b\x36\xae\x07\xc3\xc2\x28\x3a\x06\x28\x4c\xeb\x81\x29\xac\x26\x91\xd1\x9b\xd7\x47\x19\x42\xd0\x9e\x64\x5c\xda\x9f\x96\x03\x4f\x22\x39\xe9\xa4\x05\x6b\x35\xfd\x25\x8e\xa0\x88\x01\xa2\xbf\x36\x12\x3c\x9d\x31\x20\x85\x7a\x23\x6d\x52\x10\x76\xd0\xa6\x04\xa1\x34\xa0\xf4\x20\xb4\x02\xad\x02\xf9\x0d\xd6\xf8\x37\xbd\x91\x8a\x4c\x45\xef\xae\x75\x80\x60\x34\xd3\x02\xa4\xfc\x5e\x01\x28\xa9\xb9\x04\xa1\x5c\xca\x2a\x28\x4a\xaa\x44\x0f\x0e\x25\x27\x26\x4b\xcc\x6b\xed\x21\x38\xcd\xa4\x05\x1f\xde\x40\x09\x8e\x2b\xd0\xde\x95\x80\x56\x83\xd6\x16\x94\x33\x14\x2f\x50\xd2\x31\x09\xda\xbd\x2e\xa7\x31\x3d\xe8\xc9\x6c\x21\x7c\x36\x7b\xdd\x0f\x06\x2d\x33\x08\x56\xa8\x92\x4b\x0d\x2a\x28\x26\x38\xf5\x89\xe5\x52\x82\xb1\x6e\xfa\x40\x03\x5e\xd9\x4b\xd0\xd6\x32\x89\xa0\x83\xa7\x1f\xa9\x28\xd9\x92\x8d\x40\x44\xe6\x68\x20\x28\xcb\xa6\x9f\x31\x37\x32\x1d\x66\x06\x94\x70\x0d\x57\x20\xad\x63\x1a\xa4\x74\x17\x28\x20\x58\x64\xd3\x8f\x18\xe5\x1d\x28\xa1\x99\x07\x54\x97\x52\x80\x0c\x9a\x49\x09\x4e\x2a\x66\xc0\x31\xe9\x81\xba\xd3\x49\x4d\x5a\x9d\x55\x14\x4d\xa9\x99\x03\x6f\x91\x69\xd0\x9e\xd4\x09\x43\x67\x4c\x20\x71\xa3\x74\xfe\x96\x1e\x94\xe0\x03\x25\x10\x5d\x43\x36\x79\xb2\x09\x55\xf2\xc0\x4b\x36
\xfd\x4c\x11\x3c\x7a\x20\x9f\xaf\xad\x64\x4a\x80\x34\x5f\xa4\xa3\x82\x2c\x05\xd7\x23\x3a\x35\x14\x77\xe0\x0d\x0b\xa0\xc3\x44\x22\x68\x4f\xa5\x68\x40\x68\x0f\x0a\x2d\x73\x80\x06\xc1\x07\xd3\x70\x30\x96\x1a\x4e\x2b\x55\x92\x25\x56\x73\xd0\xc2\x72\x0d\xd6\x7b\x0e\x36\x48\x6e\xc1\xa8\x91\xa2\xb4\x5b\x46\xca\xac\xf0\x4c\x02\xe2\x44\x6a\x2a\xdd\xbd\x14\xa0\x25\x96\x82\xcc\xf0\xc4\x97\x88\x23\x8b\x3a\x5a\xb8\x89\x96\xe0\x35\xb5\xad\x06\x65\x1d\x07\xf4\x9a\x1d\x55\x34\x60\xc6\x18\x8e\xe6\xa0\xd0\x60\x1c\xd7\xe0\xac\x27\x73\xf9\xd1\xf0\x4b\xab\xd2\xf0\x50\x9e\x51\x30\x0c\x48\x89\xec\x10\x96\xe7\x8d\xb2\xe0\x05\x75\x4f\x49\x7a\x84\xf5\x5c\x82\x17\x48\x39\x92\xdc\x83\x0e\x6a\x24\x2d\xd8\xc0\x44\xda\xd1\x5c\x81\xd3\x38\xd1\xc4\xdd\x73\x03\x42\x60\x8a\xae\xb0\x54\xda\x5e\xa8\xc4\x61\x2f\x42\x6c\x84\x60\x23\x28\x41\x28\xf6\xa2\x60\x4f\x08\xea\x79\xc3\x35\x78\x4f\x93\x5d\x5a\x53\x0a\xf2\x4c\x39\x6a\x45\x81\x14\x54\x69\xb9\x02\x2b\xec\x48\xf7\xe3\x07\xc5\xcf\x4f\x64\xda\xdf\x1b\x9a\x84\x29\xbc\xa8\xc8\x1a\x21\x30\x71\x1c\x7b\x91\x72\xfd\x88\x44\x87\xd9\x0b\xa8\x23\x5f\x14\xfa\xe7\x0d\x1a\x70\x81\x3a\xc7\x58\xbf\x1e\xcd\xfa\x82\x2a\x0d\x5b\x41\x16\x19\xc7\x41\xd3\x98\x30\x9e\x50\x8c\x55\x47\x1a\x85\x64\x82\x2b\x50\x84\xa1\x90\xa2\x93\xd2\x6c\xad\xdf\x4b\xf2\xdc\x1d\x00\xa5\x03\xe7\xcd\x5a\x81\x43\xdb\xa4\x86\x20\xa1\x12\x41\x27\x68\x44\xba\xd7\x54\x9a\xa3\xa9\x6f\xe4\x81\x56\x80\xda\xa4\xda\xd0\x82\xe6\x85\x92\x7e\xa2\x0d\x8d\xce\x3d\x0d\x08\xe3\x9e\x37\xd2\x03\x5a\x6e\xc0\x39\x53\x0a\xa6\xc0\x23\x99\x29\x2d\x32\x0b\xd2\x23\x77\x20\xcc\x81\x46\x10\x5e\xa5\xaa\x0d\x48\xce\x79\xcf\x2d\xb8\x24\xaf\x42\x03\x96\x82\xec\x3c\x96\x12\x2c\x0d\x5b\x4f\x71\xf3\x82\x3a\x47\x93\x56\x99\x88\xd4\x1a\xd4\xb2\xd4\x4b\x09\x64\x22\x25\x84\x34\xb1\x52\x37\x00\xd2\x86\x57\x14\x73\x61\x78\x9a\x6a\x63\xf5\xa6\x84\xa4\x1c\x50\xc3\xeb\x89\xb2\x80\xa8\x52\x4c\x83\xa7\x84\x3b\x54\xe4\x94\x32\xd4\xa1\x01\x27\x3a\xd5\x5d\x0a\x8a\xb4\x16\x50\x27\xb3\x4c\x6a\x17\xb2\x84\x8a\x80\xee\x0c\x2e\xe9\x23\x35\x14\xa7\xe6
\xb0\xd4\xdb\x81\x34\x59\x3f\x92\x34\x69\x34\x4d\x57\xb2\x3c\x05\x62\x1c\x0d\x12\x9c\xa0\x8b\x99\x12\xed\x40\x62\xda\xa0\x6b\xd1\xa5\x8b\x99\xf4\x69\x1d\x68\xb4\x5b\xa2\x03\x65\x46\x06\x3b\x92\x96\xae\x81\xe7\x4d\x4a\x0c\x0d\x70\xa1\xd4\x3a\x55\x01\xd5\x84\x57\x25\xc1\x0a\xe3\x40\x6b\x99\x14\x90\x9f\xc6\xaa\x23\x8d\x82\xc6\x61\xaa\x2a\x0e\x4e\xd1\x8c\x15\x9e\xe2\x65\xad\x3f\x14\x12\xe1\xf9\xbd\x0c\xd4\xd4\x6b\x3e\x96\x15\xb9\x2e\x93\x98\xa3\x51\xa1\xb5\x1e\x27\x92\x01\x83\x21\x5d\xef\x63\x83\xe2\x44\x53\x69\x8d\xc3\x4b\x0b\xc7\x53\x69\x4d\x74\x2a\xad\x49\xd5\xf3\x35\x8d\x61\xe5\xc0\x04\x35\x56\xb3\xff\x12\x28\xd4\xbf\xd0\x7e\xd8\xa3\x27\xce\x33\x5d\x62\xb3\xd5\xf9\x9c\x5e\x81\x87\x97\xe4\xf8\x3c\xed\xbb\xf2\xad\x57\xfe\xb2\x8a\x1e\xa5\x94\xb9\x08\x85\x2d\xed\x52\x97\xb6\xc2\xd2\x3b\x5b\xa8\xaa\x94\xaa\x80\xdf\xfb\x8c\xe0\x5e\xbd\x4d\xdf\x46\x7c\x79\xd0\x5b\x19\x9c\x59\xe6\xb1\x0a\xc1\x2a\x57\x48\xa5\x95\x2c\xa3\x0a\x36\x6a\x8d\x58\xe8\x3f\x80\xbe\x3b\xbe\xd6\xeb\xea\xf0\xef\xa2\x22\xef\xeb\x92\x57\x5d\xbb\xab\xda\xc7\x2d\x7f\x6c\xbb\xcd\xba\x6d\x22\x1d\x9b\x1e\xcd\xf3\xd9\xf8\xfa\x9f\xcf\xc6\xff\x39\xfd\x27\x00\x00\xff\xff\x14\xd8\x7c\xa0\x84\x12\x00\x00") func web_uiV2IndexHtmlBytes() ([]byte, error) { return bindataRead( @@ -1387,7 +1387,7 @@ func web_uiV2IndexHtml() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v2/index.html", size: 4740, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/index.html", size: 4740, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1407,7 +1407,7 @@ func web_uiV2RobotsTxt() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "web_ui/v2/robots.txt", size: 51, mode: os.FileMode(420), modTime: time.Unix(1529533038, 0)} + info := bindataFileInfo{name: "web_ui/v2/robots.txt", size: 51, mode: os.FileMode(420), modTime: time.Unix(1529955918, 0)} a := &asset{bytes: bytes, info: 
info} return a, nil } @@ -1508,8 +1508,8 @@ var _bindata = map[string]func() (*asset, error){ "web_ui/v2/assets/apple-touch-icon-76x76-c5fff53d5f3e96dbd2fe49c5cc472022.png": web_uiV2AssetsAppleTouchIcon76x76C5fff53d5f3e96dbd2fe49c5cc472022Png, "web_ui/v2/assets/apple-touch-icon-d2b583b1104a1e6810fb3984f8f132ae.png": web_uiV2AssetsAppleTouchIconD2b583b1104a1e6810fb3984f8f132aePng, "web_ui/v2/assets/consul-logo-707625c5eb04f602ade1f89a8868a329.png": web_uiV2AssetsConsulLogo707625c5eb04f602ade1f89a8868a329Png, - "web_ui/v2/assets/consul-ui-07dda31de740f1a5f8d66c166d785a89.css": web_uiV2AssetsConsulUi07dda31de740f1a5f8d66c166d785a89Css, - "web_ui/v2/assets/consul-ui-e51248f3d8659994e198565dbadc4fcf.js": web_uiV2AssetsConsulUiE51248f3d8659994e198565dbadc4fcfJs, + "web_ui/v2/assets/consul-ui-30b6cacf986e547028905bc5b588c278.css": web_uiV2AssetsConsulUi30b6cacf986e547028905bc5b588c278Css, + "web_ui/v2/assets/consul-ui-61975faed99637b13431ce396e4422b4.js": web_uiV2AssetsConsulUi61975faed99637b13431ce396e4422b4Js, "web_ui/v2/assets/favicon-128-08e1368e84f412f6ad30279d849b1df9.png": web_uiV2AssetsFavicon12808e1368e84f412f6ad30279d849b1df9Png, "web_ui/v2/assets/favicon-16x16-672c31374646b24b235b9511857cdade.png": web_uiV2AssetsFavicon16x16672c31374646b24b235b9511857cdadePng, "web_ui/v2/assets/favicon-196x196-57be5a82d3da06c261f9e4eb972a8a3a.png": web_uiV2AssetsFavicon196x19657be5a82d3da06c261f9e4eb972a8a3aPng, @@ -1622,8 +1622,8 @@ var _bintree = &bintree{nil, map[string]*bintree{ "apple-touch-icon-76x76-c5fff53d5f3e96dbd2fe49c5cc472022.png": &bintree{web_uiV2AssetsAppleTouchIcon76x76C5fff53d5f3e96dbd2fe49c5cc472022Png, map[string]*bintree{}}, "apple-touch-icon-d2b583b1104a1e6810fb3984f8f132ae.png": &bintree{web_uiV2AssetsAppleTouchIconD2b583b1104a1e6810fb3984f8f132aePng, map[string]*bintree{}}, "consul-logo-707625c5eb04f602ade1f89a8868a329.png": &bintree{web_uiV2AssetsConsulLogo707625c5eb04f602ade1f89a8868a329Png, map[string]*bintree{}}, - 
"consul-ui-07dda31de740f1a5f8d66c166d785a89.css": &bintree{web_uiV2AssetsConsulUi07dda31de740f1a5f8d66c166d785a89Css, map[string]*bintree{}}, - "consul-ui-e51248f3d8659994e198565dbadc4fcf.js": &bintree{web_uiV2AssetsConsulUiE51248f3d8659994e198565dbadc4fcfJs, map[string]*bintree{}}, + "consul-ui-30b6cacf986e547028905bc5b588c278.css": &bintree{web_uiV2AssetsConsulUi30b6cacf986e547028905bc5b588c278Css, map[string]*bintree{}}, + "consul-ui-61975faed99637b13431ce396e4422b4.js": &bintree{web_uiV2AssetsConsulUi61975faed99637b13431ce396e4422b4Js, map[string]*bintree{}}, "favicon-128-08e1368e84f412f6ad30279d849b1df9.png": &bintree{web_uiV2AssetsFavicon12808e1368e84f412f6ad30279d849b1df9Png, map[string]*bintree{}}, "favicon-16x16-672c31374646b24b235b9511857cdade.png": &bintree{web_uiV2AssetsFavicon16x16672c31374646b24b235b9511857cdadePng, map[string]*bintree{}}, "favicon-196x196-57be5a82d3da06c261f9e4eb972a8a3a.png": &bintree{web_uiV2AssetsFavicon196x19657be5a82d3da06c261f9e4eb972a8a3aPng, map[string]*bintree{}}, diff --git a/version/version.go b/version/version.go index 7f7c097f0..1daac796c 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ var ( // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
- VersionPrerelease = "beta4" + VersionPrerelease = "" ) // GetHumanVersion composes the parts of the version in a way that's suitable From aca96840b21087d3672ef9214850898c0b81f581 Mon Sep 17 00:00:00 2001 From: mkeeler Date: Mon, 25 Jun 2018 20:15:14 +0000 Subject: [PATCH 617/627] Update Consul version for the website --- website/config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/config.rb b/website/config.rb index eca59c546..d2fb815bc 100644 --- a/website/config.rb +++ b/website/config.rb @@ -2,7 +2,7 @@ set :base_url, "https://www.consul.io/" activate :hashicorp do |h| h.name = "consul" - h.version = "1.1.0" + h.version = "1.2.0" h.github_slug = "hashicorp/consul" end From 26b611dd9e86c5226a1acede1267550dc058a3c3 Mon Sep 17 00:00:00 2001 From: mkeeler Date: Mon, 25 Jun 2018 20:30:43 +0000 Subject: [PATCH 618/627] Remove empty file --- key.pem | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 key.pem diff --git a/key.pem b/key.pem deleted file mode 100644 index e69de29bb..000000000 From 00afb89d957424f43cda6e6732718dcd9f209066 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Tue, 26 Jun 2018 00:34:18 -0700 Subject: [PATCH 619/627] correct README --- README.md | 105 +++++++++++++++++++++++++++++------------------------- 1 file changed, 57 insertions(+), 48 deletions(-) diff --git a/README.md b/README.md index 1d7c55f37..1e29e765f 100644 --- a/README.md +++ b/README.md @@ -1,66 +1,75 @@ -**This is a temporary README. 
We'll restore the old README prior to PR upstream.** +# Consul [![Build Status](https://travis-ci.org/hashicorp/consul.svg?branch=master)](https://travis-ci.org/hashicorp/consul) [![Join the chat at https://gitter.im/hashicorp-consul/Lobby](https://badges.gitter.im/hashicorp-consul/Lobby.svg)](https://gitter.im/hashicorp-consul/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -# Consul Connect +* Website: https://www.consul.io +* Chat: [Gitter](https://gitter.im/hashicorp-consul/Lobby) +* Mailing list: [Google Groups](https://groups.google.com/group/consul-tool/) -This repository is the forked repository for Consul Connect work to happen -in private prior to public release. This README will explain how to safely -use this fork, how to bring in upstream changes, etc. +Consul is a tool for service discovery and configuration. Consul is +distributed, highly available, and extremely scalable. -## Cloning +Consul provides several key features: -To use this repository, clone it into your GOPATH as usual but you must -**rename `consul-connect` to `consul`** so that Go imports continue working -as usual. +* **Service Discovery** - Consul makes it simple for services to register + themselves and to discover other services via a DNS or HTTP interface. + External services such as SaaS providers can be registered as well. -## Important: Never Modify Master +* **Health Checking** - Health Checking enables Consul to quickly alert + operators about any issues in a cluster. The integration with service + discovery prevents routing traffic to unhealthy hosts and enables service + level circuit breakers. -**NEVER MODIFY MASTER! NEVER MODIFY MASTER!** +* **Key/Value Storage** - A flexible key/value store enables storing + dynamic configuration, feature flagging, coordination, leader election and + more. The simple HTTP API makes it easy to use anywhere. -We want to keep the "master" branch equivalent to OSS master. 
This will make -rebasing easy for master. Instead, we'll use the branch `f-connect`. All -feature branches should branch from `f-connect` and make PRs against -`f-connect`. +* **Multi-Datacenter** - Consul is built to be datacenter aware, and can + support any number of regions without complex configuration. -When we're ready to merge back to upstream, we can make a single mega PR -merging `f-connect` into OSS master. This way we don't have a sudden mega -push to master on OSS. +Consul runs on Linux, Mac OS X, FreeBSD, Solaris, and Windows. A commercial +version called [Consul Enterprise](https://www.hashicorp.com/products/consul) +is also available. -## Creating a Feature Branch +## Quick Start -To create a feature branch, branch from `f-connect`: +An extensive quick start is viewable on the Consul website: -```sh -git checkout f-connect -git checkout -b my-new-branch +https://www.consul.io/intro/getting-started/install.html + +## Documentation + +Full, comprehensive documentation is viewable on the Consul website: + +https://www.consul.io/docs + +## Developing Consul + +If you wish to work on Consul itself, you'll first need [Go](https://golang.org) +installed (version 1.9+ is _required_). Make sure you have Go properly installed, +including setting up your [GOPATH](https://golang.org/doc/code.html#GOPATH). + +Next, clone this repository into `$GOPATH/src/github.com/hashicorp/consul` and +then just type `make`. In a few moments, you'll have a working `consul` executable: + +``` +$ make +... +$ bin/consul +... ``` -All merged Connect features will be in `f-connect`, so you want to work -from that branch. When making a PR for your feature branch, target the -`f-connect` branch as the merge target. You can do this by using the dropdowns -in the GitHub UI when creating a PR. +*Note: `make` will build all os/architecture combinations. 
Set the environment variable `CONSUL_DEV=1` to build it just for your local machine's os/architecture, or use `make dev`.* -## Syncing Upstream +*Note: `make` will also place a copy of the binary in the first part of your `$GOPATH`.* -First update our local master: +You can run tests by typing `make test`. The test suite may fail if +over-parallelized, so if you are seeing stochastic failures try +`GOTEST_FLAGS="-p 2 -parallel 2" make test`. -```sh -# This has to happen on forked master -git checkout master +If you make any changes to the code, run `make format` in order to automatically +format the code according to Go standards. -# Add upstream to OSS Consul -git remote add upstream https://github.com/hashicorp/consul.git +## Vendoring -# Fetch it -git fetch upstream - -# Rebase forked master onto upstream. This should have no changes since -# we're never modifying master. -git rebase upstream master -``` - -Next, update the `f-connect` branch: - -```sh -git checkout f-connect -git rebase origin master -``` +Consul currently uses [govendor](https://github.com/kardianos/govendor) for +vendoring and [vendorfmt](https://github.com/magiconair/vendorfmt) for formatting +`vendor.json` to a more merge-friendly "one line per package" format. 
From 19b2344f87b813ccf0b709c565d004f77c7c246f Mon Sep 17 00:00:00 2001 From: Jeff Escalante Date: Tue, 26 Jun 2018 10:00:26 +0200 Subject: [PATCH 620/627] fix progress bar animation on configuration page --- .../assets/stylesheets/consul-connect/_animations.scss | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/website/source/assets/stylesheets/consul-connect/_animations.scss b/website/source/assets/stylesheets/consul-connect/_animations.scss index 32eff8aaa..9f27bc6e3 100644 --- a/website/source/assets/stylesheets/consul-connect/_animations.scss +++ b/website/source/assets/stylesheets/consul-connect/_animations.scss @@ -32,10 +32,6 @@ & #c-box-8 { opacity: 0.5; } - - & #c-loading-bar > rect:last-child { - width: 0; - } } #configuration-solution-animation { @@ -60,9 +56,6 @@ & #s-service-box-8 { opacity: 0.5; } - & #s-progress-indicator { - width: 0; - } & #s-dots { opacity: 0; } @@ -82,7 +75,7 @@ } & #c-computer { - opacity: .12; + opacity: 0.12; } & #c-computer-to-load-balancers #c-arrow-down { From 566e21850ab92853f8ba8525873392026668f9d4 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Tue, 26 Jun 2018 01:00:54 -0700 Subject: [PATCH 621/627] Update README.md --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 1e29e765f..bbcdfeb4c 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,9 @@ Consul provides several key features: * **Multi-Datacenter** - Consul is built to be datacenter aware, and can support any number of regions without complex configuration. +* **Service Segmentation** - Consul Connect enables secure service-to-service +communication with automatic TLS encryption and identity-based authorization. + Consul runs on Linux, Mac OS X, FreeBSD, Solaris, and Windows. A commercial version called [Consul Enterprise](https://www.hashicorp.com/products/consul) is also available. 
From fd4ab1d2c361af56999cafa46f95b12b5eb528c0 Mon Sep 17 00:00:00 2001 From: sandstrom Date: Tue, 26 Jun 2018 13:38:16 +0200 Subject: [PATCH 622/627] spelling --- website/source/intro/vs/proxies.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/vs/proxies.html.md b/website/source/intro/vs/proxies.html.md index 80ecb8f8c..9b9971710 100644 --- a/website/source/intro/vs/proxies.html.md +++ b/website/source/intro/vs/proxies.html.md @@ -49,7 +49,7 @@ by the user. Further, by supporting a pluggable data plane model, the right proxy can be deployed as needed. For non-performance critical applications, the built-in -proxy can be used. For performance critical applicaiotns, Envoy can be used. +proxy can be used. For performance critical applications, Envoy can be used. For some applications that may require hardware, a hardware load balancer such an F5 appliance may be deployed. Consul provides an API for all of these solutions to be integrated. From 7c7b2bb1fa0ad9905d6c1c98f2a8e530ad94e500 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Tue, 26 Jun 2018 11:35:56 -0400 Subject: [PATCH 623/627] Putting source back into Dev Mode --- CHANGELOG.md | 2 ++ version/version.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c3cf4e3e8..1334252a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,5 @@ +## UNRELEASED + ## 1.2.0 (June 26, 2018) FEATURES: diff --git a/version/version.go b/version/version.go index 1daac796c..92fbce3d0 100644 --- a/version/version.go +++ b/version/version.go @@ -20,7 +20,7 @@ var ( // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. 
- VersionPrerelease = "" + VersionPrerelease = "dev" ) // GetHumanVersion composes the parts of the version in a way that's suitable From 685838ea0efe6ee68c11f40d5132e8b0a086c2b7 Mon Sep 17 00:00:00 2001 From: Matt Keeler Date: Tue, 26 Jun 2018 15:09:14 -0400 Subject: [PATCH 624/627] Fix layout issue with discovery_max_stale It was indented when it shouldn't have been. --- website/source/docs/agent/options.html.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/agent/options.html.md b/website/source/docs/agent/options.html.md index d2ba21f42..2078fc68d 100644 --- a/website/source/docs/agent/options.html.md +++ b/website/source/docs/agent/options.html.md @@ -752,7 +752,7 @@ Consul will not enable TLS for the HTTP API unless the `https` port has been ass to the Consul raft log in environments where health checks have volatile output like timestamps, process ids, ... - * `discovery_max_stale` - Enables +* `discovery_max_stale` - Enables stale requests for all service discovery HTTP endpoints. This is equivalent to the [`max_stale`](#max_stale) configuration for DNS requests. If this value is zero (default), all service discovery HTTP endpoints are forwarded to the leader. If this value is greater than zero, any Consul server @@ -1360,4 +1360,4 @@ items which are reloaded include: * Node Metadata * Metric Prefix Filter * Discard Check Output -* RPC rate limiting \ No newline at end of file +* RPC rate limiting From 45e9d5e0092e0c452c0d5b068d17b913c94036d4 Mon Sep 17 00:00:00 2001 From: Jack Pearkes Date: Tue, 26 Jun 2018 12:15:23 -0700 Subject: [PATCH 625/627] website: correct paramater for service meta on catalog register I believe this may have been missed as part of #3994. Note that the API _returns_ `ServiceMeta`, but accepts `Meta`. 
$ curl -X PUT -d \ '{ "Datacenter": "dc1", "Node": "example", "Address": "www.example.com", "Service": { "Service": "example-service", "Port": 80, "Meta": {"foo": "bar"} } }' \ http://localhost:8500/v1/catalog/register $ curl localhost:8500/v1/catalog/service/example-service [ { "ID": "", "Node": "example", "Address": "www.example.com", "Datacenter": "dc1", "TaggedAddresses": null, "NodeMeta": null, "ServiceKind": "", "ServiceID": "example-service", "ServiceName": "example-service", "ServiceTags": [], "ServiceAddress": "", "ServiceMeta": { "foo": "bar" }, "ServicePort": 80, "ServiceEnableTagOverride": false, "ServiceProxyDestination": "", "ServiceConnect": { "Native": false, "Proxy": null }, "CreateIndex": 11, "ModifyIndex": 37 } ] --- website/source/api/catalog.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/api/catalog.html.md b/website/source/api/catalog.html.md index 9e0ae90a3..331df26f2 100644 --- a/website/source/api/catalog.html.md +++ b/website/source/api/catalog.html.md @@ -54,7 +54,7 @@ The table below shows this endpoint's support for - `Service` `(Service: nil)` - Specifies to register a service. If `ID` is not provided, it will be defaulted to the value of the `Service.Service` property. Only one service with a given `ID` may be present per node. The service - `Tags`, `Address`, `ServiceMeta`, and `Port` fields are all optional. + `Tags`, `Address`, `Meta`, and `Port` fields are all optional. - `Check` `(Check: nil)` - Specifies to register a check. The register API manipulates the health check entry in the Catalog, but it does not setup the From faaf755b211034e31fcddd05c557611bb285b5e3 Mon Sep 17 00:00:00 2001 From: Chris Beck <35543060+cbeckhashicorp@users.noreply.github.com> Date: Tue, 26 Jun 2018 15:19:25 -0400 Subject: [PATCH 626/627] Clarify beta release/feature and spelling Changed beta release to beta feature to clarify connect is beta feature and 1.2 is not beta release. 
--- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1334252a4..99dd47d9d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,9 +6,9 @@ FEATURES: * **Connect Feature Beta**: This version includes a major new feature for Consul named Connect. Connect enables secure service-to-service communication with automatic TLS encryption and identity-based authorization. For more details and links to demos and getting started guides, see the [announcement blog post](https://www.hashicorp.com/blog/consul-1-2-service-mesh). * Connect must be enabled explicitly in configuration so upgrading a cluster will not affect any existing functionality until it's enabled. - * This is a Beta release, we don't recommend enabling this in production yet. Please see the documentation for more information. + * This is a Beta feature, we don't recommend enabling this in production yet. Please see the documentation for more information. * dns: Enable PTR record lookups for services with IPs that have no registered node [[PR-4083](https://github.com/hashicorp/consul/pull/4083)] -* ui: Default to serving the new UI. Setting the `CONSUL_UI_LEGACY` environment variable to `1` of `true` will revert to serving the old UI +* ui: Default to serving the new UI. Setting the `CONSUL_UI_LEGACY` environment variable to `1` or `true` will revert to serving the old UI IMPROVEMENTS: From 8e9d1c584f8bd9f00358befbe8f8241f90092d8d Mon Sep 17 00:00:00 2001 From: Mitchell Hashimoto Date: Tue, 26 Jun 2018 21:52:27 +0200 Subject: [PATCH 627/627] website: update vs. 
istio page and clarify pluggable data layer --- website/source/intro/index.html.md | 12 +++++++++--- website/source/intro/vs/istio.html.md | 16 +++++++++------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/website/source/intro/index.html.md b/website/source/intro/index.html.md index ff914838b..27ec9b047 100644 --- a/website/source/intro/index.html.md +++ b/website/source/intro/index.html.md @@ -16,9 +16,15 @@ detailed reference of available features. ## What is Consul? -Consul is a service mesh solution that has multiple components for -discovering, connecting, configuring, and securing services in your -infrastructure. It provides several key features: +Consul is a service mesh solution providing a full featured control plane +with service discovery, configuration, and segmentation functionality. Each +of these features can be used individually as needed, or they can be used +together to build a full service mesh. Consul requires a data plane and +supports both a proxy and native integration model. Consul ships with a +simple built-in proxy so that everything works out of the box, but also +supports 3rd party proxy integrations such as Envoy. + +The key features of Consul are: * **Service Discovery**: Clients of Consul can register a service, such as `api` or `mysql`, and other clients can use Consul to discover providers diff --git a/website/source/intro/vs/istio.html.md b/website/source/intro/vs/istio.html.md index 49437e9de..4710db7da 100644 --- a/website/source/intro/vs/istio.html.md +++ b/website/source/intro/vs/istio.html.md @@ -14,14 +14,10 @@ To enable the full functionality of Istio, multiple services must be deployed. For the control plane: Pilot, Mixer, and Citadel must be deployed and for the data plane an Envoy sidecar is deployed. Additionally, Istio requires a 3rd party service catalog from Kubernetes, Consul, Eureka, -or others. At a minimum, three Istio-dedicated services along with at +or others. 
Finally, Istio requires an external system for storing state, +typically etcd. At a minimum, three Istio-dedicated services along with at least one separate distributed system (in addition to Istio) must be -configured for the full functionality of Istio. - -Istio plans to work on any platform, but currently has a hard dependency -on the Kubernetes API. While the documentation covers connecting non-Kubernetes -services, it assumes that a Kubernetes deployment exists for the control -plane. +configured to use the full functionality of Istio. Istio provides layer 7 features for path-based routing, traffic shaping, load balancing, and telemetry. Access control policies can be configured @@ -75,3 +71,9 @@ Because Consul's service connection feature "Connect" is built-in, it inherits the operational stability of Consul. Consul has been in production for large companies since 2014 and is known to be deployed on as many as 50,000 nodes in a single cluster. + +This comparison is based on our own limited usage of Istio as well as +talking to Istio users. If you feel there are inaccurate statements in this +comparison, please click "Edit This Page" in the footer of this page and +propose edits. We strive for technical accuracy and will review and update +this post for inaccuracies as quickly as possible.