Create configutil and move some common config and setup functions there (#8362)

Author: Jeff Mitchell, 2020-05-14 09:19:27 -04:00 (committed by GitHub)
Parent: 724af764bb
Commit: 1d3d89e2aa
57 changed files with 2067 additions and 1781 deletions

View File

@ -12,7 +12,7 @@ require (
github.com/hashicorp/go-retryablehttp v0.6.2
github.com/hashicorp/go-rootcerts v1.0.1
github.com/hashicorp/hcl v1.0.0
github.com/hashicorp/vault/sdk v0.1.14-0.20200215195600-2ca765f0a500
github.com/hashicorp/vault/sdk v0.1.14-0.20200310200459-8f075a2701b4
github.com/mitchellh/mapstructure v1.1.2
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4

View File

@ -3,12 +3,12 @@ package okta
import (
"context"
"fmt"
"github.com/hashicorp/go-cleanhttp"
"net/http"
"net/url"
"strings"
"time"
"github.com/hashicorp/go-cleanhttp"
oktaold "github.com/chrismalek/oktasdk-go/okta"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/tokenutil"

View File

@ -2,12 +2,13 @@ package database
import (
"context"
"strings"
"testing"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/sdk/database/dbplugin"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
"strings"
"testing"
)
const (

View File

@ -482,15 +482,8 @@ func (c *AgentCommand) Run(args []string) int {
// Parse 'require_request_header' listener config option, and wrap
// the request handler if necessary
muxHandler := cacheHandler
if v, ok := lnConfig.Config[agentConfig.RequireRequestHeader]; ok {
switch v {
case true:
muxHandler = verifyRequestHeader(muxHandler)
case false /* noop */ :
default:
c.UI.Error(fmt.Sprintf("Invalid value for 'require_request_header': %v", v))
return 1
}
if lnConfig.RequireRequestHeader {
muxHandler = verifyRequestHeader(muxHandler)
}
// Create a muxer and add paths relevant for the lease cache layer
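Note that require_request_header is now a typed boolean on the shared listener struct rather than a value fished out of the raw config map and type-switched. A minimal sketch of an agent listener stanza that enables it, assuming the HCL key keeps the name of the removed RequireRequestHeader constant; the stanza is embedded in a Go string purely for illustration:

    // Hypothetical agent listener fragment; require_request_header is the key
    // of interest and is now decoded into the listener's RequireRequestHeader field.
    const exampleAgentListenerHCL = `
    listener "tcp" {
      address                = "127.0.0.1:8100"
      tls_disable            = true
      require_request_header = true
    }
    `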

View File

@ -7,16 +7,13 @@ import (
"strings"
"github.com/hashicorp/vault/command/agent/config"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/internalshared/listenerutil"
)
func StartListener(lnConfig *config.Listener) (net.Listener, *tls.Config, error) {
addr, ok := lnConfig.Config["address"].(string)
if !ok {
return nil, nil, fmt.Errorf("invalid address")
}
func StartListener(lnConfig *configutil.Listener) (net.Listener, *tls.Config, error) {
addr := lnConfig.Address
var ln net.Listener
var err error
@ -41,13 +38,13 @@ func StartListener(lnConfig *config.Listener) (net.Listener, *tls.Config, error)
case "unix":
var uConfig *listenerutil.UnixSocketsConfig
if lnConfig.Config["socket_mode"] != nil &&
lnConfig.Config["socket_user"] != nil &&
lnConfig.Config["socket_group"] != nil {
if lnConfig.SocketMode != "" &&
lnConfig.SocketUser != "" &&
lnConfig.SocketGroup != "" {
uConfig = &listenerutil.UnixSocketsConfig{
Mode: lnConfig.Config["socket_mode"].(string),
User: lnConfig.Config["socket_user"].(string),
Group: lnConfig.Config["socket_group"].(string),
Mode: lnConfig.SocketMode,
User: lnConfig.SocketUser,
Group: lnConfig.SocketGroup,
}
}
ln, err = listenerutil.UnixSocketListener(addr, uConfig)
@ -60,10 +57,13 @@ func StartListener(lnConfig *config.Listener) (net.Listener, *tls.Config, error)
}
props := map[string]string{"addr": ln.Addr().String()}
ln, props, _, tlsConf, err := listenerutil.WrapTLS(ln, props, lnConfig.Config, nil)
tlsConf, _, err := listenerutil.TLSConfig(lnConfig, props, nil)
if err != nil {
return nil, nil, err
}
if tlsConf != nil {
ln = tls.NewListener(ln, tlsConf)
}
return ln, tlsConf, nil
}
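StartListener now takes the shared configutil.Listener type, so callers populate typed fields (Type, Address, the TLS and socket options) instead of a map[string]interface{}. A minimal usage sketch, assuming it lives in the same package that defines StartListener so only the extra imports are shown:

    import (
        "net"

        "github.com/hashicorp/vault/internalshared/configutil"
    )

    // startPlainTCPListener sketches calling the new StartListener with a
    // typed listener config instead of a raw config map.
    func startPlainTCPListener() (net.Listener, error) {
        lnConfig := &configutil.Listener{
            Type:       "tcp",
            Address:    "127.0.0.1:8100",
            TLSDisable: true,
        }

        // The returned TLS config is nil when TLS is disabled; otherwise
        // StartListener has already wrapped the listener via tls.NewListener.
        ln, _, err := StartListener(lnConfig)
        if err != nil {
            return nil, err
        }
        return ln, nil
    }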

View File

@ -72,11 +72,11 @@ type mockTokenVerifierProxier struct {
func (p *mockTokenVerifierProxier) Send(ctx context.Context, req *SendRequest) (*SendResponse, error) {
p.currentToken = req.Token
resp := newTestSendResponse(http.StatusOK,
`{"data": {"id": "` + p.currentToken + `"}}`)
`{"data": {"id": "`+p.currentToken+`"}}`)
return resp, nil
}
func (p *mockTokenVerifierProxier) GetCurrentRequestToken() (string) {
func (p *mockTokenVerifierProxier) GetCurrentRequestToken() string {
return p.currentToken
}
}

View File

@ -14,16 +14,17 @@ import (
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/mitchellh/mapstructure"
)
// Config is the configuration for the vault server.
type Config struct {
*configutil.SharedConfig `hcl:"-"`
AutoAuth *AutoAuth `hcl:"auto_auth"`
ExitAfterAuth bool `hcl:"exit_after_auth"`
PidFile string `hcl:"pid_file"`
Listeners []*Listener `hcl:"listeners"`
Cache *Cache `hcl:"cache"`
Vault *Vault `hcl:"vault"`
Templates []*ctconfig.TemplateConfig `hcl:"templates"`
@ -48,15 +49,6 @@ type Cache struct {
ForceAutoAuthToken bool `hcl:"-"`
}
// Listener contains configuration for any Vault Agent listeners
type Listener struct {
Type string
Config map[string]interface{}
}
// RequireRequestHeader is a listener configuration option
const RequireRequestHeader = "require_request_header"
// AutoAuth is the configured authentication method and sinks
type AutoAuth struct {
Method *Method `hcl:"-"`
@ -89,6 +81,12 @@ type Sink struct {
Config map[string]interface{}
}
func NewConfig() *Config {
return &Config{
SharedConfig: new(configutil.SharedConfig),
}
}
// LoadConfig loads the configuration at the given path, regardless if
// its a file or directory.
func LoadConfig(path string) (*Config, error) {
@ -114,29 +112,31 @@ func LoadConfig(path string) (*Config, error) {
}
// Start building the result
var result Config
if err := hcl.DecodeObject(&result, obj); err != nil {
result := NewConfig()
if err := hcl.DecodeObject(result, obj); err != nil {
return nil, err
}
sharedConfig, err := configutil.ParseConfig(string(d))
if err != nil {
return nil, err
}
result.SharedConfig = sharedConfig
list, ok := obj.Node.(*ast.ObjectList)
if !ok {
return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
}
if err := parseAutoAuth(&result, list); err != nil {
if err := parseAutoAuth(result, list); err != nil {
return nil, errwrap.Wrapf("error parsing 'auto_auth': {{err}}", err)
}
if err := parseListeners(&result, list); err != nil {
return nil, errwrap.Wrapf("error parsing 'listeners': {{err}}", err)
}
if err := parseCache(&result, list); err != nil {
if err := parseCache(result, list); err != nil {
return nil, errwrap.Wrapf("error parsing 'cache':{{err}}", err)
}
if err := parseTemplates(&result, list); err != nil {
if err := parseTemplates(result, list); err != nil {
return nil, errwrap.Wrapf("error parsing 'template': {{err}}", err)
}
@ -161,12 +161,12 @@ func LoadConfig(path string) (*Config, error) {
}
}
err = parseVault(&result, list)
err = parseVault(result, list)
if err != nil {
return nil, errwrap.Wrapf("error parsing 'vault':{{err}}", err)
}
return &result, nil
return result, nil
}
func parseVault(result *Config, list *ast.ObjectList) error {
@ -245,47 +245,6 @@ func parseCache(result *Config, list *ast.ObjectList) error {
return nil
}
func parseListeners(result *Config, list *ast.ObjectList) error {
name := "listener"
listenerList := list.Filter(name)
var listeners []*Listener
for _, item := range listenerList.Items {
var lnConfig map[string]interface{}
err := hcl.DecodeObject(&lnConfig, item.Val)
if err != nil {
return err
}
var lnType string
switch {
case lnConfig["type"] != nil:
lnType = lnConfig["type"].(string)
delete(lnConfig, "type")
case len(item.Keys) == 1:
lnType = strings.ToLower(item.Keys[0].Token.Value().(string))
default:
return errors.New("listener type must be specified")
}
switch lnType {
case "unix", "tcp":
default:
return fmt.Errorf("invalid listener type %q", lnType)
}
listeners = append(listeners, &Listener{
Type: lnType,
Config: lnConfig,
})
}
result.Listeners = listeners
return nil
}
func parseAutoAuth(result *Config, list *ast.ObjectList) error {
name := "auto_auth"
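Because the agent Config now embeds *configutil.SharedConfig, the listener and pid_file settings parsed by configutil.ParseConfig are promoted onto the agent config itself. A minimal sketch of loading a hypothetical agent.hcl and reading those promoted fields:

    package main

    import (
        "fmt"
        "log"

        agentconfig "github.com/hashicorp/vault/command/agent/config"
    )

    func main() {
        // LoadConfig decodes the agent-specific stanzas, then layers on the
        // shared config (listeners, pid_file, ...) from configutil.ParseConfig.
        cfg, err := agentconfig.LoadConfig("agent.hcl")
        if err != nil {
            log.Fatal(err)
        }

        // Fields of the embedded SharedConfig are promoted onto Config.
        fmt.Println("pid file:", cfg.PidFile)
        for _, ln := range cfg.Listeners {
            fmt.Printf("listener %s on %s (tls_disable=%v)\n", ln.Type, ln.Address, ln.TLSDisable)
        }
    }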

View File

@ -7,6 +7,7 @@ import (
"github.com/go-test/deep"
ctconfig "github.com/hashicorp/consul-template/config"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/pointerutil"
)
@ -17,6 +18,30 @@ func TestLoadConfigFile_AgentCache(t *testing.T) {
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
PidFile: "./pidfile",
Listeners: []*configutil.Listener{
{
Type: "unix",
Address: "/path/to/socket",
TLSDisable: true,
SocketMode: "configmode",
SocketUser: "configuser",
SocketGroup: "configgroup",
},
{
Type: "tcp",
Address: "127.0.0.1:8300",
TLSDisable: true,
},
{
Type: "tcp",
Address: "127.0.0.1:8400",
TLSKeyFile: "/path/to/cakey.pem",
TLSCertFile: "/path/to/cacert.pem",
},
},
},
AutoAuth: &AutoAuth{
Method: &Method{
Type: "aws",
@ -26,7 +51,7 @@ func TestLoadConfigFile_AgentCache(t *testing.T) {
},
},
Sinks: []*Sink{
&Sink{
{
Type: "file",
DHType: "curve25519",
DHPath: "/tmp/file-foo-dhpath",
@ -42,33 +67,6 @@ func TestLoadConfigFile_AgentCache(t *testing.T) {
UseAutoAuthTokenRaw: true,
ForceAutoAuthToken: false,
},
Listeners: []*Listener{
&Listener{
Type: "unix",
Config: map[string]interface{}{
"address": "/path/to/socket",
"tls_disable": true,
"socket_mode": "configmode",
"socket_user": "configuser",
"socket_group": "configgroup",
},
},
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:8300",
"tls_disable": true,
},
},
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:8400",
"tls_key_file": "/path/to/cakey.pem",
"tls_cert_file": "/path/to/cacert.pem",
},
},
},
Vault: &Vault{
Address: "http://127.0.0.1:1111",
CACert: "config_ca_cert",
@ -78,9 +76,11 @@ func TestLoadConfigFile_AgentCache(t *testing.T) {
ClientCert: "config_client_cert",
ClientKey: "config_client_key",
},
PidFile: "./pidfile",
}
config.Listeners[0].RawConfig = nil
config.Listeners[1].RawConfig = nil
config.Listeners[2].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
@ -91,6 +91,9 @@ func TestLoadConfigFile_AgentCache(t *testing.T) {
}
expected.Vault.TLSSkipVerifyRaw = interface{}(true)
config.Listeners[0].RawConfig = nil
config.Listeners[1].RawConfig = nil
config.Listeners[2].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
@ -112,6 +115,9 @@ func TestLoadConfigFile(t *testing.T) {
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
PidFile: "./pidfile",
},
AutoAuth: &AutoAuth{
Method: &Method{
Type: "aws",
@ -122,7 +128,7 @@ func TestLoadConfigFile(t *testing.T) {
},
},
Sinks: []*Sink{
&Sink{
{
Type: "file",
DHType: "curve25519",
DHPath: "/tmp/file-foo-dhpath",
@ -131,7 +137,7 @@ func TestLoadConfigFile(t *testing.T) {
"path": "/tmp/file-foo",
},
},
&Sink{
{
Type: "file",
WrapTTL: 5 * time.Minute,
DHType: "curve25519",
@ -143,7 +149,6 @@ func TestLoadConfigFile(t *testing.T) {
},
},
},
PidFile: "./pidfile",
}
if diff := deep.Equal(config, expected); diff != nil {
@ -167,6 +172,9 @@ func TestLoadConfigFile_Method_Wrapping(t *testing.T) {
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
PidFile: "./pidfile",
},
AutoAuth: &AutoAuth{
Method: &Method{
Type: "aws",
@ -177,7 +185,7 @@ func TestLoadConfigFile_Method_Wrapping(t *testing.T) {
},
},
Sinks: []*Sink{
&Sink{
{
Type: "file",
Config: map[string]interface{}{
"path": "/tmp/file-foo",
@ -185,7 +193,6 @@ func TestLoadConfigFile_Method_Wrapping(t *testing.T) {
},
},
},
PidFile: "./pidfile",
}
if diff := deep.Equal(config, expected); diff != nil {
@ -201,18 +208,19 @@ func TestLoadConfigFile_AgentCache_NoAutoAuth(t *testing.T) {
expected := &Config{
Cache: &Cache{},
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:8300",
"tls_disable": true,
SharedConfig: &configutil.SharedConfig{
PidFile: "./pidfile",
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:8300",
TLSDisable: true,
},
},
},
PidFile: "./pidfile",
}
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
@ -267,6 +275,16 @@ func TestLoadConfigFile_AgentCache_AutoAuth_NoSink(t *testing.T) {
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:8300",
TLSDisable: true,
},
},
PidFile: "./pidfile",
},
AutoAuth: &AutoAuth{
Method: &Method{
Type: "aws",
@ -281,18 +299,9 @@ func TestLoadConfigFile_AgentCache_AutoAuth_NoSink(t *testing.T) {
UseAutoAuthTokenRaw: true,
ForceAutoAuthToken: false,
},
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:8300",
"tls_disable": true,
},
},
},
PidFile: "./pidfile",
}
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
@ -305,6 +314,16 @@ func TestLoadConfigFile_AgentCache_AutoAuth_Force(t *testing.T) {
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:8300",
TLSDisable: true,
},
},
PidFile: "./pidfile",
},
AutoAuth: &AutoAuth{
Method: &Method{
Type: "aws",
@ -319,18 +338,9 @@ func TestLoadConfigFile_AgentCache_AutoAuth_Force(t *testing.T) {
UseAutoAuthTokenRaw: "force",
ForceAutoAuthToken: true,
},
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:8300",
"tls_disable": true,
},
},
},
PidFile: "./pidfile",
}
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
@ -343,6 +353,16 @@ func TestLoadConfigFile_AgentCache_AutoAuth_True(t *testing.T) {
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:8300",
TLSDisable: true,
},
},
PidFile: "./pidfile",
},
AutoAuth: &AutoAuth{
Method: &Method{
Type: "aws",
@ -357,18 +377,9 @@ func TestLoadConfigFile_AgentCache_AutoAuth_True(t *testing.T) {
UseAutoAuthTokenRaw: "true",
ForceAutoAuthToken: false,
},
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:8300",
"tls_disable": true,
},
},
},
PidFile: "./pidfile",
}
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
@ -381,6 +392,16 @@ func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) {
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:8300",
TLSDisable: true,
},
},
PidFile: "./pidfile",
},
AutoAuth: &AutoAuth{
Method: &Method{
Type: "aws",
@ -390,7 +411,7 @@ func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) {
},
},
Sinks: []*Sink{
&Sink{
{
Type: "file",
DHType: "curve25519",
DHPath: "/tmp/file-foo-dhpath",
@ -406,18 +427,9 @@ func TestLoadConfigFile_AgentCache_AutoAuth_False(t *testing.T) {
UseAutoAuthTokenRaw: "false",
ForceAutoAuthToken: false,
},
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:8300",
"tls_disable": true,
},
},
},
PidFile: "./pidfile",
}
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
@ -494,6 +506,9 @@ func TestLoadConfigFile_Template(t *testing.T) {
}
expected := &Config{
SharedConfig: &configutil.SharedConfig{
PidFile: "./pidfile",
},
AutoAuth: &AutoAuth{
Method: &Method{
Type: "aws",
@ -504,7 +519,7 @@ func TestLoadConfigFile_Template(t *testing.T) {
},
},
Sinks: []*Sink{
&Sink{
{
Type: "file",
DHType: "curve25519",
DHPath: "/tmp/file-foo-dhpath",
@ -516,7 +531,6 @@ func TestLoadConfigFile_Template(t *testing.T) {
},
},
Templates: tc.expectedTemplates,
PidFile: "./pidfile",
}
if diff := deep.Equal(config, expected); diff != nil {

View File

@ -547,7 +547,7 @@ func testSealMigrationTransitToShamir(t *testing.T, setup teststorage.ClusterSet
// Create a Shamir seal.
logger := cluster.Logger.Named("shamir")
shamirSeal := vault.NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: logger,
}),
})

View File

@ -20,33 +20,25 @@ import (
"sync"
"time"
monitoring "cloud.google.com/go/monitoring/apiv3"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/circonus"
"github.com/armon/go-metrics/datadog"
"github.com/armon/go-metrics/prometheus"
stackdriver "github.com/google/go-metrics-stackdriver"
stackdrivervault "github.com/google/go-metrics-stackdriver/vault"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-hclog"
log "github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/command/server"
serverseal "github.com/hashicorp/vault/command/server/seal"
"github.com/hashicorp/vault/helper/builtinplugins"
"github.com/hashicorp/vault/helper/metricsutil"
"github.com/hashicorp/vault/helper/namespace"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/internalshared/gatedwriter"
"github.com/hashicorp/vault/internalshared/listenerutil"
"github.com/hashicorp/vault/internalshared/reloadutil"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/helper/mlock"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/hashicorp/vault/sdk/helper/useragent"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/sdk/physical"
@ -59,7 +51,6 @@ import (
"github.com/posener/complete"
"go.uber.org/atomic"
"golang.org/x/net/http/httpproxy"
"google.golang.org/api/option"
"google.golang.org/grpc/grpclog"
)
@ -130,14 +121,6 @@ type ServerCommand struct {
flagExitOnCoreShutdown bool
}
type ServerListener struct {
net.Listener
config map[string]interface{}
maxRequestSize int64
maxRequestDuration time.Duration
unauthenticatedMetricsAccess bool
}
func (c *ServerCommand) Synopsis() string {
return "Start a Vault server"
}
@ -497,9 +480,10 @@ func (c *ServerCommand) runRecoveryMode() int {
var barrierSeal vault.Seal
var sealConfigError error
var wrapper wrapping.Wrapper
if len(config.Seals) == 0 {
config.Seals = append(config.Seals, &server.Seal{Type: wrapping.Shamir})
config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.Shamir})
}
if len(config.Seals) > 1 {
@ -516,13 +500,17 @@ func (c *ServerCommand) runRecoveryMode() int {
sealType = configSeal.Type
}
infoKeys = append(infoKeys, "Seal Type")
info["Seal Type"] = sealType
var seal vault.Seal
sealLogger := c.logger.Named(sealType)
seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: c.logger.Named("shamir"),
}),
}))
})
sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType))
wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &infoKeys, &info, sealLogger)
if sealConfigError != nil {
if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) {
c.UI.Error(fmt.Sprintf(
@ -530,12 +518,13 @@ func (c *ServerCommand) runRecoveryMode() int {
return 1
}
}
if seal == nil {
c.UI.Error(fmt.Sprintf(
"After configuring seal nil returned, seal type was %s", sealType))
return 1
if wrapper == nil {
seal = defaultSeal
} else {
seal = vault.NewAutoSeal(&vaultseal.Access{
Wrapper: wrapper,
})
}
barrierSeal = seal
// Ensure that the seal finalizer is called, even if using verify-only
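Seal construction in both recovery and normal startup now goes through configutil.ConfigureWrapper, which returns a wrapping.Wrapper (nil for a plain Shamir setup) rather than a fully built seal; the command then chooses between the default Shamir seal and an auto-seal. A condensed sketch of that selection, with the vault and vaultseal import paths assumed from the surrounding file:

    import (
        "fmt"

        "github.com/hashicorp/errwrap"
        log "github.com/hashicorp/go-hclog"
        wrapping "github.com/hashicorp/go-kms-wrapping"
        aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
        "github.com/hashicorp/vault/internalshared/configutil"
        "github.com/hashicorp/vault/sdk/logical"
        "github.com/hashicorp/vault/vault"
        vaultseal "github.com/hashicorp/vault/vault/seal"
    )

    // buildSeal mirrors the new flow: configure a KMS wrapper from the seal
    // stanza and fall back to the default Shamir seal when none is returned.
    func buildSeal(logger log.Logger, configSeal *configutil.KMS) (vault.Seal, error) {
        defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{
            Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
                Logger: logger.Named("shamir"),
            }),
        })

        var infoKeys []string
        info := make(map[string]string)
        sealLogger := logger.ResetNamed(fmt.Sprintf("seal.%s", configSeal.Type))

        wrapper, err := configutil.ConfigureWrapper(configSeal, &infoKeys, &info, sealLogger)
        if err != nil && !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
            return nil, err
        }
        if wrapper == nil {
            // No auto-seal wrapper configured (e.g. type "shamir"): use Shamir.
            return defaultSeal, nil
        }
        return vault.NewAutoSeal(&vaultseal.Access{Wrapper: wrapper}), nil
    }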
@ -579,17 +568,17 @@ func (c *ServerCommand) runRecoveryMode() int {
}
// Initialize the listeners
lns := make([]ServerListener, 0, len(config.Listeners))
lns := make([]listenerutil.Listener, 0, len(config.Listeners))
for _, lnConfig := range config.Listeners {
ln, _, _, err := server.NewListener(lnConfig.Type, lnConfig.Config, c.gatedWriter, c.UI)
ln, _, _, err := server.NewListener(lnConfig, c.gatedWriter, c.UI)
if err != nil {
c.UI.Error(fmt.Sprintf("Error initializing listener of type %s: %s", lnConfig.Type, err))
return 1
}
lns = append(lns, ServerListener{
lns = append(lns, listenerutil.Listener{
Listener: ln,
config: lnConfig.Config,
Config: lnConfig,
})
}
@ -628,8 +617,7 @@ func (c *ServerCommand) runRecoveryMode() int {
for _, ln := range lns {
handler := vaulthttp.Handler(&vault.HandlerProperties{
Core: core,
MaxRequestSize: ln.maxRequestSize,
MaxRequestDuration: ln.maxRequestDuration,
ListenerConfig: ln.Config,
DisablePrintableCheck: config.DisablePrintableCheck,
RecoveryMode: c.flagRecovery,
RecoveryToken: atomic.NewString(""),
@ -805,6 +793,7 @@ func (c *ServerCommand) Run(args []string) int {
// Load the configuration
var config *server.Config
var err error
if c.flagDev {
var devStorageType string
switch {
@ -819,9 +808,13 @@ func (c *ServerCommand) Run(args []string) int {
default:
devStorageType = "inmem"
}
config = server.DevConfig(devStorageType)
config, err = server.DevConfig(devStorageType)
if err != nil {
c.UI.Error(err.Error())
return 1
}
if c.flagDevListenAddr != "" {
config.Listeners[0].Config["address"] = c.flagDevListenAddr
config.Listeners[0].Address = c.flagDevListenAddr
}
}
@ -918,11 +911,19 @@ func (c *ServerCommand) Run(args []string) int {
"in a Docker container, provide the IPC_LOCK cap to the container."))
}
metricsHelper, metricSink, err := c.setupTelemetry(config)
inmemMetrics, metricSink, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{
Config: config.Telemetry,
Ui: c.UI,
ServiceName: "vault",
DisplayName: "Vault",
UserAgent: useragent.String(),
ClusterName: config.ClusterName,
})
if err != nil {
c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err))
return 1
}
metricsHelper := metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled)
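The server command's private setupTelemetry helper (removed further down in this diff) is replaced by the shared configutil.SetupTelemetry, which returns the in-memory sink, the cluster metric sink, and a prometheus-enabled flag. A minimal sketch of calling it, assuming the shared telemetry stanza type is configutil.Telemetry, the UI is the usual cli.Ui, and the sink types are unchanged from the removed helper:

    import (
        "github.com/hashicorp/vault/helper/metricsutil"
        "github.com/hashicorp/vault/internalshared/configutil"
        "github.com/hashicorp/vault/sdk/helper/useragent"
        "github.com/mitchellh/cli"
    )

    // setupVaultTelemetry sketches the shared telemetry setup; telemetry is the
    // parsed telemetry stanza and ui is the command's UI.
    func setupVaultTelemetry(telemetry *configutil.Telemetry, ui cli.Ui, clusterName string) (*metricsutil.MetricsHelper, *metricsutil.ClusterMetricSink, error) {
        inmemMetrics, metricSink, prometheusEnabled, err := configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{
            Config:      telemetry,
            Ui:          ui,
            ServiceName: "vault",
            DisplayName: "Vault",
            UserAgent:   useragent.String(),
            ClusterName: clusterName,
        })
        if err != nil {
            return nil, nil, err
        }
        return metricsutil.NewMetricsHelper(inmemMetrics, prometheusEnabled), metricSink, nil
    }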
// Initialize the backend
factory, exists := c.PhysicalBackends[config.Storage.Type]
@ -1010,18 +1011,20 @@ func (c *ServerCommand) Run(args []string) int {
var unwrapSeal vault.Seal
var sealConfigError error
var wrapper wrapping.Wrapper
var barrierWrapper wrapping.Wrapper
if c.flagDevAutoSeal {
barrierSeal = vault.NewAutoSeal(vaultseal.NewTestSeal(nil))
} else {
// Handle the case where no seal is provided
switch len(config.Seals) {
case 0:
config.Seals = append(config.Seals, &server.Seal{Type: wrapping.Shamir})
config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.Shamir})
case 1:
// If there's only one seal and it's disabled assume they want to
// migrate to a shamir seal and simply didn't provide it
if config.Seals[0].Disabled {
config.Seals = append(config.Seals, &server.Seal{Type: wrapping.Shamir})
config.Seals = append(config.Seals, &configutil.KMS{Type: wrapping.Shamir})
}
}
for _, configSeal := range config.Seals {
@ -1034,13 +1037,14 @@ func (c *ServerCommand) Run(args []string) int {
}
var seal vault.Seal
sealLogger := c.logger.Named(sealType)
sealLogger := c.logger.ResetNamed(fmt.Sprintf("seal.%s", sealType))
allLoggers = append(allLoggers, sealLogger)
seal, sealConfigError = serverseal.ConfigureSeal(configSeal, &infoKeys, &info, sealLogger, vault.NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
defaultSeal := vault.NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: c.logger.Named("shamir"),
}),
}))
})
wrapper, sealConfigError = configutil.ConfigureWrapper(configSeal, &infoKeys, &info, sealLogger)
if sealConfigError != nil {
if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) {
c.UI.Error(fmt.Sprintf(
@ -1048,16 +1052,19 @@ func (c *ServerCommand) Run(args []string) int {
return 1
}
}
if seal == nil {
c.UI.Error(fmt.Sprintf(
"After configuring seal nil returned, seal type was %s", sealType))
return 1
if wrapper == nil {
seal = defaultSeal
} else {
seal = vault.NewAutoSeal(&vaultseal.Access{
Wrapper: wrapper,
})
}
if configSeal.Disabled {
unwrapSeal = seal
} else {
barrierSeal = seal
barrierWrapper = wrapper
}
// Ensure that the seal finalizer is called, even if using verify-only
@ -1077,7 +1084,7 @@ func (c *ServerCommand) Run(args []string) int {
}
// prepare a secure random reader for core
secureRandomReader, err := createSecureRandomReaderFunc(config, &barrierSeal)
secureRandomReader, err := configutil.CreateSecureRandomReaderFunc(config.SharedConfig, barrierWrapper)
if err != nil {
c.UI.Error(err.Error())
return 1
@ -1233,7 +1240,7 @@ func (c *ServerCommand) Run(args []string) int {
}
}
if coreConfig.RedirectAddr == "" && c.flagDev {
coreConfig.RedirectAddr = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"])
coreConfig.RedirectAddr = fmt.Sprintf("http://%s", config.Listeners[0].Address)
}
// After the redirect bits are sorted out, if no cluster address was
@ -1248,7 +1255,7 @@ func (c *ServerCommand) Run(args []string) int {
case coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "":
addrToUse = coreConfig.RedirectAddr
case c.flagDev:
addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"])
addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Address)
default:
goto CLUSTER_SYNTHESIS_COMPLETE
}
@ -1366,10 +1373,10 @@ CLUSTER_SYNTHESIS_COMPLETE:
clusterAddrs := []*net.TCPAddr{}
// Initialize the listeners
lns := make([]ServerListener, 0, len(config.Listeners))
lns := make([]listenerutil.Listener, 0, len(config.Listeners))
c.reloadFuncsLock.Lock()
for i, lnConfig := range config.Listeners {
ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, c.gatedWriter, c.UI)
ln, props, reloadFunc, err := server.NewListener(lnConfig, c.gatedWriter, c.UI)
if err != nil {
c.UI.Error(fmt.Sprintf("Error initializing listener of type %s: %s", lnConfig.Type, err))
return 1
@ -1382,12 +1389,9 @@ CLUSTER_SYNTHESIS_COMPLETE:
}
if !disableClustering && lnConfig.Type == "tcp" {
var addrRaw interface{}
var addr string
var ok bool
if addrRaw, ok = lnConfig.Config["cluster_address"]; ok {
addr = addrRaw.(string)
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
addr := lnConfig.ClusterAddress
if addr != "" {
tcpAddr, err := net.ResolveTCPAddr("tcp", lnConfig.ClusterAddress)
if err != nil {
c.UI.Error(fmt.Sprintf("Error resolving cluster_address: %s", err))
return 1
@ -1409,59 +1413,19 @@ CLUSTER_SYNTHESIS_COMPLETE:
props["cluster address"] = addr
}
var maxRequestSize int64 = vaulthttp.DefaultMaxRequestSize
if valRaw, ok := lnConfig.Config["max_request_size"]; ok {
val, err := parseutil.ParseInt(valRaw)
if err != nil {
c.UI.Error(fmt.Sprintf("Could not parse max_request_size value %v", valRaw))
return 1
}
if val >= 0 {
maxRequestSize = val
}
if lnConfig.MaxRequestSize == 0 {
lnConfig.MaxRequestSize = vaulthttp.DefaultMaxRequestSize
}
props["max_request_size"] = fmt.Sprintf("%d", maxRequestSize)
props["max_request_size"] = fmt.Sprintf("%d", lnConfig.MaxRequestSize)
maxRequestDuration := vault.DefaultMaxRequestDuration
if valRaw, ok := lnConfig.Config["max_request_duration"]; ok {
val, err := parseutil.ParseDurationSecond(valRaw)
if err != nil {
c.UI.Error(fmt.Sprintf("Could not parse max_request_duration value %v", valRaw))
return 1
}
if val >= 0 {
maxRequestDuration = val
}
if lnConfig.MaxRequestDuration == 0 {
lnConfig.MaxRequestDuration = vault.DefaultMaxRequestDuration
}
props["max_request_duration"] = fmt.Sprintf("%s", maxRequestDuration.String())
props["max_request_duration"] = fmt.Sprintf("%s", lnConfig.MaxRequestDuration.String())
var unauthenticatedMetricsAccess bool
if telemetryRaw, ok := lnConfig.Config["telemetry"]; ok {
telemetry, ok := telemetryRaw.([]map[string]interface{})
if !ok {
c.UI.Error(fmt.Sprintf("Could not parse telemetry sink value %v", telemetryRaw))
return 1
}
for _, item := range telemetry {
if valRaw, ok := item["unauthenticated_metrics_access"]; ok {
unauthenticatedMetricsAccess, err = parseutil.ParseBool(valRaw)
if err != nil {
c.UI.Error(fmt.Sprintf("Could not parse unauthenticated_metrics_access value %v", valRaw))
return 1
}
}
}
}
lns = append(lns, ServerListener{
Listener: ln,
config: lnConfig.Config,
maxRequestSize: maxRequestSize,
maxRequestDuration: maxRequestDuration,
unauthenticatedMetricsAccess: unauthenticatedMetricsAccess,
lns = append(lns, listenerutil.Listener{
Listener: ln,
Config: lnConfig,
})
// Store the listener props for output later
@ -1629,7 +1593,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
c.UI.Warn("You may need to set the following environment variable:")
c.UI.Warn("")
endpointURL := "http://" + config.Listeners[0].Config["address"].(string)
endpointURL := "http://" + config.Listeners[0].Address
if runtime.GOOS == "windows" {
c.UI.Warn("PowerShell:")
c.UI.Warn(fmt.Sprintf(" $env:VAULT_ADDR=\"%s\"", endpointURL))
@ -1686,24 +1650,19 @@ CLUSTER_SYNTHESIS_COMPLETE:
// Initialize the HTTP servers
for _, ln := range lns {
if ln.Config == nil {
c.UI.Error("Found nil listener config after parsing")
return 1
}
handler := vaulthttp.Handler(&vault.HandlerProperties{
Core: core,
MaxRequestSize: ln.maxRequestSize,
MaxRequestDuration: ln.maxRequestDuration,
DisablePrintableCheck: config.DisablePrintableCheck,
UnauthenticatedMetricsAccess: ln.unauthenticatedMetricsAccess,
RecoveryMode: c.flagRecovery,
Core: core,
ListenerConfig: ln.Config,
DisablePrintableCheck: config.DisablePrintableCheck,
RecoveryMode: c.flagRecovery,
})
// We perform validation on the config earlier, we can just cast here
if _, ok := ln.config["x_forwarded_for_authorized_addrs"]; ok {
hopSkips := ln.config["x_forwarded_for_hop_skips"].(int)
authzdAddrs := ln.config["x_forwarded_for_authorized_addrs"].([]*sockaddr.SockAddrMarshaler)
rejectNotPresent := ln.config["x_forwarded_for_reject_not_present"].(bool)
rejectNonAuthz := ln.config["x_forwarded_for_reject_not_authorized"].(bool)
if len(authzdAddrs) > 0 {
handler = vaulthttp.WrapForwardedForHandler(handler, authzdAddrs, rejectNotPresent, rejectNonAuthz, hopSkips)
}
if len(ln.Config.XForwardedForAuthorizedAddrs) > 0 {
handler = vaulthttp.WrapForwardedForHandler(handler, ln.Config)
}
// server defaults
@ -1716,40 +1675,17 @@ CLUSTER_SYNTHESIS_COMPLETE:
}
// override server defaults with config values for read/write/idle timeouts if configured
if readHeaderTimeoutInterface, ok := ln.config["http_read_header_timeout"]; ok {
readHeaderTimeout, err := parseutil.ParseDurationSecond(readHeaderTimeoutInterface)
if err != nil {
c.UI.Error(fmt.Sprintf("Could not parse a time value for http_read_header_timeout %v", readHeaderTimeout))
return 1
}
server.ReadHeaderTimeout = readHeaderTimeout
if ln.Config.HTTPReadHeaderTimeout > 0 {
server.ReadHeaderTimeout = ln.Config.HTTPReadHeaderTimeout
}
if readTimeoutInterface, ok := ln.config["http_read_timeout"]; ok {
readTimeout, err := parseutil.ParseDurationSecond(readTimeoutInterface)
if err != nil {
c.UI.Error(fmt.Sprintf("Could not parse a time value for http_read_timeout %v", readTimeout))
return 1
}
server.ReadTimeout = readTimeout
if ln.Config.HTTPReadTimeout > 0 {
server.ReadTimeout = ln.Config.HTTPReadTimeout
}
if writeTimeoutInterface, ok := ln.config["http_write_timeout"]; ok {
writeTimeout, err := parseutil.ParseDurationSecond(writeTimeoutInterface)
if err != nil {
c.UI.Error(fmt.Sprintf("Could not parse a time value for http_write_timeout %v", writeTimeout))
return 1
}
server.WriteTimeout = writeTimeout
if ln.Config.HTTPWriteTimeout > 0 {
server.WriteTimeout = ln.Config.HTTPWriteTimeout
}
if idleTimeoutInterface, ok := ln.config["http_idle_timeout"]; ok {
idleTimeout, err := parseutil.ParseDurationSecond(idleTimeoutInterface)
if err != nil {
c.UI.Error(fmt.Sprintf("Could not parse a time value for http_idle_timeout %v", idleTimeout))
return 1
}
server.IdleTimeout = idleTimeout
if ln.Config.HTTPIdleTimeout > 0 {
server.IdleTimeout = ln.Config.HTTPIdleTimeout
}
// server config tests can exit now
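HTTP handler properties and server timeouts are now driven by the typed listener config: vaulthttp.Handler receives the whole ListenerConfig, and the read/write/idle timeouts are plain time.Duration fields checked against zero instead of being re-parsed from the raw map. A condensed sketch of wiring one listener, with the vault import path assumed from the surrounding file:

    import (
        "net/http"

        vaulthttp "github.com/hashicorp/vault/http"
        "github.com/hashicorp/vault/internalshared/listenerutil"
        "github.com/hashicorp/vault/vault"
    )

    // newListenerServer sketches building an *http.Server for one listener
    // from its typed config, mirroring the flow above.
    func newListenerServer(core *vault.Core, ln listenerutil.Listener) *http.Server {
        handler := vaulthttp.Handler(&vault.HandlerProperties{
            Core:           core,
            ListenerConfig: ln.Config,
        })

        // Only wrap the handler when X-Forwarded-For sources are authorized.
        if len(ln.Config.XForwardedForAuthorizedAddrs) > 0 {
            handler = vaulthttp.WrapForwardedForHandler(handler, ln.Config)
        }

        srv := &http.Server{Handler: handler}

        // Zero means "keep the server default"; positive values override it.
        if ln.Config.HTTPReadHeaderTimeout > 0 {
            srv.ReadHeaderTimeout = ln.Config.HTTPReadHeaderTimeout
        }
        if ln.Config.HTTPReadTimeout > 0 {
            srv.ReadTimeout = ln.Config.HTTPReadTimeout
        }
        if ln.Config.HTTPWriteTimeout > 0 {
            srv.WriteTimeout = ln.Config.HTTPWriteTimeout
        }
        if ln.Config.HTTPIdleTimeout > 0 {
            srv.IdleTimeout = ln.Config.HTTPIdleTimeout
        }
        return srv
    }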
@ -2305,24 +2241,14 @@ func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
}
// Check if TLS is disabled
if val, ok := list.Config["tls_disable"]; ok {
disable, err := parseutil.ParseBool(val)
if err != nil {
return "", errwrap.Wrapf("tls_disable: {{err}}", err)
}
if disable {
scheme = "http"
}
if list.TLSDisable {
scheme = "http"
}
// Check for address override
var addr string
addrRaw, ok := list.Config["address"]
if !ok {
addr := list.Address
if addr == "" {
addr = "127.0.0.1:8200"
} else {
addr = addrRaw.(string)
}
// Check for localhost
@ -2352,165 +2278,6 @@ func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
return url.String(), nil
}
// setupTelemetry is used to setup the telemetry sub-systems and returns the in-memory sink to be used in http configuration
func (c *ServerCommand) setupTelemetry(config *server.Config) (*metricsutil.MetricsHelper, *metricsutil.ClusterMetricSink, error) {
/* Setup telemetry
Aggregate on 10 second intervals for 1 minute. Expose the
metrics over stderr when there is a SIGUSR1 received.
*/
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
metrics.DefaultInmemSignal(inm)
var telConfig *server.Telemetry
if config.Telemetry != nil {
telConfig = config.Telemetry
} else {
telConfig = &server.Telemetry{}
}
serviceName := "vault"
if telConfig.MetricsPrefix != "" {
serviceName = telConfig.MetricsPrefix
}
metricsConf := metrics.DefaultConfig(serviceName)
metricsConf.EnableHostname = !telConfig.DisableHostname
metricsConf.EnableHostnameLabel = telConfig.EnableHostnameLabel
// Configure the statsite sink
var fanout metrics.FanoutSink
var prometheusEnabled bool
// Configure the Prometheus sink
if telConfig.PrometheusRetentionTime != 0 {
prometheusEnabled = true
prometheusOpts := prometheus.PrometheusOpts{
Expiration: telConfig.PrometheusRetentionTime,
}
sink, err := prometheus.NewPrometheusSinkFrom(prometheusOpts)
if err != nil {
return nil, nil, err
}
fanout = append(fanout, sink)
}
metricHelper := metricsutil.NewMetricsHelper(inm, prometheusEnabled)
if telConfig.StatsiteAddr != "" {
sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr)
if err != nil {
return nil, nil, err
}
fanout = append(fanout, sink)
}
// Configure the statsd sink
if telConfig.StatsdAddr != "" {
sink, err := metrics.NewStatsdSink(telConfig.StatsdAddr)
if err != nil {
return nil, nil, err
}
fanout = append(fanout, sink)
}
// Configure the Circonus sink
if telConfig.CirconusAPIToken != "" || telConfig.CirconusCheckSubmissionURL != "" {
cfg := &circonus.Config{}
cfg.Interval = telConfig.CirconusSubmissionInterval
cfg.CheckManager.API.TokenKey = telConfig.CirconusAPIToken
cfg.CheckManager.API.TokenApp = telConfig.CirconusAPIApp
cfg.CheckManager.API.URL = telConfig.CirconusAPIURL
cfg.CheckManager.Check.SubmissionURL = telConfig.CirconusCheckSubmissionURL
cfg.CheckManager.Check.ID = telConfig.CirconusCheckID
cfg.CheckManager.Check.ForceMetricActivation = telConfig.CirconusCheckForceMetricActivation
cfg.CheckManager.Check.InstanceID = telConfig.CirconusCheckInstanceID
cfg.CheckManager.Check.SearchTag = telConfig.CirconusCheckSearchTag
cfg.CheckManager.Check.DisplayName = telConfig.CirconusCheckDisplayName
cfg.CheckManager.Check.Tags = telConfig.CirconusCheckTags
cfg.CheckManager.Broker.ID = telConfig.CirconusBrokerID
cfg.CheckManager.Broker.SelectTag = telConfig.CirconusBrokerSelectTag
if cfg.CheckManager.API.TokenApp == "" {
cfg.CheckManager.API.TokenApp = "vault"
}
if cfg.CheckManager.Check.DisplayName == "" {
cfg.CheckManager.Check.DisplayName = "Vault"
}
if cfg.CheckManager.Check.SearchTag == "" {
cfg.CheckManager.Check.SearchTag = "service:vault"
}
sink, err := circonus.NewCirconusSink(cfg)
if err != nil {
return nil, nil, err
}
sink.Start()
fanout = append(fanout, sink)
}
if telConfig.DogStatsDAddr != "" {
var tags []string
if telConfig.DogStatsDTags != nil {
tags = telConfig.DogStatsDTags
}
sink, err := datadog.NewDogStatsdSink(telConfig.DogStatsDAddr, metricsConf.HostName)
if err != nil {
return nil, nil, errwrap.Wrapf("failed to start DogStatsD sink: {{err}}", err)
}
sink.SetTags(tags)
fanout = append(fanout, sink)
}
// Configure the stackdriver sink
if telConfig.StackdriverProjectID != "" {
client, err := monitoring.NewMetricClient(context.Background(), option.WithUserAgent(useragent.String()))
if err != nil {
return nil, nil, fmt.Errorf("Failed to create stackdriver client: %v", err)
}
sink := stackdriver.NewSink(client, &stackdriver.Config{
LabelExtractor: stackdrivervault.Extractor,
Bucketer: stackdrivervault.Bucketer,
ProjectID: telConfig.StackdriverProjectID,
Location: telConfig.StackdriverLocation,
Namespace: telConfig.StackdriverNamespace,
DebugLogs: telConfig.StackdriverDebugLogs,
})
fanout = append(fanout, sink)
}
// Initialize the global sink
if len(fanout) > 1 {
// Hostname enabled will create poor quality metrics name for prometheus
if !telConfig.DisableHostname {
c.UI.Warn("telemetry.disable_hostname has been set to false. Recommended setting is true for Prometheus to avoid poorly named metrics.")
}
} else {
metricsConf.EnableHostname = false
}
fanout = append(fanout, inm)
_, err := metrics.NewGlobal(metricsConf, fanout)
if err != nil {
return nil, nil, err
}
// Intialize a wrapper around the global sink; this will be passed to Core
// and to any backend.
wrapper := &metricsutil.ClusterMetricSink{
ClusterName: config.ClusterName,
MaxGaugeCardinality: 500,
GaugeInterval: 10 * time.Minute,
Sink: fanout,
}
return metricHelper, wrapper, nil
}
func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reloadutil.ReloadFunc, configPath []string) error {
lock.RLock()
defer lock.RUnlock()
@ -2522,7 +2289,7 @@ func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]rel
case strings.HasPrefix(k, "listener|"):
for _, relFunc := range relFuncs {
if relFunc != nil {
if err := relFunc(nil); err != nil {
if err := relFunc(); err != nil {
reloadErrors = multierror.Append(reloadErrors, errwrap.Wrapf("error encountered reloading listener: {{err}}", err))
}
}
@ -2531,7 +2298,7 @@ func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]rel
case strings.HasPrefix(k, "audit_file|"):
for _, relFunc := range relFuncs {
if relFunc != nil {
if err := relFunc(nil); err != nil {
if err := relFunc(); err != nil {
reloadErrors = multierror.Append(reloadErrors, errwrap.Wrapf(fmt.Sprintf("error encountered reloading file audit device at path %q: {{err}}", strings.TrimPrefix(k, "audit_file|")), err))
}
}

View File

@ -15,60 +15,40 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/parseutil"
)
const (
prometheusDefaultRetentionTime = 24 * time.Hour
)
// Config is the configuration for the vault server.
type Config struct {
entConfig
Listeners []*Listener `hcl:"-"`
Storage *Storage `hcl:"-"`
HAStorage *Storage `hcl:"-"`
*configutil.SharedConfig `hcl:"-"`
Storage *Storage `hcl:"-"`
HAStorage *Storage `hcl:"-"`
ServiceRegistration *ServiceRegistration `hcl:"-"`
Seals []*Seal `hcl:"-"`
Entropy *Entropy `hcl:"-"`
CacheSize int `hcl:"cache_size"`
DisableCache bool `hcl:"-"`
DisableCacheRaw interface{} `hcl:"disable_cache"`
DisableMlock bool `hcl:"-"`
DisableMlockRaw interface{} `hcl:"disable_mlock"`
DisablePrintableCheck bool `hcl:"-"`
DisablePrintableCheckRaw interface{} `hcl:"disable_printable_check"`
EnableUI bool `hcl:"-"`
EnableUIRaw interface{} `hcl:"ui"`
Telemetry *Telemetry `hcl:"telemetry"`
MaxLeaseTTL time.Duration `hcl:"-"`
MaxLeaseTTLRaw interface{} `hcl:"max_lease_ttl"`
DefaultLeaseTTL time.Duration `hcl:"-"`
DefaultLeaseTTLRaw interface{} `hcl:"default_lease_ttl"`
DefaultMaxRequestDuration time.Duration `hcl:"-"`
DefaultMaxRequestDurationRaw interface{} `hcl:"default_max_request_duration"`
ClusterName string `hcl:"cluster_name"`
ClusterCipherSuites string `hcl:"cluster_cipher_suites"`
PluginDirectory string `hcl:"plugin_directory"`
LogLevel string `hcl:"log_level"`
// LogFormat specifies the log format. Valid values are "standard" and "json". The values are case-insenstive.
// If no log format is specified, then standard format will be used.
LogFormat string `hcl:"log_format"`
PidFile string `hcl:"pid_file"`
EnableRawEndpoint bool `hcl:"-"`
EnableRawEndpointRaw interface{} `hcl:"raw_storage_endpoint"`
@ -88,58 +68,36 @@ type Config struct {
}
// DevConfig is a Config that is used for dev mode of Vault.
func DevConfig(storageType string) *Config {
ret := &Config{
DisableMlock: true,
EnableRawEndpoint: true,
func DevConfig(storageType string) (*Config, error) {
hclStr := `
disable_mlock = true
Storage: &Storage{
Type: storageType,
},
listener "tcp" {
address = "127.0.0.1:8200"
tls_disable = true
proxy_protocol_behavior = "allow_authorized"
proxy_protocol_authorized_addrs = "127.0.0.1:8200"
}
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:8200",
"tls_disable": true,
"proxy_protocol_behavior": "allow_authorized",
"proxy_protocol_authorized_addrs": "127.0.0.1:8200",
},
},
},
telemetry {
prometheus_retention_time = "24h"
disable_hostname = true
}
EnableUI: true,
enable_raw_endpoint = true
Telemetry: &Telemetry{
PrometheusRetentionTime: prometheusDefaultRetentionTime,
DisableHostname: true,
},
storage "%s" {
}
enable_ui = true
`
hclStr = fmt.Sprintf(hclStr, storageType)
parsed, err := ParseConfig(hclStr)
if err != nil {
return nil, fmt.Errorf("error parsing dev config: %w", err)
}
return ret
}
// Listener is the listener configuration for the server.
type Listener struct {
Type string
Config map[string]interface{}
}
func (l *Listener) GoString() string {
return fmt.Sprintf("*%#v", *l)
}
// Entropy contains Entropy configuration for the server
type EntropyMode int
const (
Unknown EntropyMode = iota
Augmentation
)
type Entropy struct {
Mode EntropyMode
return parsed, nil
}
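DevConfig now builds an HCL string and runs it through the normal ParseConfig path, which means it can fail and therefore returns an error. A minimal sketch of the updated call, with the dev listener address override written the way the command now does it (via the typed Address field):

    import (
        "fmt"
        "log"

        "github.com/hashicorp/vault/command/server"
    )

    func devServerConfig() {
        cfg, err := server.DevConfig("inmem")
        if err != nil {
            log.Fatal(err)
        }

        // Listeners expose typed fields instead of a raw config map.
        cfg.Listeners[0].Address = "127.0.0.1:8201"
        fmt.Println("dev listener:", cfg.Listeners[0].Address)
    }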
// Storage is the underlying storage configuration for the server.
@ -165,125 +123,10 @@ func (b *ServiceRegistration) GoString() string {
return fmt.Sprintf("*%#v", *b)
}
// Seal contains Seal configuration for the server
type Seal struct {
Type string
Disabled bool
Config map[string]string
}
func (h *Seal) GoString() string {
return fmt.Sprintf("*%#v", *h)
}
// Telemetry is the telemetry configuration for the server
type Telemetry struct {
StatsiteAddr string `hcl:"statsite_address"`
StatsdAddr string `hcl:"statsd_address"`
DisableHostname bool `hcl:"disable_hostname"`
EnableHostnameLabel bool `hcl:"enable_hostname_label"`
MetricsPrefix string `hcl:"metrics_prefix"`
// Circonus: see https://github.com/circonus-labs/circonus-gometrics
// for more details on the various configuration options.
// Valid configuration combinations:
// - CirconusAPIToken
// metric management enabled (search for existing check or create a new one)
// - CirconusSubmissionUrl
// metric management disabled (use check with specified submission_url,
// broker must be using a public SSL certificate)
// - CirconusAPIToken + CirconusCheckSubmissionURL
// metric management enabled (use check with specified submission_url)
// - CirconusAPIToken + CirconusCheckID
// metric management enabled (use check with specified id)
// CirconusAPIToken is a valid API Token used to create/manage check. If provided,
// metric management is enabled.
// Default: none
CirconusAPIToken string `hcl:"circonus_api_token"`
// CirconusAPIApp is an app name associated with API token.
// Default: "consul"
CirconusAPIApp string `hcl:"circonus_api_app"`
// CirconusAPIURL is the base URL to use for contacting the Circonus API.
// Default: "https://api.circonus.com/v2"
CirconusAPIURL string `hcl:"circonus_api_url"`
// CirconusSubmissionInterval is the interval at which metrics are submitted to Circonus.
// Default: 10s
CirconusSubmissionInterval string `hcl:"circonus_submission_interval"`
// CirconusCheckSubmissionURL is the check.config.submission_url field from a
// previously created HTTPTRAP check.
// Default: none
CirconusCheckSubmissionURL string `hcl:"circonus_submission_url"`
// CirconusCheckID is the check id (not check bundle id) from a previously created
// HTTPTRAP check. The numeric portion of the check._cid field.
// Default: none
CirconusCheckID string `hcl:"circonus_check_id"`
// CirconusCheckForceMetricActivation will force enabling metrics, as they are encountered,
// if the metric already exists and is NOT active. If check management is enabled, the default
// behavior is to add new metrics as they are encountered. If the metric already exists in the
// check, it will *NOT* be activated. This setting overrides that behavior.
// Default: "false"
CirconusCheckForceMetricActivation string `hcl:"circonus_check_force_metric_activation"`
// CirconusCheckInstanceID serves to uniquely identify the metrics coming from this "instance".
// It can be used to maintain metric continuity with transient or ephemeral instances as
// they move around within an infrastructure.
// Default: hostname:app
CirconusCheckInstanceID string `hcl:"circonus_check_instance_id"`
// CirconusCheckSearchTag is a special tag which, when coupled with the instance id, helps to
// narrow down the search results when neither a Submission URL or Check ID is provided.
// Default: service:app (e.g. service:consul)
CirconusCheckSearchTag string `hcl:"circonus_check_search_tag"`
// CirconusCheckTags is a comma separated list of tags to apply to the check. Note that
// the value of CirconusCheckSearchTag will always be added to the check.
// Default: none
CirconusCheckTags string `hcl:"circonus_check_tags"`
// CirconusCheckDisplayName is the name for the check which will be displayed in the Circonus UI.
// Default: value of CirconusCheckInstanceID
CirconusCheckDisplayName string `hcl:"circonus_check_display_name"`
// CirconusBrokerID is an explicit broker to use when creating a new check. The numeric portion
// of broker._cid. If metric management is enabled and neither a Submission URL nor Check ID
// is provided, an attempt will be made to search for an existing check using Instance ID and
// Search Tag. If one is not found, a new HTTPTRAP check will be created.
// Default: use Select Tag if provided, otherwise, a random Enterprise Broker associated
// with the specified API token or the default Circonus Broker.
// Default: none
CirconusBrokerID string `hcl:"circonus_broker_id"`
// CirconusBrokerSelectTag is a special tag which will be used to select a broker when
// a Broker ID is not provided. The best use of this is to as a hint for which broker
// should be used based on *where* this particular instance is running.
// (e.g. a specific geo location or datacenter, dc:sfo)
// Default: none
CirconusBrokerSelectTag string `hcl:"circonus_broker_select_tag"`
// Dogstats:
// DogStatsdAddr is the address of a dogstatsd instance. If provided,
// metrics will be sent to that instance
DogStatsDAddr string `hcl:"dogstatsd_addr"`
// DogStatsdTags are the global tags that should be sent with each packet to dogstatsd
// It is a list of strings, where each string looks like "my_tag_name:my_tag_value"
DogStatsDTags []string `hcl:"dogstatsd_tags"`
// Prometheus:
// PrometheusRetentionTime is the retention time for prometheus metrics if greater than 0.
// Default: 24h
PrometheusRetentionTime time.Duration `hcl:"-"`
PrometheusRetentionTimeRaw interface{} `hcl:"prometheus_retention_time"`
// Stackdriver:
// StackdriverProjectID is the project to publish stackdriver metrics to.
StackdriverProjectID string `hcl:"stackdriver_project_id"`
// StackdriverLocation is the GCP or AWS region of the monitored resource.
StackdriverLocation string `hcl:"stackdriver_location"`
// StackdriverNamespace is the namespace identifier, such as a cluster name.
StackdriverNamespace string `hcl:"stackdriver_namespace"`
// StackdriverDebugLogs will write additional stackdriver related debug logs to stderr.
StackdriverDebugLogs bool `hcl:"stackdriver_debug_logs"`
}
func (s *Telemetry) GoString() string {
return fmt.Sprintf("*%#v", *s)
func NewConfig() *Config {
return &Config{
SharedConfig: new(configutil.SharedConfig),
}
}
// Merge merges two configurations.
@ -292,12 +135,11 @@ func (c *Config) Merge(c2 *Config) *Config {
return c
}
result := new(Config)
for _, l := range c.Listeners {
result.Listeners = append(result.Listeners, l)
}
for _, l := range c2.Listeners {
result.Listeners = append(result.Listeners, l)
result := NewConfig()
result.SharedConfig = c.SharedConfig
if c2.SharedConfig != nil {
result.SharedConfig = c.SharedConfig.Merge(c2.SharedConfig)
}
result.Storage = c.Storage
@ -315,23 +157,6 @@ func (c *Config) Merge(c2 *Config) *Config {
result.ServiceRegistration = c2.ServiceRegistration
}
result.Entropy = c.Entropy
if c2.Entropy != nil {
result.Entropy = c2.Entropy
}
for _, s := range c.Seals {
result.Seals = append(result.Seals, s)
}
for _, s := range c2.Seals {
result.Seals = append(result.Seals, s)
}
result.Telemetry = c.Telemetry
if c2.Telemetry != nil {
result.Telemetry = c2.Telemetry
}
result.CacheSize = c.CacheSize
if c2.CacheSize != 0 {
result.CacheSize = c2.CacheSize
@ -343,11 +168,6 @@ func (c *Config) Merge(c2 *Config) *Config {
result.DisableCache = c2.DisableCache
}
result.DisableMlock = c.DisableMlock
if c2.DisableMlock {
result.DisableMlock = c2.DisableMlock
}
result.DisablePrintableCheck = c.DisablePrintableCheck
if c2.DisablePrintableCheckRaw != nil {
result.DisablePrintableCheck = c2.DisablePrintableCheck
@ -364,26 +184,6 @@ func (c *Config) Merge(c2 *Config) *Config {
result.DefaultLeaseTTL = c2.DefaultLeaseTTL
}
result.DefaultMaxRequestDuration = c.DefaultMaxRequestDuration
if c2.DefaultMaxRequestDuration > result.DefaultMaxRequestDuration {
result.DefaultMaxRequestDuration = c2.DefaultMaxRequestDuration
}
result.LogLevel = c.LogLevel
if c2.LogLevel != "" {
result.LogLevel = c2.LogLevel
}
result.LogFormat = c.LogFormat
if c2.LogFormat != "" {
result.LogFormat = c2.LogFormat
}
result.ClusterName = c.ClusterName
if c2.ClusterName != "" {
result.ClusterName = c2.ClusterName
}
result.ClusterCipherSuites = c.ClusterCipherSuites
if c2.ClusterCipherSuites != "" {
result.ClusterCipherSuites = c2.ClusterCipherSuites
@ -422,11 +222,6 @@ func (c *Config) Merge(c2 *Config) *Config {
result.PluginDirectory = c2.PluginDirectory
}
result.PidFile = c.PidFile
if c2.PidFile != "" {
result.PidFile = c2.PidFile
}
result.DisablePerformanceStandby = c.DisablePerformanceStandby
if c2.DisablePerformanceStandby {
result.DisablePerformanceStandby = c2.DisablePerformanceStandby
@ -479,9 +274,26 @@ func LoadConfig(path string) (*Config, error) {
}
if fi.IsDir() {
return LoadConfigDir(path)
return CheckConfig(LoadConfigDir(path))
}
return LoadConfigFile(path)
return CheckConfig(LoadConfigFile(path))
}
func CheckConfig(c *Config, e error) (*Config, error) {
if e != nil {
return c, e
}
if len(c.Seals) == 2 {
switch {
case c.Seals[0].Disabled && c.Seals[1].Disabled:
return nil, errors.New("seals: two seals provided but both are disabled")
case !c.Seals[0].Disabled && !c.Seals[1].Disabled:
return nil, errors.New("seals: two seals provided but neither is disabled")
}
}
return c, nil
}
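The two-seal validation that used to live inside parseSeals is now a separate CheckConfig pass that LoadConfig wraps around both the file and directory loaders. A short sketch of applying the same check to a directly loaded file:

    import "github.com/hashicorp/vault/command/server"

    func loadCheckedConfig(path string) (*server.Config, error) {
        // CheckConfig composes with any loader returning (*Config, error); it
        // rejects configs where two seals are both enabled or both disabled,
        // mirroring the old parseSeals validation.
        return server.CheckConfig(server.LoadConfigFile(path))
    }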
// LoadConfigFile loads the configuration from the given file.
@ -491,7 +303,13 @@ func LoadConfigFile(path string) (*Config, error) {
if err != nil {
return nil, err
}
return ParseConfig(string(d))
conf, err := ParseConfig(string(d))
if err != nil {
return nil, err
}
return conf, nil
}
func ParseConfig(d string) (*Config, error) {
@ -502,11 +320,17 @@ func ParseConfig(d string) (*Config, error) {
}
// Start building the result
var result Config
if err := hcl.DecodeObject(&result, obj); err != nil {
result := NewConfig()
if err := hcl.DecodeObject(result, obj); err != nil {
return nil, err
}
sharedConfig, err := configutil.ParseConfig(d)
if err != nil {
return nil, err
}
result.SharedConfig = sharedConfig
if result.MaxLeaseTTLRaw != nil {
if result.MaxLeaseTTL, err = parseutil.ParseDurationSecond(result.MaxLeaseTTLRaw); err != nil {
return nil, err
@ -518,12 +342,6 @@ func ParseConfig(d string) (*Config, error) {
}
}
if result.DefaultMaxRequestDurationRaw != nil {
if result.DefaultMaxRequestDuration, err = parseutil.ParseDurationSecond(result.DefaultMaxRequestDurationRaw); err != nil {
return nil, err
}
}
if result.EnableUIRaw != nil {
if result.EnableUI, err = parseutil.ParseBool(result.EnableUIRaw); err != nil {
return nil, err
@ -536,12 +354,6 @@ func ParseConfig(d string) (*Config, error) {
}
}
if result.DisableMlockRaw != nil {
if result.DisableMlock, err = parseutil.ParseBool(result.DisableMlockRaw); err != nil {
return nil, err
}
}
if result.DisablePrintableCheckRaw != nil {
if result.DisablePrintableCheck, err = parseutil.ParseBool(result.DisablePrintableCheckRaw); err != nil {
return nil, err
@ -585,24 +397,24 @@ func ParseConfig(d string) (*Config, error) {
// Look for storage but still support old backend
if o := list.Filter("storage"); len(o.Items) > 0 {
if err := ParseStorage(&result, o, "storage"); err != nil {
if err := ParseStorage(result, o, "storage"); err != nil {
return nil, errwrap.Wrapf("error parsing 'storage': {{err}}", err)
}
} else {
if o := list.Filter("backend"); len(o.Items) > 0 {
if err := ParseStorage(&result, o, "backend"); err != nil {
if err := ParseStorage(result, o, "backend"); err != nil {
return nil, errwrap.Wrapf("error parsing 'backend': {{err}}", err)
}
}
}
if o := list.Filter("ha_storage"); len(o.Items) > 0 {
if err := parseHAStorage(&result, o, "ha_storage"); err != nil {
if err := parseHAStorage(result, o, "ha_storage"); err != nil {
return nil, errwrap.Wrapf("error parsing 'ha_storage': {{err}}", err)
}
} else {
if o := list.Filter("ha_backend"); len(o.Items) > 0 {
if err := parseHAStorage(&result, o, "ha_backend"); err != nil {
if err := parseHAStorage(result, o, "ha_backend"); err != nil {
return nil, errwrap.Wrapf("error parsing 'ha_backend': {{err}}", err)
}
}
@ -610,47 +422,17 @@ func ParseConfig(d string) (*Config, error) {
// Parse service discovery
if o := list.Filter("service_registration"); len(o.Items) > 0 {
if err := parseServiceRegistration(&result, o, "service_registration"); err != nil {
if err := parseServiceRegistration(result, o, "service_registration"); err != nil {
return nil, errwrap.Wrapf("error parsing 'service_registration': {{err}}", err)
}
}
if o := list.Filter("hsm"); len(o.Items) > 0 {
if err := parseSeals(&result, o, "hsm"); err != nil {
return nil, errwrap.Wrapf("error parsing 'hsm': {{err}}", err)
}
}
if o := list.Filter("seal"); len(o.Items) > 0 {
if err := parseSeals(&result, o, "seal"); err != nil {
return nil, errwrap.Wrapf("error parsing 'seal': {{err}}", err)
}
}
if o := list.Filter("entropy"); len(o.Items) > 0 {
if err := parseEntropy(&result, o, "entropy"); err != nil {
return nil, errwrap.Wrapf("error parsing 'entropy': {{err}}", err)
}
}
if o := list.Filter("listener"); len(o.Items) > 0 {
if err := parseListeners(&result, o); err != nil {
return nil, errwrap.Wrapf("error parsing 'listener': {{err}}", err)
}
}
if o := list.Filter("telemetry"); len(o.Items) > 0 {
if err := parseTelemetry(&result, o); err != nil {
return nil, errwrap.Wrapf("error parsing 'telemetry': {{err}}", err)
}
}
entConfig := &(result.entConfig)
if err := entConfig.parseConfig(list); err != nil {
return nil, errwrap.Wrapf("error parsing enterprise config: {{err}}", err)
}
return &result, nil
return result, nil
}
// LoadConfigDir loads all the configurations in the given directory
@ -702,7 +484,7 @@ func LoadConfigDir(dir string) (*Config, error) {
}
}
var result *Config
result := NewConfig()
for _, f := range files {
config, err := LoadConfigFile(f)
if err != nil {
@ -902,106 +684,6 @@ func parseServiceRegistration(result *Config, list *ast.ObjectList, name string)
return nil
}
func parseSeals(result *Config, list *ast.ObjectList, blockName string) error {
if len(list.Items) > 2 {
return fmt.Errorf("only two or less %q blocks are permitted", blockName)
}
seals := make([]*Seal, 0, len(list.Items))
for _, item := range list.Items {
key := "seal"
if len(item.Keys) > 0 {
key = item.Keys[0].Token.Value().(string)
}
var m map[string]string
if err := hcl.DecodeObject(&m, item.Val); err != nil {
return multierror.Prefix(err, fmt.Sprintf("seal.%s:", key))
}
var disabled bool
var err error
if v, ok := m["disabled"]; ok {
disabled, err = strconv.ParseBool(v)
if err != nil {
return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key))
}
delete(m, "disabled")
}
seals = append(seals, &Seal{
Type: strings.ToLower(key),
Disabled: disabled,
Config: m,
})
}
if len(seals) == 2 &&
(seals[0].Disabled && seals[1].Disabled || !seals[0].Disabled && !seals[1].Disabled) {
return errors.New("seals: two seals provided but both are disabled or neither are disabled")
}
result.Seals = seals
return nil
}
func parseListeners(result *Config, list *ast.ObjectList) error {
listeners := make([]*Listener, 0, len(list.Items))
for _, item := range list.Items {
key := "listener"
if len(item.Keys) > 0 {
key = item.Keys[0].Token.Value().(string)
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, item.Val); err != nil {
return multierror.Prefix(err, fmt.Sprintf("listeners.%s:", key))
}
lnType := strings.ToLower(key)
listeners = append(listeners, &Listener{
Type: lnType,
Config: m,
})
}
result.Listeners = listeners
return nil
}
func parseTelemetry(result *Config, list *ast.ObjectList) error {
if len(list.Items) > 1 {
return fmt.Errorf("only one 'telemetry' block is permitted")
}
// Get our one item
item := list.Items[0]
var t Telemetry
if err := hcl.DecodeObject(&t, item.Val); err != nil {
return multierror.Prefix(err, "telemetry:")
}
if result.Telemetry == nil {
result.Telemetry = &Telemetry{}
}
if err := hcl.DecodeObject(&result.Telemetry, item.Val); err != nil {
return multierror.Prefix(err, "telemetry:")
}
if result.Telemetry.PrometheusRetentionTimeRaw != nil {
var err error
if result.Telemetry.PrometheusRetentionTime, err = parseutil.ParseDurationSecond(result.Telemetry.PrometheusRetentionTimeRaw); err != nil {
return err
}
} else {
result.Telemetry.PrometheusRetentionTime = prometheusDefaultRetentionTime
}
return nil
}
// Sanitized returns a copy of the config with all values that are considered
// sensitive stripped. It also strips all `*Raw` values that are mainly
// used for parsing.
@ -1012,10 +694,15 @@ func parseTelemetry(result *Config, list *ast.ObjectList) error {
// - Seals.Config
// - Telemetry.CirconusAPIToken
func (c *Config) Sanitized() map[string]interface{} {
// Create shared config if it doesn't exist (e.g. in tests) so that map
// keys are actually populated
if c.SharedConfig == nil {
c.SharedConfig = new(configutil.SharedConfig)
}
sharedResult := c.SharedConfig.Sanitized()
result := map[string]interface{}{
"cache_size": c.CacheSize,
"disable_cache": c.DisableCache,
"disable_mlock": c.DisableMlock,
"disable_printable_check": c.DisablePrintableCheck,
"enable_ui": c.EnableUI,
@ -1023,17 +710,10 @@ func (c *Config) Sanitized() map[string]interface{} {
"max_lease_ttl": c.MaxLeaseTTL,
"default_lease_ttl": c.DefaultLeaseTTL,
"default_max_request_duration": c.DefaultMaxRequestDuration,
"cluster_name": c.ClusterName,
"cluster_cipher_suites": c.ClusterCipherSuites,
"plugin_directory": c.PluginDirectory,
"log_level": c.LogLevel,
"log_format": c.LogFormat,
"pid_file": c.PidFile,
"raw_storage_endpoint": c.EnableRawEndpoint,
"api_addr": c.APIAddr,
@ -1046,18 +726,8 @@ func (c *Config) Sanitized() map[string]interface{} {
"disable_indexing": c.DisableIndexing,
}
// Sanitize listeners
if len(c.Listeners) != 0 {
var sanitizedListeners []interface{}
for _, ln := range c.Listeners {
cleanLn := map[string]interface{}{
"type": ln.Type,
"config": ln.Config,
}
sanitizedListeners = append(sanitizedListeners, cleanLn)
}
result["listeners"] = sanitizedListeners
for k, v := range sharedResult {
result[k] = v
}
// Sanitize storage stanza
@ -1090,49 +760,5 @@ func (c *Config) Sanitized() map[string]interface{} {
result["service_registration"] = sanitizedServiceRegistration
}
// Sanitize seals stanza
if len(c.Seals) != 0 {
var sanitizedSeals []interface{}
for _, s := range c.Seals {
cleanSeal := map[string]interface{}{
"type": s.Type,
"disabled": s.Disabled,
}
sanitizedSeals = append(sanitizedSeals, cleanSeal)
}
result["seals"] = sanitizedSeals
}
// Sanitize telemetry stanza
if c.Telemetry != nil {
sanitizedTelemetry := map[string]interface{}{
"statsite_address": c.Telemetry.StatsiteAddr,
"statsd_address": c.Telemetry.StatsdAddr,
"disable_hostname": c.Telemetry.DisableHostname,
"metrics_prefix": c.Telemetry.MetricsPrefix,
"circonus_api_token": "",
"circonus_api_app": c.Telemetry.CirconusAPIApp,
"circonus_api_url": c.Telemetry.CirconusAPIURL,
"circonus_submission_interval": c.Telemetry.CirconusSubmissionInterval,
"circonus_submission_url": c.Telemetry.CirconusCheckSubmissionURL,
"circonus_check_id": c.Telemetry.CirconusCheckID,
"circonus_check_force_metric_activation": c.Telemetry.CirconusCheckForceMetricActivation,
"circonus_check_instance_id": c.Telemetry.CirconusCheckInstanceID,
"circonus_check_search_tag": c.Telemetry.CirconusCheckSearchTag,
"circonus_check_tags": c.Telemetry.CirconusCheckTags,
"circonus_check_display_name": c.Telemetry.CirconusCheckDisplayName,
"circonus_broker_id": c.Telemetry.CirconusBrokerID,
"circonus_broker_select_tag": c.Telemetry.CirconusBrokerSelectTag,
"dogstatsd_addr": c.Telemetry.DogStatsDAddr,
"dogstatsd_tags": c.Telemetry.DogStatsDTags,
"prometheus_retention_time": c.Telemetry.PrometheusRetentionTime,
"stackdriver_project_id": c.Telemetry.StackdriverProjectID,
"stackdriver_location": c.Telemetry.StackdriverLocation,
"stackdriver_namespace": c.Telemetry.StackdriverNamespace,
"stackdriver_debug_logs": c.Telemetry.StackdriverDebugLogs,
}
result["telemetry"] = sanitizedTelemetry
}
return result
}
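The tail of Sanitized above is a plain map union: the keys produced by SharedConfig.Sanitized (listeners, seals, telemetry, and the other shared fields) are layered onto the server-specific map. A minimal standalone sketch of that pattern follows; the keys shown are illustrative, not the exact output of either method.

package main

import "fmt"

// mergeSanitized layers shared sanitized keys onto the server-specific map,
// mirroring the loop at the end of Config.Sanitized above.
func mergeSanitized(serverOnly, shared map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(serverOnly)+len(shared))
	for k, v := range serverOnly {
		out[k] = v
	}
	for k, v := range shared {
		out[k] = v
	}
	return out
}

func main() {
	fmt.Println(mergeSanitized(
		map[string]interface{}{"cache_size": 0, "disable_cache": true},
		map[string]interface{}{"disable_mlock": true, "cluster_name": "testcluster"},
	))
}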

View File

@ -2,7 +2,6 @@ package server
import (
"fmt"
"reflect"
"strings"
"testing"
"time"
@ -10,6 +9,7 @@ import (
"github.com/go-test/deep"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/internalshared/configutil"
)
func testConfigRaftRetryJoin(t *testing.T) {
@ -19,13 +19,14 @@ func testConfigRaftRetryJoin(t *testing.T) {
}
retryJoinConfig := `[{"leader_api_addr":"http://127.0.0.1:8200"},{"leader_api_addr":"http://127.0.0.2:8200"},{"leader_api_addr":"http://127.0.0.3:8200"}]` + "\n"
expected := &Config{
Listeners: []*Listener{
{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:8200",
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:8200",
},
},
DisableMlock: true,
},
Storage: &Storage{
@ -36,26 +37,58 @@ func testConfigRaftRetryJoin(t *testing.T) {
"retry_join": retryJoinConfig,
},
},
DisableMlock: true,
DisableMlockRaw: true,
}
if !reflect.DeepEqual(config, expected) {
t.Fatalf("\nexpected: %#v\n actual:%#v\n", config, expected)
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
func testLoadConfigFile_topLevel(t *testing.T, entropy *Entropy) {
func testLoadConfigFile_topLevel(t *testing.T, entropy *configutil.Entropy) {
config, err := LoadConfigFile("./test-fixtures/config2.hcl")
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &Config{
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:443",
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
},
},
Telemetry: &configutil.Telemetry{
StatsdAddr: "bar",
StatsiteAddr: "foo",
DisableHostname: false,
DogStatsDAddr: "127.0.0.1:7254",
DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
PrometheusRetentionTime: 30 * time.Second,
},
DisableMlock: true,
PidFile: "./pidfile",
ClusterName: "testcluster",
Seals: []*configutil.KMS{
{
Type: "nopurpose",
},
{
Type: "stringpurpose",
Purpose: []string{"foo"},
},
{
Type: "commastringpurpose",
Purpose: []string{"foo", "bar"},
},
{
Type: "slicepurpose",
Purpose: []string{"zip", "zap"},
},
},
},
@ -86,20 +119,8 @@ func testLoadConfigFile_topLevel(t *testing.T, entropy *Entropy) {
},
},
Telemetry: &Telemetry{
StatsdAddr: "bar",
StatsiteAddr: "foo",
DisableHostname: false,
DogStatsDAddr: "127.0.0.1:7254",
DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
PrometheusRetentionTime: 30 * time.Second,
PrometheusRetentionTimeRaw: "30s",
},
DisableCache: true,
DisableCacheRaw: true,
DisableMlock: true,
DisableMlockRaw: true,
EnableUI: true,
EnableUIRaw: true,
@ -113,9 +134,6 @@ func testLoadConfigFile_topLevel(t *testing.T, entropy *Entropy) {
MaxLeaseTTLRaw: "10h",
DefaultLeaseTTL: 10 * time.Hour,
DefaultLeaseTTLRaw: "10h",
ClusterName: "testcluster",
PidFile: "./pidfile",
APIAddr: "top_level_api_addr",
ClusterAddr: "top_level_cluster_addr",
@ -123,30 +141,49 @@ func testLoadConfigFile_topLevel(t *testing.T, entropy *Entropy) {
if entropy != nil {
expected.Entropy = entropy
}
if !reflect.DeepEqual(config, expected) {
t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
func testLoadConfigFile_json2(t *testing.T, entropy *Entropy) {
func testLoadConfigFile_json2(t *testing.T, entropy *configutil.Entropy) {
config, err := LoadConfigFile("./test-fixtures/config2.hcl.json")
if err != nil {
t.Fatalf("err: %s", err)
}
expected := &Config{
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:443",
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
},
{
Type: "tcp",
Address: "127.0.0.1:444",
},
},
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:444",
},
Telemetry: &configutil.Telemetry{
StatsiteAddr: "foo",
StatsdAddr: "bar",
DisableHostname: true,
CirconusAPIToken: "0",
CirconusAPIApp: "vault",
CirconusAPIURL: "http://api.circonus.com/v2",
CirconusSubmissionInterval: "10s",
CirconusCheckSubmissionURL: "https://someplace.com/metrics",
CirconusCheckID: "0",
CirconusCheckForceMetricActivation: "true",
CirconusCheckInstanceID: "node1:vault",
CirconusCheckSearchTag: "service:vault",
CirconusCheckDisplayName: "node1:vault",
CirconusCheckTags: "cat1:tag1,cat2:tag2",
CirconusBrokerID: "0",
CirconusBrokerSelectTag: "dc:sfo",
PrometheusRetentionTime: 30 * time.Second,
},
},
@ -182,33 +219,14 @@ func testLoadConfigFile_json2(t *testing.T, entropy *Entropy) {
DisableSealWrap: true,
DisableSealWrapRaw: true,
Telemetry: &Telemetry{
StatsiteAddr: "foo",
StatsdAddr: "bar",
DisableHostname: true,
CirconusAPIToken: "0",
CirconusAPIApp: "vault",
CirconusAPIURL: "http://api.circonus.com/v2",
CirconusSubmissionInterval: "10s",
CirconusCheckSubmissionURL: "https://someplace.com/metrics",
CirconusCheckID: "0",
CirconusCheckForceMetricActivation: "true",
CirconusCheckInstanceID: "node1:vault",
CirconusCheckSearchTag: "service:vault",
CirconusCheckDisplayName: "node1:vault",
CirconusCheckTags: "cat1:tag1,cat2:tag2",
CirconusBrokerID: "0",
CirconusBrokerSelectTag: "dc:sfo",
PrometheusRetentionTime: 30 * time.Second,
PrometheusRetentionTimeRaw: "30s",
},
}
if entropy != nil {
expected.Entropy = entropy
}
if !reflect.DeepEqual(config, expected) {
t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
config.Listeners[0].RawConfig = nil
config.Listeners[1].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
@ -216,14 +234,14 @@ func testParseEntropy(t *testing.T, oss bool) {
var tests = []struct {
inConfig string
outErr error
outEntropy Entropy
outEntropy configutil.Entropy
}{
{
inConfig: `entropy "seal" {
mode = "augmentation"
}`,
outErr: nil,
outEntropy: Entropy{Augmentation},
outEntropy: configutil.Entropy{configutil.EntropyAugmentation},
},
{
inConfig: `entropy "seal" {
@ -248,13 +266,15 @@ func testParseEntropy(t *testing.T, oss bool) {
},
}
var config Config
config := Config{
SharedConfig: &configutil.SharedConfig{},
}
for _, test := range tests {
obj, _ := hcl.Parse(strings.TrimSpace(test.inConfig))
list, _ := obj.Node.(*ast.ObjectList)
objList := list.Filter("entropy")
err := parseEntropy(&config, objList, "entropy")
err := configutil.ParseEntropy(config.SharedConfig, objList, "entropy")
// validate the error, both should be nil or have the same Error()
switch {
case oss:
@ -281,13 +301,31 @@ func testLoadConfigFile(t *testing.T) {
}
expected := &Config{
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:443",
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
},
},
Telemetry: &configutil.Telemetry{
StatsdAddr: "bar",
StatsiteAddr: "foo",
DisableHostname: false,
DogStatsDAddr: "127.0.0.1:7254",
DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
PrometheusRetentionTime: configutil.PrometheusDefaultRetentionTime,
MetricsPrefix: "myprefix",
},
DisableMlock: true,
Entropy: nil,
PidFile: "./pidfile",
ClusterName: "testcluster",
},
Storage: &Storage{
@ -314,20 +352,8 @@ func testLoadConfigFile(t *testing.T) {
},
},
Telemetry: &Telemetry{
StatsdAddr: "bar",
StatsiteAddr: "foo",
DisableHostname: false,
DogStatsDAddr: "127.0.0.1:7254",
DogStatsDTags: []string{"tag_1:val_1", "tag_2:val_2"},
PrometheusRetentionTime: prometheusDefaultRetentionTime,
MetricsPrefix: "myprefix",
},
DisableCache: true,
DisableCacheRaw: true,
DisableMlock: true,
DisableMlockRaw: true,
DisablePrintableCheckRaw: true,
DisablePrintableCheck: true,
EnableUI: true,
@ -339,18 +365,14 @@ func testLoadConfigFile(t *testing.T) {
DisableSealWrap: true,
DisableSealWrapRaw: true,
Entropy: nil,
MaxLeaseTTL: 10 * time.Hour,
MaxLeaseTTLRaw: "10h",
DefaultLeaseTTL: 10 * time.Hour,
DefaultLeaseTTLRaw: "10h",
ClusterName: "testcluster",
PidFile: "./pidfile",
}
if !reflect.DeepEqual(config, expected) {
t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
@ -361,13 +383,37 @@ func testLoadConfigFile_json(t *testing.T) {
}
expected := &Config{
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:443",
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
},
},
Telemetry: &configutil.Telemetry{
StatsiteAddr: "baz",
StatsdAddr: "",
DisableHostname: false,
CirconusAPIToken: "",
CirconusAPIApp: "",
CirconusAPIURL: "",
CirconusSubmissionInterval: "",
CirconusCheckSubmissionURL: "",
CirconusCheckID: "",
CirconusCheckForceMetricActivation: "",
CirconusCheckInstanceID: "",
CirconusCheckSearchTag: "",
CirconusCheckDisplayName: "",
CirconusCheckTags: "",
CirconusBrokerID: "",
CirconusBrokerSelectTag: "",
PrometheusRetentionTime: configutil.PrometheusDefaultRetentionTime,
},
PidFile: "./pidfile",
Entropy: nil,
ClusterName: "testcluster",
},
Storage: &Storage{
@ -387,44 +433,21 @@ func testLoadConfigFile_json(t *testing.T) {
ClusterCipherSuites: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
Telemetry: &Telemetry{
StatsiteAddr: "baz",
StatsdAddr: "",
DisableHostname: false,
CirconusAPIToken: "",
CirconusAPIApp: "",
CirconusAPIURL: "",
CirconusSubmissionInterval: "",
CirconusCheckSubmissionURL: "",
CirconusCheckID: "",
CirconusCheckForceMetricActivation: "",
CirconusCheckInstanceID: "",
CirconusCheckSearchTag: "",
CirconusCheckDisplayName: "",
CirconusCheckTags: "",
CirconusBrokerID: "",
CirconusBrokerSelectTag: "",
PrometheusRetentionTime: prometheusDefaultRetentionTime,
},
MaxLeaseTTL: 10 * time.Hour,
MaxLeaseTTLRaw: "10h",
DefaultLeaseTTL: 10 * time.Hour,
DefaultLeaseTTLRaw: "10h",
ClusterName: "testcluster",
DisableCacheRaw: interface{}(nil),
DisableMlockRaw: interface{}(nil),
EnableUI: true,
EnableUIRaw: true,
PidFile: "./pidfile",
EnableRawEndpoint: true,
EnableRawEndpointRaw: true,
DisableSealWrap: true,
DisableSealWrapRaw: true,
Entropy: nil,
}
if !reflect.DeepEqual(config, expected) {
t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
@ -435,24 +458,32 @@ func testLoadConfigDir(t *testing.T) {
}
expected := &Config{
DisableCache: true,
DisableMlock: true,
SharedConfig: &configutil.SharedConfig{
DisableMlock: true,
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
},
},
Telemetry: &configutil.Telemetry{
StatsiteAddr: "qux",
StatsdAddr: "baz",
DisableHostname: true,
PrometheusRetentionTime: configutil.PrometheusDefaultRetentionTime,
},
ClusterName: "testcluster",
},
DisableCache: true,
DisableClustering: false,
DisableClusteringRaw: false,
APIAddr: "https://vault.local",
ClusterAddr: "https://127.0.0.1:444",
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:443",
},
},
},
Storage: &Storage{
Type: "consul",
Config: map[string]string{
@ -467,19 +498,12 @@ func testLoadConfigDir(t *testing.T) {
EnableRawEndpoint: true,
Telemetry: &Telemetry{
StatsiteAddr: "qux",
StatsdAddr: "baz",
DisableHostname: true,
PrometheusRetentionTime: prometheusDefaultRetentionTime,
},
MaxLeaseTTL: 10 * time.Hour,
DefaultLeaseTTL: 10 * time.Hour,
ClusterName: "testcluster",
}
if !reflect.DeepEqual(config, expected) {
t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, expected)
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, expected); diff != nil {
t.Fatal(diff)
}
}
@ -567,6 +591,7 @@ func testConfig_Sanitized(t *testing.T) {
"statsite_address": ""},
}
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(sanitizedConfig, expected); len(diff) > 0 {
t.Fatalf("bad, diff: %#v", diff)
}
@ -586,10 +611,12 @@ listener "tcp" {
tls_disable_client_certs = true
}`))
var config Config
config := Config{
SharedConfig: &configutil.SharedConfig{},
}
list, _ := obj.Node.(*ast.ObjectList)
objList := list.Filter("listener")
parseListeners(&config, objList)
configutil.ParseListeners(config.SharedConfig, objList)
listeners := config.Listeners
if len(listeners) == 0 {
t.Fatalf("expected at least one listener in the config")
@ -600,26 +627,24 @@ listener "tcp" {
}
expected := &Config{
Listeners: []*Listener{
&Listener{
Type: "tcp",
Config: map[string]interface{}{
"address": "127.0.0.1:443",
"cluster_address": "127.0.0.1:8201",
"tls_disable": false,
"tls_cert_file": "./certs/server.crt",
"tls_key_file": "./certs/server.key",
"tls_client_ca_file": "./certs/rootca.crt",
"tls_min_version": "tls12",
"tls_require_and_verify_client_cert": true,
"tls_disable_client_certs": true,
SharedConfig: &configutil.SharedConfig{
Listeners: []*configutil.Listener{
{
Type: "tcp",
Address: "127.0.0.1:443",
ClusterAddress: "127.0.0.1:8201",
TLSCertFile: "./certs/server.crt",
TLSKeyFile: "./certs/server.key",
TLSClientCAFile: "./certs/rootca.crt",
TLSMinVersion: "tls12",
TLSRequireAndVerifyClientCert: true,
TLSDisableClientCerts: true,
},
},
},
}
if !reflect.DeepEqual(config, *expected) {
t.Fatalf("expected \n\n%#v\n\n to be \n\n%#v\n\n", config, *expected)
config.Listeners[0].RawConfig = nil
if diff := deep.Equal(config, *expected); diff != nil {
t.Fatal(diff)
}
}

View File

@ -12,7 +12,3 @@ type entConfig struct {
func (ec *entConfig) parseConfig(list *ast.ObjectList) error {
return nil
}
func parseEntropy(result *Config, list *ast.ObjectList, blockName string) error {
return nil
}

View File

@ -10,12 +10,13 @@ import (
"net"
"github.com/hashicorp/vault/helper/proxyutil"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/internalshared/reloadutil"
"github.com/mitchellh/cli"
)
// ListenerFactory is the factory function to create a listener.
type ListenerFactory func(map[string]interface{}, io.Writer, cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error)
type ListenerFactory func(*configutil.Listener, io.Writer, cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error)
// BuiltinListeners is the list of built-in listener types.
var BuiltinListeners = map[string]ListenerFactory{
@ -24,39 +25,24 @@ var BuiltinListeners = map[string]ListenerFactory{
// NewListener creates a new listener of the given type with the given
// configuration. The type is looked up in the BuiltinListeners map.
func NewListener(t string, config map[string]interface{}, logger io.Writer, ui cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) {
f, ok := BuiltinListeners[t]
func NewListener(l *configutil.Listener, logger io.Writer, ui cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) {
f, ok := BuiltinListeners[l.Type]
if !ok {
return nil, nil, nil, fmt.Errorf("unknown listener type: %q", t)
return nil, nil, nil, fmt.Errorf("unknown listener type: %q", l.Type)
}
return f(config, logger, ui)
return f(l, logger, ui)
}
func listenerWrapProxy(ln net.Listener, config map[string]interface{}) (net.Listener, error) {
behaviorRaw, ok := config["proxy_protocol_behavior"]
if !ok {
func listenerWrapProxy(ln net.Listener, l *configutil.Listener) (net.Listener, error) {
behavior := l.ProxyProtocolBehavior
if behavior == "" {
return ln, nil
}
behavior, ok := behaviorRaw.(string)
if !ok {
return nil, fmt.Errorf("failed parsing proxy_protocol_behavior value: not a string")
}
proxyProtoConfig := &proxyutil.ProxyProtoConfig{
Behavior: behavior,
}
if proxyProtoConfig.Behavior == "allow_authorized" || proxyProtoConfig.Behavior == "deny_unauthorized" {
authorizedAddrsRaw, ok := config["proxy_protocol_authorized_addrs"]
if !ok {
return nil, fmt.Errorf("proxy_protocol_behavior set but no proxy_protocol_authorized_addrs value")
}
if err := proxyProtoConfig.SetAuthorizedAddrs(authorizedAddrsRaw); err != nil {
return nil, errwrap.Wrapf("failed parsing proxy_protocol_authorized_addrs: {{err}}", err)
}
Behavior: behavior,
AuthorizedAddrs: l.ProxyProtocolAuthorizedAddrs,
}
newLn, err := proxyutil.WrapInProxyProto(ln, proxyProtoConfig)
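Since the ListenerFactory signature above now takes *configutil.Listener directly, a caller builds the struct instead of passing an untyped map. A minimal sketch under the import paths used in this change; the address and UI are placeholder values, not part of the diff.

package main

import (
	"os"

	"github.com/hashicorp/vault/command/server"
	"github.com/hashicorp/vault/internalshared/configutil"
	"github.com/mitchellh/cli"
)

func main() {
	// Listener options ride on struct fields rather than map keys.
	ln, props, _, err := server.NewListener(&configutil.Listener{
		Type:       "tcp",
		Address:    "127.0.0.1:8200",
		TLSDisable: true,
	}, os.Stderr, cli.NewMockUi())
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	_ = props
}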

View File

@ -1,6 +1,7 @@
package server
import (
"crypto/tls"
"fmt"
"io"
"net"
@ -8,23 +9,19 @@ import (
"strings"
"time"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/internalshared/listenerutil"
"github.com/hashicorp/vault/internalshared/reloadutil"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/mitchellh/cli"
)
func tcpListenerFactory(config map[string]interface{}, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) {
bindProto := "tcp"
var addr string
addrRaw, ok := config["address"]
if !ok {
func tcpListenerFactory(l *configutil.Listener, _ io.Writer, ui cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, error) {
addr := l.Address
if addr == "" {
addr = "127.0.0.1:8200"
} else {
addr = addrRaw.(string)
}
bindProto := "tcp"
// If they've passed 0.0.0.0, we only want to bind on IPv4
// rather than golang's dual stack default
if strings.HasPrefix(addr, "0.0.0.0:") {
@ -38,67 +35,41 @@ func tcpListenerFactory(config map[string]interface{}, _ io.Writer, ui cli.Ui) (
ln = TCPKeepAliveListener{ln.(*net.TCPListener)}
ln, err = listenerWrapProxy(ln, config)
ln, err = listenerWrapProxy(ln, l)
if err != nil {
return nil, nil, nil, err
}
props := map[string]string{"addr": addr}
ffAllowedRaw, ffAllowedOK := config["x_forwarded_for_authorized_addrs"]
if ffAllowedOK {
ffAllowed, err := parseutil.ParseAddrs(ffAllowedRaw)
if err != nil {
return nil, nil, nil, errwrap.Wrapf("error parsing \"x_forwarded_for_authorized_addrs\": {{err}}", err)
// X-Forwarded-For props
{
if len(l.XForwardedForAuthorizedAddrs) > 0 {
props["x_forwarded_for_authorized_addrs"] = fmt.Sprintf("%v", l.XForwardedForAuthorizedAddrs)
}
if l.XForwardedForHopSkips > 0 {
props["x_forwarded_for_hop_skips"] = fmt.Sprintf("%d", l.XForwardedForHopSkips)
} else if len(l.XForwardedForAuthorizedAddrs) > 0 {
props["x_forwarded_for_hop_skips"] = "0"
}
if len(l.XForwardedForAuthorizedAddrs) > 0 {
props["x_forwarded_for_reject_not_present"] = strconv.FormatBool(l.XForwardedForRejectNotPresent)
}
if len(l.XForwardedForAuthorizedAddrs) > 0 {
props["x_forwarded_for_reject_not_authorized"] = strconv.FormatBool(l.XForwardedForRejectNotAuthorized)
}
props["x_forwarded_for_authorized_addrs"] = fmt.Sprintf("%v", ffAllowed)
config["x_forwarded_for_authorized_addrs"] = ffAllowed
}
if ffHopsRaw, ok := config["x_forwarded_for_hop_skips"]; ok {
ffHops64, err := parseutil.ParseInt(ffHopsRaw)
if err != nil {
return nil, nil, nil, errwrap.Wrapf("error parsing \"x_forwarded_for_hop_skips\": {{err}}", err)
}
if ffHops64 < 0 {
return nil, nil, nil, fmt.Errorf("\"x_forwarded_for_hop_skips\" cannot be negative")
}
ffHops := int(ffHops64)
props["x_forwarded_for_hop_skips"] = strconv.Itoa(ffHops)
config["x_forwarded_for_hop_skips"] = ffHops
} else if ffAllowedOK {
props["x_forwarded_for_hop_skips"] = "0"
config["x_forwarded_for_hop_skips"] = int(0)
}
if ffRejectNotPresentRaw, ok := config["x_forwarded_for_reject_not_present"]; ok {
ffRejectNotPresent, err := parseutil.ParseBool(ffRejectNotPresentRaw)
if err != nil {
return nil, nil, nil, errwrap.Wrapf("error parsing \"x_forwarded_for_reject_not_present\": {{err}}", err)
}
props["x_forwarded_for_reject_not_present"] = strconv.FormatBool(ffRejectNotPresent)
config["x_forwarded_for_reject_not_present"] = ffRejectNotPresent
} else if ffAllowedOK {
props["x_forwarded_for_reject_not_present"] = "true"
config["x_forwarded_for_reject_not_present"] = true
}
if ffRejectNonAuthorizedRaw, ok := config["x_forwarded_for_reject_not_authorized"]; ok {
ffRejectNonAuthorized, err := parseutil.ParseBool(ffRejectNonAuthorizedRaw)
if err != nil {
return nil, nil, nil, errwrap.Wrapf("error parsing \"x_forwarded_for_reject_not_authorized\": {{err}}", err)
}
props["x_forwarded_for_reject_not_authorized"] = strconv.FormatBool(ffRejectNonAuthorized)
config["x_forwarded_for_reject_not_authorized"] = ffRejectNonAuthorized
} else if ffAllowedOK {
props["x_forwarded_for_reject_not_authorized"] = "true"
config["x_forwarded_for_reject_not_authorized"] = true
}
ln, props, reloadFunc, _, err := listenerutil.WrapTLS(ln, props, config, ui)
tlsConfig, reloadFunc, err := listenerutil.TLSConfig(l, props, ui)
if err != nil {
return nil, nil, nil, err
}
if tlsConfig != nil {
ln = tls.NewListener(ln, tlsConfig)
}
return ln, props, reloadFunc, nil
}

View File

@ -11,13 +11,14 @@ import (
"testing"
"time"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/mitchellh/cli"
)
func TestTCPListener(t *testing.T) {
ln, _, _, err := tcpListenerFactory(map[string]interface{}{
"address": "127.0.0.1:0",
"tls_disable": "1",
ln, _, _, err := tcpListenerFactory(&configutil.Listener{
Address: "127.0.0.1:0",
TLSDisable: true,
}, nil, cli.NewMockUi())
if err != nil {
t.Fatalf("err: %s", err)
@ -49,12 +50,12 @@ func TestTCPListener_tls(t *testing.T) {
t.Fatal("not ok when appending CA cert")
}
ln, _, _, err := tcpListenerFactory(map[string]interface{}{
"address": "127.0.0.1:0",
"tls_cert_file": wd + "reload_foo.pem",
"tls_key_file": wd + "reload_foo.key",
"tls_require_and_verify_client_cert": "true",
"tls_client_ca_file": wd + "reload_ca.pem",
ln, _, _, err := tcpListenerFactory(&configutil.Listener{
Address: "127.0.0.1:0",
TLSCertFile: wd + "reload_foo.pem",
TLSKeyFile: wd + "reload_foo.key",
TLSRequireAndVerifyClientCert: true,
TLSClientCAFile: wd + "reload_ca.pem",
}, nil, cli.NewMockUi())
if err != nil {
t.Fatalf("err: %s", err)
@ -87,24 +88,24 @@ func TestTCPListener_tls(t *testing.T) {
testListenerImpl(t, ln, connFn(true), "foo.example.com")
ln, _, _, err = tcpListenerFactory(map[string]interface{}{
"address": "127.0.0.1:0",
"tls_cert_file": wd + "reload_foo.pem",
"tls_key_file": wd + "reload_foo.key",
"tls_require_and_verify_client_cert": "true",
"tls_disable_client_certs": "true",
"tls_client_ca_file": wd + "reload_ca.pem",
ln, _, _, err = tcpListenerFactory(&configutil.Listener{
Address: "127.0.0.1:0",
TLSCertFile: wd + "reload_foo.pem",
TLSKeyFile: wd + "reload_foo.key",
TLSRequireAndVerifyClientCert: true,
TLSDisableClientCerts: true,
TLSClientCAFile: wd + "reload_ca.pem",
}, nil, cli.NewMockUi())
if err == nil {
t.Fatal("expected error due to mutually exclusive client cert options")
}
ln, _, _, err = tcpListenerFactory(map[string]interface{}{
"address": "127.0.0.1:0",
"tls_cert_file": wd + "reload_foo.pem",
"tls_key_file": wd + "reload_foo.key",
"tls_disable_client_certs": "true",
"tls_client_ca_file": wd + "reload_ca.pem",
ln, _, _, err = tcpListenerFactory(&configutil.Listener{
Address: "127.0.0.1:0",
TLSCertFile: wd + "reload_foo.pem",
TLSKeyFile: wd + "reload_foo.key",
TLSDisableClientCerts: true,
TLSClientCAFile: wd + "reload_ca.pem",
}, nil, cli.NewMockUi())
if err != nil {
t.Fatalf("err: %s", err)
@ -131,13 +132,13 @@ func TestTCPListener_tls13(t *testing.T) {
t.Fatal("not ok when appending CA cert")
}
ln, _, _, err := tcpListenerFactory(map[string]interface{}{
"address": "127.0.0.1:0",
"tls_cert_file": wd + "reload_foo.pem",
"tls_key_file": wd + "reload_foo.key",
"tls_require_and_verify_client_cert": "true",
"tls_client_ca_file": wd + "reload_ca.pem",
"tls_min_version": "tls13",
ln, _, _, err := tcpListenerFactory(&configutil.Listener{
Address: "127.0.0.1:0",
TLSCertFile: wd + "reload_foo.pem",
TLSKeyFile: wd + "reload_foo.key",
TLSRequireAndVerifyClientCert: true,
TLSClientCAFile: wd + "reload_ca.pem",
TLSMinVersion: "tls13",
}, nil, cli.NewMockUi())
if err != nil {
t.Fatalf("err: %s", err)
@ -170,26 +171,26 @@ func TestTCPListener_tls13(t *testing.T) {
testListenerImpl(t, ln, connFn(true), "foo.example.com")
ln, _, _, err = tcpListenerFactory(map[string]interface{}{
"address": "127.0.0.1:0",
"tls_cert_file": wd + "reload_foo.pem",
"tls_key_file": wd + "reload_foo.key",
"tls_require_and_verify_client_cert": "true",
"tls_disable_client_certs": "true",
"tls_client_ca_file": wd + "reload_ca.pem",
"tls_min_version": "tls13",
ln, _, _, err = tcpListenerFactory(&configutil.Listener{
Address: "127.0.0.1:0",
TLSCertFile: wd + "reload_foo.pem",
TLSKeyFile: wd + "reload_foo.key",
TLSRequireAndVerifyClientCert: true,
TLSDisableClientCerts: true,
TLSClientCAFile: wd + "reload_ca.pem",
TLSMinVersion: "tls13",
}, nil, cli.NewMockUi())
if err == nil {
t.Fatal("expected error due to mutually exclusive client cert options")
}
ln, _, _, err = tcpListenerFactory(map[string]interface{}{
"address": "127.0.0.1:0",
"tls_cert_file": wd + "reload_foo.pem",
"tls_key_file": wd + "reload_foo.key",
"tls_disable_client_certs": "true",
"tls_client_ca_file": wd + "reload_ca.pem",
"tls_min_version": "tls13",
ln, _, _, err = tcpListenerFactory(&configutil.Listener{
Address: "127.0.0.1:0",
TLSCertFile: wd + "reload_foo.pem",
TLSKeyFile: wd + "reload_foo.key",
TLSDisableClientCerts: true,
TLSClientCAFile: wd + "reload_ca.pem",
TLSMinVersion: "tls13",
}, nil, cli.NewMockUi())
if err != nil {
t.Fatalf("err: %s", err)

View File

@ -1,45 +0,0 @@
package seal
import (
"fmt"
"github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/vault"
)
var (
ConfigureSeal = configureSeal
)
func configureSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger hclog.Logger, inseal vault.Seal) (outseal vault.Seal, err error) {
switch configSeal.Type {
case wrapping.AliCloudKMS:
return configureAliCloudKMSSeal(configSeal, infoKeys, info, logger, inseal)
case wrapping.AWSKMS:
return configureAWSKMSSeal(configSeal, infoKeys, info, logger, inseal)
case wrapping.AzureKeyVault:
return configureAzureKeyVaultSeal(configSeal, infoKeys, info, logger, inseal)
case wrapping.GCPCKMS:
return configureGCPCKMSSeal(configSeal, infoKeys, info, logger, inseal)
case wrapping.OCIKMS:
return configureOCIKMSSeal(configSeal, infoKeys, info, logger, inseal)
case wrapping.Transit:
return configureTransitSeal(configSeal, infoKeys, info, logger, inseal)
case wrapping.PKCS11:
return nil, fmt.Errorf("Seal type 'pkcs11' requires the Vault Enterprise HSM binary")
case wrapping.Shamir:
return inseal, nil
default:
return nil, fmt.Errorf("Unknown seal type %q", configSeal.Type)
}
}

View File

@ -1,36 +0,0 @@
package seal
import (
"github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/seal"
)
func configureAliCloudKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) {
kms := alicloudkms.NewWrapper(nil)
kmsInfo, err := kms.SetConfig(configSeal.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
return nil, err
}
}
autoseal := vault.NewAutoSeal(&seal.Access{
Wrapper: kms,
})
if kmsInfo != nil {
*infoKeys = append(*infoKeys, "Seal Type", "AliCloud KMS Region", "AliCloud KMS KeyID")
(*info)["Seal Type"] = configSeal.Type
(*info)["AliCloud KMS Region"] = kmsInfo["region"]
(*info)["AliCloud KMS KeyID"] = kmsInfo["kms_key_id"]
if domain, ok := kmsInfo["domain"]; ok {
*infoKeys = append(*infoKeys, "AliCloud KMS Domain")
(*info)["AliCloud KMS Domain"] = domain
}
}
return autoseal, nil
}

View File

@ -1,42 +0,0 @@
package seal
import (
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
"github.com/hashicorp/go-kms-wrapping/wrappers/awskms"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/seal"
)
var getAWSKMSFunc = func(opts *wrapping.WrapperOptions, config map[string]string) (wrapping.Wrapper, map[string]string, error) {
kms := awskms.NewWrapper(nil)
kmsInfo, err := kms.SetConfig(config)
return kms, kmsInfo, err
}
func configureAWSKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger hclog.Logger, inseal vault.Seal) (vault.Seal, error) {
kms, kmsInfo, err := getAWSKMSFunc(nil, configSeal.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
return nil, err
}
}
autoseal := vault.NewAutoSeal(&seal.Access{
Wrapper: kms,
})
if kmsInfo != nil {
*infoKeys = append(*infoKeys, "Seal Type", "AWS KMS Region", "AWS KMS KeyID")
(*info)["Seal Type"] = configSeal.Type
(*info)["AWS KMS Region"] = kmsInfo["region"]
(*info)["AWS KMS KeyID"] = kmsInfo["kms_key_id"]
if endpoint, ok := kmsInfo["endpoint"]; ok {
*infoKeys = append(*infoKeys, "AWS KMS Endpoint")
(*info)["AWS KMS Endpoint"] = endpoint
}
}
return autoseal, nil
}

View File

@ -1,33 +0,0 @@
package seal
import (
"github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/seal"
)
func configureAzureKeyVaultSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) {
kv := azurekeyvault.NewWrapper(nil)
kvInfo, err := kv.SetConfig(configSeal.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
return nil, err
}
}
autoseal := vault.NewAutoSeal(&seal.Access{
Wrapper: kv,
})
if kvInfo != nil {
*infoKeys = append(*infoKeys, "Seal Type", "Azure Environment", "Azure Vault Name", "Azure Key Name")
(*info)["Seal Type"] = configSeal.Type
(*info)["Azure Environment"] = kvInfo["environment"]
(*info)["Azure Vault Name"] = kvInfo["vault_name"]
(*info)["Azure Key Name"] = kvInfo["key_name"]
}
return autoseal, nil
}

View File

@ -1,44 +0,0 @@
package seal
import (
"github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/sdk/helper/useragent"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/seal"
)
func configureGCPCKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) {
// The config map can be nil if all other seal params were provided via env
// vars so we nil check here before setting user_agent down below.
if configSeal.Config == nil {
configSeal.Config = map[string]string{}
}
// This is not exposed at the moment so we always override user_agent
// with Vault's internal value.
configSeal.Config["user_agent"] = useragent.String()
kms := gcpckms.NewWrapper(nil)
kmsInfo, err := kms.SetConfig(configSeal.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
return nil, err
}
}
autoseal := vault.NewAutoSeal(&seal.Access{
Wrapper: kms,
})
if kmsInfo != nil {
*infoKeys = append(*infoKeys, "Seal Type", "GCP KMS Project", "GCP KMS Region", "GCP KMS Key Ring", "GCP KMS Crypto Key")
(*info)["Seal Type"] = configSeal.Type
(*info)["GCP KMS Project"] = kmsInfo["project"]
(*info)["GCP KMS Region"] = kmsInfo["region"]
(*info)["GCP KMS Key Ring"] = kmsInfo["key_ring"]
(*info)["GCP KMS Crypto Key"] = kmsInfo["crypto_key"]
}
return autoseal, nil
}

View File

@ -1,31 +0,0 @@
// Copyright © 2019, Oracle and/or its affiliates.
package seal
import (
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-kms-wrapping/wrappers/ocikms"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/seal"
)
func configureOCIKMSSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) {
kms := ocikms.NewWrapper(nil)
kmsInfo, err := kms.SetConfig(configSeal.Config)
if err != nil {
logger.Error("error on setting up config for OCI KMS", "error", err)
return nil, err
}
autoseal := vault.NewAutoSeal(&seal.Access{
Wrapper: kms,
})
if kmsInfo != nil {
*infoKeys = append(*infoKeys, "Seal Type", "OCI KMS KeyID")
(*info)["Seal Type"] = configSeal.Type
(*info)["OCI KMS KeyID"] = kmsInfo[ocikms.KMSConfigKeyID]
(*info)["OCI KMS Crypto Endpoint"] = kmsInfo[ocikms.KMSConfigCryptoEndpoint]
(*info)["OCI KMS Management Endpoint"] = kmsInfo[ocikms.KMSConfigManagementEndpoint]
(*info)["OCI KMS Principal Type"] = kmsInfo["principal_type"]
}
return autoseal, nil
}

View File

@ -1,46 +0,0 @@
package seal
import (
"github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
"github.com/hashicorp/go-kms-wrapping/wrappers/transit"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/vault/seal"
)
var GetTransitKMSFunc = func(opts *wrapping.WrapperOptions, config map[string]string) (wrapping.Wrapper, map[string]string, error) {
transitSeal := transit.NewWrapper(opts)
sealInfo, err := transitSeal.SetConfig(config)
return transitSeal, sealInfo, err
}
func configureTransitSeal(configSeal *server.Seal, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) {
transitSeal, sealInfo, err := GetTransitKMSFunc(
&wrapping.WrapperOptions{
Logger: logger.ResetNamed("seal-transit"),
}, configSeal.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
return nil, err
}
}
autoseal := vault.NewAutoSeal(&seal.Access{
Wrapper: transitSeal,
})
if sealInfo != nil {
*infoKeys = append(*infoKeys, "Seal Type", "Transit Address", "Transit Mount Path", "Transit Key Name")
(*info)["Seal Type"] = configSeal.Type
(*info)["Transit Address"] = sealInfo["address"]
(*info)["Transit Mount Path"] = sealInfo["mount_path"]
(*info)["Transit Key Name"] = sealInfo["key_name"]
if namespace, ok := sealInfo["namespace"]; ok {
*infoKeys = append(*infoKeys, "Transit Namespace")
(*info)["Transit Namespace"] = namespace
}
}
return autoseal, nil
}

View File

@ -1,4 +1,4 @@
package seal_test
package server
import (
"context"
@ -11,7 +11,7 @@ import (
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/command/server/seal"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/ory/dockertest"
)
@ -26,19 +26,19 @@ func TestTransitWrapper_Lifecycle(t *testing.T) {
"key_name": keyName,
}
s, _, err := seal.GetTransitKMSFunc(nil, wrapperConfig)
kms, _, err := configutil.GetTransitKMSFunc(nil, &configutil.KMS{Config: wrapperConfig})
if err != nil {
t.Fatalf("error setting wrapper config: %v", err)
}
// Test Encrypt and Decrypt calls
input := []byte("foo")
swi, err := s.Encrypt(context.Background(), input, nil)
swi, err := kms.Encrypt(context.Background(), input, nil)
if err != nil {
t.Fatalf("err: %s", err.Error())
}
pt, err := s.Decrypt(context.Background(), swi, nil)
pt, err := kms.Decrypt(context.Background(), swi, nil)
if err != nil {
t.Fatalf("err: %s", err.Error())
}
@ -79,7 +79,7 @@ func TestTransitSeal_TokenRenewal(t *testing.T) {
"mount_path": mountPath,
"key_name": keyName,
}
s, _, err := seal.GetTransitKMSFunc(nil, wrapperConfig)
kms, _, err := configutil.GetTransitKMSFunc(nil, &configutil.KMS{Config: wrapperConfig})
if err != nil {
t.Fatalf("error setting wrapper config: %v", err)
}
@ -88,12 +88,12 @@ func TestTransitSeal_TokenRenewal(t *testing.T) {
// Test Encrypt and Decrypt calls
input := []byte("foo")
swi, err := s.Encrypt(context.Background(), input, nil)
swi, err := kms.Encrypt(context.Background(), input, nil)
if err != nil {
t.Fatalf("err: %s", err.Error())
}
pt, err := s.Decrypt(context.Background(), swi, nil)
pt, err := kms.Decrypt(context.Background(), swi, nil)
if err != nil {
t.Fatalf("err: %s", err.Error())
}

View File

@ -5,6 +5,7 @@ ui = true
listener "tcp" {
address = "127.0.0.1:443"
allow_stuff = true
}
backend "consul" {

View File

@ -37,6 +37,18 @@ entropy "seal" {
mode = "augmentation"
}
kms "commastringpurpose" {
purpose = "foo,bar"
}
kms "slicepurpose" {
purpose = ["zip", "zap"]
}
seal "nopurpose" {
}
seal "stringpurpose" {
purpose = "foo"
}
max_lease_ttl = "10h"
default_lease_ttl = "10h"
cluster_name = "testcluster"
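The new kms blocks above exercise both spellings of purpose; per the expectations added in the config tests earlier in this diff, a comma-delimited string and an HCL list both end up as a []string. A rough standalone sketch of that normalization, illustrative only and not the configutil code:

package main

import (
	"fmt"
	"strings"
)

// normalizePurpose flattens either spelling of purpose into a []string,
// matching what the fixtures above expect ("foo,bar" -> [foo bar]).
func normalizePurpose(raw interface{}) []string {
	switch v := raw.(type) {
	case string:
		return strings.Split(v, ",")
	case []string:
		return v
	default:
		return nil
	}
}

func main() {
	fmt.Println(normalizePurpose("foo,bar"))              // [foo bar]
	fmt.Println(normalizePurpose([]string{"zip", "zap"})) // [zip zap]
}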

View File

@ -230,28 +230,28 @@ func TestServer(t *testing.T) {
{
"bad_listener_read_header_timeout_config",
testBaseHCL(t, badListenerReadHeaderTimeout) + inmemHCL,
"Could not parse a time value for http_read_header_timeout",
"unknown unit km in duration 12km",
1,
"-test-server-config",
},
{
"bad_listener_read_timeout_config",
testBaseHCL(t, badListenerReadTimeout) + inmemHCL,
"Could not parse a time value for http_read_timeout",
"parsing \"34日\": invalid syntax",
1,
"-test-server-config",
},
{
"bad_listener_write_timeout_config",
testBaseHCL(t, badListenerWriteTimeout) + inmemHCL,
"Could not parse a time value for http_write_timeout",
"unknown unit lbs in duration 56lbs",
1,
"-test-server-config",
},
{
"bad_listener_idle_timeout_config",
testBaseHCL(t, badListenerIdleTimeout) + inmemHCL,
"Could not parse a time value for http_idle_timeout",
"unknown unit gophers in duration 78gophers",
1,
"-test-server-config",
},
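The updated expectations above are the raw Go parse errors that surface once the old "Could not parse a time value" wrapper is gone. For reference, a standalone check of the kind of error involved; the exact wording varies by Go release, so treat the output as indicative only.

package main

import (
	"fmt"
	"time"
)

func main() {
	// A bogus unit fails in time.ParseDuration, producing the message the
	// updated test cases match against (e.g. "unknown unit km in duration 12km").
	_, err := time.ParseDuration("12km")
	fmt.Println(err)
}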

View File

@ -2,9 +2,7 @@ package command
import (
"context"
"crypto/rand"
"fmt"
"io"
log "github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
@ -16,17 +14,12 @@ import (
)
var (
createSecureRandomReaderFunc = createSecureRandomReader
adjustCoreConfigForEnt = adjustCoreConfigForEntNoop
adjustCoreConfigForEnt = adjustCoreConfigForEntNoop
)
func adjustCoreConfigForEntNoop(config *server.Config, coreConfig *vault.CoreConfig) {
}
func createSecureRandomReader(config *server.Config, seal *vault.Seal) (io.Reader, error) {
return rand.Reader, nil
}
func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal, unwrapSeal vault.Seal) error {
existBarrierSealConfig, existRecoverySealConfig, err := core.PhysicalSealConfigs(context.Background())
if err != nil {
@ -70,7 +63,7 @@ func adjustCoreForSealMigration(logger log.Logger, core *vault.Core, barrierSeal
case wrapping.Shamir:
// The value reflected in config is what we're going to
migrationSeal = vault.NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: logger.Named("shamir"),
}),
})

go.mod
View File

@ -9,7 +9,7 @@ replace github.com/hashicorp/vault/sdk => ./sdk
require (
cloud.google.com/go v0.39.0
github.com/Azure/azure-sdk-for-go v36.2.0+incompatible
github.com/Azure/go-autorest/autorest v0.9.2
github.com/Azure/go-autorest/autorest v0.9.6
github.com/DataDog/zstd v1.4.4 // indirect
github.com/NYTimes/gziphandler v1.1.1
github.com/SAP/go-hdb v0.14.1
@ -43,7 +43,7 @@ require (
github.com/go-test/deep v1.0.2
github.com/gocql/gocql v0.0.0-20190402132108-0e1d5de854df
github.com/gogo/protobuf v1.2.1
github.com/golang/protobuf v1.3.2
github.com/golang/protobuf v1.3.4
github.com/google/go-github v17.0.0+incompatible
github.com/google/go-metrics-stackdriver v0.2.0
github.com/hashicorp/consul-template v0.22.0
@ -51,8 +51,8 @@ require (
github.com/hashicorp/errwrap v1.0.0
github.com/hashicorp/go-cleanhttp v0.5.1
github.com/hashicorp/go-gcp-common v0.6.0
github.com/hashicorp/go-hclog v0.12.0
github.com/hashicorp/go-kms-wrapping v0.5.1
github.com/hashicorp/go-hclog v0.12.1
github.com/hashicorp/go-kms-wrapping v0.5.5
github.com/hashicorp/go-memdb v1.0.2
github.com/hashicorp/go-msgpack v0.5.5
github.com/hashicorp/go-multierror v1.0.0
@ -86,8 +86,8 @@ require (
github.com/hashicorp/vault-plugin-secrets-kv v0.5.5
github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.2
github.com/hashicorp/vault-plugin-secrets-openldap v0.1.2
github.com/hashicorp/vault/api v1.0.5-0.20200317185738-82f498082f02
github.com/hashicorp/vault/sdk v0.1.14-0.20200317185738-82f498082f02
github.com/hashicorp/vault/api v1.0.5-0.20200425175256-972b211f3e96
github.com/hashicorp/vault/sdk v0.1.14-0.20200425175256-972b211f3e96
github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4
github.com/jcmturner/gokrb5/v8 v8.0.0
github.com/jefferai/isbadcipher v0.0.0-20190226160619-51d2077c035f

go.sum
View File

@ -12,16 +12,23 @@ github.com/Azure/azure-sdk-for-go v36.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.2 h1:6AWuh3uWrsZJcNoCHrCF/+g4aKPCU39kaMO6/qrnK/4=
github.com/Azure/go-autorest/autorest v0.9.2/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
github.com/Azure/go-autorest/autorest v0.9.6 h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0=
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.7.0 h1:PUMxSVw3tEImG0JTRqbxjXLKCSoPk7DartDELqlOuiI=
github.com/Azure/go-autorest/autorest/adal v0.7.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.0 h1:18ld/uw9Rr7VkNie7a7RMAcFIWrJdlUL59TWGfcu530=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.0/go.mod h1:Oo5cRhLvZteXzI2itUm5ziqsoIxRkzrt3t61FeZaS18=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0 h1:5PAqnv+CSTwW9mlZWZAizmzrazFWEgZykEZXpr2hDtY=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.0/go.mod h1:rNYMNAefZMRowqCV0cVhr/YDW5dD7afFq9nXAXL4ykE=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
@ -81,7 +88,6 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU=
github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro=
github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
@ -97,9 +103,7 @@ github.com/aws/aws-sdk-go v1.25.41 h1:/hj7nZ0586wFqpwjNpzWiUTwtaMgxAZNZKHay80MdX
github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@ -144,17 +148,14 @@ github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-oidc v2.0.0+incompatible h1:+RStIopZ8wooMx+Vs5Bt8zMXxV1ABl5LbakNExNmZIg=
github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf h1:CAKfRE2YtTUIjjh1bkBtyYFaUT/WmOqsJjgtihT0vMI=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
@ -183,7 +184,6 @@ github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5Jflh
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 h1:2MIhn2R6oXQbgW5yHfS+d6YqyMfXiu2L55rFZC4UD/M=
github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74/go.mod h1:UqXY1lYT/ERa4OEAywUqdok1T4RCRdArkhic1Opuavo=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@ -247,16 +247,17 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
@ -266,7 +267,6 @@ github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@ -276,7 +276,6 @@ github.com/google/go-metrics-stackdriver v0.2.0 h1:rbs2sxHAPn2OtUj9JdR/Gij1YKGl0
github.com/google/go-metrics-stackdriver v0.2.0/go.mod h1:KLcPyp3dWJAFD+yHisGlJSZktIsTjb50eB72U2YZ9K0=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -300,7 +299,6 @@ github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyC
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.0 h1:S7P+1Hm5V/AT9cjEcUD5uDaQSX0OE577aCXgoaKpYbQ=
github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c h1:Lh2aW+HnU2Nbe1gqD9SOJLJxW1jBMmQOktN2acDyJk8=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
@ -320,11 +318,9 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/consul-template v0.22.0 h1:ti5cqAekOeMfFYLJCjlPtKGwBcqwVxoZO/Y2vctwuUE=
github.com/hashicorp/consul-template v0.22.0/go.mod h1:lHrykBIcPobCuEcIMLJryKxDyk2lUMnQWmffOEONH0k=
github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.2.1-0.20200128105449-6681be918a6e h1:vOqdnsq53winzJDN6RTQe9n9g87S595PNsdwKyBWXRM=
github.com/hashicorp/consul/api v1.2.1-0.20200128105449-6681be918a6e/go.mod h1:ztzLK20HA5O27oTf2j/wbNgq8qj/crN8xsSx7pzX0sc=
github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.2.0 h1:GWFYFmry/k4b1hEoy7kSkmU8e30GAyI4VZHk0fRxeL4=
github.com/hashicorp/consul/sdk v0.2.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
@ -335,7 +331,6 @@ github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVo
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-gatedio v0.5.0 h1:Jm1X5yP4yCqqWj5L1TgW7iZwCVPGtVc+mro5r/XX7Tg=
github.com/hashicorp/go-gatedio v0.5.0/go.mod h1:Lr3t8L6IyxD3DAeaUxGcgl2JnRUpWMCsmBl4Omu/2t4=
github.com/hashicorp/go-gcp-common v0.5.0 h1:kkIQTjNTopn4eXQ1+lCiHYZXUtgIZvbc6YtAQkMnTos=
github.com/hashicorp/go-gcp-common v0.5.0/go.mod h1:IDGUI2N/OS3PiU4qZcXJeWKPI6O/9Y8hOrbSiMcqyYw=
github.com/hashicorp/go-gcp-common v0.6.0 h1:m1X+DK003bj4WEjqOnv+Csepb3zpfN/bidboUeUSj68=
github.com/hashicorp/go-gcp-common v0.6.0/go.mod h1:RuZi18562/z30wxOzpjeRrGcmk9Ro/rBzixaSZDhIhY=
@ -343,15 +338,14 @@ github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9
github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v0.10.1 h1:uyt/l0dWjJ879yiAu+T7FG3/6QX+zwm4bQ8P7XsYt3o=
github.com/hashicorp/go-hclog v0.10.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v0.12.1 h1:99niEVkDqsEv3/jINwoOUgGE9L41LHXM4k3jTkV+DdA=
github.com/hashicorp/go-hclog v0.12.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc=
github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-kms-wrapping v0.5.1 h1:Ed6Z5gV3LY3J9Ora4cwxVmV8Hyt6CPOTrQoGIPry2Ew=
github.com/hashicorp/go-kms-wrapping v0.5.1/go.mod h1:cGIibZmMx9qlxS1pZTUrEgGqA+7u3zJyvVYMhjU2bDs=
github.com/hashicorp/go-kms-wrapping v0.5.5 h1:ld350yu4UPAFCyt5NifPU3WpFts71z69n3NfMeTv2z8=
github.com/hashicorp/go-kms-wrapping v0.5.5/go.mod h1:FeTpMJSbqrCtAeEK30hlNS8jm6W7cjzmLWurZ4Tk/J4=
github.com/hashicorp/go-kms-wrapping/entropy v0.1.0 h1:xuTi5ZwjimfpvpL09jDE71smCBRpnF5xfo871BSX4gs=
github.com/hashicorp/go-kms-wrapping/entropy v0.1.0/go.mod h1:d1g9WGtAunDNpek8jUIEJnBlbgKS1N2Q61QkHiZyR1g=
github.com/hashicorp/go-memdb v1.0.2 h1:AIjzJlwIxz2inhZqRJZfe6D15lPeF0/cZyS1BVlnlHg=
@ -369,7 +363,6 @@ github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es
github.com/hashicorp/go-retryablehttp v0.6.2 h1:bHM2aVXwBtBJWxHtkSrWuI4umABCUczs52eiUS9nSiw=
github.com/hashicorp/go-retryablehttp v0.6.2/go.mod h1:gEx6HMUGxYYhJScX7W1Il64m6cc2C1mDaW3NQ9sY1FY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8=
github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
@ -380,8 +373,6 @@ github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwM
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8 h1:PKbxRbsOP7R3f/TpdqcgXrO69T3yd9nLoR+RMRUxSxA=
github.com/hashicorp/go-uuid v1.0.2-0.20191001231223-f32f5fe8d6a8/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
@ -450,14 +441,12 @@ github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.2 h1:X9eK6NSb1qafvoE
github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.1.2/go.mod h1:YRW9zn9NZNitRlPYNAWRp/YEdKCF/X8aOg8IYSxFT5Y=
github.com/hashicorp/vault-plugin-secrets-openldap v0.1.2 h1:618nyNUHX2Oc7pcQh6r0Zm0kaMrhkfAyUyFmDFwyYnQ=
github.com/hashicorp/vault-plugin-secrets-openldap v0.1.2/go.mod h1:9Cy4Jp779BjuIOhYLjEfH3M3QCUxZgPnvJ3tAOOmof4=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4 h1:3K3KcD4S6/Y2hevi70EzUTNKOS3cryQyhUnkjE6Tz0w=
github.com/influxdata/influxdb v0.0.0-20190411212539-d24b7ba8c4c4/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
@ -491,9 +480,7 @@ github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f h1:ENpDacvnr8fa
github.com/joyent/triton-go v1.7.1-0.20200416154420-6801d15b779f/go.mod h1:KDSfL7qe5ZfQqvlDMkVjCztbmcpp/c8M77vhQP8ZPvk=
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@ -644,35 +631,27 @@ github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d h1:PinQItctnaL2LtkaS
github.com/pquerna/otp v1.2.1-0.20191009055518-468c2dd2b58d/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0 h1:YVIb/fVcOTMSqtqZWSKnHpSLBxu8DKgxq8z6RuBZwqI=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@ -718,12 +697,11 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
@ -739,8 +717,8 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 h1:ndzgwNDnKIqyCvHTXaCqh9KlOWKvBry6nuXMJmonVsE=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
@ -786,6 +764,7 @@ golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaE
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad h1:Jh8cai0fqIK+f6nG0UgPW5wFk8wmiMhM3AyciDBdtQg=
golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -816,7 +795,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@ -832,7 +810,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -859,7 +836,6 @@ golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
@ -918,7 +894,6 @@ google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw=
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1 h1:q4XQuHFC6I28BKZpo6IYyb3mNO+l7lSOxRuYTCiDfXk=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
@ -926,7 +901,6 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -948,7 +922,6 @@ gopkg.in/ory-am/dockertest.v3 v3.3.4 h1:oen8RiwxVNxtQ1pRoV4e4jqh6UjNsOuIZ1NXns6j
gopkg.in/ory-am/dockertest.v3 v3.3.4/go.mod h1:s9mmoLkaGeAh97qygnNj4xWkiN7e1SKekYC6CovU+ek=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.4.1 h1:H0TmLt7/KmzlrDOpa1F+zr0Tk90PbJYBfsVUmRLrf9Y=
gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@ -956,7 +929,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=

View File

@ -40,6 +40,7 @@ import (
logicalGcpKms "github.com/hashicorp/vault-plugin-secrets-gcpkms"
logicalKv "github.com/hashicorp/vault-plugin-secrets-kv"
logicalMongoAtlas "github.com/hashicorp/vault-plugin-secrets-mongodbatlas"
logicalOpenLDAP "github.com/hashicorp/vault-plugin-secrets-openldap"
logicalAws "github.com/hashicorp/vault/builtin/logical/aws"
logicalCass "github.com/hashicorp/vault/builtin/logical/cassandra"
logicalConsul "github.com/hashicorp/vault/builtin/logical/consul"
@ -47,7 +48,6 @@ import (
logicalMssql "github.com/hashicorp/vault/builtin/logical/mssql"
logicalMysql "github.com/hashicorp/vault/builtin/logical/mysql"
logicalNomad "github.com/hashicorp/vault/builtin/logical/nomad"
logicalOpenLDAP "github.com/hashicorp/vault-plugin-secrets-openldap"
logicalPki "github.com/hashicorp/vault/builtin/logical/pki"
logicalPostgres "github.com/hashicorp/vault/builtin/logical/postgresql"
logicalRabbit "github.com/hashicorp/vault/builtin/logical/rabbitmq"

View File

@ -6,13 +6,13 @@ import (
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/builtin/logical/transit"
"github.com/hashicorp/vault/command/server/seal"
"github.com/hashicorp/vault/helper/testhelpers/teststorage"
"github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
vaultseal "github.com/hashicorp/vault/vault/seal"
"github.com/hashicorp/vault/vault/seal"
"github.com/mitchellh/go-testing-interface"
)
@ -65,12 +65,12 @@ func (tss *TransitSealServer) MakeSeal(t testing.T, key string) vault.Seal {
"key_name": key,
"tls_ca_cert": tss.CACertPEMFile,
}
transitSeal, _, err := seal.GetTransitKMSFunc(nil, wrapperConfig)
transitSeal, _, err := configutil.GetTransitKMSFunc(nil, &configutil.KMS{Config: wrapperConfig})
if err != nil {
t.Fatalf("error setting wrapper config: %v", err)
}
return vault.NewAutoSeal(&vaultseal.Access{
return vault.NewAutoSeal(&seal.Access{
Wrapper: transitSeal,
})
}

View File

@ -7,9 +7,20 @@ import (
"testing"
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/vault"
)
func getListenerConfigForMarshalerTest(addr sockaddr.IPAddr) *configutil.Listener {
return &configutil.Listener{
XForwardedForAuthorizedAddrs: []*sockaddr.SockAddrMarshaler{
{
SockAddr: addr,
},
},
}
}
func TestHandler_XForwardedFor(t *testing.T) {
goodAddr, err := sockaddr.NewIPAddr("127.0.0.1")
if err != nil {
@ -29,11 +40,9 @@ func TestHandler_XForwardedFor(t *testing.T) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
})
return WrapForwardedForHandler(origHandler, []*sockaddr.SockAddrMarshaler{
&sockaddr.SockAddrMarshaler{
SockAddr: goodAddr,
},
}, true, false, 0)
listenerConfig := getListenerConfigForMarshalerTest(goodAddr)
listenerConfig.XForwardedForRejectNotPresent = true
return WrapForwardedForHandler(origHandler, listenerConfig)
}
cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
@ -74,11 +83,9 @@ func TestHandler_XForwardedFor(t *testing.T) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
})
return WrapForwardedForHandler(origHandler, []*sockaddr.SockAddrMarshaler{
&sockaddr.SockAddrMarshaler{
SockAddr: badAddr,
},
}, true, false, 0)
listenerConfig := getListenerConfigForMarshalerTest(badAddr)
listenerConfig.XForwardedForRejectNotPresent = true
return WrapForwardedForHandler(origHandler, listenerConfig)
}
cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
@ -111,11 +118,10 @@ func TestHandler_XForwardedFor(t *testing.T) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
})
return WrapForwardedForHandler(origHandler, []*sockaddr.SockAddrMarshaler{
&sockaddr.SockAddrMarshaler{
SockAddr: badAddr,
},
}, true, true, 0)
listenerConfig := getListenerConfigForMarshalerTest(badAddr)
listenerConfig.XForwardedForRejectNotPresent = true
listenerConfig.XForwardedForRejectNotAuthorized = true
return WrapForwardedForHandler(origHandler, listenerConfig)
}
cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
@ -145,11 +151,11 @@ func TestHandler_XForwardedFor(t *testing.T) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
})
return WrapForwardedForHandler(origHandler, []*sockaddr.SockAddrMarshaler{
&sockaddr.SockAddrMarshaler{
SockAddr: goodAddr,
},
}, true, true, 4)
listenerConfig := getListenerConfigForMarshalerTest(goodAddr)
listenerConfig.XForwardedForRejectNotPresent = true
listenerConfig.XForwardedForRejectNotAuthorized = true
listenerConfig.XForwardedForHopSkips = 4
return WrapForwardedForHandler(origHandler, listenerConfig)
}
cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
@ -179,11 +185,11 @@ func TestHandler_XForwardedFor(t *testing.T) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
})
return WrapForwardedForHandler(origHandler, []*sockaddr.SockAddrMarshaler{
&sockaddr.SockAddrMarshaler{
SockAddr: goodAddr,
},
}, true, true, 1)
listenerConfig := getListenerConfigForMarshalerTest(goodAddr)
listenerConfig.XForwardedForRejectNotPresent = true
listenerConfig.XForwardedForRejectNotAuthorized = true
listenerConfig.XForwardedForHopSkips = 1
return WrapForwardedForHandler(origHandler, listenerConfig)
}
cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
@ -216,11 +222,11 @@ func TestHandler_XForwardedFor(t *testing.T) {
w.WriteHeader(http.StatusOK)
w.Write([]byte(r.RemoteAddr))
})
return WrapForwardedForHandler(origHandler, []*sockaddr.SockAddrMarshaler{
&sockaddr.SockAddrMarshaler{
SockAddr: goodAddr,
},
}, true, true, 1)
listenerConfig := getListenerConfigForMarshalerTest(goodAddr)
listenerConfig.XForwardedForRejectNotPresent = true
listenerConfig.XForwardedForRejectNotAuthorized = true
listenerConfig.XForwardedForHopSkips = 1
return WrapForwardedForHandler(origHandler, listenerConfig)
}
cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{

View File

@ -23,6 +23,7 @@ import (
cleanhttp "github.com/hashicorp/go-cleanhttp"
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/jsonutil"
"github.com/hashicorp/vault/sdk/helper/parseutil"
@ -161,7 +162,7 @@ func Handler(props *vault.HandlerProperties) http.Handler {
}
// Register metrics path without authentication if enabled
if props.UnauthenticatedMetricsAccess {
if props.ListenerConfig != nil && props.ListenerConfig.Telemetry.UnauthenticatedMetricsAccess {
mux.Handle("/v1/sys/metrics", handleMetricsUnauthenticated(core))
} else {
mux.Handle("/v1/sys/metrics", handleLogicalNoForward(core))
@ -252,10 +253,19 @@ func handleAuditNonLogical(core *vault.Core, h http.Handler) http.Handler {
// wrapGenericHandler wraps the handler with an extra layer of handler where
// tasks that should be commonly handled for all the requests and/or responses
// are performed.
func wrapGenericHandler(core *vault.Core, h http.Handler, maxRequestSize int64, maxRequestDuration time.Duration) http.Handler {
func wrapGenericHandler(core *vault.Core, h http.Handler, props *vault.HandlerProperties) http.Handler {
var maxRequestDuration time.Duration
var maxRequestSize int64
if props.ListenerConfig != nil {
maxRequestDuration = props.ListenerConfig.MaxRequestDuration
maxRequestSize = props.ListenerConfig.MaxRequestSize
}
if maxRequestDuration == 0 {
maxRequestDuration = vault.DefaultMaxRequestDuration
}
if maxRequestSize == 0 {
maxRequestSize = DefaultMaxRequestSize
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Set the Cache-Control header for all the responses returned
// by Vault
@ -296,7 +306,11 @@ func wrapGenericHandler(core *vault.Core, h http.Handler, maxRequestSize int64,
})
}
func WrapForwardedForHandler(h http.Handler, authorizedAddrs []*sockaddr.SockAddrMarshaler, rejectNotPresent, rejectNonAuthz bool, hopSkips int) http.Handler {
func WrapForwardedForHandler(h http.Handler, l *configutil.Listener) http.Handler {
rejectNotPresent := l.XForwardedForRejectNotPresent
hopSkips := l.XForwardedForHopSkips
authorizedAddrs := l.XForwardedForAuthorizedAddrs
rejectNotAuthz := l.XForwardedForRejectNotAuthorized
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
headers, headersOK := r.Header[textproto.CanonicalMIMEHeaderKey("X-Forwarded-For")]
if !headersOK || len(headers) == 0 {
@ -342,7 +356,7 @@ func WrapForwardedForHandler(h http.Handler, authorizedAddrs []*sockaddr.SockAdd
if !found {
// If we didn't find it and aren't configured to reject, simply
// don't trust it
if !rejectNonAuthz {
if !rejectNotAuthz {
h.ServeHTTP(w, r)
return
}
@ -362,7 +376,7 @@ func WrapForwardedForHandler(h http.Handler, authorizedAddrs []*sockaddr.SockAdd
}
}
indexToUse := len(acc) - 1 - hopSkips
indexToUse := int64(len(acc)) - 1 - hopSkips
if indexToUse < 0 {
// This is likely an error in either configuration or other
// infrastructure. We could either deny the request, or we

View File

@ -655,7 +655,6 @@ func testNonPrintable(t *testing.T, disable bool) {
ln, addr := TestListener(t)
props := &vault.HandlerProperties{
Core: core,
MaxRequestSize: DefaultMaxRequestSize,
DisablePrintableCheck: disable,
}
TestServerWithListenerAndProperties(t, ln, addr, core, props)

View File

@ -267,7 +267,9 @@ func TestLogical_RequestSizeLimit(t *testing.T) {
defer ln.Close()
TestServerAuth(t, addr, token)
// Write a very large object, should fail
// Write a very large object, should fail. This test works because Go's
// encoding/json encodes the byte slice as base64, which makes it significantly
// larger than the default max request size.
resp := testHttpPut(t, token, addr+"/v1/secret/foo", map[string]interface{}{
"data": make([]byte, DefaultMaxRequestSize),
})
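For a rough sense of how much the JSON encoding inflates the payload: base64 expands data by about a third before quoting, so a body sized exactly at the limit always exceeds it. A minimal, standalone sketch (the 32 MiB constant mirrors what DefaultMaxRequestSize is assumed to be here, not a value imported from the package):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Stand-in for http.DefaultMaxRequestSize; treat the exact value as an assumption.
	const maxRequestSize = 32 * 1024 * 1024

	raw := maxRequestSize
	encoded := base64.StdEncoding.EncodedLen(raw) // size after JSON's base64 encoding of []byte
	fmt.Printf("raw: %d bytes, encoded: %d bytes (~%.2fx)\n",
		raw, encoded, float64(encoded)/float64(raw))
}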

View File

@ -6,6 +6,7 @@ import (
"github.com/armon/go-metrics"
"github.com/hashicorp/vault/helper/metricsutil"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/vault"
)
@ -32,9 +33,12 @@ func TestSysMetricsUnauthenticated(t *testing.T) {
// Setup new custom listener with unauthenticated metrics access
ln, addr = TestListener(t)
props := &vault.HandlerProperties{
Core: core,
MaxRequestSize: DefaultMaxRequestSize,
UnauthenticatedMetricsAccess: true,
Core: core,
ListenerConfig: &configutil.Listener{
Telemetry: configutil.ListenerTelemetry{
UnauthenticatedMetricsAccess: true,
},
},
}
TestServerWithListenerAndProperties(t, ln, addr, core, props)
defer ln.Close()

View File

@ -44,8 +44,7 @@ func TestServerWithListener(tb testing.TB, ln net.Listener, addr string, core *v
// Create a muxer to handle our requests so that we can authenticate
// for tests.
props := &vault.HandlerProperties{
Core: core,
MaxRequestSize: DefaultMaxRequestSize,
Core: core,
}
TestServerWithListenerAndProperties(tb, ln, addr, core, props)
}

View File

@ -15,7 +15,7 @@ var (
genericWrapping = func(core *vault.Core, in http.Handler, props *vault.HandlerProperties) http.Handler {
// Wrap the help wrapped handler with another layer with a generic
// handler
return wrapGenericHandler(core, in, props.MaxRequestSize, props.MaxRequestDuration)
return wrapGenericHandler(core, in, props)
}
additionalRoutes = func(mux *http.ServeMux, core *vault.Core) {}

View File

@ -0,0 +1,211 @@
package configutil
import (
"fmt"
"io/ioutil"
"time"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/sdk/helper/parseutil"
)
// SharedConfig contains config values that are common across Vault
// configuration files: listeners, seals, entropy, telemetry, and basic
// logging and process settings.
type SharedConfig struct {
EntSharedConfig
Listeners []*Listener `hcl:"-"`
Seals []*KMS `hcl:"-"`
Entropy *Entropy `hcl:"-"`
DisableMlock bool `hcl:"-"`
DisableMlockRaw interface{} `hcl:"disable_mlock"`
Telemetry *Telemetry `hcl:"telemetry"`
DefaultMaxRequestDuration time.Duration `hcl:"-"`
DefaultMaxRequestDurationRaw interface{} `hcl:"default_max_request_duration"`
// LogFormat specifies the log format. Valid values are "standard" and
// "json". The values are case-insensitive. If no log format is specified,
// the standard format will be used.
LogFormat string `hcl:"log_format"`
LogLevel string `hcl:"log_level"`
PidFile string `hcl:"pid_file"`
ClusterName string `hcl:"cluster_name"`
}
// LoadConfigFile loads the configuration from the given file.
func LoadConfigFile(path string) (*SharedConfig, error) {
// Read the file
d, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
return ParseConfig(string(d))
}
func ParseConfig(d string) (*SharedConfig, error) {
// Parse!
obj, err := hcl.Parse(d)
if err != nil {
return nil, err
}
// Start building the result
var result SharedConfig
if err := hcl.DecodeObject(&result, obj); err != nil {
return nil, err
}
if result.DefaultMaxRequestDurationRaw != nil {
if result.DefaultMaxRequestDuration, err = parseutil.ParseDurationSecond(result.DefaultMaxRequestDurationRaw); err != nil {
return nil, err
}
result.DefaultMaxRequestDurationRaw = nil
}
if result.DisableMlockRaw != nil {
if result.DisableMlock, err = parseutil.ParseBool(result.DisableMlockRaw); err != nil {
return nil, err
}
result.DisableMlockRaw = nil
}
list, ok := obj.Node.(*ast.ObjectList)
if !ok {
return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
}
if o := list.Filter("hsm"); len(o.Items) > 0 {
if err := parseKMS(&result, o, "hsm", 1); err != nil {
return nil, errwrap.Wrapf("error parsing 'hsm': {{err}}", err)
}
}
if o := list.Filter("seal"); len(o.Items) > 0 {
if err := parseKMS(&result, o, "seal", 2); err != nil {
return nil, errwrap.Wrapf("error parsing 'seal': {{err}}", err)
}
}
if o := list.Filter("kms"); len(o.Items) > 0 {
if err := parseKMS(&result, o, "kms", 2); err != nil {
return nil, errwrap.Wrapf("error parsing 'kms': {{err}}", err)
}
}
if o := list.Filter("entropy"); len(o.Items) > 0 {
if err := ParseEntropy(&result, o, "entropy"); err != nil {
return nil, errwrap.Wrapf("error parsing 'entropy': {{err}}", err)
}
}
if o := list.Filter("listener"); len(o.Items) > 0 {
if err := ParseListeners(&result, o); err != nil {
return nil, errwrap.Wrapf("error parsing 'listener': {{err}}", err)
}
}
if o := list.Filter("telemetry"); len(o.Items) > 0 {
if err := parseTelemetry(&result, o); err != nil {
return nil, errwrap.Wrapf("error parsing 'telemetry': {{err}}", err)
}
}
entConfig := &(result.EntSharedConfig)
if err := entConfig.ParseConfig(list); err != nil {
return nil, errwrap.Wrapf("error parsing enterprise config: {{err}}", err)
}
return &result, nil
}
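As a hedged sketch of how this parser might be exercised directly (the HCL below is illustrative and deliberately minimal, not a complete Vault configuration):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/internalshared/configutil"
)

func main() {
	// Illustrative config: one listener stanza plus two top-level shared values.
	hclConfig := `
disable_mlock = true
log_level     = "debug"

listener "tcp" {
  address          = "127.0.0.1:8200"
  tls_disable      = true
  max_request_size = 33554432
}
`
	shared, err := configutil.ParseConfig(hclConfig)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("listeners:", len(shared.Listeners))
	fmt.Println("address:  ", shared.Listeners[0].Address)
	fmt.Println("mlock off:", shared.DisableMlock)
}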
// Sanitized returns a copy of the config with all values that are considered
// sensitive stripped. It also strips all `*Raw` values that are mainly
// used for parsing.
//
// Specifically, the fields that this method strips are:
// - KMS.Config
// - Telemetry.CirconusAPIToken
func (c *SharedConfig) Sanitized() map[string]interface{} {
if c == nil {
return nil
}
result := map[string]interface{}{
"disable_mlock": c.DisableMlock,
"default_max_request_duration": c.DefaultMaxRequestDuration,
"log_level": c.LogLevel,
"log_format": c.LogFormat,
"pid_file": c.PidFile,
"cluster_name": c.ClusterName,
}
// Sanitize listeners
if len(c.Listeners) != 0 {
var sanitizedListeners []interface{}
for _, ln := range c.Listeners {
cleanLn := map[string]interface{}{
"type": ln.Type,
"config": ln.RawConfig,
}
sanitizedListeners = append(sanitizedListeners, cleanLn)
}
result["listeners"] = sanitizedListeners
}
// Sanitize seals stanza
if len(c.Seals) != 0 {
var sanitizedSeals []interface{}
for _, s := range c.Seals {
cleanSeal := map[string]interface{}{
"type": s.Type,
"disabled": s.Disabled,
}
sanitizedSeals = append(sanitizedSeals, cleanSeal)
}
result["seals"] = sanitizedSeals
}
// Sanitize telemetry stanza
if c.Telemetry != nil {
sanitizedTelemetry := map[string]interface{}{
"statsite_address": c.Telemetry.StatsiteAddr,
"statsd_address": c.Telemetry.StatsdAddr,
"disable_hostname": c.Telemetry.DisableHostname,
"metrics_prefix": c.Telemetry.MetricsPrefix,
"circonus_api_token": "",
"circonus_api_app": c.Telemetry.CirconusAPIApp,
"circonus_api_url": c.Telemetry.CirconusAPIURL,
"circonus_submission_interval": c.Telemetry.CirconusSubmissionInterval,
"circonus_submission_url": c.Telemetry.CirconusCheckSubmissionURL,
"circonus_check_id": c.Telemetry.CirconusCheckID,
"circonus_check_force_metric_activation": c.Telemetry.CirconusCheckForceMetricActivation,
"circonus_check_instance_id": c.Telemetry.CirconusCheckInstanceID,
"circonus_check_search_tag": c.Telemetry.CirconusCheckSearchTag,
"circonus_check_tags": c.Telemetry.CirconusCheckTags,
"circonus_check_display_name": c.Telemetry.CirconusCheckDisplayName,
"circonus_broker_id": c.Telemetry.CirconusBrokerID,
"circonus_broker_select_tag": c.Telemetry.CirconusBrokerSelectTag,
"dogstatsd_addr": c.Telemetry.DogStatsDAddr,
"dogstatsd_tags": c.Telemetry.DogStatsDTags,
"prometheus_retention_time": c.Telemetry.PrometheusRetentionTime,
"stackdriver_project_id": c.Telemetry.StackdriverProjectID,
"stackdriver_location": c.Telemetry.StackdriverLocation,
"stackdriver_namespace": c.Telemetry.StackdriverNamespace,
"stackdriver_debug_logs": c.Telemetry.StackdriverDebugLogs,
}
result["telemetry"] = sanitizedTelemetry
}
return result
}
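Continuing that sketch, Sanitized gives a redacted view that is safe to log; the file path below is hypothetical:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/hashicorp/vault/internalshared/configutil"
)

func main() {
	// Hypothetical path; any file containing the shared stanzas would do.
	shared, err := configutil.LoadConfigFile("/etc/vault/shared.hcl")
	if err != nil {
		log.Fatal(err)
	}

	// Seal credentials and the Circonus API token are omitted from this view.
	redacted, err := json.MarshalIndent(shared.Sanitized(), "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(redacted))
}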

View File

@ -0,0 +1,18 @@
// +build !enterprise
package configutil
import (
"github.com/hashicorp/hcl/hcl/ast"
)
type EntSharedConfig struct {
}
func (ec *EntSharedConfig) ParseConfig(list *ast.ObjectList) error {
return nil
}
func ParseEntropy(result *SharedConfig, list *ast.ObjectList, blockName string) error {
return nil
}

View File

@ -0,0 +1,313 @@
// +build !enterprise
package configutil
import (
"crypto/rand"
"fmt"
"io"
"strings"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-hclog"
wrapping "github.com/hashicorp/go-kms-wrapping"
aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
"github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms"
"github.com/hashicorp/go-kms-wrapping/wrappers/awskms"
"github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault"
"github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms"
"github.com/hashicorp/go-kms-wrapping/wrappers/ocikms"
"github.com/hashicorp/go-kms-wrapping/wrappers/transit"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/hashicorp/vault/sdk/logical"
)
var (
ConfigureWrapper = configureWrapper
CreateSecureRandomReaderFunc = createSecureRandomReader
)
// EntropyMode is the mode of entropy augmentation configured for the server
type EntropyMode int
const (
EntropyUnknown EntropyMode = iota
EntropyAugmentation
)
// Entropy contains the entropy configuration for the server
type Entropy struct {
Mode EntropyMode
}
// KMS contains KMS configuration for the server
type KMS struct {
Type string
// Purpose can be used to allow a string-based specification of what this
// KMS is designated for, in situations where we want to allow more than
// one KMS to be specified
Purpose []string `hcl:"-"`
Disabled bool
Config map[string]string
}
func (k *KMS) GoString() string {
return fmt.Sprintf("*%#v", *k)
}
func parseKMS(result *SharedConfig, list *ast.ObjectList, blockName string, maxKMS int) error {
if len(list.Items) > maxKMS {
return fmt.Errorf("only %d or fewer %q blocks are permitted", maxKMS, blockName)
}
seals := make([]*KMS, 0, len(list.Items))
for _, item := range list.Items {
key := blockName
if len(item.Keys) > 0 {
key = item.Keys[0].Token.Value().(string)
}
// We first decode into a map[string]interface{} because purpose isn't
// necessarily a string. Then we migrate everything else over to
// map[string]string and error if it doesn't work.
var m map[string]interface{}
if err := hcl.DecodeObject(&m, item.Val); err != nil {
return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key))
}
var purpose []string
var err error
if v, ok := m["purpose"]; ok {
if purpose, err = parseutil.ParseCommaStringSlice(v); err != nil {
return multierror.Prefix(fmt.Errorf("unable to parse 'purpose' in kms type %q: %w", key, err), fmt.Sprintf("%s.%s:", blockName, key))
}
for i, p := range purpose {
purpose[i] = strings.ToLower(p)
}
delete(m, "purpose")
}
var disabled bool
if v, ok := m["disabled"]; ok {
disabled, err = parseutil.ParseBool(v)
if err != nil {
return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key))
}
delete(m, "disabled")
}
strMap := make(map[string]string, len(m))
for k, v := range m {
if vs, ok := v.(string); ok {
strMap[k] = vs
} else {
return multierror.Prefix(fmt.Errorf("unable to parse 'purpose' in kms type %q: value could not be parsed as string", key), fmt.Sprintf("%s.%s:", blockName, key))
}
}
seal := &KMS{
Type: strings.ToLower(key),
Purpose: purpose,
Disabled: disabled,
}
if len(strMap) > 0 {
seal.Config = strMap
}
seals = append(seals, seal)
}
result.Seals = append(result.Seals, seals...)
return nil
}
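A hedged sketch of what the seal parsing yields for a couple of stanzas (the key IDs, addresses, and key names below are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/internalshared/configutil"
)

func main() {
	// Two seal blocks: a comma-separated purpose list and a disabled (migration-style) seal.
	hclConfig := `
seal "awskms" {
  purpose    = "root,recovery"
  kms_key_id = "alias/example-key"
}

seal "transit" {
  disabled   = "true"
  address    = "https://vault.example.com:8200"
  key_name   = "unseal-key"
  mount_path = "transit/"
}
`
	shared, err := configutil.ParseConfig(hclConfig)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range shared.Seals {
		fmt.Printf("type=%s purpose=%v disabled=%t config keys=%d\n",
			s.Type, s.Purpose, s.Disabled, len(s.Config))
	}
}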
func configureWrapper(configKMS *KMS, infoKeys *[]string, info *map[string]string, logger hclog.Logger) (wrapping.Wrapper, error) {
var wrapper wrapping.Wrapper
var kmsInfo map[string]string
var err error
opts := &wrapping.WrapperOptions{
Logger: logger,
}
switch configKMS.Type {
case wrapping.Shamir:
return nil, nil
case wrapping.AEAD:
wrapper, kmsInfo, err = GetAEADKMSFunc(opts, configKMS)
case wrapping.AliCloudKMS:
wrapper, kmsInfo, err = GetAliCloudKMSFunc(opts, configKMS)
case wrapping.AWSKMS:
wrapper, kmsInfo, err = GetAWSKMSFunc(opts, configKMS)
case wrapping.AzureKeyVault:
wrapper, kmsInfo, err = GetAzureKeyVaultKMSFunc(opts, configKMS)
case wrapping.GCPCKMS:
wrapper, kmsInfo, err = GetGCPCKMSKMSFunc(opts, configKMS)
case wrapping.OCIKMS:
wrapper, kmsInfo, err = GetOCIKMSKMSFunc(opts, configKMS)
case wrapping.Transit:
wrapper, kmsInfo, err = GetTransitKMSFunc(opts, configKMS)
case wrapping.PKCS11:
return nil, fmt.Errorf("KMS type 'pkcs11' requires the Vault Enterprise HSM binary")
default:
return nil, fmt.Errorf("Unknown KMS type %q", configKMS.Type)
}
if err != nil {
return nil, err
}
for k, v := range kmsInfo {
*infoKeys = append(*infoKeys, k)
(*info)[k] = v
}
return wrapper, nil
}
func GetAEADKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) {
wrapper := aeadwrapper.NewWrapper(opts)
wrapperInfo, err := wrapper.SetConfig(kms.Config)
if err != nil {
return nil, nil, err
}
info := make(map[string]string)
if wrapperInfo != nil {
str := "AEAD Type"
if len(kms.Purpose) > 0 {
str = fmt.Sprintf("%v %s", kms.Purpose, str)
}
info[str] = wrapperInfo["aead_type"]
}
return wrapper, info, nil
}
func GetAliCloudKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) {
wrapper := alicloudkms.NewWrapper(opts)
wrapperInfo, err := wrapper.SetConfig(kms.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
return nil, nil, err
}
}
info := make(map[string]string)
if wrapperInfo != nil {
info["AliCloud KMS Region"] = wrapperInfo["region"]
info["AliCloud KMS KeyID"] = wrapperInfo["kms_key_id"]
if domain, ok := wrapperInfo["domain"]; ok {
info["AliCloud KMS Domain"] = domain
}
}
return wrapper, info, nil
}
func GetAWSKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) {
wrapper := awskms.NewWrapper(opts)
wrapperInfo, err := wrapper.SetConfig(kms.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
return nil, nil, err
}
}
info := make(map[string]string)
if wrapperInfo != nil {
info["AWS KMS Region"] = wrapperInfo["region"]
info["AWS KMS KeyID"] = wrapperInfo["kms_key_id"]
if endpoint, ok := wrapperInfo["endpoint"]; ok {
info["AWS KMS Endpoint"] = endpoint
}
}
return wrapper, info, nil
}
func GetAzureKeyVaultKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) {
wrapper := azurekeyvault.NewWrapper(opts)
wrapperInfo, err := wrapper.SetConfig(kms.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
return nil, nil, err
}
}
info := make(map[string]string)
if wrapperInfo != nil {
info["Azure Environment"] = wrapperInfo["environment"]
info["Azure Vault Name"] = wrapperInfo["vault_name"]
info["Azure Key Name"] = wrapperInfo["key_name"]
}
return wrapper, info, nil
}
func GetGCPCKMSKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) {
wrapper := gcpckms.NewWrapper(opts)
wrapperInfo, err := wrapper.SetConfig(kms.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
return nil, nil, err
}
}
info := make(map[string]string)
if wrapperInfo != nil {
info["GCP KMS Project"] = wrapperInfo["project"]
info["GCP KMS Region"] = wrapperInfo["region"]
info["GCP KMS Key Ring"] = wrapperInfo["key_ring"]
info["GCP KMS Crypto Key"] = wrapperInfo["crypto_key"]
}
return wrapper, info, nil
}
func GetOCIKMSKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) {
wrapper := ocikms.NewWrapper(opts)
wrapperInfo, err := wrapper.SetConfig(kms.Config)
if err != nil {
return nil, nil, err
}
info := make(map[string]string)
if wrapperInfo != nil {
info["OCI KMS KeyID"] = wrapperInfo[ocikms.KMSConfigKeyID]
info["OCI KMS Crypto Endpoint"] = wrapperInfo[ocikms.KMSConfigCryptoEndpoint]
info["OCI KMS Management Endpoint"] = wrapperInfo[ocikms.KMSConfigManagementEndpoint]
info["OCI KMS Principal Type"] = wrapperInfo["principal_type"]
}
return wrapper, info, nil
}
func GetTransitKMSFunc(opts *wrapping.WrapperOptions, kms *KMS) (wrapping.Wrapper, map[string]string, error) {
wrapper := transit.NewWrapper(opts)
wrapperInfo, err := wrapper.SetConfig(kms.Config)
if err != nil {
// If the error is any other than logical.KeyNotFoundError, return the error
if !errwrap.ContainsType(err, new(logical.KeyNotFoundError)) {
return nil, nil, err
}
}
info := make(map[string]string)
if wrapperInfo != nil {
info["Transit Address"] = wrapperInfo["address"]
info["Transit Mount Path"] = wrapperInfo["mount_path"]
info["Transit Key Name"] = wrapperInfo["key_name"]
if namespace, ok := wrapperInfo["namespace"]; ok {
info["Transit Namespace"] = namespace
}
}
return wrapper, info, nil
}
func createSecureRandomReader(conf *SharedConfig, wrapper wrapping.Wrapper) (io.Reader, error) {
return rand.Reader, nil
}
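A hedged sketch of driving the wrapper constructors outside the server command; the transit address, token, and key name are placeholders, and the call only succeeds against a reachable transit mount:

package main

import (
	"fmt"
	"log"

	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/internalshared/configutil"
)

func main() {
	kms := &configutil.KMS{
		Type: "transit",
		Config: map[string]string{
			"address":    "https://vault.example.com:8200", // placeholder
			"token":      "s.placeholder",                  // placeholder
			"key_name":   "autounseal",
			"mount_path": "transit/",
		},
	}

	var infoKeys []string
	info := map[string]string{}

	// ConfigureWrapper dispatches on kms.Type and fills in the info map used for startup logs.
	wrapper, err := configutil.ConfigureWrapper(kms, &infoKeys, &info, hclog.Default())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("configured wrapper: %T\n", wrapper)
	for _, k := range infoKeys {
		fmt.Printf("%s: %s\n", k, info[k])
	}
}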

View File

@ -0,0 +1,315 @@
package configutil
import (
"errors"
"fmt"
"strings"
"time"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/hashicorp/vault/sdk/helper/tlsutil"
)
type ListenerTelemetry struct {
UnauthenticatedMetricsAccess bool `hcl:"-"`
UnauthenticatedMetricsAccessRaw interface{} `hcl:"unauthenticated_metrics_access"`
}
// Listener is the listener configuration for the server.
type Listener struct {
RawConfig map[string]interface{}
Type string
Purpose []string `hcl:"-"`
PurposeRaw interface{} `hcl:"purpose"`
Address string `hcl:"address"`
ClusterAddress string `hcl:"cluster_address"`
MaxRequestSize int64 `hcl:"-"`
MaxRequestSizeRaw interface{} `hcl:"max_request_size"`
MaxRequestDuration time.Duration `hcl:"-"`
MaxRequestDurationRaw interface{} `hcl:"max_request_duration"`
RequireRequestHeader bool `hcl:"-"`
RequireRequestHeaderRaw interface{} `hcl:"require_request_header"`
TLSDisable bool `hcl:"-"`
TLSDisableRaw interface{} `hcl:"tls_disable"`
TLSCertFile string `hcl:"tls_cert_file"`
TLSKeyFile string `hcl:"tls_key_file"`
TLSMinVersion string `hcl:"tls_min_version"`
TLSCipherSuites []uint16 `hcl:"-"`
TLSCipherSuitesRaw string `hcl:"tls_cipher_suites"`
TLSPreferServerCipherSuites bool `hcl:"-"`
TLSPreferServerCipherSuitesRaw interface{} `hcl:"tls_prefer_server_cipher_suites"`
TLSRequireAndVerifyClientCert bool `hcl:"-"`
TLSRequireAndVerifyClientCertRaw interface{} `hcl:"tls_require_and_verify_client_cert"`
TLSClientCAFile string `hcl:"tls_client_ca_file"`
TLSDisableClientCerts bool `hcl:"-"`
TLSDisableClientCertsRaw interface{} `hcl:"tls_disable_client_certs"`
HTTPReadTimeout time.Duration `hcl:"-"`
HTTPReadTimeoutRaw interface{} `hcl:"http_read_timeout"`
HTTPReadHeaderTimeout time.Duration `hcl:"-"`
HTTPReadHeaderTimeoutRaw interface{} `hcl:"http_read_header_timeout"`
HTTPWriteTimeout time.Duration `hcl:"-"`
HTTPWriteTimeoutRaw interface{} `hcl:"http_write_timeout"`
HTTPIdleTimeout time.Duration `hcl:"-"`
HTTPIdleTimeoutRaw interface{} `hcl:"http_idle_timeout"`
ProxyProtocolBehavior string `hcl:"proxy_protocol_behavior"`
ProxyProtocolAuthorizedAddrs []*sockaddr.SockAddrMarshaler `hcl:"-"`
ProxyProtocolAuthorizedAddrsRaw interface{} `hcl:"proxy_protocol_authorized_addrs"`
XForwardedForAuthorizedAddrs []*sockaddr.SockAddrMarshaler `hcl:"-"`
XForwardedForAuthorizedAddrsRaw interface{} `hcl:"x_forwarded_for_authorized_addrs"`
XForwardedForHopSkips int64 `hcl:"-"`
XForwardedForHopSkipsRaw interface{} `hcl:"x_forwarded_for_hop_skips"`
XForwardedForRejectNotPresent bool `hcl:"-"`
XForwardedForRejectNotPresentRaw interface{} `hcl:"x_forwarded_for_reject_not_present"`
XForwardedForRejectNotAuthorized bool `hcl:"-"`
XForwardedForRejectNotAuthorizedRaw interface{} `hcl:"x_forwarded_for_reject_not_authorized"`
SocketMode string `hcl:"socket_mode"`
SocketUser string `hcl:"socket_user"`
SocketGroup string `hcl:"socket_group"`
Telemetry ListenerTelemetry `hcl:"telemetry"`
// RandomPort is used only for some testing purposes
RandomPort bool `hcl:"-"`
}
func (l *Listener) GoString() string {
return fmt.Sprintf("*%#v", *l)
}
func ParseListeners(result *SharedConfig, list *ast.ObjectList) error {
var err error
result.Listeners = make([]*Listener, 0, len(list.Items))
for i, item := range list.Items {
var l Listener
if err := hcl.DecodeObject(&l, item.Val); err != nil {
return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i))
}
// Decode the block a second time into a raw map (hacky, for now) so the
// original values stay available to Sanitized
var m map[string]interface{}
if err := hcl.DecodeObject(&m, item.Val); err != nil {
return multierror.Prefix(err, fmt.Sprintf("listeners.%d:", i))
}
l.RawConfig = m
// Base values
{
switch {
case l.Type != "":
case len(item.Keys) == 1:
l.Type = strings.ToLower(item.Keys[0].Token.Value().(string))
default:
return multierror.Prefix(errors.New("listener type must be specified"), fmt.Sprintf("listeners.%d:", i))
}
l.Type = strings.ToLower(l.Type)
switch l.Type {
case "tcp", "unix":
default:
return multierror.Prefix(fmt.Errorf("unsupported listener type %q", l.Type), fmt.Sprintf("listeners.%d:", i))
}
if l.PurposeRaw != nil {
if l.Purpose, err = parseutil.ParseCommaStringSlice(l.PurposeRaw); err != nil {
return multierror.Prefix(fmt.Errorf("unable to parse 'purpose' in listener type %q: %w", l.Type, err), fmt.Sprintf("listeners.%d:", i))
}
for i, v := range l.Purpose {
l.Purpose[i] = strings.ToLower(v)
}
}
}
// Request Parameters
{
if l.MaxRequestSizeRaw != nil {
if l.MaxRequestSize, err = parseutil.ParseInt(l.MaxRequestSizeRaw); err != nil {
return multierror.Prefix(fmt.Errorf("error parsing max_request_size: %w", err), fmt.Sprintf("listeners.%d", i))
}
if l.MaxRequestSize < 0 {
return multierror.Prefix(errors.New("max_request_size cannot be negative"), fmt.Sprintf("listeners.%d", i))
}
l.MaxRequestSizeRaw = nil
}
if l.MaxRequestDurationRaw != nil {
if l.MaxRequestDuration, err = parseutil.ParseDurationSecond(l.MaxRequestDurationRaw); err != nil {
return multierror.Prefix(fmt.Errorf("error parsing max_request_duration: %w", err), fmt.Sprintf("listeners.%d", i))
}
if l.MaxRequestDuration < 0 {
return multierror.Prefix(errors.New("max_request_duration cannot be negative"), fmt.Sprintf("listeners.%d", i))
}
l.MaxRequestDurationRaw = nil
}
if l.RequireRequestHeaderRaw != nil {
if l.RequireRequestHeader, err = parseutil.ParseBool(l.RequireRequestHeaderRaw); err != nil {
return multierror.Prefix(fmt.Errorf("invalid value for require_request_header: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.RequireRequestHeaderRaw = nil
}
}
// TLS Parameters
{
if l.TLSDisableRaw != nil {
if l.TLSDisable, err = parseutil.ParseBool(l.TLSDisableRaw); err != nil {
return multierror.Prefix(fmt.Errorf("invalid value for tls_disable: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.TLSDisableRaw = nil
}
if l.TLSCipherSuitesRaw != "" {
if l.TLSCipherSuites, err = tlsutil.ParseCiphers(l.TLSCipherSuitesRaw); err != nil {
return multierror.Prefix(fmt.Errorf("invalid value for tls_cipher_suites: %w", err), fmt.Sprintf("listeners.%d", i))
}
}
if l.TLSPreferServerCipherSuitesRaw != nil {
if l.TLSPreferServerCipherSuites, err = parseutil.ParseBool(l.TLSPreferServerCipherSuitesRaw); err != nil {
return multierror.Prefix(fmt.Errorf("invalid value for tls_prefer_server_cipher_suites: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.TLSPreferServerCipherSuitesRaw = nil
}
if l.TLSRequireAndVerifyClientCertRaw != nil {
if l.TLSRequireAndVerifyClientCert, err = parseutil.ParseBool(l.TLSRequireAndVerifyClientCertRaw); err != nil {
return multierror.Prefix(fmt.Errorf("invalid value for tls_require_and_verify_client_cert: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.TLSRequireAndVerifyClientCertRaw = nil
}
if l.TLSDisableClientCertsRaw != nil {
if l.TLSDisableClientCerts, err = parseutil.ParseBool(l.TLSDisableClientCertsRaw); err != nil {
return multierror.Prefix(fmt.Errorf("invalid value for tls_disable_client_certs: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.TLSDisableClientCertsRaw = nil
}
}
// HTTP timeouts
{
if l.HTTPReadTimeoutRaw != nil {
if l.HTTPReadTimeout, err = parseutil.ParseDurationSecond(l.HTTPReadTimeoutRaw); err != nil {
return multierror.Prefix(fmt.Errorf("error parsing http_read_timeout: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.HTTPReadTimeoutRaw = nil
}
if l.HTTPReadHeaderTimeoutRaw != nil {
if l.HTTPReadHeaderTimeout, err = parseutil.ParseDurationSecond(l.HTTPReadHeaderTimeoutRaw); err != nil {
return multierror.Prefix(fmt.Errorf("error parsing http_read_header_timeout: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.HTTPReadHeaderTimeoutRaw = nil
}
if l.HTTPWriteTimeoutRaw != nil {
if l.HTTPWriteTimeout, err = parseutil.ParseDurationSecond(l.HTTPWriteTimeoutRaw); err != nil {
return multierror.Prefix(fmt.Errorf("error parsing http_write_timeout: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.HTTPWriteTimeoutRaw = nil
}
if l.HTTPIdleTimeoutRaw != nil {
if l.HTTPIdleTimeout, err = parseutil.ParseDurationSecond(l.HTTPIdleTimeoutRaw); err != nil {
return multierror.Prefix(fmt.Errorf("error parsing http_idle_timeout: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.HTTPIdleTimeoutRaw = nil
}
}
// Proxy Protocol config
{
if l.ProxyProtocolAuthorizedAddrsRaw != nil {
if l.ProxyProtocolAuthorizedAddrs, err = parseutil.ParseAddrs(l.ProxyProtocolAuthorizedAddrsRaw); err != nil {
return multierror.Prefix(fmt.Errorf("error parsing proxy_protocol_authorized_addrs: %w", err), fmt.Sprintf("listeners.%d", i))
}
switch l.ProxyProtocolBehavior {
case "allow_authorized", "deny_authorized":
if len(l.ProxyProtocolAuthorizedAddrs) == 0 {
return multierror.Prefix(errors.New("proxy_protocol_behavior set to allow or deny only authorized addresses but no proxy_protocol_authorized_addrs value"), fmt.Sprintf("listeners.%d", i))
}
}
l.ProxyProtocolAuthorizedAddrsRaw = nil
}
}
// X-Forwarded-For config
{
if l.XForwardedForAuthorizedAddrsRaw != nil {
if l.XForwardedForAuthorizedAddrs, err = parseutil.ParseAddrs(l.XForwardedForAuthorizedAddrsRaw); err != nil {
return multierror.Prefix(fmt.Errorf("error parsing x_forwarded_for_authorized_addrs: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.XForwardedForAuthorizedAddrsRaw = nil
}
if l.XForwardedForHopSkipsRaw != nil {
if l.XForwardedForHopSkips, err = parseutil.ParseInt(l.XForwardedForHopSkipsRaw); err != nil {
return multierror.Prefix(fmt.Errorf("error parsing x_forwarded_for_hop_skips: %w", err), fmt.Sprintf("listeners.%d", i))
}
if l.XForwardedForHopSkips < 0 {
return multierror.Prefix(fmt.Errorf("x_forwarded_for_hop_skips cannot be negative but set to %d", l.XForwardedForHopSkips), fmt.Sprintf("listeners.%d", i))
}
l.XForwardedForHopSkipsRaw = nil
}
if l.XForwardedForRejectNotAuthorizedRaw != nil {
if l.XForwardedForRejectNotAuthorized, err = parseutil.ParseBool(l.XForwardedForRejectNotAuthorizedRaw); err != nil {
return multierror.Prefix(fmt.Errorf("invalid value for x_forwarded_for_reject_not_authorized: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.XForwardedForRejectNotAuthorizedRaw = nil
}
if l.XForwardedForRejectNotPresentRaw != nil {
if l.XForwardedForRejectNotPresent, err = parseutil.ParseBool(l.XForwardedForRejectNotPresentRaw); err != nil {
return multierror.Prefix(fmt.Errorf("invalid value for x_forwarded_for_reject_not_present: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.XForwardedForRejectNotPresentRaw = nil
}
}
// Telemetry
{
if l.Telemetry.UnauthenticatedMetricsAccessRaw != nil {
if l.Telemetry.UnauthenticatedMetricsAccess, err = parseutil.ParseBool(l.Telemetry.UnauthenticatedMetricsAccessRaw); err != nil {
return multierror.Prefix(fmt.Errorf("invalid value for telemetry.unauthenticated_metrics_access: %w", err), fmt.Sprintf("listeners.%d", i))
}
l.Telemetry.UnauthenticatedMetricsAccessRaw = nil
}
}
result.Listeners = append(result.Listeners, &l)
}
return nil
}
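
The listener parser above leans on parseutil to normalize values that HCL may hand back as strings, integers, or native types before copying them into the typed fields. The following stand-alone sketch (not part of the commit; the values are illustrative) shows that raw-to-typed conversion pattern:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/parseutil"
)

func main() {
	// Booleans may arrive as true, "true", 1, or "1".
	tlsDisable, _ := parseutil.ParseBool("true")

	// Durations may arrive as "30s", "30", or a bare integer of seconds.
	readTimeout, _ := parseutil.ParseDurationSecond("30s")

	// Address lists (proxy_protocol_authorized_addrs and the
	// x_forwarded_for_* options) go through parseutil.ParseAddrs, which
	// accepts a comma-separated string or a slice of strings.
	addrs, _ := parseutil.ParseAddrs("10.0.0.1,10.0.0.2/31")

	fmt.Println(tlsDisable, readTimeout, len(addrs))
}
```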

View File

@ -0,0 +1,65 @@
package configutil
func (c *SharedConfig) Merge(c2 *SharedConfig) *SharedConfig {
if c2 == nil {
return c
}
result := new(SharedConfig)
for _, l := range c.Listeners {
result.Listeners = append(result.Listeners, l)
}
for _, l := range c2.Listeners {
result.Listeners = append(result.Listeners, l)
}
result.Entropy = c.Entropy
if c2.Entropy != nil {
result.Entropy = c2.Entropy
}
for _, s := range c.Seals {
result.Seals = append(result.Seals, s)
}
for _, s := range c2.Seals {
result.Seals = append(result.Seals, s)
}
result.Telemetry = c.Telemetry
if c2.Telemetry != nil {
result.Telemetry = c2.Telemetry
}
result.DisableMlock = c.DisableMlock
if c2.DisableMlock {
result.DisableMlock = c2.DisableMlock
}
result.DefaultMaxRequestDuration = c.DefaultMaxRequestDuration
if c2.DefaultMaxRequestDuration > result.DefaultMaxRequestDuration {
result.DefaultMaxRequestDuration = c2.DefaultMaxRequestDuration
}
result.LogLevel = c.LogLevel
if c2.LogLevel != "" {
result.LogLevel = c2.LogLevel
}
result.LogFormat = c.LogFormat
if c2.LogFormat != "" {
result.LogFormat = c2.LogFormat
}
result.PidFile = c.PidFile
if c2.PidFile != "" {
result.PidFile = c2.PidFile
}
result.ClusterName = c.ClusterName
if c2.ClusterName != "" {
result.ClusterName = c2.ClusterName
}
return result
}
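
As a usage illustration (not part of the commit), Merge behaves like an overlay: list-valued fields such as listeners and seals are concatenated, and scalar fields from the second config win when set. The Type and Address field names on the listener are assumptions based on the surrounding configutil types.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/internalshared/configutil"
)

func main() {
	base := &configutil.SharedConfig{
		LogLevel:  "info",
		Listeners: []*configutil.Listener{{Type: "tcp", Address: "127.0.0.1:8200"}},
	}
	override := &configutil.SharedConfig{
		LogLevel: "debug",
		PidFile:  "/var/run/vault/vault.pid",
	}

	merged := base.Merge(override)
	// merged keeps the tcp listener from base, takes LogLevel "debug" from
	// the override, and picks up the PidFile that only the override sets.
	fmt.Println(merged.LogLevel, merged.PidFile, len(merged.Listeners))
}
```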

View File

@ -0,0 +1,339 @@
package configutil
import (
"context"
"errors"
"fmt"
"time"
monitoring "cloud.google.com/go/monitoring/apiv3"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/circonus"
"github.com/armon/go-metrics/datadog"
"github.com/armon/go-metrics/prometheus"
stackdriver "github.com/google/go-metrics-stackdriver"
stackdrivervault "github.com/google/go-metrics-stackdriver/vault"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/helper/metricsutil"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/mitchellh/cli"
"google.golang.org/api/option"
)
const (
PrometheusDefaultRetentionTime = 24 * time.Hour
)
// Telemetry is the telemetry configuration for the server
type Telemetry struct {
StatsiteAddr string `hcl:"statsite_address"`
StatsdAddr string `hcl:"statsd_address"`
DisableHostname bool `hcl:"disable_hostname"`
EnableHostnameLabel bool `hcl:"enable_hostname_label"`
MetricsPrefix string `hcl:"metrics_prefix"`
// Circonus: see https://github.com/circonus-labs/circonus-gometrics
// for more details on the various configuration options.
// Valid configuration combinations:
// - CirconusAPIToken
// metric management enabled (search for existing check or create a new one)
// - CirconusSubmissionUrl
// metric management disabled (use check with specified submission_url,
// broker must be using a public SSL certificate)
// - CirconusAPIToken + CirconusCheckSubmissionURL
// metric management enabled (use check with specified submission_url)
// - CirconusAPIToken + CirconusCheckID
// metric management enabled (use check with specified id)
// CirconusAPIToken is a valid API Token used to create/manage check. If provided,
// metric management is enabled.
// Default: none
CirconusAPIToken string `hcl:"circonus_api_token"`
// CirconusAPIApp is an app name associated with API token.
// Default: "consul"
CirconusAPIApp string `hcl:"circonus_api_app"`
// CirconusAPIURL is the base URL to use for contacting the Circonus API.
// Default: "https://api.circonus.com/v2"
CirconusAPIURL string `hcl:"circonus_api_url"`
// CirconusSubmissionInterval is the interval at which metrics are submitted to Circonus.
// Default: 10s
CirconusSubmissionInterval string `hcl:"circonus_submission_interval"`
// CirconusCheckSubmissionURL is the check.config.submission_url field from a
// previously created HTTPTRAP check.
// Default: none
CirconusCheckSubmissionURL string `hcl:"circonus_submission_url"`
// CirconusCheckID is the check id (not check bundle id) from a previously created
// HTTPTRAP check. The numeric portion of the check._cid field.
// Default: none
CirconusCheckID string `hcl:"circonus_check_id"`
// CirconusCheckForceMetricActivation will force enabling metrics, as they are encountered,
// if the metric already exists and is NOT active. If check management is enabled, the default
// behavior is to add new metrics as they are encountered. If the metric already exists in the
// check, it will *NOT* be activated. This setting overrides that behavior.
// Default: "false"
CirconusCheckForceMetricActivation string `hcl:"circonus_check_force_metric_activation"`
// CirconusCheckInstanceID serves to uniquely identify the metrics coming from this "instance".
// It can be used to maintain metric continuity with transient or ephemeral instances as
// they move around within an infrastructure.
// Default: hostname:app
CirconusCheckInstanceID string `hcl:"circonus_check_instance_id"`
// CirconusCheckSearchTag is a special tag which, when coupled with the instance id, helps to
// narrow down the search results when neither a Submission URL nor a Check ID is provided.
// Default: service:app (e.g. service:consul)
CirconusCheckSearchTag string `hcl:"circonus_check_search_tag"`
// CirconusCheckTags is a comma-separated list of tags to apply to the check. Note that
// the value of CirconusCheckSearchTag will always be added to the check.
// Default: none
CirconusCheckTags string `hcl:"circonus_check_tags"`
// CirconusCheckDisplayName is the name for the check which will be displayed in the Circonus UI.
// Default: value of CirconusCheckInstanceID
CirconusCheckDisplayName string `hcl:"circonus_check_display_name"`
// CirconusBrokerID is an explicit broker to use when creating a new check. The numeric portion
// of broker._cid. If metric management is enabled and neither a Submission URL nor Check ID
// is provided, an attempt will be made to search for an existing check using Instance ID and
// Search Tag. If one is not found, a new HTTPTRAP check will be created.
// Default: use Select Tag if provided, otherwise, a random Enterprise Broker associated
// with the specified API token or the default Circonus Broker.
// Default: none
CirconusBrokerID string `hcl:"circonus_broker_id"`
// CirconusBrokerSelectTag is a special tag which will be used to select a broker when
// a Broker ID is not provided. The best use of this is as a hint for which broker
// should be used based on *where* this particular instance is running.
// (e.g. a specific geo location or datacenter, dc:sfo)
// Default: none
CirconusBrokerSelectTag string `hcl:"circonus_broker_select_tag"`
// Dogstats:
// DogStatsdAddr is the address of a dogstatsd instance. If provided,
// metrics will be sent to that instance
DogStatsDAddr string `hcl:"dogstatsd_addr"`
// DogStatsdTags are the global tags that should be sent with each packet to dogstatsd
// It is a list of strings, where each string looks like "my_tag_name:my_tag_value"
DogStatsDTags []string `hcl:"dogstatsd_tags"`
// Prometheus:
// PrometheusRetentionTime is the retention time for prometheus metrics if greater than 0.
// Default: 24h
PrometheusRetentionTime time.Duration `hcl:"-"`
PrometheusRetentionTimeRaw interface{} `hcl:"prometheus_retention_time"`
// Stackdriver:
// StackdriverProjectID is the project to publish stackdriver metrics to.
StackdriverProjectID string `hcl:"stackdriver_project_id"`
// StackdriverLocation is the GCP or AWS region of the monitored resource.
StackdriverLocation string `hcl:"stackdriver_location"`
// StackdriverNamespace is the namespace identifier, such as a cluster name.
StackdriverNamespace string `hcl:"stackdriver_namespace"`
// StackdriverDebugLogs will write additional stackdriver related debug logs to stderr.
StackdriverDebugLogs bool `hcl:"stackdriver_debug_logs"`
}
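
For reference, a telemetry stanza decodes straight into this struct through its hcl tags; the retention time arrives in PrometheusRetentionTimeRaw and is normalized afterwards (inside Vault this happens in parseTelemetry below). A hedged, stand-alone sketch that decodes a stanza body from a string literal rather than the config AST:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/vault/internalshared/configutil"
	"github.com/hashicorp/vault/sdk/helper/parseutil"
)

func main() {
	var t configutil.Telemetry
	err := hcl.Decode(&t, `
		prometheus_retention_time = "30s"
		disable_hostname          = true
	`)
	if err != nil {
		panic(err)
	}

	// Normalize the raw duration the same way parseTelemetry does.
	t.PrometheusRetentionTime, err = parseutil.ParseDurationSecond(t.PrometheusRetentionTimeRaw)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.PrometheusRetentionTime, t.DisableHostname)
}
```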
func (t *Telemetry) GoString() string {
return fmt.Sprintf("*%#v", *t)
}
func parseTelemetry(result *SharedConfig, list *ast.ObjectList) error {
if len(list.Items) > 1 {
return fmt.Errorf("only one 'telemetry' block is permitted")
}
// Get our one item
item := list.Items[0]
var t Telemetry
if err := hcl.DecodeObject(&t, item.Val); err != nil {
return multierror.Prefix(err, "telemetry:")
}
if result.Telemetry == nil {
result.Telemetry = &Telemetry{}
}
if err := hcl.DecodeObject(&result.Telemetry, item.Val); err != nil {
return multierror.Prefix(err, "telemetry:")
}
if result.Telemetry.PrometheusRetentionTimeRaw != nil {
var err error
if result.Telemetry.PrometheusRetentionTime, err = parseutil.ParseDurationSecond(result.Telemetry.PrometheusRetentionTimeRaw); err != nil {
return err
}
result.Telemetry.PrometheusRetentionTimeRaw = nil
} else {
result.Telemetry.PrometheusRetentionTime = PrometheusDefaultRetentionTime
}
return nil
}
type SetupTelemetryOpts struct {
Config *Telemetry
Ui cli.Ui
ServiceName string
DisplayName string
UserAgent string
ClusterName string
}
// SetupTelemetry is used to setup the telemetry sub-systems and returns the
// in-memory sink to be used in http configuration
func SetupTelemetry(opts *SetupTelemetryOpts) (*metrics.InmemSink, *metricsutil.ClusterMetricSink, bool, error) {
if opts == nil {
return nil, nil, false, errors.New("nil opts passed into SetupTelemetry")
}
if opts.Config == nil {
opts.Config = &Telemetry{}
}
/* Setup telemetry
Aggregate on 10-second intervals for 1 minute. Expose the
metrics over stderr when a SIGUSR1 is received.
*/
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
metrics.DefaultInmemSignal(inm)
if opts.Config.MetricsPrefix != "" {
opts.ServiceName = opts.Config.MetricsPrefix
}
metricsConf := metrics.DefaultConfig(opts.ServiceName)
metricsConf.EnableHostname = !opts.Config.DisableHostname
metricsConf.EnableHostnameLabel = opts.Config.EnableHostnameLabel
// Configure the statsite sink
var fanout metrics.FanoutSink
var prometheusEnabled bool
// Configure the Prometheus sink
if opts.Config.PrometheusRetentionTime != 0 {
prometheusEnabled = true
prometheusOpts := prometheus.PrometheusOpts{
Expiration: opts.Config.PrometheusRetentionTime,
}
sink, err := prometheus.NewPrometheusSinkFrom(prometheusOpts)
if err != nil {
return nil, nil, false, err
}
fanout = append(fanout, sink)
}
if opts.Config.StatsiteAddr != "" {
sink, err := metrics.NewStatsiteSink(opts.Config.StatsiteAddr)
if err != nil {
return nil, nil, false, err
}
fanout = append(fanout, sink)
}
// Configure the statsd sink
if opts.Config.StatsdAddr != "" {
sink, err := metrics.NewStatsdSink(opts.Config.StatsdAddr)
if err != nil {
return nil, nil, false, err
}
fanout = append(fanout, sink)
}
// Configure the Circonus sink
if opts.Config.CirconusAPIToken != "" || opts.Config.CirconusCheckSubmissionURL != "" {
cfg := &circonus.Config{}
cfg.Interval = opts.Config.CirconusSubmissionInterval
cfg.CheckManager.API.TokenKey = opts.Config.CirconusAPIToken
cfg.CheckManager.API.TokenApp = opts.Config.CirconusAPIApp
cfg.CheckManager.API.URL = opts.Config.CirconusAPIURL
cfg.CheckManager.Check.SubmissionURL = opts.Config.CirconusCheckSubmissionURL
cfg.CheckManager.Check.ID = opts.Config.CirconusCheckID
cfg.CheckManager.Check.ForceMetricActivation = opts.Config.CirconusCheckForceMetricActivation
cfg.CheckManager.Check.InstanceID = opts.Config.CirconusCheckInstanceID
cfg.CheckManager.Check.SearchTag = opts.Config.CirconusCheckSearchTag
cfg.CheckManager.Check.DisplayName = opts.Config.CirconusCheckDisplayName
cfg.CheckManager.Check.Tags = opts.Config.CirconusCheckTags
cfg.CheckManager.Broker.ID = opts.Config.CirconusBrokerID
cfg.CheckManager.Broker.SelectTag = opts.Config.CirconusBrokerSelectTag
if cfg.CheckManager.API.TokenApp == "" {
cfg.CheckManager.API.TokenApp = opts.ServiceName
}
if cfg.CheckManager.Check.DisplayName == "" {
cfg.CheckManager.Check.DisplayName = opts.DisplayName
}
if cfg.CheckManager.Check.SearchTag == "" {
cfg.CheckManager.Check.SearchTag = fmt.Sprintf("service:%s", opts.ServiceName)
}
sink, err := circonus.NewCirconusSink(cfg)
if err != nil {
return nil, nil, false, err
}
sink.Start()
fanout = append(fanout, sink)
}
if opts.Config.DogStatsDAddr != "" {
var tags []string
if opts.Config.DogStatsDTags != nil {
tags = opts.Config.DogStatsDTags
}
sink, err := datadog.NewDogStatsdSink(opts.Config.DogStatsDAddr, metricsConf.HostName)
if err != nil {
return nil, nil, false, errwrap.Wrapf("failed to start DogStatsD sink: {{err}}", err)
}
sink.SetTags(tags)
fanout = append(fanout, sink)
}
// Configure the stackdriver sink
if opts.Config.StackdriverProjectID != "" {
client, err := monitoring.NewMetricClient(context.Background(), option.WithUserAgent(opts.UserAgent))
if err != nil {
return nil, nil, false, fmt.Errorf("Failed to create stackdriver client: %v", err)
}
sink := stackdriver.NewSink(client, &stackdriver.Config{
LabelExtractor: stackdrivervault.Extractor,
Bucketer: stackdrivervault.Bucketer,
ProjectID: opts.Config.StackdriverProjectID,
Location: opts.Config.StackdriverLocation,
Namespace: opts.Config.StackdriverNamespace,
DebugLogs: opts.Config.StackdriverDebugLogs,
})
fanout = append(fanout, sink)
}
// Initialize the global sink
if len(fanout) > 1 {
// Leaving the hostname in metric names produces poorly named metrics for Prometheus
if !opts.Config.DisableHostname {
opts.Ui.Warn("telemetry.disable_hostname has been set to false. Recommended setting is true for Prometheus to avoid poorly named metrics.")
}
} else {
metricsConf.EnableHostname = false
}
fanout = append(fanout, inm)
_, err := metrics.NewGlobal(metricsConf, fanout)
if err != nil {
return nil, nil, false, err
}
// Initialize a wrapper around the global sink; this will be passed to Core
// and to any backend.
wrapper := &metricsutil.ClusterMetricSink{
ClusterName: opts.ClusterName,
MaxGaugeCardinality: 500,
GaugeInterval: 10 * time.Minute,
Sink: fanout,
}
return inm, wrapper, prometheusEnabled, nil
}
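
A sketch of how a command might call this helper; the initTelemetry wrapper, the user-agent string, and the "vault" service name are illustrative, while the opts mirror the struct above:

```go
package telemetryexample

import (
	metrics "github.com/armon/go-metrics"
	"github.com/hashicorp/vault/helper/metricsutil"
	"github.com/hashicorp/vault/internalshared/configutil"
	"github.com/mitchellh/cli"
)

// initTelemetry is a hypothetical helper showing how a command would hand
// its parsed telemetry stanza to SetupTelemetry.
func initTelemetry(ui cli.Ui, shared *configutil.SharedConfig) (*metrics.InmemSink, *metricsutil.ClusterMetricSink, bool, error) {
	return configutil.SetupTelemetry(&configutil.SetupTelemetryOpts{
		Config:      shared.Telemetry,
		Ui:          ui,
		ServiceName: "vault",
		DisplayName: "Vault",
		UserAgent:   "vault-example/0.0.1", // illustrative user agent
		ClusterName: shared.ClusterName,
	})
}
```

The in-memory sink is what the HTTP layer exposes for metrics requests, and the ClusterMetricSink wrapper is what gets passed down to Core and the backends.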

View File

@ -11,13 +11,18 @@ import (
"strconv"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/internalshared/reloadutil"
"github.com/hashicorp/vault/sdk/helper/parseutil"
"github.com/hashicorp/vault/sdk/helper/tlsutil"
"github.com/jefferai/isbadcipher"
"github.com/mitchellh/cli"
)
type Listener struct {
net.Listener
Config *configutil.Listener
}
type UnixSocketsConfig struct {
User string `hcl:"user"`
Mode string `hcl:"mode"`
@ -67,92 +72,69 @@ func UnixSocketListener(path string, unixSocketsConfig *UnixSocketsConfig) (net.
}, nil
}
func WrapTLS(
ln net.Listener,
func TLSConfig(
l *configutil.Listener,
props map[string]string,
config map[string]interface{},
ui cli.Ui) (net.Listener, map[string]string, reloadutil.ReloadFunc, *tls.Config, error) {
ui cli.Ui) (*tls.Config, reloadutil.ReloadFunc, error) {
props["tls"] = "disabled"
if v, ok := config["tls_disable"]; ok {
disabled, err := parseutil.ParseBool(v)
if err != nil {
return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_disable': {{err}}", err)
}
if disabled {
return ln, props, nil, nil, nil
}
if l.TLSDisable {
return nil, nil, nil
}
certFileRaw, ok := config["tls_cert_file"]
if !ok {
return nil, nil, nil, nil, fmt.Errorf("'tls_cert_file' must be set")
}
certFile := certFileRaw.(string)
keyFileRaw, ok := config["tls_key_file"]
if !ok {
return nil, nil, nil, nil, fmt.Errorf("'tls_key_file' must be set")
}
keyFile := keyFileRaw.(string)
cg := reloadutil.NewCertificateGetter(certFile, keyFile, "")
if err := cg.Reload(config); err != nil {
cg := reloadutil.NewCertificateGetter(l.TLSCertFile, l.TLSKeyFile, "")
if err := cg.Reload(); err != nil {
// We try the key without a passphrase first and if we get an incorrect
// passphrase response, try again after prompting for a passphrase
if errwrap.Contains(err, x509.IncorrectPasswordError.Error()) {
var passphrase string
passphrase, err = ui.AskSecret(fmt.Sprintf("Enter passphrase for %s:", keyFile))
passphrase, err = ui.AskSecret(fmt.Sprintf("Enter passphrase for %s:", l.TLSKeyFile))
if err == nil {
cg = reloadutil.NewCertificateGetter(certFile, keyFile, passphrase)
if err = cg.Reload(config); err == nil {
cg = reloadutil.NewCertificateGetter(l.TLSCertFile, l.TLSKeyFile, passphrase)
if err = cg.Reload(); err == nil {
goto PASSPHRASECORRECT
}
}
}
return nil, nil, nil, nil, errwrap.Wrapf("error loading TLS cert: {{err}}", err)
return nil, nil, errwrap.Wrapf("error loading TLS cert: {{err}}", err)
}
PASSPHRASECORRECT:
var tlsvers string
tlsversRaw, ok := config["tls_min_version"]
if !ok {
tlsvers = "tls12"
} else {
tlsvers = tlsversRaw.(string)
tlsConf := &tls.Config{
GetCertificate: cg.GetCertificate,
NextProtos: []string{"h2", "http/1.1"},
ClientAuth: tls.RequestClientCert,
PreferServerCipherSuites: l.TLSPreferServerCipherSuites,
}
tlsConf := &tls.Config{}
tlsConf.GetCertificate = cg.GetCertificate
tlsConf.NextProtos = []string{"h2", "http/1.1"}
tlsConf.MinVersion, ok = tlsutil.TLSLookup[tlsvers]
if !ok {
return nil, nil, nil, nil, fmt.Errorf("'tls_min_version' value %q not supported, please specify one of [tls10,tls11,tls12,tls13]", tlsvers)
if l.TLSMinVersion == "" {
l.TLSMinVersion = "tls12"
}
tlsConf.ClientAuth = tls.RequestClientCert
if v, ok := config["tls_cipher_suites"]; ok {
ciphers, err := tlsutil.ParseCiphers(v.(string))
if err != nil {
return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_cipher_suites': {{err}}", err)
}
var ok bool
tlsConf.MinVersion, ok = tlsutil.TLSLookup[l.TLSMinVersion]
if !ok {
return nil, nil, fmt.Errorf("'tls_min_version' value %q not supported, please specify one of [tls10,tls11,tls12,tls13]", l.TLSMinVersion)
}
if len(l.TLSCipherSuites) > 0 {
// HTTP/2 with TLS 1.2 blacklists several cipher suites.
// https://tools.ietf.org/html/rfc7540#appendix-A
//
// Since the CLI (net/http) automatically uses HTTP/2 with TLS 1.2,
// we check here if all or some specified cipher suites are blacklisted.
badCiphers := []string{}
for _, cipher := range ciphers {
for _, cipher := range l.TLSCipherSuites {
if isbadcipher.IsBadCipher(cipher) {
// Get the name of the current cipher.
cipherStr, err := tlsutil.GetCipherName(cipher)
if err != nil {
return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_cipher_suites': {{err}}", err)
return nil, nil, errwrap.Wrapf("invalid value for 'tls_cipher_suites': {{err}}", err)
}
badCiphers = append(badCiphers, cipherStr)
}
}
if len(badCiphers) == len(ciphers) {
if len(badCiphers) == len(l.TLSCipherSuites) {
ui.Warn(`WARNING! All cipher suites defined by 'tls_cipher_suites' are blacklisted by the
HTTP/2 specification. HTTP/2 communication with TLS 1.2 will not work as intended
and Vault will be unavailable via the CLI.
@ -163,54 +145,34 @@ blacklisted by the HTTP/2 specification:
%v
Please see https://tools.ietf.org/html/rfc7540#appendix-A for further information.`, badCiphers))
}
tlsConf.CipherSuites = ciphers
tlsConf.CipherSuites = l.TLSCipherSuites
}
if v, ok := config["tls_prefer_server_cipher_suites"]; ok {
preferServer, err := parseutil.ParseBool(v)
if err != nil {
return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_prefer_server_cipher_suites': {{err}}", err)
}
tlsConf.PreferServerCipherSuites = preferServer
}
var requireVerifyCerts bool
var err error
if v, ok := config["tls_require_and_verify_client_cert"]; ok {
requireVerifyCerts, err = parseutil.ParseBool(v)
if err != nil {
return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_require_and_verify_client_cert': {{err}}", err)
}
if requireVerifyCerts {
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
}
if tlsClientCaFile, ok := config["tls_client_ca_file"]; ok {
if l.TLSRequireAndVerifyClientCert {
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
if l.TLSClientCAFile != "" {
caPool := x509.NewCertPool()
data, err := ioutil.ReadFile(tlsClientCaFile.(string))
data, err := ioutil.ReadFile(l.TLSClientCAFile)
if err != nil {
return nil, nil, nil, nil, errwrap.Wrapf("failed to read tls_client_ca_file: {{err}}", err)
return nil, nil, errwrap.Wrapf("failed to read tls_client_ca_file: {{err}}", err)
}
if !caPool.AppendCertsFromPEM(data) {
return nil, nil, nil, nil, fmt.Errorf("failed to parse CA certificate in tls_client_ca_file")
return nil, nil, fmt.Errorf("failed to parse CA certificate in tls_client_ca_file")
}
tlsConf.ClientCAs = caPool
}
}
if v, ok := config["tls_disable_client_certs"]; ok {
disableClientCerts, err := parseutil.ParseBool(v)
if err != nil {
return nil, nil, nil, nil, errwrap.Wrapf("invalid value for 'tls_disable_client_certs': {{err}}", err)
}
if disableClientCerts && requireVerifyCerts {
return nil, nil, nil, nil, fmt.Errorf("'tls_disable_client_certs' and 'tls_require_and_verify_client_cert' are mutually exclusive")
}
if disableClientCerts {
tlsConf.ClientAuth = tls.NoClientCert
if l.TLSDisableClientCerts {
if l.TLSRequireAndVerifyClientCert {
return nil, nil, fmt.Errorf("'tls_disable_client_certs' and 'tls_require_and_verify_client_cert' are mutually exclusive")
}
tlsConf.ClientAuth = tls.NoClientCert
}
ln = tls.NewListener(ln, tlsConf)
props["tls"] = "enabled"
return ln, props, cg.Reload, tlsConf, nil
return tlsConf, cg.Reload, nil
}
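
Since TLSConfig no longer wraps the listener itself, callers now decide how to apply the returned tls.Config and reload function. A hedged sketch of the calling pattern (the wrapListener helper is illustrative, not part of the commit):

```go
package listenerexample

import (
	"crypto/tls"
	"net"

	"github.com/hashicorp/vault/internalshared/configutil"
	"github.com/hashicorp/vault/internalshared/listenerutil"
	"github.com/hashicorp/vault/internalshared/reloadutil"
	"github.com/mitchellh/cli"
)

// wrapListener applies the TLS settings from a parsed listener stanza to an
// already-bound net.Listener.
func wrapListener(ln net.Listener, l *configutil.Listener, ui cli.Ui) (net.Listener, reloadutil.ReloadFunc, error) {
	props := map[string]string{"addr": ln.Addr().String()}
	tlsConf, reloadFunc, err := listenerutil.TLSConfig(l, props, ui)
	if err != nil {
		return nil, nil, err
	}
	if tlsConf != nil { // tls_disable leaves tlsConf nil
		ln = tls.NewListener(ln, tlsConf)
	}
	return ln, reloadFunc, nil
}
```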
// setFilePermissions handles configuring ownership and permissions

View File

@ -13,7 +13,7 @@ import (
)
// ReloadFunc are functions that are called when a reload is requested
type ReloadFunc func(map[string]interface{}) error
type ReloadFunc func() error
// CertificateGetter satisfies ReloadFunc and its GetCertificate method
// satisfies the tls.GetCertificate function signature. Currently it does not
@ -36,7 +36,7 @@ func NewCertificateGetter(certFile, keyFile, passphrase string) *CertificateGett
}
}
func (cg *CertificateGetter) Reload(_ map[string]interface{}) error {
func (cg *CertificateGetter) Reload() error {
certPEMBlock, err := ioutil.ReadFile(cg.certFile)
if err != nil {
return err

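With the config-map parameter gone, the reload functions and the certificate getter are easier to use outside the server config plumbing. A stand-alone sketch under assumed paths (the cert/key locations and the ":8200" address are hypothetical):

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/hashicorp/vault/internalshared/reloadutil"
)

func main() {
	cg := reloadutil.NewCertificateGetter("/etc/vault/tls/vault.crt", "/etc/vault/tls/vault.key", "")
	if err := cg.Reload(); err != nil { // Reload no longer takes a config map
		log.Fatalf("failed to load TLS material: %v", err)
	}

	srv := &http.Server{
		Addr:      ":8200",
		TLSConfig: &tls.Config{GetCertificate: cg.GetCertificate},
	}
	// Empty cert/key paths are fine because GetCertificate serves the cert.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}
```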
View File

@ -59,7 +59,7 @@ opM24uvQT3Bc0UM0WNh3tdRFuboxDeBDh7PX/2RIoiaMuCCiRZ3O0A==
}
cg := NewCertificateGetter(certFile, keyFile, "")
err = cg.Reload(nil)
err = cg.Reload()
if err == nil {
t.Fatal("error expected")
}
@ -68,7 +68,7 @@ opM24uvQT3Bc0UM0WNh3tdRFuboxDeBDh7PX/2RIoiaMuCCiRZ3O0A==
}
cg = NewCertificateGetter(certFile, keyFile, password)
if err := cg.Reload(nil); err != nil {
if err := cg.Reload(); err != nil {
t.Fatalf("err: %v", err)
}
}

View File

@ -473,7 +473,7 @@ func (c *Core) newAuditBackend(ctx context.Context, entry *MountEntry, view logi
}
}
c.reloadFuncs[key] = append(c.reloadFuncs[key], func(map[string]interface{}) error {
c.reloadFuncs[key] = append(c.reloadFuncs[key], func() error {
if auditLogger.IsInfo() {
auditLogger.Info("reloading file audit backend", "path", entry.Path)
}

View File

@ -829,7 +829,7 @@ func NewCore(conf *CoreConfig) (*Core, error) {
if c.seal == nil {
c.seal = NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: c.logger.Named("shamir"),
}),
})
@ -1069,7 +1069,7 @@ func (c *Core) unseal(key []byte, useRecoveryKeys bool) (bool, error) {
if sealToUse.BarrierType() == wrapping.Shamir && c.migrationInfo == nil {
// If this is a legacy shamir seal this serves no purpose but it
// doesn't hurt.
err = sealToUse.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(masterKey)
err = sealToUse.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(masterKey)
if err != nil {
return false, err
}
@ -1286,7 +1286,7 @@ func (c *Core) unsealPart(ctx context.Context, seal Seal, key []byte, useRecover
c.migrationInfo.shamirCombinedKey = make([]byte, len(recoveredKey))
copy(c.migrationInfo.shamirCombinedKey, recoveredKey)
if seal.StoredKeysSupported() == vaultseal.StoredKeysSupportedShamirMaster {
err = seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(recoveredKey)
err = seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(recoveredKey)
if err != nil {
return nil, errwrap.Wrapf("failed to set master key in seal: {{err}}", err)
}
@ -1360,7 +1360,7 @@ func (c *Core) migrateSeal(ctx context.Context) error {
// We have recovery keys; we're going to use them as the new
// shamir KeK.
err := c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(c.migrationInfo.recoveryKey)
err := c.seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(c.migrationInfo.recoveryKey)
if err != nil {
return errwrap.Wrapf("failed to set master key in seal: {{err}}", err)
}
@ -2264,7 +2264,7 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, combinedKey []byte) ([]
case vaultseal.StoredKeysSupportedShamirMaster:
testseal := NewDefaultSeal(&vaultseal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: c.logger.Named("testseal"),
}),
})
@ -2274,7 +2274,7 @@ func (c *Core) unsealKeyToMasterKey(ctx context.Context, combinedKey []byte) ([]
return nil, errwrap.Wrapf("failed to setup test barrier config: {{err}}", err)
}
testseal.SetCachedBarrierConfig(cfg)
err = testseal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(combinedKey)
err = testseal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(combinedKey)
if err != nil {
return nil, errwrap.Wrapf("failed to setup unseal key: {{err}}", err)
}
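
The repeated change in this file (and in the seal-related files below) is the switch from the generic AEAD wrapper constructor to a dedicated Shamir one. A minimal construction sketch; the go-kms-wrapping import paths and the hclog logger are assumptions based on how the wrapper is used at these call sites:

```go
package main

import (
	hclog "github.com/hashicorp/go-hclog"
	wrapping "github.com/hashicorp/go-kms-wrapping"
	aeadwrapper "github.com/hashicorp/go-kms-wrapping/wrappers/aead"
)

func main() {
	// NewShamirWrapper replaces NewWrapper for the Shamir seal path; the
	// wrapper's AES-GCM key is set later from the combined unseal key via
	// SetAESGCMKeyBytes, exactly as in the call sites above.
	w := aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
		Logger: hclog.Default().Named("shamir"),
	})
	_ = w
}
```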

View File

@ -817,7 +817,7 @@ func (c *Core) reloadShamirKey(ctx context.Context) error {
}
shamirKey = keyring.masterKey
}
return c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(shamirKey)
return c.seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(shamirKey)
}
func (c *Core) performKeyUpgrades(ctx context.Context) error {

View File

@ -517,7 +517,7 @@ func (i *IdentityStore) handleEntityBatchDelete() framework.OperationFunc {
defer i.lock.Unlock()
// Create a MemDB transaction to delete entities from the inmem database
// without altering storage. Batch deletion on storage bucket items is
// without altering storage. Batch deletion on storage bucket items is
// performed directly through entityPacker.
txn := i.db.Txn(true)
defer txn.Abort()

View File

@ -303,7 +303,7 @@ func (c *Core) Initialize(ctx context.Context, initParams *InitParams) (*InitRes
switch c.seal.StoredKeysSupported() {
case seal.StoredKeysSupportedShamirMaster:
keysToStore := [][]byte{barrierKey}
if err := c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(sealKey); err != nil {
if err := c.seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(sealKey); err != nil {
c.logger.Error("failed to set seal key", "error", err)
return nil, errwrap.Wrapf("failed to set seal key: {{err}}", err)
}

View File

@ -400,12 +400,12 @@ func (c *Core) BarrierRekeyUpdate(ctx context.Context, key []byte, nonce string)
case c.seal.BarrierType() == wrapping.Shamir:
if c.seal.StoredKeysSupported() == seal.StoredKeysSupportedShamirMaster {
testseal := NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: c.logger.Named("testseal"),
}),
})
testseal.SetCore(c)
err = testseal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(recoveredKey)
err = testseal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(recoveredKey)
if err != nil {
return nil, logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to setup unseal key: {{err}}", err).Error())
}
@ -533,7 +533,7 @@ func (c *Core) performBarrierRekey(ctx context.Context, newSealKey []byte) logic
}
if c.seal.StoredKeysSupported() != seal.StoredKeysSupportedGeneric {
err := c.seal.GetAccess().Wrapper.(*aeadwrapper.Wrapper).SetAESGCMKeyBytes(newSealKey)
err := c.seal.GetAccess().Wrapper.(*aeadwrapper.ShamirWrapper).SetAESGCMKeyBytes(newSealKey)
if err != nil {
return logical.CodedError(http.StatusInternalServerError, errwrap.Wrapf("failed to update barrier seal key: {{err}}", err).Error())
}

View File

@ -14,6 +14,7 @@ import (
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/helper/identity"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internalshared/configutil"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/errutil"
@ -40,13 +41,11 @@ var (
// HandlerProperties is used to seed configuration into a vaulthttp.Handler.
// It's in this package to avoid a circular dependency
type HandlerProperties struct {
Core *Core
MaxRequestSize int64
MaxRequestDuration time.Duration
DisablePrintableCheck bool
RecoveryMode bool
RecoveryToken *uberAtomic.String
UnauthenticatedMetricsAccess bool
Core *Core
ListenerConfig *configutil.Listener
DisablePrintableCheck bool
RecoveryMode bool
RecoveryToken *uberAtomic.String
}
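
Per-listener request limits now travel inside ListenerConfig rather than as standalone HandlerProperties fields. A hedged sketch of constructing the properties for the HTTP handler; the configutil.Listener field names beyond MaxRequestDuration, and the specific limit values, are assumptions for illustration:

```go
package example

import (
	"net/http"
	"time"

	vaulthttp "github.com/hashicorp/vault/http"
	"github.com/hashicorp/vault/internalshared/configutil"
	"github.com/hashicorp/vault/vault"
)

// newHandler shows the new shape: request size and duration limits ride
// along on the listener config instead of on HandlerProperties itself.
func newHandler(core *vault.Core) http.Handler {
	return vaulthttp.Handler(&vault.HandlerProperties{
		Core: core,
		ListenerConfig: &configutil.Listener{
			Address:            "127.0.0.1:8200",
			MaxRequestSize:     32 * 1024 * 1024,
			MaxRequestDuration: 90 * time.Second,
		},
	})
}
```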
// fetchEntityAndDerivedPolicies returns the entity object for the given entity

View File

@ -21,7 +21,7 @@ func NewTestSeal(t testing.T, opts *seal.TestSealOpts) Seal {
switch opts.StoredKeys {
case seal.StoredKeysSupportedShamirMaster:
newSeal := NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: opts.Logger,
}),
})
@ -34,7 +34,7 @@ func NewTestSeal(t testing.T, opts *seal.TestSealOpts) Seal {
return newSeal
case seal.StoredKeysNotSupported:
newSeal := NewDefaultSeal(&seal.Access{
Wrapper: aeadwrapper.NewWrapper(&wrapping.WrapperOptions{
Wrapper: aeadwrapper.NewShamirWrapper(&wrapping.WrapperOptions{
Logger: opts.Logger,
}),
})

View File

@ -1370,7 +1370,7 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
}
certGetter := reloadutil.NewCertificateGetter(certFile, keyFile, "")
certGetters = append(certGetters, certGetter)
certGetter.Reload(nil)
certGetter.Reload()
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{tlsCert},
RootCAs: testCluster.RootCAs,
@ -1590,8 +1590,8 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
if opts != nil && opts.HandlerFunc != nil {
props := opts.DefaultHandlerProperties
props.Core = c
if props.MaxRequestDuration == 0 {
props.MaxRequestDuration = DefaultMaxRequestDuration
if props.ListenerConfig != nil && props.ListenerConfig.MaxRequestDuration == 0 {
props.ListenerConfig.MaxRequestDuration = DefaultMaxRequestDuration
}
handlers[i] = opts.HandlerFunc(&props)
servers[i].Handler = handlers[i]