2015-03-12 22:21:11 +00:00
|
|
|
package command
|
|
|
|
|
|
|
|
import (
|
2018-01-19 06:44:44 +00:00
|
|
|
"context"
|
2016-08-15 20:01:15 +00:00
|
|
|
"encoding/base64"
|
2015-03-12 22:30:07 +00:00
|
|
|
"fmt"
|
2017-07-31 15:28:06 +00:00
|
|
|
"io/ioutil"
|
2015-03-13 17:09:38 +00:00
|
|
|
"net"
|
|
|
|
"net/http"
|
2015-05-02 22:57:40 +00:00
|
|
|
"net/url"
|
2015-04-04 19:06:41 +00:00
|
|
|
"os"
|
2017-07-31 15:28:06 +00:00
|
|
|
"path/filepath"
|
2015-10-28 17:05:56 +00:00
|
|
|
"runtime"
|
2015-04-04 19:06:41 +00:00
|
|
|
"sort"
|
2015-05-02 22:57:40 +00:00
|
|
|
"strconv"
|
2015-03-12 22:21:11 +00:00
|
|
|
"strings"
|
2016-07-30 17:17:29 +00:00
|
|
|
"sync"
|
2015-04-15 01:44:09 +00:00
|
|
|
"time"
|
2015-03-12 22:21:11 +00:00
|
|
|
|
2016-08-19 20:45:17 +00:00
|
|
|
colorable "github.com/mattn/go-colorable"
|
|
|
|
log "github.com/mgutz/logxi/v1"
|
2017-09-08 02:03:44 +00:00
|
|
|
"github.com/mitchellh/cli"
|
2017-08-01 18:07:08 +00:00
|
|
|
testing "github.com/mitchellh/go-testing-interface"
|
2017-08-24 22:23:40 +00:00
|
|
|
"github.com/posener/complete"
|
2016-08-19 20:45:17 +00:00
|
|
|
|
2016-08-19 15:03:53 +00:00
|
|
|
"google.golang.org/grpc/grpclog"
|
|
|
|
|
2015-04-15 01:44:09 +00:00
|
|
|
"github.com/armon/go-metrics"
|
2016-07-22 19:49:23 +00:00
|
|
|
"github.com/armon/go-metrics/circonus"
|
2017-06-17 03:51:46 +00:00
|
|
|
"github.com/armon/go-metrics/datadog"
|
2016-04-04 14:44:22 +00:00
|
|
|
"github.com/hashicorp/errwrap"
|
2017-11-10 21:21:46 +00:00
|
|
|
hclog "github.com/hashicorp/go-hclog"
|
2016-03-11 21:46:56 +00:00
|
|
|
"github.com/hashicorp/go-multierror"
|
2015-04-05 01:07:53 +00:00
|
|
|
"github.com/hashicorp/vault/audit"
|
2015-03-12 22:30:07 +00:00
|
|
|
"github.com/hashicorp/vault/command/server"
|
2015-04-04 19:06:41 +00:00
|
|
|
"github.com/hashicorp/vault/helper/gated-writer"
|
2017-11-10 21:21:46 +00:00
|
|
|
"github.com/hashicorp/vault/helper/logbridge"
|
2016-08-19 20:45:17 +00:00
|
|
|
"github.com/hashicorp/vault/helper/logformat"
|
2015-04-28 22:04:40 +00:00
|
|
|
"github.com/hashicorp/vault/helper/mlock"
|
2017-06-22 19:29:53 +00:00
|
|
|
"github.com/hashicorp/vault/helper/parseutil"
|
2017-07-31 15:28:06 +00:00
|
|
|
"github.com/hashicorp/vault/helper/reload"
|
2015-03-13 17:09:38 +00:00
|
|
|
vaulthttp "github.com/hashicorp/vault/http"
|
2015-03-20 18:32:18 +00:00
|
|
|
"github.com/hashicorp/vault/logical"
|
2015-03-13 17:09:38 +00:00
|
|
|
"github.com/hashicorp/vault/physical"
|
|
|
|
"github.com/hashicorp/vault/vault"
|
2015-11-09 18:52:55 +00:00
|
|
|
"github.com/hashicorp/vault/version"
|
2015-03-12 22:21:11 +00:00
|
|
|
)
|
|
|
|
|
2017-09-08 02:03:44 +00:00
|
|
|
// Compile-time checks that ServerCommand implements the cli interfaces it is
// registered with: the base Command interface and shell autocompletion.
var _ cli.Command = (*ServerCommand)(nil)
var _ cli.CommandAutocomplete = (*ServerCommand)(nil)
|
|
|
|
|
2015-03-12 22:21:11 +00:00
|
|
|
// ServerCommand is a cli.Command implementation that starts a Vault server,
// either against configured storage or in ephemeral "dev" mode.
type ServerCommand struct {
	*BaseCommand

	// Factories for the pluggable backend implementations, keyed by the
	// type name used in configuration.
	AuditBackends      map[string]audit.Factory
	CredentialBackends map[string]logical.Factory
	LogicalBackends    map[string]logical.Factory
	PhysicalBackends   map[string]physical.Factory

	// ShutdownCh signals a graceful shutdown; SighupCh receives a signal
	// per SIGHUP so the server can reload its configuration.
	ShutdownCh chan struct{}
	SighupCh   chan struct{}

	WaitGroup *sync.WaitGroup

	// logGate buffers log output until the server has printed its startup
	// information; logger is the server-wide logger.
	logGate *gatedwriter.Writer
	logger  log.Logger

	// cleanupGuard ensures listener cleanup runs at most once.
	cleanupGuard sync.Once

	// reloadFuncs holds the reload callbacks invoked on SIGHUP, guarded by
	// reloadFuncsLock. Both are populated from the core config during Run.
	reloadFuncsLock *sync.RWMutex
	reloadFuncs     *map[string][]reload.ReloadFunc

	startedCh  chan (struct{}) // for tests
	reloadedCh chan (struct{}) // for tests

	// Values of the command-line flags, populated by Flags() and parsed in
	// Run(). The flagDev* fields only take effect in "dev" mode; several of
	// them are hidden, internal-only flags (see Flags()).
	flagConfigs        []string
	flagLogLevel       string
	flagDev            bool
	flagDevRootTokenID string
	flagDevListenAddr  string

	flagDevPluginDir     string
	flagDevHA            bool
	flagDevLatency       int
	flagDevLatencyJitter int
	flagDevLeasedKV      bool
	flagDevSkipInit      bool
	flagDevThreeNode     bool
	flagTestVerifyOnly   bool
}
|
|
|
|
|
|
|
|
func (c *ServerCommand) Synopsis() string {
|
|
|
|
return "Start a Vault server"
|
|
|
|
}
|
|
|
|
|
|
|
|
// Help returns the long-form usage text for "vault server", combining the
// static usage/examples text with the generated flag help.
func (c *ServerCommand) Help() string {
	helpText := `
Usage: vault server [options]

  This command starts a Vault server that responds to API requests. By default,
  Vault will start in a "sealed" state. The Vault cluster must be initialized
  before use, usually by the "vault init" command. Each Vault server must also
  be unsealed using the "vault unseal" command or the API before the server can
  respond to requests.

  Start a server with a configuration file:

      $ vault server -config=/etc/vault/config.hcl

  Run in "dev" mode:

      $ vault server -dev -dev-root-token-id="root"

  For a full list of examples, please see the documentation.

` + c.Flags().Help()

	return strings.TrimSpace(helpText)
}
|
|
|
|
|
|
|
|
// Flags returns the flag sets for this command: the shared HTTP flags plus
// the server-specific "Command Options" and "Dev Options" sets. The targets
// of each flag are the corresponding flag* fields on ServerCommand.
func (c *ServerCommand) Flags() *FlagSets {
	set := c.flagSet(FlagSetHTTP)

	f := set.NewFlagSet("Command Options")

	f.StringSliceVar(&StringSliceVar{
		Name:   "config",
		Target: &c.flagConfigs,
		Completion: complete.PredictOr(
			complete.PredictFiles("*.hcl"),
			complete.PredictFiles("*.json"),
			complete.PredictDirs("*"),
		),
		Usage: "Path to a configuration file or directory of configuration " +
			"files. This flag can be specified multiple times to load multiple " +
			"configurations. If the path is a directory, all files which end in " +
			".hcl or .json are loaded.",
	})

	f.StringVar(&StringVar{
		Name:       "log-level",
		Target:     &c.flagLogLevel,
		Default:    "info",
		EnvVar:     "VAULT_LOG_LEVEL",
		Completion: complete.PredictSet("trace", "debug", "info", "warn", "err"),
		Usage: "Log verbosity level. Supported values (in order of detail) are " +
			"\"trace\", \"debug\", \"info\", \"warn\", and \"err\".",
	})

	f = set.NewFlagSet("Dev Options")

	f.BoolVar(&BoolVar{
		Name:   "dev",
		Target: &c.flagDev,
		Usage: "Enable development mode. In this mode, Vault runs in-memory and " +
			"starts unsealed. As the name implies, do not run \"dev\" mode in " +
			"production.",
	})

	f.StringVar(&StringVar{
		Name:    "dev-root-token-id",
		Target:  &c.flagDevRootTokenID,
		Default: "",
		EnvVar:  "VAULT_DEV_ROOT_TOKEN_ID",
		Usage: "Initial root token. This only applies when running in \"dev\" " +
			"mode.",
	})

	f.StringVar(&StringVar{
		Name:    "dev-listen-address",
		Target:  &c.flagDevListenAddr,
		Default: "127.0.0.1:8200",
		EnvVar:  "VAULT_DEV_LISTEN_ADDRESS",
		Usage:   "Address to bind to in \"dev\" mode.",
	})

	// Internal-only flags to follow.
	//
	// Why hello there little source code reader! Welcome to the Vault source
	// code. The remaining options are intentionally undocumented and come with
	// no warranty or backwards-compatibility promise. Do not use these flags
	// in production. Do not build automation using these flags. Unless you are
	// developing against Vault, you should not need any of these flags.

	f.StringVar(&StringVar{
		Name:       "dev-plugin-dir",
		Target:     &c.flagDevPluginDir,
		Default:    "",
		Completion: complete.PredictDirs("*"),
		Hidden:     true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-ha",
		Target:  &c.flagDevHA,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-transactional",
		Target:  &c.flagDevTransactional,
		Default: false,
		Hidden:  true,
	})

	f.IntVar(&IntVar{
		Name:   "dev-latency",
		Target: &c.flagDevLatency,
		Hidden: true,
	})

	f.IntVar(&IntVar{
		Name:   "dev-latency-jitter",
		Target: &c.flagDevLatencyJitter,
		Hidden: true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-leased-kv",
		Target:  &c.flagDevLeasedKV,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-skip-init",
		Target:  &c.flagDevSkipInit,
		Default: false,
		Hidden:  true,
	})

	f.BoolVar(&BoolVar{
		Name:    "dev-three-node",
		Target:  &c.flagDevThreeNode,
		Default: false,
		Hidden:  true,
	})

	// TODO: should this be a public flag?
	f.BoolVar(&BoolVar{
		Name:    "test-verify-only",
		Target:  &c.flagTestVerifyOnly,
		Default: false,
		Hidden:  true,
	})

	// End internal-only flags.

	return set
}
|
|
|
|
|
|
|
|
// AutocompleteArgs returns the predictor for positional arguments; the
// server command takes none, so nothing is predicted.
func (c *ServerCommand) AutocompleteArgs() complete.Predictor {
	return complete.PredictNothing
}
|
|
|
|
|
|
|
|
func (c *ServerCommand) AutocompleteFlags() complete.Flags {
|
|
|
|
return c.Flags().Completions()
|
2015-03-12 22:21:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (c *ServerCommand) Run(args []string) int {
|
2017-09-22 00:51:12 +00:00
|
|
|
f := c.Flags()
|
|
|
|
|
|
|
|
if err := f.Parse(args); err != nil {
|
|
|
|
c.UI.Error(err.Error())
|
2015-03-12 22:21:11 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2016-08-19 20:45:17 +00:00
|
|
|
// Create a logger. We wrap it in a gated writer so that it doesn't
|
|
|
|
// start logging too early.
|
2017-07-31 15:28:06 +00:00
|
|
|
c.logGate = &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
|
2016-08-19 20:45:17 +00:00
|
|
|
var level int
|
2017-09-22 00:51:12 +00:00
|
|
|
c.flagLogLevel = strings.ToLower(strings.TrimSpace(c.flagLogLevel))
|
|
|
|
switch c.flagLogLevel {
|
2016-08-19 20:45:17 +00:00
|
|
|
case "trace":
|
|
|
|
level = log.LevelTrace
|
|
|
|
case "debug":
|
|
|
|
level = log.LevelDebug
|
2017-12-19 22:12:23 +00:00
|
|
|
case "info", "":
|
2016-08-19 20:45:17 +00:00
|
|
|
level = log.LevelInfo
|
|
|
|
case "notice":
|
|
|
|
level = log.LevelNotice
|
2017-12-19 22:12:23 +00:00
|
|
|
case "warn", "warning":
|
2016-08-19 20:45:17 +00:00
|
|
|
level = log.LevelWarn
|
2017-12-19 22:12:23 +00:00
|
|
|
case "err", "error":
|
2016-08-19 20:45:17 +00:00
|
|
|
level = log.LevelError
|
|
|
|
default:
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Unknown log level: %s", c.flagLogLevel))
|
2016-08-19 20:45:17 +00:00
|
|
|
return 1
|
|
|
|
}
|
2016-09-22 21:22:02 +00:00
|
|
|
|
|
|
|
logFormat := os.Getenv("VAULT_LOG_FORMAT")
|
|
|
|
if logFormat == "" {
|
|
|
|
logFormat = os.Getenv("LOGXI_FORMAT")
|
|
|
|
}
|
|
|
|
switch strings.ToLower(logFormat) {
|
2016-09-23 17:20:26 +00:00
|
|
|
case "vault", "vault_json", "vault-json", "vaultjson", "json", "":
|
2018-01-03 19:02:31 +00:00
|
|
|
if c.flagDevThreeNode {
|
2017-11-10 21:21:46 +00:00
|
|
|
c.logger = logbridge.NewLogger(hclog.New(&hclog.LoggerOptions{
|
|
|
|
Mutex: &sync.Mutex{},
|
|
|
|
Output: c.logGate,
|
|
|
|
})).LogxiLogger()
|
|
|
|
} else {
|
|
|
|
c.logger = logformat.NewVaultLoggerWithWriter(c.logGate, level)
|
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
default:
|
2017-07-31 15:28:06 +00:00
|
|
|
c.logger = log.NewLogger(c.logGate, "vault")
|
2016-08-19 20:45:17 +00:00
|
|
|
c.logger.SetLevel(level)
|
|
|
|
}
|
|
|
|
grpclog.SetLogger(&grpclogFaker{
|
|
|
|
logger: c.logger,
|
2017-10-10 16:27:51 +00:00
|
|
|
log: os.Getenv("VAULT_GRPC_LOGGING") != "",
|
2016-08-19 20:45:17 +00:00
|
|
|
})
|
|
|
|
|
2017-09-22 00:51:12 +00:00
|
|
|
// Automatically enable dev mode if other dev flags are provided.
|
|
|
|
if c.flagDevHA || c.flagDevTransactional || c.flagDevLeasedKV || c.flagDevThreeNode {
|
|
|
|
c.flagDev = true
|
2016-08-19 12:29:34 +00:00
|
|
|
}
|
|
|
|
|
2015-03-12 22:21:11 +00:00
|
|
|
// Validation
|
2017-09-22 00:51:12 +00:00
|
|
|
if !c.flagDev {
|
2016-03-02 16:53:23 +00:00
|
|
|
switch {
|
2017-09-22 00:51:12 +00:00
|
|
|
case len(c.flagConfigs) == 0:
|
|
|
|
c.UI.Error("Must specify at least one config path using -config")
|
2016-03-02 16:53:23 +00:00
|
|
|
return 1
|
2017-09-08 02:03:44 +00:00
|
|
|
case c.flagDevRootTokenID != "":
|
|
|
|
c.UI.Warn(wrapAtLength(
|
|
|
|
"You cannot specify a custom root token ID outside of \"dev\" mode. " +
|
|
|
|
"Your request has been ignored."))
|
|
|
|
c.flagDevRootTokenID = ""
|
2016-03-02 16:53:23 +00:00
|
|
|
}
|
2015-03-12 22:21:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Load the configuration
|
2015-03-12 22:30:07 +00:00
|
|
|
var config *server.Config
|
2017-09-22 00:51:12 +00:00
|
|
|
if c.flagDev {
|
|
|
|
config = server.DevConfig(c.flagDevHA, c.flagDevTransactional)
|
|
|
|
if c.flagDevListenAddr != "" {
|
|
|
|
config.Listeners[0].Config["address"] = c.flagDevListenAddr
|
2016-03-03 15:48:52 +00:00
|
|
|
}
|
2015-03-31 23:44:47 +00:00
|
|
|
}
|
2017-09-22 00:51:12 +00:00
|
|
|
for _, path := range c.flagConfigs {
|
2016-08-19 20:45:17 +00:00
|
|
|
current, err := server.LoadConfig(path, c.logger)
|
2015-03-12 22:30:07 +00:00
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error loading configuration from %s: %s", path, err))
|
2015-03-12 22:30:07 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
|
|
|
if config == nil {
|
|
|
|
config = current
|
|
|
|
} else {
|
|
|
|
config = config.Merge(current)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-13 00:39:28 +00:00
|
|
|
// Ensure at least one config was found.
|
|
|
|
if config == nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Output(wrapAtLength(
|
|
|
|
"No configuration files found. Please provide configurations with the " +
|
|
|
|
"-config flag. If you are supply the path to a directory, please " +
|
|
|
|
"ensure the directory contains files with the .hcl or .json " +
|
|
|
|
"extension."))
|
2016-02-13 00:39:28 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2015-05-21 00:49:16 +00:00
|
|
|
// Ensure that a backend is provided
|
2017-03-08 14:17:00 +00:00
|
|
|
if config.Storage == nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Output("A storage backend must be specified")
|
2015-05-19 03:47:57 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2016-04-05 20:58:26 +00:00
|
|
|
// If mlockall(2) isn't supported, show a warning. We disable this
|
|
|
|
// in dev because it is quite scary to see when first using Vault.
|
2017-09-22 00:51:12 +00:00
|
|
|
if !c.flagDev && !mlock.Supported() {
|
|
|
|
c.UI.Warn(wrapAtLength(
|
|
|
|
"WARNING! mlock is not supported on this system! An mlockall(2)-like " +
|
|
|
|
"syscall to prevent memory from being swapped to disk is not " +
|
|
|
|
"supported on this system. For better security, only run Vault on " +
|
|
|
|
"systems where this call is supported. If you are running Vault " +
|
|
|
|
"in a Docker container, provide the IPC_LOCK cap to the container."))
|
2015-04-28 22:04:40 +00:00
|
|
|
}
|
|
|
|
|
2016-03-18 14:06:49 +00:00
|
|
|
if err := c.setupTelemetry(config); err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error initializing telemetry: %s", err))
|
2015-12-17 21:38:17 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2015-03-13 17:09:38 +00:00
|
|
|
// Initialize the backend
|
2017-08-03 17:24:27 +00:00
|
|
|
factory, exists := c.PhysicalBackends[config.Storage.Type]
|
|
|
|
if !exists {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Unknown storage type %s", config.Storage.Type))
|
2017-08-03 17:24:27 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
backend, err := factory(config.Storage.Config, c.logger)
|
2015-03-13 17:09:38 +00:00
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error initializing storage of type %s: %s", config.Storage.Type, err))
|
2015-03-13 17:09:38 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2016-04-04 17:46:33 +00:00
|
|
|
infoKeys := make([]string, 0, 10)
|
|
|
|
info := make(map[string]string)
|
2018-01-03 19:02:31 +00:00
|
|
|
info["log level"] = c.flagLogLevel
|
2017-11-02 14:30:04 +00:00
|
|
|
infoKeys = append(infoKeys, "log level")
|
2016-04-04 17:46:33 +00:00
|
|
|
|
|
|
|
var seal vault.Seal = &vault.DefaultSeal{}
|
|
|
|
|
2016-04-28 18:04:31 +00:00
|
|
|
// Ensure that the seal finalizer is called, even if using verify-only
|
|
|
|
defer func() {
|
2016-12-05 17:28:12 +00:00
|
|
|
if seal != nil {
|
2018-01-19 06:44:44 +00:00
|
|
|
err = seal.Finalize(context.Background())
|
2016-12-05 17:28:12 +00:00
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error finalizing seals: %v", err))
|
2016-12-05 17:28:12 +00:00
|
|
|
}
|
2016-04-28 18:04:31 +00:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2016-12-05 17:28:12 +00:00
|
|
|
if seal == nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Could not create seal! Most likely proper Seal configuration information was not set, but no error was generated."))
|
2016-12-05 17:28:12 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2015-12-14 22:58:30 +00:00
|
|
|
coreConfig := &vault.CoreConfig{
|
2017-04-21 01:46:41 +00:00
|
|
|
Physical: backend,
|
|
|
|
RedirectAddr: config.Storage.RedirectAddr,
|
|
|
|
HAPhysical: nil,
|
|
|
|
Seal: seal,
|
|
|
|
AuditBackends: c.AuditBackends,
|
|
|
|
CredentialBackends: c.CredentialBackends,
|
|
|
|
LogicalBackends: c.LogicalBackends,
|
|
|
|
Logger: c.logger,
|
|
|
|
DisableCache: config.DisableCache,
|
|
|
|
DisableMlock: config.DisableMlock,
|
|
|
|
MaxLeaseTTL: config.MaxLeaseTTL,
|
|
|
|
DefaultLeaseTTL: config.DefaultLeaseTTL,
|
|
|
|
ClusterName: config.ClusterName,
|
|
|
|
CacheSize: config.CacheSize,
|
|
|
|
PluginDirectory: config.PluginDirectory,
|
2017-09-15 04:21:35 +00:00
|
|
|
EnableRaw: config.EnableRawEndpoint,
|
2015-12-14 22:58:30 +00:00
|
|
|
}
|
2017-09-22 00:51:12 +00:00
|
|
|
if c.flagDev {
|
|
|
|
coreConfig.DevToken = c.flagDevRootTokenID
|
|
|
|
if c.flagDevLeasedKV {
|
2017-09-15 13:02:29 +00:00
|
|
|
coreConfig.LogicalBackends["kv"] = vault.LeasedPassthroughBackendFactory
|
2017-06-21 14:42:50 +00:00
|
|
|
}
|
2017-09-22 00:51:12 +00:00
|
|
|
if c.flagDevPluginDir != "" {
|
|
|
|
coreConfig.PluginDirectory = c.flagDevPluginDir
|
2017-08-16 15:17:50 +00:00
|
|
|
}
|
2017-09-20 20:05:00 +00:00
|
|
|
if c.flagDevLatency > 0 {
|
|
|
|
injectLatency := time.Duration(c.flagDevLatency) * time.Millisecond
|
2017-09-11 18:49:08 +00:00
|
|
|
if _, txnOK := backend.(physical.Transactional); txnOK {
|
2017-09-20 20:05:00 +00:00
|
|
|
coreConfig.Physical = physical.NewTransactionalLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger)
|
2017-09-11 18:49:08 +00:00
|
|
|
} else {
|
2017-09-20 20:05:00 +00:00
|
|
|
coreConfig.Physical = physical.NewLatencyInjector(backend, injectLatency, c.flagDevLatencyJitter, c.logger)
|
2017-09-11 18:49:08 +00:00
|
|
|
}
|
|
|
|
}
|
2017-02-16 21:29:30 +00:00
|
|
|
}
|
2015-12-11 20:58:10 +00:00
|
|
|
|
2017-09-22 00:51:12 +00:00
|
|
|
if c.flagDevThreeNode {
|
2018-01-03 19:02:31 +00:00
|
|
|
return c.enableThreeNodeDevCluster(coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
|
2017-07-31 15:28:06 +00:00
|
|
|
}
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
var disableClustering bool
|
|
|
|
|
2017-03-08 14:17:00 +00:00
|
|
|
// Initialize the separate HA storage backend, if it exists
|
2016-02-02 20:09:58 +00:00
|
|
|
var ok bool
|
2017-03-08 14:17:00 +00:00
|
|
|
if config.HAStorage != nil {
|
2017-08-03 17:24:27 +00:00
|
|
|
factory, exists := c.PhysicalBackends[config.HAStorage.Type]
|
|
|
|
if !exists {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Unknown HA storage type %s", config.HAStorage.Type))
|
2017-08-03 17:24:27 +00:00
|
|
|
return 1
|
2017-09-22 00:51:12 +00:00
|
|
|
|
2017-08-03 17:24:27 +00:00
|
|
|
}
|
|
|
|
habackend, err := factory(config.HAStorage.Config, c.logger)
|
2015-12-11 20:58:10 +00:00
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf(
|
|
|
|
"Error initializing HA storage of type %s: %s", config.HAStorage.Type, err))
|
2015-12-11 20:58:10 +00:00
|
|
|
return 1
|
2017-09-22 00:51:12 +00:00
|
|
|
|
2015-12-11 20:58:10 +00:00
|
|
|
}
|
2015-12-14 22:58:30 +00:00
|
|
|
|
|
|
|
if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error("Specified HA storage does not support HA")
|
2015-12-11 20:58:10 +00:00
|
|
|
return 1
|
|
|
|
}
|
2016-07-18 17:19:58 +00:00
|
|
|
|
|
|
|
if !coreConfig.HAPhysical.HAEnabled() {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error("Specified HA storage has HA support disabled; please consult documentation")
|
2016-07-18 17:19:58 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2017-03-08 14:17:00 +00:00
|
|
|
coreConfig.RedirectAddr = config.HAStorage.RedirectAddr
|
|
|
|
disableClustering = config.HAStorage.DisableClustering
|
2016-08-15 13:42:42 +00:00
|
|
|
if !disableClustering {
|
2017-03-08 14:17:00 +00:00
|
|
|
coreConfig.ClusterAddr = config.HAStorage.ClusterAddr
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
2016-02-02 20:09:58 +00:00
|
|
|
} else {
|
|
|
|
if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
|
2017-03-08 14:17:00 +00:00
|
|
|
coreConfig.RedirectAddr = config.Storage.RedirectAddr
|
|
|
|
disableClustering = config.Storage.DisableClustering
|
2016-08-15 13:42:42 +00:00
|
|
|
if !disableClustering {
|
2017-03-08 14:17:00 +00:00
|
|
|
coreConfig.ClusterAddr = config.Storage.ClusterAddr
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
2016-02-02 20:09:58 +00:00
|
|
|
}
|
2015-12-11 20:58:10 +00:00
|
|
|
}
|
|
|
|
|
2017-11-11 01:06:07 +00:00
|
|
|
if envRA := os.Getenv("VAULT_API_ADDR"); envRA != "" {
|
|
|
|
coreConfig.RedirectAddr = envRA
|
|
|
|
} else if envRA := os.Getenv("VAULT_REDIRECT_ADDR"); envRA != "" {
|
2016-08-15 13:42:42 +00:00
|
|
|
coreConfig.RedirectAddr = envRA
|
|
|
|
} else if envAA := os.Getenv("VAULT_ADVERTISE_ADDR"); envAA != "" {
|
|
|
|
coreConfig.RedirectAddr = envAA
|
2015-12-15 02:22:55 +00:00
|
|
|
}
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
// Attempt to detect the redirect address, if possible
|
|
|
|
var detect physical.RedirectDetect
|
2016-07-18 17:19:58 +00:00
|
|
|
if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
|
2016-08-15 13:42:42 +00:00
|
|
|
detect, ok = coreConfig.HAPhysical.(physical.RedirectDetect)
|
2015-12-15 02:13:17 +00:00
|
|
|
} else {
|
2016-08-15 13:42:42 +00:00
|
|
|
detect, ok = coreConfig.Physical.(physical.RedirectDetect)
|
2015-12-15 02:13:17 +00:00
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
if ok && coreConfig.RedirectAddr == "" {
|
|
|
|
redirect, err := c.detectRedirect(detect, config)
|
2015-05-02 22:57:40 +00:00
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error detecting redirect address: %s", err))
|
2016-08-15 13:42:42 +00:00
|
|
|
} else if redirect == "" {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error("Failed to detect redirect address.")
|
2015-05-02 22:57:40 +00:00
|
|
|
} else {
|
2016-08-15 13:42:42 +00:00
|
|
|
coreConfig.RedirectAddr = redirect
|
2015-05-02 22:57:40 +00:00
|
|
|
}
|
|
|
|
}
|
2017-09-22 00:51:12 +00:00
|
|
|
if coreConfig.RedirectAddr == "" && c.flagDev {
|
2017-02-24 15:45:29 +00:00
|
|
|
coreConfig.RedirectAddr = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"])
|
|
|
|
}
|
2015-05-02 22:57:40 +00:00
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
// After the redirect bits are sorted out, if no cluster address was
|
|
|
|
// explicitly given, derive one from the redirect addr
|
|
|
|
if disableClustering {
|
|
|
|
coreConfig.ClusterAddr = ""
|
|
|
|
} else if envCA := os.Getenv("VAULT_CLUSTER_ADDR"); envCA != "" {
|
|
|
|
coreConfig.ClusterAddr = envCA
|
2017-02-24 15:45:29 +00:00
|
|
|
} else {
|
|
|
|
var addrToUse string
|
2017-02-24 17:50:26 +00:00
|
|
|
switch {
|
|
|
|
case coreConfig.ClusterAddr == "" && coreConfig.RedirectAddr != "":
|
2017-02-24 15:45:29 +00:00
|
|
|
addrToUse = coreConfig.RedirectAddr
|
2017-09-22 00:51:12 +00:00
|
|
|
case c.flagDev:
|
2017-02-24 15:45:29 +00:00
|
|
|
addrToUse = fmt.Sprintf("http://%s", config.Listeners[0].Config["address"])
|
2017-02-24 17:50:26 +00:00
|
|
|
default:
|
|
|
|
goto CLUSTER_SYNTHESIS_COMPLETE
|
2017-02-24 15:45:29 +00:00
|
|
|
}
|
|
|
|
u, err := url.ParseRequestURI(addrToUse)
|
2016-08-15 13:42:42 +00:00
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf(
|
|
|
|
"Error parsing synthesized cluster address %s: %v", addrToUse, err))
|
2016-04-04 14:44:22 +00:00
|
|
|
return 1
|
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
host, port, err := net.SplitHostPort(u.Host)
|
2015-03-31 23:44:47 +00:00
|
|
|
if err != nil {
|
2017-02-08 18:50:17 +00:00
|
|
|
// This sucks, as it's a const in the function but not exported in the package
|
|
|
|
if strings.Contains(err.Error(), "missing port in address") {
|
|
|
|
host = u.Host
|
|
|
|
port = "443"
|
|
|
|
} else {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error parsing redirect address: %v", err))
|
2017-02-08 18:50:17 +00:00
|
|
|
return 1
|
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
2017-02-08 18:50:17 +00:00
|
|
|
nPort, err := strconv.Atoi(port)
|
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf(
|
|
|
|
"Error parsing synthesized address; failed to convert %q to a numeric: %v", port, err))
|
2015-03-31 23:44:47 +00:00
|
|
|
return 1
|
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
u.Host = net.JoinHostPort(host, strconv.Itoa(nPort+1))
|
|
|
|
// Will always be TLS-secured
|
|
|
|
u.Scheme = "https"
|
|
|
|
coreConfig.ClusterAddr = u.String()
|
|
|
|
}
|
2017-02-24 17:50:26 +00:00
|
|
|
|
|
|
|
CLUSTER_SYNTHESIS_COMPLETE:
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
if coreConfig.ClusterAddr != "" {
|
|
|
|
// Force https as we'll always be TLS-secured
|
|
|
|
u, err := url.ParseRequestURI(coreConfig.ClusterAddr)
|
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error parsing cluster address %s: %v", coreConfig.RedirectAddr, err))
|
|
|
|
return 11
|
2015-10-22 07:48:46 +00:00
|
|
|
}
|
2016-08-15 13:42:42 +00:00
|
|
|
u.Scheme = "https"
|
|
|
|
coreConfig.ClusterAddr = u.String()
|
|
|
|
}
|
2015-10-22 07:48:46 +00:00
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
// Initialize the core
|
|
|
|
core, newCoreError := vault.NewCore(coreConfig)
|
|
|
|
if newCoreError != nil {
|
|
|
|
if !errwrap.ContainsType(newCoreError, new(vault.NonFatalError)) {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error initializing core: %s", newCoreError))
|
2016-08-15 13:42:42 +00:00
|
|
|
return 1
|
|
|
|
}
|
2015-03-31 23:44:47 +00:00
|
|
|
}
|
|
|
|
|
2016-09-30 04:06:40 +00:00
|
|
|
// Copy the reload funcs pointers back
|
|
|
|
c.reloadFuncs = coreConfig.ReloadFuncs
|
|
|
|
c.reloadFuncsLock = coreConfig.ReloadFuncsLock
|
|
|
|
|
2015-04-04 19:06:41 +00:00
|
|
|
// Compile server information for output later
|
2017-03-08 14:17:00 +00:00
|
|
|
info["storage"] = config.Storage.Type
|
2017-09-22 00:51:12 +00:00
|
|
|
info["log level"] = c.flagLogLevel
|
2015-04-28 22:11:39 +00:00
|
|
|
info["mlock"] = fmt.Sprintf(
|
|
|
|
"supported: %v, enabled: %v",
|
2016-11-22 17:56:36 +00:00
|
|
|
mlock.Supported(), !config.DisableMlock && mlock.Supported())
|
2017-11-02 14:30:04 +00:00
|
|
|
infoKeys = append(infoKeys, "mlock", "storage")
|
2015-04-04 19:06:41 +00:00
|
|
|
|
2017-02-24 15:45:29 +00:00
|
|
|
if coreConfig.ClusterAddr != "" {
|
|
|
|
info["cluster address"] = coreConfig.ClusterAddr
|
|
|
|
infoKeys = append(infoKeys, "cluster address")
|
|
|
|
}
|
|
|
|
if coreConfig.RedirectAddr != "" {
|
|
|
|
info["redirect address"] = coreConfig.RedirectAddr
|
|
|
|
infoKeys = append(infoKeys, "redirect address")
|
|
|
|
}
|
|
|
|
|
2017-03-08 14:17:00 +00:00
|
|
|
if config.HAStorage != nil {
|
|
|
|
info["HA storage"] = config.HAStorage.Type
|
|
|
|
infoKeys = append(infoKeys, "HA storage")
|
2015-12-11 20:58:10 +00:00
|
|
|
} else {
|
2017-03-08 14:17:00 +00:00
|
|
|
// If the storage supports HA, then note it
|
2015-12-14 22:58:30 +00:00
|
|
|
if coreConfig.HAPhysical != nil {
|
2016-07-18 17:19:58 +00:00
|
|
|
if coreConfig.HAPhysical.HAEnabled() {
|
2017-03-08 14:17:00 +00:00
|
|
|
info["storage"] += " (HA available)"
|
2016-07-18 17:19:58 +00:00
|
|
|
} else {
|
2017-03-08 14:17:00 +00:00
|
|
|
info["storage"] += " (HA disabled)"
|
2016-07-18 17:19:58 +00:00
|
|
|
}
|
2015-12-11 20:58:10 +00:00
|
|
|
}
|
2015-04-17 17:50:37 +00:00
|
|
|
}
|
|
|
|
|
2016-08-19 15:03:53 +00:00
|
|
|
clusterAddrs := []*net.TCPAddr{}
|
2016-08-15 13:42:42 +00:00
|
|
|
|
2015-03-12 22:30:07 +00:00
|
|
|
// Initialize the listeners
|
2016-09-30 04:06:40 +00:00
|
|
|
c.reloadFuncsLock.Lock()
|
2015-03-13 17:09:38 +00:00
|
|
|
lns := make([]net.Listener, 0, len(config.Listeners))
|
2015-04-04 19:06:41 +00:00
|
|
|
for i, lnConfig := range config.Listeners {
|
2018-01-03 19:02:31 +00:00
|
|
|
ln, props, reloadFunc, err := server.NewListener(lnConfig.Type, lnConfig.Config, c.logGate, c.UI)
|
2015-03-13 17:09:38 +00:00
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error initializing listener of type %s: %s", lnConfig.Type, err))
|
2015-03-13 17:09:38 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
lns = append(lns, ln)
|
|
|
|
|
|
|
|
if reloadFunc != nil {
|
2016-09-30 04:06:40 +00:00
|
|
|
relSlice := (*c.reloadFuncs)["listener|"+lnConfig.Type]
|
2016-08-15 13:42:42 +00:00
|
|
|
relSlice = append(relSlice, reloadFunc)
|
2016-09-30 04:06:40 +00:00
|
|
|
(*c.reloadFuncs)["listener|"+lnConfig.Type] = relSlice
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if !disableClustering && lnConfig.Type == "tcp" {
|
2017-06-22 19:29:53 +00:00
|
|
|
var addrRaw interface{}
|
2016-08-15 13:42:42 +00:00
|
|
|
var addr string
|
|
|
|
var ok bool
|
2017-06-22 19:29:53 +00:00
|
|
|
if addrRaw, ok = lnConfig.Config["cluster_address"]; ok {
|
|
|
|
addr = addrRaw.(string)
|
2016-08-19 15:03:53 +00:00
|
|
|
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
|
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error resolving cluster_address: %s", err))
|
2016-08-19 15:03:53 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
clusterAddrs = append(clusterAddrs, tcpAddr)
|
2016-08-15 13:42:42 +00:00
|
|
|
} else {
|
|
|
|
tcpAddr, ok := ln.Addr().(*net.TCPAddr)
|
|
|
|
if !ok {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error("Failed to parse tcp listener")
|
2016-08-15 13:42:42 +00:00
|
|
|
return 1
|
|
|
|
}
|
2017-02-24 15:45:29 +00:00
|
|
|
clusterAddr := &net.TCPAddr{
|
2016-08-19 15:03:53 +00:00
|
|
|
IP: tcpAddr.IP,
|
|
|
|
Port: tcpAddr.Port + 1,
|
2017-02-24 15:45:29 +00:00
|
|
|
}
|
|
|
|
clusterAddrs = append(clusterAddrs, clusterAddr)
|
|
|
|
addr = clusterAddr.String()
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
|
|
|
props["cluster address"] = addr
|
|
|
|
}
|
|
|
|
|
2015-04-04 19:06:41 +00:00
|
|
|
// Store the listener props for output later
|
|
|
|
key := fmt.Sprintf("listener %d", i+1)
|
|
|
|
propsList := make([]string, 0, len(props))
|
|
|
|
for k, v := range props {
|
|
|
|
propsList = append(propsList, fmt.Sprintf(
|
|
|
|
"%s: %q", k, v))
|
|
|
|
}
|
|
|
|
sort.Strings(propsList)
|
|
|
|
infoKeys = append(infoKeys, key)
|
|
|
|
info[key] = fmt.Sprintf(
|
|
|
|
"%s (%s)", lnConfig.Type, strings.Join(propsList, ", "))
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
2016-09-30 04:06:40 +00:00
|
|
|
c.reloadFuncsLock.Unlock()
|
2016-08-15 13:42:42 +00:00
|
|
|
if !disableClustering {
|
2016-08-19 20:45:17 +00:00
|
|
|
if c.logger.IsTrace() {
|
|
|
|
c.logger.Trace("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs)
|
|
|
|
}
|
2015-03-13 17:09:38 +00:00
|
|
|
}
|
|
|
|
|
2016-06-02 16:40:25 +00:00
|
|
|
// Make sure we close all listeners from this point on
|
2016-10-10 17:18:19 +00:00
|
|
|
listenerCloseFunc := func() {
|
2016-06-02 16:40:25 +00:00
|
|
|
for _, ln := range lns {
|
|
|
|
ln.Close()
|
|
|
|
}
|
2016-10-10 17:18:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
defer c.cleanupGuard.Do(listenerCloseFunc)
|
2016-06-02 16:40:25 +00:00
|
|
|
|
2016-11-28 00:16:44 +00:00
|
|
|
infoKeys = append(infoKeys, "version")
|
2016-11-22 21:43:05 +00:00
|
|
|
verInfo := version.GetVersion()
|
2016-11-28 00:28:35 +00:00
|
|
|
info["version"] = verInfo.FullVersionNumber(false)
|
2016-11-28 00:16:44 +00:00
|
|
|
if verInfo.Revision != "" {
|
2016-11-28 00:28:35 +00:00
|
|
|
info["version sha"] = strings.Trim(verInfo.Revision, "'")
|
|
|
|
infoKeys = append(infoKeys, "version sha")
|
|
|
|
}
|
|
|
|
infoKeys = append(infoKeys, "cgo")
|
|
|
|
info["cgo"] = "disabled"
|
|
|
|
if version.CgoEnabled {
|
|
|
|
info["cgo"] = "enabled"
|
2016-11-28 00:16:44 +00:00
|
|
|
}
|
2015-11-09 18:52:55 +00:00
|
|
|
|
2015-04-04 19:06:41 +00:00
|
|
|
// Server configuration output
|
2016-03-30 16:31:47 +00:00
|
|
|
padding := 24
|
|
|
|
sort.Strings(infoKeys)
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Output("==> Vault server configuration:\n")
|
2015-04-04 19:06:41 +00:00
|
|
|
for _, k := range infoKeys {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Output(fmt.Sprintf(
|
2015-04-04 19:06:41 +00:00
|
|
|
"%s%s: %s",
|
|
|
|
strings.Repeat(" ", padding-len(k)),
|
|
|
|
strings.Title(k),
|
|
|
|
info[k]))
|
|
|
|
}
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Output("")
|
2015-04-04 19:06:41 +00:00
|
|
|
|
2017-09-22 00:51:12 +00:00
|
|
|
// Tests might not want to start a vault server and just want to verify
|
|
|
|
// the configuration.
|
|
|
|
if c.flagTestVerifyOnly {
|
2016-02-02 22:47:02 +00:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2017-10-10 16:27:51 +00:00
|
|
|
handler := vaulthttp.Handler(core)
|
|
|
|
|
|
|
|
// This needs to happen before we first unseal, so before we trigger dev
|
|
|
|
// mode if it's set
|
|
|
|
core.SetClusterListenerAddrs(clusterAddrs)
|
|
|
|
core.SetClusterHandler(handler)
|
|
|
|
|
2018-01-19 06:44:44 +00:00
|
|
|
err = core.UnsealWithStoredKeys(context.Background())
|
2017-10-23 21:39:21 +00:00
|
|
|
if err != nil {
|
|
|
|
if !errwrap.ContainsType(err, new(vault.NonFatalError)) {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error initializing core: %s", err))
|
2017-10-23 21:39:21 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-30 17:17:29 +00:00
|
|
|
// Perform service discovery registrations and initialization of
|
|
|
|
// HTTP server after the verifyOnly check.
|
|
|
|
|
|
|
|
// Instantiate the wait group
|
|
|
|
c.WaitGroup = &sync.WaitGroup{}
|
|
|
|
|
|
|
|
// If the backend supports service discovery, run service discovery
|
|
|
|
if coreConfig.HAPhysical != nil && coreConfig.HAPhysical.HAEnabled() {
|
|
|
|
sd, ok := coreConfig.HAPhysical.(physical.ServiceDiscovery)
|
|
|
|
if ok {
|
|
|
|
activeFunc := func() bool {
|
2017-07-31 22:25:27 +00:00
|
|
|
if isLeader, _, _, err := core.Leader(); err == nil {
|
2016-07-30 17:17:29 +00:00
|
|
|
return isLeader
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
sealedFunc := func() bool {
|
|
|
|
if sealed, err := core.Sealed(); err == nil {
|
|
|
|
return sealed
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
if err := sd.RunServiceDiscovery(c.WaitGroup, c.ShutdownCh, coreConfig.RedirectAddr, activeFunc, sealedFunc); err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error initializing service discovery: %v", err))
|
2016-07-30 17:17:29 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-25 17:51:12 +00:00
|
|
|
// If we're in Dev mode, then initialize the core
|
2017-09-22 00:51:12 +00:00
|
|
|
if c.flagDev && !c.flagDevSkipInit {
|
2017-07-31 15:28:06 +00:00
|
|
|
init, err := c.enableDev(core, coreConfig)
|
2016-08-15 13:42:42 +00:00
|
|
|
if err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error initializing Dev mode: %s", err))
|
2016-08-15 13:42:42 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
|
|
|
export := "export"
|
|
|
|
quote := "'"
|
|
|
|
if runtime.GOOS == "windows" {
|
|
|
|
export = "set"
|
|
|
|
quote = ""
|
|
|
|
}
|
|
|
|
|
2017-09-22 00:51:12 +00:00
|
|
|
// Print the big dev mode warning!
|
|
|
|
c.UI.Warn(wrapAtLength(
|
|
|
|
"WARNING! dev mode is enabled! In this mode, Vault runs entirely " +
|
|
|
|
"in-memory and starts unsealed with a single unseal key. The root " +
|
|
|
|
"token is already authenticated to the CLI, so you can immediately " +
|
|
|
|
"begin using Vault."))
|
|
|
|
c.UI.Warn("")
|
|
|
|
c.UI.Warn("You may need to set the following environment variable:")
|
|
|
|
c.UI.Warn("")
|
|
|
|
c.UI.Warn(fmt.Sprintf(" $ %s VAULT_ADDR=%s%s%s",
|
|
|
|
export, quote, "http://"+config.Listeners[0].Config["address"].(string), quote))
|
2017-09-21 19:23:29 +00:00
|
|
|
|
|
|
|
// Unseal key is not returned if stored shares is supported
|
|
|
|
if len(init.SecretShares) > 0 {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Warn("")
|
|
|
|
c.UI.Warn(wrapAtLength(
|
|
|
|
"The unseal key and root token are displayed below in case you want " +
|
|
|
|
"to seal/unseal the Vault or re-authenticate."))
|
|
|
|
c.UI.Warn("")
|
|
|
|
c.UI.Warn(fmt.Sprintf("Unseal Key: %s", base64.StdEncoding.EncodeToString(init.SecretShares[0])))
|
2017-09-21 19:23:29 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if len(init.RecoveryShares) > 0 {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Warn("")
|
|
|
|
c.UI.Warn(wrapAtLength(
|
|
|
|
"The recovery key and root token are displayed below in case you want " +
|
|
|
|
"to seal/unseal the Vault or re-authenticate."))
|
|
|
|
c.UI.Warn("")
|
|
|
|
c.UI.Warn(fmt.Sprintf("Unseal Key: %s", base64.StdEncoding.EncodeToString(init.RecoveryShares[0])))
|
2017-09-21 19:23:29 +00:00
|
|
|
}
|
|
|
|
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Warn(fmt.Sprintf("Root Token: %s", init.RootToken))
|
|
|
|
|
|
|
|
c.UI.Warn("")
|
|
|
|
c.UI.Warn(wrapAtLength(
|
|
|
|
"Development mode should NOT be used in production installations!"))
|
|
|
|
c.UI.Warn("")
|
2016-08-15 13:42:42 +00:00
|
|
|
}
|
|
|
|
|
2017-11-02 14:30:04 +00:00
|
|
|
// Initialize the HTTP servers
|
2016-02-02 22:47:02 +00:00
|
|
|
for _, ln := range lns {
|
2017-11-02 14:30:04 +00:00
|
|
|
server := &http.Server{
|
|
|
|
Handler: handler,
|
|
|
|
}
|
2016-02-02 22:47:02 +00:00
|
|
|
go server.Serve(ln)
|
|
|
|
}
|
|
|
|
|
2016-04-04 14:44:22 +00:00
|
|
|
if newCoreError != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Warn(wrapAtLength(
|
|
|
|
"WARNING! A non-fatal error occurred during initialization. Please " +
|
|
|
|
"check the logs for more information."))
|
|
|
|
c.UI.Warn("")
|
2016-04-04 14:44:22 +00:00
|
|
|
}
|
|
|
|
|
2015-04-04 19:06:41 +00:00
|
|
|
// Output the header that the server has started
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Output("==> Vault server started! Log data will stream in below:\n")
|
|
|
|
|
|
|
|
// Inform any tests that the server is ready
|
|
|
|
select {
|
|
|
|
case c.startedCh <- struct{}{}:
|
|
|
|
default:
|
|
|
|
}
|
2015-04-04 19:06:41 +00:00
|
|
|
|
|
|
|
// Release the log gate.
|
2017-07-31 15:28:06 +00:00
|
|
|
c.logGate.Flush()
|
2015-04-04 19:06:41 +00:00
|
|
|
|
2017-09-16 21:09:37 +00:00
|
|
|
// Write out the PID to the file now that server has successfully started
|
|
|
|
if err := c.storePidFile(config.PidFile); err != nil {
|
2017-09-20 20:05:00 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error storing PID: %s", err))
|
2017-09-16 21:09:37 +00:00
|
|
|
return 1
|
|
|
|
}
|
|
|
|
|
|
|
|
defer func() {
|
|
|
|
if err := c.removePidFile(config.PidFile); err != nil {
|
2017-09-20 20:05:00 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error deleting the PID file: %s", err))
|
2017-09-16 21:09:37 +00:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
2016-08-01 15:58:45 +00:00
|
|
|
// Wait for shutdown
|
|
|
|
shutdownTriggered := false
|
|
|
|
|
2016-03-11 22:01:26 +00:00
|
|
|
for !shutdownTriggered {
|
2016-03-11 21:46:56 +00:00
|
|
|
select {
|
|
|
|
case <-c.ShutdownCh:
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Output("==> Vault shutdown triggered")
|
2016-10-10 17:18:19 +00:00
|
|
|
|
|
|
|
// Stop the listners so that we don't process further client requests.
|
|
|
|
c.cleanupGuard.Do(listenerCloseFunc)
|
|
|
|
|
|
|
|
// Shutdown will wait until after Vault is sealed, which means the
|
|
|
|
// request forwarding listeners will also be closed (and also
|
|
|
|
// waited for).
|
2016-03-11 21:46:56 +00:00
|
|
|
if err := core.Shutdown(); err != nil {
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err))
|
2016-03-11 21:46:56 +00:00
|
|
|
}
|
2016-10-10 17:18:19 +00:00
|
|
|
|
2016-03-11 21:46:56 +00:00
|
|
|
shutdownTriggered = true
|
2016-10-10 17:18:19 +00:00
|
|
|
|
2016-03-11 21:46:56 +00:00
|
|
|
case <-c.SighupCh:
|
2017-09-22 00:51:12 +00:00
|
|
|
c.UI.Output("==> Vault reload triggered")
|
|
|
|
if err := c.Reload(c.reloadFuncsLock, c.reloadFuncs, c.flagConfigs); err != nil {
|
|
|
|
c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
|
2016-03-11 21:46:56 +00:00
|
|
|
}
|
|
|
|
}
|
2015-06-18 01:24:56 +00:00
|
|
|
}
|
2016-04-14 01:12:58 +00:00
|
|
|
|
2016-08-01 15:15:25 +00:00
|
|
|
// Wait for dependent goroutines to complete
|
2016-07-30 17:17:29 +00:00
|
|
|
c.WaitGroup.Wait()
|
2015-03-12 22:21:11 +00:00
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2017-07-31 15:28:06 +00:00
|
|
|
// enableDev initializes and unseals the given core for dev mode: it performs
// a single-share initialization, unseals with the generated (or stored) key,
// waits for the node to become active, optionally swaps in the user-supplied
// dev root token, and stores the resulting root token in the token helper.
// The returned InitResult carries the unseal/recovery shares and root token.
func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig) (*vault.InitResult, error) {
	// Dev mode uses a single key share with a threshold of one.
	var recoveryConfig *vault.SealConfig
	barrierConfig := &vault.SealConfig{
		SecretShares:    1,
		SecretThreshold: 1,
	}

	// Seals that support recovery keys also get a single-share recovery
	// configuration.
	if core.SealAccess().RecoveryKeySupported() {
		recoveryConfig = &vault.SealConfig{
			SecretShares:    1,
			SecretThreshold: 1,
		}
	}

	// Seals that store their keys keep one stored share.
	if core.SealAccess().StoredKeysSupported() {
		barrierConfig.StoredShares = 1
	}

	ctx := context.Background()

	// Initialize it with a basic single key
	init, err := core.Initialize(ctx, &vault.InitParams{
		BarrierConfig:  barrierConfig,
		RecoveryConfig: recoveryConfig,
	})
	if err != nil {
		return nil, err
	}

	// Handle unseal with stored keys
	if core.SealAccess().StoredKeysSupported() {
		err := core.UnsealWithStoredKeys(ctx)
		if err != nil {
			return nil, err
		}
	} else {
		// Copy the key so that it can be zeroed
		key := make([]byte, len(init.SecretShares[0]))
		copy(key, init.SecretShares[0])

		// Unseal the core
		unsealed, err := core.Unseal(key)
		if err != nil {
			return nil, err
		}
		if !unsealed {
			return nil, fmt.Errorf("failed to unseal Vault for dev mode")
		}
	}

	// Wait (up to roughly five seconds) for this node to become the active
	// node. ErrHANotEnabled is not fatal: a non-HA node has no leadership to
	// wait for, so the wait loop is simply skipped.
	isLeader, _, _, err := core.Leader()
	if err != nil && err != vault.ErrHANotEnabled {
		return nil, fmt.Errorf("failed to check active status: %v", err)
	}
	if err == nil {
		leaderCount := 5
		for !isLeader {
			if leaderCount == 0 {
				// Dump all goroutine stacks to help debug the stall.
				buf := make([]byte, 1<<16)
				runtime.Stack(buf, true)
				return nil, fmt.Errorf("failed to get active status after five seconds; call stack is\n%s\n", buf)
			}
			time.Sleep(1 * time.Second)
			isLeader, _, _, err = core.Leader()
			if err != nil {
				return nil, fmt.Errorf("failed to check active status: %v", err)
			}
			leaderCount--
		}
	}

	// Generate a dev root token if one is provided in the flag
	if coreConfig.DevToken != "" {
		// Create a replacement root token with the requested ID, authorized
		// by the auto-generated initial root token.
		req := &logical.Request{
			ID:          "dev-gen-root",
			Operation:   logical.UpdateOperation,
			ClientToken: init.RootToken,
			Path:        "auth/token/create",
			Data: map[string]interface{}{
				"id":                coreConfig.DevToken,
				"policies":          []string{"root"},
				"no_parent":         true,
				"no_default_policy": true,
			},
		}
		resp, err := core.HandleRequest(req)
		if err != nil {
			return nil, fmt.Errorf("failed to create root token with ID %s: %s", coreConfig.DevToken, err)
		}
		if resp == nil {
			return nil, fmt.Errorf("nil response when creating root token with ID %s", coreConfig.DevToken)
		}
		if resp.Auth == nil {
			return nil, fmt.Errorf("nil auth when creating root token with ID %s", coreConfig.DevToken)
		}

		init.RootToken = resp.Auth.ClientToken

		// Revoke the initial root token by reusing the same request struct:
		// req.ClientToken still holds the original root token, so revoke-self
		// revokes that token, not the replacement.
		req.ID = "dev-revoke-init-root"
		req.Path = "auth/token/revoke-self"
		req.Data = nil
		resp, err = core.HandleRequest(req)
		if err != nil {
			return nil, fmt.Errorf("failed to revoke initial root token: %s", err)
		}
	}

	// Set the token in the CLI token helper so the user is immediately
	// authenticated.
	tokenHelper, err := c.TokenHelper()
	if err != nil {
		return nil, err
	}
	if err := tokenHelper.Store(init.RootToken); err != nil {
		return nil, err
	}

	return init, nil
}
|
|
|
|
|
2017-12-11 23:02:35 +00:00
|
|
|
// enableThreeNodeDevCluster spins up a three-node in-memory dev cluster using
// the test-cluster harness, prints configuration and credentials to the UI,
// and then blocks servicing shutdown/SIGHUP signals until asked to exit.
// Returns a process exit code.
//
// NOTE(review): the devListenAddress parameter is unused here; the base listen
// address is taken from c.flagDevListenAddr instead — confirm with callers.
func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int {
	testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{
		HandlerFunc:       vaulthttp.Handler,
		BaseListenAddress: c.flagDevListenAddr,
		RawLogger:         c.logger,
		TempDir:           tempDir,
	})
	// cleanupGuard ensures teardown runs exactly once, whether via this defer
	// or the explicit shutdown path below.
	defer c.cleanupGuard.Do(testCluster.Cleanup)

	info["cluster parameters path"] = testCluster.TempDir
	infoKeys = append(infoKeys, "cluster parameters path")

	// Record each node's redirect address for the configuration printout.
	for i, core := range testCluster.Cores {
		info[fmt.Sprintf("node %d redirect address", i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String())
		infoKeys = append(infoKeys, fmt.Sprintf("node %d redirect address", i))
	}

	// Version / build metadata for the printout.
	infoKeys = append(infoKeys, "version")
	verInfo := version.GetVersion()
	info["version"] = verInfo.FullVersionNumber(false)
	if verInfo.Revision != "" {
		info["version sha"] = strings.Trim(verInfo.Revision, "'")
		infoKeys = append(infoKeys, "version sha")
	}
	infoKeys = append(infoKeys, "cgo")
	info["cgo"] = "disabled"
	if version.CgoEnabled {
		info["cgo"] = "enabled"
	}

	// Server configuration output, sorted and right-aligned to `padding`.
	padding := 24
	sort.Strings(infoKeys)
	c.UI.Output("==> Vault server configuration:\n")
	for _, k := range infoKeys {
		c.UI.Output(fmt.Sprintf(
			"%s%s: %s",
			strings.Repeat(" ", padding-len(k)),
			strings.Title(k),
			info[k]))
	}
	c.UI.Output("")

	// Wire each core's HTTP handler and cluster handler before starting.
	for _, core := range testCluster.Cores {
		core.Server.Handler = vaulthttp.Handler(core.Core)
		core.SetClusterHandler(core.Server.Handler)
	}

	testCluster.Start()

	// If a dev root token was requested, replace the cluster's auto-generated
	// root token with one carrying the requested ID, then revoke the original.
	if base.DevToken != "" {
		req := &logical.Request{
			ID:          "dev-gen-root",
			Operation:   logical.UpdateOperation,
			ClientToken: testCluster.RootToken,
			Path:        "auth/token/create",
			Data: map[string]interface{}{
				"id":                base.DevToken,
				"policies":          []string{"root"},
				"no_parent":         true,
				"no_default_policy": true,
			},
		}
		resp, err := testCluster.Cores[0].HandleRequest(req)
		if err != nil {
			c.UI.Error(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err))
			return 1
		}
		if resp == nil {
			c.UI.Error(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken))
			return 1
		}
		if resp.Auth == nil {
			c.UI.Error(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken))
			return 1
		}

		testCluster.RootToken = resp.Auth.ClientToken

		// req.ClientToken still holds the original root token, so revoke-self
		// revokes that token, not the replacement.
		req.ID = "dev-revoke-init-root"
		req.Path = "auth/token/revoke-self"
		req.Data = nil
		resp, err = testCluster.Cores[0].HandleRequest(req)
		if err != nil {
			c.UI.Output(fmt.Sprintf("failed to revoke initial root token: %s", err))
			return 1
		}
	}

	// Set the token in the CLI token helper.
	tokenHelper, err := c.TokenHelper()
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error getting token helper: %s", err))
		return 1
	}
	if err := tokenHelper.Store(testCluster.RootToken); err != nil {
		c.UI.Error(fmt.Sprintf("Error storing in token helper: %s", err))
		return 1
	}

	// Persist the root token alongside the cluster's other parameters.
	if err := ioutil.WriteFile(filepath.Join(testCluster.TempDir, "root_token"), []byte(testCluster.RootToken), 0755); err != nil {
		c.UI.Error(fmt.Sprintf("Error writing token to tempfile: %s", err))
		return 1
	}

	c.UI.Output(fmt.Sprintf(
		"==> Three node dev mode is enabled\n\n" +
			"The unseal key and root token are reproduced below in case you\n" +
			"want to seal/unseal the Vault or play with authentication.\n",
	))

	for i, key := range testCluster.BarrierKeys {
		c.UI.Output(fmt.Sprintf(
			"Unseal Key %d: %s",
			i+1, base64.StdEncoding.EncodeToString(key),
		))
	}

	c.UI.Output(fmt.Sprintf(
		"\nRoot Token: %s\n", testCluster.RootToken,
	))

	c.UI.Output(fmt.Sprintf(
		"\nUseful env vars:\n"+
			"VAULT_TOKEN=%s\n"+
			"VAULT_ADDR=%s\n"+
			"VAULT_CACERT=%s/ca_cert.pem\n",
		testCluster.RootToken,
		testCluster.Cores[0].Client.Address(),
		testCluster.TempDir,
	))

	// Output the header that the server has started
	c.UI.Output("==> Vault server started! Log data will stream in below:\n")

	// Inform any tests that the server is ready (non-blocking send).
	select {
	case c.startedCh <- struct{}{}:
	default:
	}

	// Release the log gate.
	c.logGate.Flush()

	// Wait for shutdown
	shutdownTriggered := false

	for !shutdownTriggered {
		select {
		case <-c.ShutdownCh:
			c.UI.Output("==> Vault shutdown triggered")

			// Stop the listeners so that we don't process further client requests.
			c.cleanupGuard.Do(testCluster.Cleanup)

			// Shutdown will wait until after Vault is sealed, which means the
			// request forwarding listeners will also be closed (and also
			// waited for).
			for _, core := range testCluster.Cores {
				if err := core.Shutdown(); err != nil {
					c.UI.Error(fmt.Sprintf("Error with core shutdown: %s", err))
				}
			}

			shutdownTriggered = true

		case <-c.SighupCh:
			// SIGHUP triggers a reload on every core; errors are reported but
			// do not exit the loop.
			c.UI.Output("==> Vault reload triggered")
			for _, core := range testCluster.Cores {
				if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil); err != nil {
					c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload: %s", err))
				}
			}
		}
	}

	return 0
}
|
|
|
|
|
2016-08-15 13:42:42 +00:00
|
|
|
// detectRedirect is used to attempt redirect address detection
|
|
|
|
func (c *ServerCommand) detectRedirect(detect physical.RedirectDetect,
|
2015-05-02 22:57:40 +00:00
|
|
|
config *server.Config) (string, error) {
|
|
|
|
// Get the hostname
|
|
|
|
host, err := detect.DetectHostAddr()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
2015-12-22 20:40:36 +00:00
|
|
|
// set [] for ipv6 addresses
|
|
|
|
if strings.Contains(host, ":") && !strings.Contains(host, "]") {
|
|
|
|
host = "[" + host + "]"
|
|
|
|
}
|
|
|
|
|
2015-05-02 22:57:40 +00:00
|
|
|
// Default the port and scheme
|
|
|
|
scheme := "https"
|
|
|
|
port := 8200
|
|
|
|
|
|
|
|
// Attempt to detect overrides
|
|
|
|
for _, list := range config.Listeners {
|
|
|
|
// Only attempt TCP
|
|
|
|
if list.Type != "tcp" {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if TLS is disabled
|
2015-05-15 20:41:30 +00:00
|
|
|
if val, ok := list.Config["tls_disable"]; ok {
|
2017-06-22 19:29:53 +00:00
|
|
|
disable, err := parseutil.ParseBool(val)
|
2015-05-15 20:41:30 +00:00
|
|
|
if err != nil {
|
|
|
|
return "", fmt.Errorf("tls_disable: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if disable {
|
|
|
|
scheme = "http"
|
|
|
|
}
|
2015-05-02 22:57:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check for address override
|
2017-06-22 19:29:53 +00:00
|
|
|
var addr string
|
|
|
|
addrRaw, ok := list.Config["address"]
|
2015-05-02 22:57:40 +00:00
|
|
|
if !ok {
|
|
|
|
addr = "127.0.0.1:8200"
|
2017-06-22 19:29:53 +00:00
|
|
|
} else {
|
|
|
|
addr = addrRaw.(string)
|
2015-05-02 22:57:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check for localhost
|
|
|
|
hostStr, portStr, err := net.SplitHostPort(addr)
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if hostStr == "127.0.0.1" {
|
|
|
|
host = hostStr
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check for custom port
|
|
|
|
listPort, err := strconv.Atoi(portStr)
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
port = listPort
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build a URL
|
|
|
|
url := &url.URL{
|
|
|
|
Scheme: scheme,
|
|
|
|
Host: fmt.Sprintf("%s:%d", host, port),
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return the URL string
|
|
|
|
return url.String(), nil
|
|
|
|
}
|
|
|
|
|
2016-03-18 14:06:49 +00:00
|
|
|
// setupTelemetry is used to setup the telemetry sub-systems
|
|
|
|
func (c *ServerCommand) setupTelemetry(config *server.Config) error {
|
2015-04-15 01:44:09 +00:00
|
|
|
/* Setup telemetry
|
|
|
|
Aggregate on 10 second intervals for 1 minute. Expose the
|
|
|
|
metrics over stderr when there is a SIGUSR1 received.
|
|
|
|
*/
|
|
|
|
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
|
|
|
|
metrics.DefaultInmemSignal(inm)
|
2015-07-14 22:27:18 +00:00
|
|
|
|
|
|
|
var telConfig *server.Telemetry
|
|
|
|
if config.Telemetry == nil {
|
|
|
|
telConfig = &server.Telemetry{}
|
|
|
|
} else {
|
|
|
|
telConfig = config.Telemetry
|
|
|
|
}
|
|
|
|
|
2015-04-15 01:44:09 +00:00
|
|
|
metricsConf := metrics.DefaultConfig("vault")
|
2015-07-14 22:27:18 +00:00
|
|
|
metricsConf.EnableHostname = !telConfig.DisableHostname
|
2015-04-15 01:44:09 +00:00
|
|
|
|
|
|
|
// Configure the statsite sink
|
|
|
|
var fanout metrics.FanoutSink
|
2015-07-14 22:27:18 +00:00
|
|
|
if telConfig.StatsiteAddr != "" {
|
|
|
|
sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr)
|
2015-04-15 01:44:09 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
fanout = append(fanout, sink)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Configure the statsd sink
|
2015-07-14 22:27:18 +00:00
|
|
|
if telConfig.StatsdAddr != "" {
|
|
|
|
sink, err := metrics.NewStatsdSink(telConfig.StatsdAddr)
|
2015-04-15 01:44:09 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
fanout = append(fanout, sink)
|
|
|
|
}
|
|
|
|
|
2016-07-22 19:49:23 +00:00
|
|
|
// Configure the Circonus sink
|
|
|
|
if telConfig.CirconusAPIToken != "" || telConfig.CirconusCheckSubmissionURL != "" {
|
|
|
|
cfg := &circonus.Config{}
|
|
|
|
cfg.Interval = telConfig.CirconusSubmissionInterval
|
|
|
|
cfg.CheckManager.API.TokenKey = telConfig.CirconusAPIToken
|
|
|
|
cfg.CheckManager.API.TokenApp = telConfig.CirconusAPIApp
|
|
|
|
cfg.CheckManager.API.URL = telConfig.CirconusAPIURL
|
|
|
|
cfg.CheckManager.Check.SubmissionURL = telConfig.CirconusCheckSubmissionURL
|
|
|
|
cfg.CheckManager.Check.ID = telConfig.CirconusCheckID
|
|
|
|
cfg.CheckManager.Check.ForceMetricActivation = telConfig.CirconusCheckForceMetricActivation
|
|
|
|
cfg.CheckManager.Check.InstanceID = telConfig.CirconusCheckInstanceID
|
|
|
|
cfg.CheckManager.Check.SearchTag = telConfig.CirconusCheckSearchTag
|
2016-11-10 21:17:55 +00:00
|
|
|
cfg.CheckManager.Check.DisplayName = telConfig.CirconusCheckDisplayName
|
|
|
|
cfg.CheckManager.Check.Tags = telConfig.CirconusCheckTags
|
2016-07-22 19:49:23 +00:00
|
|
|
cfg.CheckManager.Broker.ID = telConfig.CirconusBrokerID
|
|
|
|
cfg.CheckManager.Broker.SelectTag = telConfig.CirconusBrokerSelectTag
|
|
|
|
|
|
|
|
if cfg.CheckManager.API.TokenApp == "" {
|
|
|
|
cfg.CheckManager.API.TokenApp = "vault"
|
|
|
|
}
|
|
|
|
|
2017-02-26 23:18:46 +00:00
|
|
|
if cfg.CheckManager.Check.DisplayName == "" {
|
|
|
|
cfg.CheckManager.Check.DisplayName = "Vault"
|
|
|
|
}
|
|
|
|
|
2016-07-22 19:49:23 +00:00
|
|
|
if cfg.CheckManager.Check.SearchTag == "" {
|
|
|
|
cfg.CheckManager.Check.SearchTag = "service:vault"
|
|
|
|
}
|
|
|
|
|
|
|
|
sink, err := circonus.NewCirconusSink(cfg)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
sink.Start()
|
|
|
|
fanout = append(fanout, sink)
|
|
|
|
}
|
|
|
|
|
2017-06-17 03:51:46 +00:00
|
|
|
if telConfig.DogStatsDAddr != "" {
|
|
|
|
var tags []string
|
|
|
|
|
|
|
|
if telConfig.DogStatsDTags != nil {
|
|
|
|
tags = telConfig.DogStatsDTags
|
|
|
|
}
|
|
|
|
|
|
|
|
sink, err := datadog.NewDogStatsdSink(telConfig.DogStatsDAddr, metricsConf.HostName)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("failed to start DogStatsD sink. Got: %s", err)
|
|
|
|
}
|
|
|
|
sink.SetTags(tags)
|
|
|
|
fanout = append(fanout, sink)
|
|
|
|
}
|
|
|
|
|
2015-04-15 01:44:09 +00:00
|
|
|
// Initialize the global sink
|
|
|
|
if len(fanout) > 0 {
|
|
|
|
fanout = append(fanout, inm)
|
|
|
|
metrics.NewGlobal(metricsConf, fanout)
|
|
|
|
} else {
|
|
|
|
metricsConf.EnableHostname = false
|
|
|
|
metrics.NewGlobal(metricsConf, inm)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-07-31 15:28:06 +00:00
|
|
|
func (c *ServerCommand) Reload(lock *sync.RWMutex, reloadFuncs *map[string][]reload.ReloadFunc, configPath []string) error {
|
|
|
|
lock.RLock()
|
|
|
|
defer lock.RUnlock()
|
2016-09-30 19:04:50 +00:00
|
|
|
|
|
|
|
var reloadErrors *multierror.Error
|
|
|
|
|
2017-07-31 15:28:06 +00:00
|
|
|
for k, relFuncs := range *reloadFuncs {
|
|
|
|
switch {
|
|
|
|
case strings.HasPrefix(k, "listener|"):
|
|
|
|
for _, relFunc := range relFuncs {
|
|
|
|
if relFunc != nil {
|
|
|
|
if err := relFunc(nil); err != nil {
|
|
|
|
reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading listener: %v", err))
|
|
|
|
}
|
|
|
|
}
|
2016-09-30 19:04:50 +00:00
|
|
|
}
|
|
|
|
|
2017-07-31 15:28:06 +00:00
|
|
|
case strings.HasPrefix(k, "audit_file|"):
|
|
|
|
for _, relFunc := range relFuncs {
|
|
|
|
if relFunc != nil {
|
|
|
|
if err := relFunc(nil); err != nil {
|
2017-09-08 02:03:44 +00:00
|
|
|
reloadErrors = multierror.Append(reloadErrors, fmt.Errorf("Error encountered reloading file audit device at path %s: %v", strings.TrimPrefix(k, "audit_file|"), err))
|
2017-07-31 15:28:06 +00:00
|
|
|
}
|
2016-09-30 19:04:50 +00:00
|
|
|
}
|
2016-03-11 21:46:56 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-22 00:51:12 +00:00
|
|
|
// Send a message that we reloaded. This prevents "guessing" sleep times
|
|
|
|
// in tests.
|
|
|
|
select {
|
|
|
|
case c.reloadedCh <- struct{}{}:
|
|
|
|
default:
|
2017-08-24 22:23:40 +00:00
|
|
|
}
|
2017-09-20 20:05:00 +00:00
|
|
|
|
|
|
|
return reloadErrors.ErrorOrNil()
|
2017-08-24 22:23:40 +00:00
|
|
|
}
|
|
|
|
|
2017-09-16 21:09:37 +00:00
|
|
|
// storePidFile is used to write out our PID to a file if necessary
|
|
|
|
func (c *ServerCommand) storePidFile(pidPath string) error {
|
|
|
|
// Quit fast if no pidfile
|
|
|
|
if pidPath == "" {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Open the PID file
|
|
|
|
pidFile, err := os.OpenFile(pidPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("could not open pid file: %v", err)
|
|
|
|
}
|
|
|
|
defer pidFile.Close()
|
|
|
|
|
|
|
|
// Write out the PID
|
|
|
|
pid := os.Getpid()
|
|
|
|
_, err = pidFile.WriteString(fmt.Sprintf("%d", pid))
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("could not write to pid file: %v", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// removePidFile is used to cleanup the PID file if necessary. An empty
// pidPath means no pidfile was configured, so there is nothing to remove.
func (c *ServerCommand) removePidFile(pidPath string) error {
	if pidPath == "" {
		return nil
	}
	return os.Remove(pidPath)
}
|
|
|
|
|
2016-08-19 20:45:17 +00:00
|
|
|
// grpclogFaker routes gRPC's internal log output through Vault's logger
// instead of gRPC's default logging.
type grpclogFaker struct {
	// logger receives all forwarded gRPC log messages.
	logger log.Logger
	// log gates the non-fatal Print* methods; when false they are dropped.
	log bool
}
|
|
|
|
|
|
|
|
func (g *grpclogFaker) Fatal(args ...interface{}) {
|
|
|
|
g.logger.Error(fmt.Sprint(args...))
|
|
|
|
os.Exit(1)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (g *grpclogFaker) Fatalf(format string, args ...interface{}) {
|
|
|
|
g.logger.Error(fmt.Sprintf(format, args...))
|
|
|
|
os.Exit(1)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (g *grpclogFaker) Fatalln(args ...interface{}) {
|
|
|
|
g.logger.Error(fmt.Sprintln(args...))
|
|
|
|
os.Exit(1)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (g *grpclogFaker) Print(args ...interface{}) {
|
2017-11-13 20:37:00 +00:00
|
|
|
if g.log && g.logger.IsTrace() {
|
2017-10-10 16:27:51 +00:00
|
|
|
g.logger.Trace(fmt.Sprint(args...))
|
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (g *grpclogFaker) Printf(format string, args ...interface{}) {
|
2017-11-13 20:37:00 +00:00
|
|
|
if g.log && g.logger.IsTrace() {
|
2017-10-10 16:27:51 +00:00
|
|
|
g.logger.Trace(fmt.Sprintf(format, args...))
|
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (g *grpclogFaker) Println(args ...interface{}) {
|
2017-11-13 20:37:00 +00:00
|
|
|
if g.log && g.logger.IsTrace() {
|
2017-10-10 16:27:51 +00:00
|
|
|
g.logger.Trace(fmt.Sprintln(args...))
|
|
|
|
}
|
2016-08-19 20:45:17 +00:00
|
|
|
}
|