Rename physical backend to storage and alias old value (#2456)

Jeff Mitchell 2017-03-08 09:17:00 -05:00 committed by GitHub
parent 624c6eab20
commit b11f92ba5a
20 changed files with 132 additions and 117 deletions
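In short, a server configuration that previously declared a `backend` (and optionally `ha_backend`) stanza can now use `storage` and `ha_storage`. A minimal sketch, with the Consul address and paths as placeholder values rather than anything taken from this commit:

```hcl
# New, preferred stanza names introduced by this change
storage "consul" {
  address = "127.0.0.1:8500"
  path    = "vault"
}

# Optional separate backend for HA coordination
ha_storage "consul" {
  address = "127.0.0.1:8500"
  path    = "vault-ha/"
}
```

The old `backend` and `ha_backend` names remain accepted: as the `ParseConfig` changes below show, Vault only falls back to them when no `storage`/`ha_storage` block is present.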

View File

@ -173,8 +173,8 @@ func (c *ServerCommand) Run(args []string) int {
}
// Ensure that a backend is provided
if config.Backend == nil {
c.Ui.Output("A physical backend must be specified")
if config.Storage == nil {
c.Ui.Output("A storage backend must be specified")
return 1
}
@ -194,11 +194,11 @@ func (c *ServerCommand) Run(args []string) int {
// Initialize the backend
backend, err := physical.NewBackend(
config.Backend.Type, c.logger, config.Backend.Config)
config.Storage.Type, c.logger, config.Storage.Config)
if err != nil {
c.Ui.Output(fmt.Sprintf(
"Error initializing backend of type %s: %s",
config.Backend.Type, err))
"Error initializing storage of type %s: %s",
config.Storage.Type, err))
return 1
}
@ -224,7 +224,7 @@ func (c *ServerCommand) Run(args []string) int {
coreConfig := &vault.CoreConfig{
Physical: backend,
RedirectAddr: config.Backend.RedirectAddr,
RedirectAddr: config.Storage.RedirectAddr,
HAPhysical: nil,
Seal: seal,
AuditBackends: c.AuditBackends,
@ -244,39 +244,39 @@ func (c *ServerCommand) Run(args []string) int {
var disableClustering bool
// Initialize the separate HA physical backend, if it exists
// Initialize the separate HA storage backend, if it exists
var ok bool
if config.HABackend != nil {
if config.HAStorage != nil {
habackend, err := physical.NewBackend(
config.HABackend.Type, c.logger, config.HABackend.Config)
config.HAStorage.Type, c.logger, config.HAStorage.Config)
if err != nil {
c.Ui.Output(fmt.Sprintf(
"Error initializing backend of type %s: %s",
config.HABackend.Type, err))
"Error initializing HA storage of type %s: %s",
config.HAStorage.Type, err))
return 1
}
if coreConfig.HAPhysical, ok = habackend.(physical.HABackend); !ok {
c.Ui.Output("Specified HA backend does not support HA")
c.Ui.Output("Specified HA storage does not support HA")
return 1
}
if !coreConfig.HAPhysical.HAEnabled() {
c.Ui.Output("Specified HA backend has HA support disabled; please consult documentation")
c.Ui.Output("Specified HA storage has HA support disabled; please consult documentation")
return 1
}
coreConfig.RedirectAddr = config.HABackend.RedirectAddr
disableClustering = config.HABackend.DisableClustering
coreConfig.RedirectAddr = config.HAStorage.RedirectAddr
disableClustering = config.HAStorage.DisableClustering
if !disableClustering {
coreConfig.ClusterAddr = config.HABackend.ClusterAddr
coreConfig.ClusterAddr = config.HAStorage.ClusterAddr
}
} else {
if coreConfig.HAPhysical, ok = backend.(physical.HABackend); ok {
coreConfig.RedirectAddr = config.Backend.RedirectAddr
disableClustering = config.Backend.DisableClustering
coreConfig.RedirectAddr = config.Storage.RedirectAddr
disableClustering = config.Storage.DisableClustering
if !disableClustering {
coreConfig.ClusterAddr = config.Backend.ClusterAddr
coreConfig.ClusterAddr = config.Storage.ClusterAddr
}
}
}
@ -378,12 +378,12 @@ CLUSTER_SYNTHESIS_COMPLETE:
c.reloadFuncsLock = coreConfig.ReloadFuncsLock
// Compile server information for output later
info["backend"] = config.Backend.Type
info["storage"] = config.Storage.Type
info["log level"] = logLevel
info["mlock"] = fmt.Sprintf(
"supported: %v, enabled: %v",
mlock.Supported(), !config.DisableMlock && mlock.Supported())
infoKeys = append(infoKeys, "log level", "mlock", "backend")
infoKeys = append(infoKeys, "log level", "mlock", "storage")
if coreConfig.ClusterAddr != "" {
info["cluster address"] = coreConfig.ClusterAddr
@ -394,16 +394,16 @@ CLUSTER_SYNTHESIS_COMPLETE:
infoKeys = append(infoKeys, "redirect address")
}
if config.HABackend != nil {
info["HA backend"] = config.HABackend.Type
infoKeys = append(infoKeys, "HA backend")
if config.HAStorage != nil {
info["HA storage"] = config.HAStorage.Type
infoKeys = append(infoKeys, "HA storage")
} else {
// If the backend supports HA, then note it
// If the storage supports HA, then note it
if coreConfig.HAPhysical != nil {
if coreConfig.HAPhysical.HAEnabled() {
info["backend"] += " (HA available)"
info["storage"] += " (HA available)"
} else {
info["backend"] += " (HA disabled)"
info["storage"] += " (HA disabled)"
}
}
}

View File

@ -21,8 +21,8 @@ import (
// Config is the configuration for the vault server.
type Config struct {
Listeners []*Listener `hcl:"-"`
Backend *Backend `hcl:"-"`
HABackend *Backend `hcl:"-"`
Storage *Storage `hcl:"-"`
HAStorage *Storage `hcl:"-"`
HSM *HSM `hcl:"-"`
@ -51,7 +51,7 @@ func DevConfig(ha, transactional bool) *Config {
DisableCache: false,
DisableMlock: true,
Backend: &Backend{
Storage: &Storage{
Type: "inmem",
},
@ -75,11 +75,11 @@ func DevConfig(ha, transactional bool) *Config {
switch {
case ha && transactional:
ret.Backend.Type = "inmem_transactional_ha"
ret.Storage.Type = "inmem_transactional_ha"
case !ha && transactional:
ret.Backend.Type = "inmem_transactional"
ret.Storage.Type = "inmem_transactional"
case ha && !transactional:
ret.Backend.Type = "inmem_ha"
ret.Storage.Type = "inmem_ha"
}
return ret
@ -95,8 +95,8 @@ func (l *Listener) GoString() string {
return fmt.Sprintf("*%#v", *l)
}
// Backend is the backend configuration for the server.
type Backend struct {
// Storage is the underlying storage configuration for the server.
type Storage struct {
Type string
RedirectAddr string
ClusterAddr string
@ -104,7 +104,7 @@ type Backend struct {
Config map[string]string
}
func (b *Backend) GoString() string {
func (b *Storage) GoString() string {
return fmt.Sprintf("*%#v", *b)
}
@ -215,14 +215,14 @@ func (c *Config) Merge(c2 *Config) *Config {
result.Listeners = append(result.Listeners, l)
}
result.Backend = c.Backend
if c2.Backend != nil {
result.Backend = c2.Backend
result.Storage = c.Storage
if c2.Storage != nil {
result.Storage = c2.Storage
}
result.HABackend = c.HABackend
if c2.HABackend != nil {
result.HABackend = c2.HABackend
result.HAStorage = c.HAStorage
if c2.HAStorage != nil {
result.HAStorage = c2.HAStorage
}
result.HSM = c.HSM
@ -349,6 +349,8 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
valid := []string{
"atlas",
"storage",
"ha_storage",
"backend",
"ha_backend",
"hsm",
@ -366,15 +368,28 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
return nil, err
}
if o := list.Filter("backend"); len(o.Items) > 0 {
if err := parseBackends(&result, o); err != nil {
return nil, fmt.Errorf("error parsing 'backend': %s", err)
// Look for storage but still support old backend
if o := list.Filter("storage"); len(o.Items) > 0 {
if err := parseStorage(&result, o, "storage"); err != nil {
return nil, fmt.Errorf("error parsing 'storage': %s", err)
}
} else {
if o := list.Filter("backend"); len(o.Items) > 0 {
if err := parseStorage(&result, o, "backend"); err != nil {
return nil, fmt.Errorf("error parsing 'backend': %s", err)
}
}
}
if o := list.Filter("ha_backend"); len(o.Items) > 0 {
if err := parseHABackends(&result, o); err != nil {
return nil, fmt.Errorf("error parsing 'ha_backend': %s", err)
if o := list.Filter("ha_storage"); len(o.Items) > 0 {
if err := parseHAStorage(&result, o, "ha_storage"); err != nil {
return nil, fmt.Errorf("error parsing 'ha_storage': %s", err)
}
} else {
if o := list.Filter("ha_backend"); len(o.Items) > 0 {
if err := parseHAStorage(&result, o, "ha_backend"); err != nil {
return nil, fmt.Errorf("error parsing 'ha_backend': %s", err)
}
}
}
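The fallback above means a configuration written against the old stanza names still parses. A sketch of such a legacy file (the values are placeholders):

```hcl
# Still accepted: list.Filter("storage") finds nothing here,
# so the old "backend"/"ha_backend" filters run instead.
backend "consul" {
  address = "127.0.0.1:8500"
  path    = "vault"
}

ha_backend "consul" {
  address = "127.0.0.1:8500"
  path    = "vault-ha/"
}
```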
@ -476,22 +491,22 @@ func isTemporaryFile(name string) bool {
(strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs
}
func parseBackends(result *Config, list *ast.ObjectList) error {
func parseStorage(result *Config, list *ast.ObjectList, name string) error {
if len(list.Items) > 1 {
return fmt.Errorf("only one 'backend' block is permitted")
return fmt.Errorf("only one %q block is permitted", name)
}
// Get our item
item := list.Items[0]
key := "backend"
key := name
if len(item.Keys) > 0 {
key = item.Keys[0].Token.Value().(string)
}
var m map[string]string
if err := hcl.DecodeObject(&m, item.Val); err != nil {
return multierror.Prefix(err, fmt.Sprintf("backend.%s:", key))
return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
}
// Pull out the redirect address since it's common to all backends
@ -516,12 +531,12 @@ func parseBackends(result *Config, list *ast.ObjectList) error {
if v, ok := m["disable_clustering"]; ok {
disableClustering, err = strconv.ParseBool(v)
if err != nil {
return multierror.Prefix(err, fmt.Sprintf("backend.%s:", key))
return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
}
delete(m, "disable_clustering")
}
result.Backend = &Backend{
result.Storage = &Storage{
RedirectAddr: redirectAddr,
ClusterAddr: clusterAddr,
DisableClustering: disableClustering,
@ -531,22 +546,22 @@ func parseBackends(result *Config, list *ast.ObjectList) error {
return nil
}
func parseHABackends(result *Config, list *ast.ObjectList) error {
func parseHAStorage(result *Config, list *ast.ObjectList, name string) error {
if len(list.Items) > 1 {
return fmt.Errorf("only one 'ha_backend' block is permitted")
return fmt.Errorf("only one %q block is permitted", name)
}
// Get our item
item := list.Items[0]
key := "backend"
key := name
if len(item.Keys) > 0 {
key = item.Keys[0].Token.Value().(string)
}
var m map[string]string
if err := hcl.DecodeObject(&m, item.Val); err != nil {
return multierror.Prefix(err, fmt.Sprintf("ha_backend.%s:", key))
return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
}
// Pull out the redirect address since it's common to all backends
@ -571,12 +586,12 @@ func parseHABackends(result *Config, list *ast.ObjectList) error {
if v, ok := m["disable_clustering"]; ok {
disableClustering, err = strconv.ParseBool(v)
if err != nil {
return multierror.Prefix(err, fmt.Sprintf("backend.%s:", key))
return multierror.Prefix(err, fmt.Sprintf("%s.%s:", name, key))
}
delete(m, "disable_clustering")
}
result.HABackend = &Backend{
result.HAStorage = &Storage{
RedirectAddr: redirectAddr,
ClusterAddr: clusterAddr,
DisableClustering: disableClustering,
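For context, `parseStorage` and `parseHAStorage` strip a few well-known keys out of the stanza and store them on the `Storage` struct, while everything else is passed through in `Config`. A sketch: `redirect_addr` and `disable_clustering` appear elsewhere in this diff, but `cluster_addr` is an assumed key name and the addresses are placeholders:

```hcl
storage "consul" {
  # Extracted into Storage.RedirectAddr / ClusterAddr / DisableClustering:
  redirect_addr      = "https://vault.example.internal:8200"   # placeholder address
  cluster_addr       = "https://vault.example.internal:8201"   # assumed key name
  disable_clustering = "true"

  # Remaining keys end up in Storage.Config and are handed to the backend:
  address = "127.0.0.1:8500"
  path    = "vault"
}
```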

View File

@ -37,7 +37,7 @@ func TestLoadConfigFile(t *testing.T) {
},
},
Backend: &Backend{
Storage: &Storage{
Type: "consul",
RedirectAddr: "foo",
Config: map[string]string{
@ -45,7 +45,7 @@ func TestLoadConfigFile(t *testing.T) {
},
},
HABackend: &Backend{
HAStorage: &Storage{
Type: "consul",
RedirectAddr: "snafu",
Config: map[string]string{
@ -105,7 +105,7 @@ func TestLoadConfigFile_json(t *testing.T) {
},
},
Backend: &Backend{
Storage: &Storage{
Type: "consul",
Config: map[string]string{
"foo": "bar",
@ -171,7 +171,7 @@ func TestLoadConfigFile_json2(t *testing.T) {
},
},
Backend: &Backend{
Storage: &Storage{
Type: "consul",
Config: map[string]string{
"foo": "bar",
@ -179,7 +179,7 @@ func TestLoadConfigFile_json2(t *testing.T) {
DisableClustering: true,
},
HABackend: &Backend{
HAStorage: &Storage{
Type: "consul",
Config: map[string]string{
"bar": "baz",
@ -234,7 +234,7 @@ func TestLoadConfigDir(t *testing.T) {
},
},
Backend: &Backend{
Storage: &Storage{
Type: "consul",
Config: map[string]string{
"foo": "bar",

View File

@ -11,7 +11,7 @@
"node_id": "foo_node"
}
}],
"backend": {
"storage": {
"consul": {
"foo": "bar",
"disable_clustering": "true"

View File

@ -12,12 +12,12 @@
}
}
],
"backend":{
"storage":{
"consul":{
"foo":"bar"
}
},
"ha_backend":{
"ha_storage":{
"consul":{
"bar":"baz",
"disable_clustering": "true"

View File

@ -64,8 +64,8 @@ func TestServer_GoodSeparateHA(t *testing.T) {
t.Fatalf("bad: %d\n\n%s\n\n%s", code, ui.ErrorWriter.String(), ui.OutputWriter.String())
}
if !strings.Contains(ui.OutputWriter.String(), "HA Backend:") {
t.Fatalf("did not find HA Backend: %s", ui.OutputWriter.String())
if !strings.Contains(ui.OutputWriter.String(), "HA Storage:") {
t.Fatalf("did not find HA Storage: %s", ui.OutputWriter.String())
}
}

View File

@ -13,7 +13,7 @@ The format of this file is [HCL](https://github.com/hashicorp/hcl) or JSON.
An example configuration is shown below:
```javascript
backend "consul" {
storage "consul" {
address = "127.0.0.1:8500"
path = "vault"
}
@ -37,15 +37,15 @@ sending a SIGHUP to the server process. These are denoted below.
## Parameters
- `backend` <tt>([StorageBackend][storage-backend]: \<required\>)</tt> -
- `storage` <tt>([StorageBackend][storage-backend]: \<required\>)</tt> -
Configures the storage backend where Vault data is stored. Please see the
[storage backends documentation][storage-backend] for the full list of
available storage backends.
- `ha_backend` <tt>([StorageBackend][storage-backend]: nil)</tt> - Configures
- `ha_storage` <tt>([StorageBackend][storage-backend]: nil)</tt> - Configures
the storage backend where Vault HA coordination will take place. This must be
an HA-supporting backend. If not set, HA will be attempted on the backend
given in the `backend` parameter.
given in the `storage` parameter.
- `cluster_name` `(string: <generated>)` Specifies the identifier for the
Vault cluster. If omitted, Vault will generate a value. When connecting to

View File

@ -24,7 +24,7 @@ to the storage container.
you may be referred to the original author.
```hcl
backend "azure" {
storage "azure" {
accountName = "my-storage-account"
accountKey = "abcd1234"
container = "container-efgh5678"
@ -52,7 +52,7 @@ This example shows configuring the Azure storage backend with a custom number of
maximum parallel connections.
```hcl
backend "azure" {
storage "azure" {
accountName = "my-storage-account"
accountKey = "abcd1234"
container = "container-efgh5678"

View File

@ -22,7 +22,7 @@ check.
by HashiCorp.
```hcl
backend "consul" {
storage "consul" {
address = "127.0.0.1:8500"
path = "vault"
}
@ -139,7 +139,7 @@ This example shows a sample physical backend configuration which communicates
with a local Consul agent running on `127.0.0.1:8500`.
```hcl
backend "consul" {}
storage "consul" {}
```
### Detailed Customization
@ -148,7 +148,7 @@ This example shows communicating with Consul on a custom address with an ACL
token.
```hcl
backend "consul" {
storage "consul" {
address = "10.5.7.92:8194"
token = "abcd1234"
}
@ -161,7 +161,7 @@ This path must be readable and writable by the Consul ACL token, if Consul
is configured to use ACLs.
```hcl
backend "consul" {
storage "consul" {
path = "vault/"
}
```
@ -171,7 +171,7 @@ backend "consul" {
This example shows communicating with Consul over a local unix socket.
```hcl
backend "consul" {
storage "consul" {
address = "unix:///tmp/.consul.http.sock"
}
```
@ -182,7 +182,7 @@ This example shows using a custom CA, certificate, and key file to securely
communicate with Consul over TLS.
```hcl
backend "consul" {
storage "consul" {
scheme = "https"
tls_ca_file = "/etc/pem/vault.ca"
tls_cert_file = "/etc/pem/vault.cert"

View File

@ -23,7 +23,7 @@ The DynamoDB storage backend is used to persist Vault's data in
you may be referred to the original author.
```hcl
backend "dynamodb" {
storage "dynamodb" {
ha_enabled = true
region = "us-west-2"
table = "vault-data"
@ -99,7 +99,7 @@ discussed in more detail in the [HA concepts page](/docs/concepts/ha.html).
This example shows using a custom table name and read/write capacity.
```hcl
backend "dynamodb" {
storage "dynamodb" {
table = "my-vault-data"
read_capacity = 10
@ -112,7 +112,7 @@ backend "dynamodb" {
This example shows enabling high availability for the DynamoDB storage backend.
```hcl
backend "dynamodb" {
storage "dynamodb" {
ha_enabled = true
redirect_addr = "vault-leader.my-company.internal"
}

View File

@ -18,13 +18,13 @@ based on the version of the Etcd cluster.
The v2 API has known issues with HA support and should not be used in HA
scenarios.
- **Community Supported** the Etcd storage backend is supported by the
community. While it has undergone review by HashiCorp employees, they may not
be as knowledgeable about the technology. If you encounter problems with them,
you may be referred to the original author.
- **Community Supported** the Etcd storage backend is supported by CoreOS.
While it has undergone review by HashiCorp employees, they may not be as
knowledgeable about the technology. If you encounter problems with them, you
may be referred to the original author.
```hcl
backend "etcd" {
storage "etcd" {
address = "http://localhost:2379"
etcd_api = "v3"
}
@ -92,7 +92,7 @@ discussed in more detail in the [HA concepts page](/docs/concepts/ha.html).
This example shows connecting to the Etcd cluster using a username and password.
```hcl
backend "etcd" {
storage "etcd" {
username = "user1234"
password = "pass5678"
}
@ -103,7 +103,7 @@ backend "etcd" {
This example shows storing data in a custom path.
```hcl
backend "etcd" {
storage "etcd" {
path = "my-vault-data/"
}
```
@ -113,7 +113,7 @@ backend "etcd" {
This example shows enabling high availability for the Etcd storage backend.
```hcl
backend "etcd" {
storage "etcd" {
ha_enabled = true
redirect_addr = "vault-leader.my-company.internal"
}

View File

@ -21,7 +21,7 @@ situations, or to develop locally where durability is not critical.
HashiCorp.
```hcl
backend "file" {
storage "file" {
path = "/mnt/vault/data"
}
```
@ -41,7 +41,7 @@ This example shows the Filesystem storage backend being mounted at
`/mnt/vault/data`.
```hcl
backend "file" {
storage "file" {
path = "/mnt/vault/data"
}
```

View File

@ -21,7 +21,7 @@ The Google Cloud storage backend is used to persist Vault's data in
you may be referred to the original author.
```hcl
backend "gcs" {
storage "gcs" {
bucket = "my-storage-bucket"
credentials_file = "/tmp/credentials.json"
}
@ -49,7 +49,7 @@ backend "gcs" {
This example shows a default configuration for the Google Cloud Storage backend.
```hcl
backend "gcs" {
storage "gcs" {
bucket = "my-storage-bucket"
credentials_file = "/tmp/credentials.json"
}

View File

@ -6,7 +6,7 @@ description: |-
The In-Memory storage backend is used to persist Vault's data entirely
in-memory on the same machine in which Vault is running. This is useful for
development and experimentation, but use of this backend is highly discouraged
in production.
in production except in very specific use-cases.
---
# In-Memory Storage Backend
@ -27,7 +27,7 @@ is restarted.
HashiCorp.
```hcl
backend "inmem" {}
storage "inmem" {}
```
## `inmem` Parameters
@ -39,5 +39,5 @@ The In-Memory storage backend has no configuration parameters.
This example shows activating the In-Memory storage backend.
```hcl
backend "inmem" {}
storage "inmem" {}
```

View File

@ -20,10 +20,10 @@ choose one from the navigation on the left.
## Configuration
Storage backend configuration is done through the Vault configuration file using
the `backend` stanza:
the `storage` stanza:
```hcl
backend [NAME] {
storage [NAME] {
[PARAMETERS...]
}
```
@ -31,7 +31,7 @@ backend [NAME] {
For example:
```hcl
backend "file" {
storage "file" {
path = "/mnt/vault/data"
}
```

View File

@ -21,7 +21,7 @@ server or cluster.
you may be referred to the original author.
```hcl
backend "mysql" {
storage "mysql" {
username = "user1234"
password = "secret123!"
database = "vault"
@ -58,7 +58,7 @@ This example shows configuring the MySQL backend to use a custom database and
table name.
```hcl
backend "mysql" {
storage "mysql" {
database = "my-vault"
table = "vault-data"
username = "user1234"

View File

@ -21,7 +21,7 @@ The PostgreSQL storage backend is used to persist Vault's data in a
you may be referred to the original author.
```hcl
backend "postgresql" {
storage "postgresql" {
connection_url = "postgres://user123:secret123!@localhost:5432/vault"
}
```
@ -60,7 +60,7 @@ This example shows connecting to a PostgreSQL cluster using full SSL
verification (recommended).
```hcl
backend "postgresql" {
storage "postgresql" {
connection_url = "postgres://user:pass@localhost:5432/database?sslmode=verify-full"
}
```
@ -69,7 +69,7 @@ To disable SSL verification (not recommended), replace `verify-full` with
`disable`:
```hcl
backend "postgresql" {
storage "postgresql" {
connection_url = "postgres://user:pass@localhost:5432/database?sslmode=disable"
}
```

View File

@ -21,7 +21,7 @@ bucket.
you may be referred to the original author.
```hcl
backend "s3" {
storage "s3" {
access_key = "abcd1234"
secret_key = "defg5678"
bucket = "my-bucket"
@ -62,7 +62,7 @@ cause Vault to attempt to retrieve credentials from the AWS metadata service.
This example shows using Amazon S3 as a storage backend.
```hcl
backend "s3" {
storage "s3" {
access_key = "abcd1234"
secret_key = "defg5678"
bucket = "my-bucket"

View File

@ -22,7 +22,7 @@ The Swift storage backend is used to persist Vault's data in an
you may be referred to the original author.
```hcl
backend "swift" {
storage "swift" {
auth_url = "https://..."
username = "admin"
password = "secret123!"
@ -59,7 +59,7 @@ backend "swift" {
This example shows a default configuration for Swift.
```hcl
backend "swift" {
storage "swift" {
auth_url = "https://os.initernal/v1/auth"
container = "container-239"

View File

@ -20,7 +20,7 @@ The Zookeeper storage backend is used to persist Vault's data in
you may be referred to the original author.
```hcl
backend "zookeeper" {
storage "zookeeper" {
address = "localhost:2181"
path = "vault/"
}
@ -89,7 +89,7 @@ This example shows configuring Vault to communicate with a Zookeeper
installation running on a custom port and to store data at a custom path.
```hcl
backend "zookeeper" {
storage "zookeeper" {
address = "localhost:3253"
path = "my-vault-data/"
}
@ -102,7 +102,7 @@ access only to the user "vaultUser". As per Zookeeper's ACL model, the digest
value in `znode_owner` must match the user in `znode_owner`.
```hcl
backend "zookeeper" {
storage "zookeeper" {
znode_owner = "digest:vaultUser:raxgVAfnDRljZDAcJFxznkZsExs="
auth_info = "digest:vaultUser:abc"
}
@ -115,7 +115,7 @@ This example instructs Vault to only allow access from localhost. As this is the
for the ACL check.
```hcl
backend "zookeeper" {
storage "zookeeper" {
znode_owner = "ip:127.0.0.1"
}
```