package vault

import (
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/hashicorp/vault/helper/consts"
	"github.com/hashicorp/vault/helper/parseutil"
	"github.com/hashicorp/vault/helper/wrapping"
	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/logical/framework"
	"github.com/mitchellh/mapstructure"
)

var (
	// protectedPaths cannot be accessed via the raw APIs.
	// This is both for security and to prevent disrupting Vault.
	protectedPaths = []string{
		"core",
	}

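	// replicationPaths defines the replication-related endpoints; the status
	// handler below reports the zero consts.ReplicationState as the mode.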
	replicationPaths = func(b *SystemBackend) []*framework.Path {
		return []*framework.Path{
			&framework.Path{
				Pattern: "replication/status",
				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation: func(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
						var state consts.ReplicationState
						resp := &logical.Response{
							Data: map[string]interface{}{
								"mode": state.String(),
							},
						}
						return resp, nil
					},
				},
			},
		}
	}
)

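// NewSystemBackend creates the system backend for the given Core and
// registers all of the "sys" paths on it.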
func NewSystemBackend(core *Core) *SystemBackend {
	b := &SystemBackend{
		Core: core,
	}

	b.Backend = &framework.Backend{
		Help: strings.TrimSpace(sysHelpRoot),

		PathsSpecial: &logical.Paths{
			Root: []string{
				"auth/*",
				"remount",
				"audit",
				"audit/*",
				"raw/*",
				"replication/primary/secondary-token",
				"replication/reindex",
				"rotate",
				"config/cors",
				"config/auditing/*",
				"plugins/catalog/*",
				"revoke-prefix/*",
				"leases/revoke-prefix/*",
				"leases/revoke-force/*",
				"leases/lookup/*",
			},

			Unauthenticated: []string{
				"wrapping/pubkey",
				"replication/status",
			},
		},

		Paths: []*framework.Path{
			&framework.Path{
				Pattern: "capabilities-accessor$",

				Fields: map[string]*framework.FieldSchema{
					"accessor": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: "Accessor of the token for which capabilities are being queried.",
					},
					"path": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: "Path on which capabilities are being queried.",
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleCapabilitiesAccessor,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["capabilities_accessor"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["capabilities_accessor"][1]),
			},

			&framework.Path{
				Pattern: "config/cors$",

				Fields: map[string]*framework.FieldSchema{
					"enable": &framework.FieldSchema{
						Type:        framework.TypeBool,
						Description: "Enables or disables CORS headers on requests.",
					},
					"allowed_origins": &framework.FieldSchema{
						Type:        framework.TypeCommaStringSlice,
						Description: "A comma-separated string or array of strings indicating origins that may make cross-origin requests.",
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation:   b.handleCORSRead,
					logical.UpdateOperation: b.handleCORSUpdate,
					logical.DeleteOperation: b.handleCORSDelete,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["config/cors"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["config/cors"][1]),
			},

			&framework.Path{
				Pattern: "capabilities$",

				Fields: map[string]*framework.FieldSchema{
					"token": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: "Token for which capabilities are being queried.",
					},
					"path": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: "Path on which capabilities are being queried.",
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleCapabilities,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["capabilities"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["capabilities"][1]),
			},

			&framework.Path{
				Pattern: "capabilities-self$",

				Fields: map[string]*framework.FieldSchema{
					"token": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: "Token for which capabilities are being queried.",
					},
					"path": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: "Path on which capabilities are being queried.",
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleCapabilities,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["capabilities_self"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["capabilities_self"][1]),
			},

			&framework.Path{
				Pattern: "generate-root(/attempt)?$",

				HelpSynopsis:    strings.TrimSpace(sysHelp["generate-root"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["generate-root"][1]),
			},

			&framework.Path{
				Pattern: "init$",

				HelpSynopsis:    strings.TrimSpace(sysHelp["init"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["init"][1]),
			},

			&framework.Path{
				Pattern: "rekey/backup$",

				Fields: map[string]*framework.FieldSchema{},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation:   b.handleRekeyRetrieveBarrier,
					logical.DeleteOperation: b.handleRekeyDeleteBarrier,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["rekey_backup"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["rekey_backup"][0]),
			},

			&framework.Path{
				Pattern: "rekey/recovery-key-backup$",

				Fields: map[string]*framework.FieldSchema{},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation:   b.handleRekeyRetrieveRecovery,
					logical.DeleteOperation: b.handleRekeyDeleteRecovery,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["rekey_backup"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["rekey_backup"][0]),
			},

			&framework.Path{
				Pattern: "auth/(?P<path>.+?)/tune$",

				Fields: map[string]*framework.FieldSchema{
					"path": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["auth_tune"][0]),
					},
					"default_lease_ttl": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]),
					},
					"max_lease_ttl": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation:   b.handleAuthTuneRead,
					logical.UpdateOperation: b.handleAuthTuneWrite,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["auth_tune"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["auth_tune"][1]),
			},

			&framework.Path{
				Pattern: "mounts/(?P<path>.+?)/tune$",

				Fields: map[string]*framework.FieldSchema{
					"path": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["mount_path"][0]),
					},
					"default_lease_ttl": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["tune_default_lease_ttl"][0]),
					},
					"max_lease_ttl": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["tune_max_lease_ttl"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation:   b.handleMountTuneRead,
					logical.UpdateOperation: b.handleMountTuneWrite,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["mount_tune"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["mount_tune"][1]),
			},

			&framework.Path{
				Pattern: "mounts/(?P<path>.+?)",

				Fields: map[string]*framework.FieldSchema{
					"path": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["mount_path"][0]),
					},
					"type": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["mount_type"][0]),
					},
					"description": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["mount_desc"][0]),
					},
					"config": &framework.FieldSchema{
						Type:        framework.TypeMap,
						Description: strings.TrimSpace(sysHelp["mount_config"][0]),
					},
					"local": &framework.FieldSchema{
						Type:        framework.TypeBool,
						Default:     false,
						Description: strings.TrimSpace(sysHelp["mount_local"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleMount,
					logical.DeleteOperation: b.handleUnmount,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["mount"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["mount"][1]),
			},

			&framework.Path{
				Pattern: "mounts$",

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation: b.handleMountTable,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["mounts"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["mounts"][1]),
			},

			&framework.Path{
				Pattern: "remount",

				Fields: map[string]*framework.FieldSchema{
					"from": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: "The previous mount point.",
					},
					"to": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: "The new mount point.",
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleRemount,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["remount"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["remount"][1]),
			},

			&framework.Path{
				Pattern: "leases/lookup/(?P<prefix>.+?)?",

				Fields: map[string]*framework.FieldSchema{
					"prefix": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["leases-list-prefix"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ListOperation: b.handleLeaseLookupList,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["leases"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["leases"][1]),
			},

			&framework.Path{
				Pattern: "leases/lookup",

				Fields: map[string]*framework.FieldSchema{
					"lease_id": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["lease_id"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleLeaseLookup,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["leases"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["leases"][1]),
			},

			&framework.Path{
				Pattern: "(leases/)?renew" + framework.OptionalParamRegex("url_lease_id"),

				Fields: map[string]*framework.FieldSchema{
					"url_lease_id": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["lease_id"][0]),
					},
					"lease_id": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["lease_id"][0]),
					},
					"increment": &framework.FieldSchema{
						Type:        framework.TypeDurationSecond,
						Description: strings.TrimSpace(sysHelp["increment"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleRenew,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["renew"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["renew"][1]),
			},

			&framework.Path{
				Pattern: "(leases/)?revoke" + framework.OptionalParamRegex("url_lease_id"),

				Fields: map[string]*framework.FieldSchema{
					"url_lease_id": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["lease_id"][0]),
					},
					"lease_id": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["lease_id"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleRevoke,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["revoke"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["revoke"][1]),
			},

			&framework.Path{
				Pattern: "(leases/)?revoke-force/(?P<prefix>.+)",

				Fields: map[string]*framework.FieldSchema{
					"prefix": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["revoke-force-path"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleRevokeForce,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["revoke-force"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["revoke-force"][1]),
			},

			&framework.Path{
				Pattern: "(leases/)?revoke-prefix/(?P<prefix>.+)",

				Fields: map[string]*framework.FieldSchema{
					"prefix": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["revoke-prefix-path"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleRevokePrefix,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["revoke-prefix"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["revoke-prefix"][1]),
			},

			&framework.Path{
				Pattern: "leases/tidy$",

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleTidyLeases,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["tidy_leases"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["tidy_leases"][1]),
			},

			&framework.Path{
				Pattern: "auth$",

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation: b.handleAuthTable,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["auth-table"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["auth-table"][1]),
			},

			&framework.Path{
				Pattern: "auth/(?P<path>.+)",

				Fields: map[string]*framework.FieldSchema{
					"path": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["auth_path"][0]),
					},
					"type": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["auth_type"][0]),
					},
					"description": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["auth_desc"][0]),
					},
					"local": &framework.FieldSchema{
						Type:        framework.TypeBool,
						Default:     false,
						Description: strings.TrimSpace(sysHelp["mount_local"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleEnableAuth,
					logical.DeleteOperation: b.handleDisableAuth,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["auth"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["auth"][1]),
			},

			&framework.Path{
				Pattern: "policy$",

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation: b.handlePolicyList,
					logical.ListOperation: b.handlePolicyList,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["policy-list"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["policy-list"][1]),
			},

			&framework.Path{
				Pattern: "policy/(?P<name>.+)",

				Fields: map[string]*framework.FieldSchema{
					"name": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["policy-name"][0]),
					},
					"rules": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["policy-rules"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation:   b.handlePolicyRead,
					logical.UpdateOperation: b.handlePolicySet,
					logical.DeleteOperation: b.handlePolicyDelete,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["policy"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["policy"][1]),
			},

			&framework.Path{
				Pattern: "seal-status$",

				HelpSynopsis:    strings.TrimSpace(sysHelp["seal-status"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["seal-status"][1]),
			},

			&framework.Path{
				Pattern: "seal$",

				HelpSynopsis:    strings.TrimSpace(sysHelp["seal"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["seal"][1]),
			},

			&framework.Path{
				Pattern: "unseal$",

				HelpSynopsis:    strings.TrimSpace(sysHelp["unseal"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["unseal"][1]),
			},

			&framework.Path{
				Pattern: "audit-hash/(?P<path>.+)",

				Fields: map[string]*framework.FieldSchema{
					"path": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["audit_path"][0]),
					},

					"input": &framework.FieldSchema{
						Type: framework.TypeString,
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleAuditHash,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["audit-hash"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["audit-hash"][1]),
			},

			&framework.Path{
				Pattern: "audit$",

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation: b.handleAuditTable,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["audit-table"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["audit-table"][1]),
			},

			&framework.Path{
				Pattern: "audit/(?P<path>.+)",

				Fields: map[string]*framework.FieldSchema{
					"path": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["audit_path"][0]),
					},
					"type": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["audit_type"][0]),
					},
					"description": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: strings.TrimSpace(sysHelp["audit_desc"][0]),
					},
					"options": &framework.FieldSchema{
						Type:        framework.TypeMap,
						Description: strings.TrimSpace(sysHelp["audit_opts"][0]),
					},
					"local": &framework.FieldSchema{
						Type:        framework.TypeBool,
						Default:     false,
						Description: strings.TrimSpace(sysHelp["mount_local"][0]),
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleEnableAudit,
					logical.DeleteOperation: b.handleDisableAudit,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["audit"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["audit"][1]),
			},

			&framework.Path{
				Pattern: "raw/(?P<path>.+)",

				Fields: map[string]*framework.FieldSchema{
					"path": &framework.FieldSchema{
						Type: framework.TypeString,
					},
					"value": &framework.FieldSchema{
						Type: framework.TypeString,
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation:   b.handleRawRead,
					logical.UpdateOperation: b.handleRawWrite,
					logical.DeleteOperation: b.handleRawDelete,
				},
			},

			&framework.Path{
				Pattern: "key-status$",

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation: b.handleKeyStatus,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["key-status"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["key-status"][1]),
			},

			&framework.Path{
				Pattern: "rotate$",

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleRotate,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["rotate"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["rotate"][1]),
			},

			/*
				// Disabled for the moment as we don't support this externally
				&framework.Path{
					Pattern: "wrapping/pubkey$",

					Callbacks: map[logical.Operation]framework.OperationFunc{
						logical.ReadOperation: b.handleWrappingPubkey,
					},

					HelpSynopsis:    strings.TrimSpace(sysHelp["wrappubkey"][0]),
					HelpDescription: strings.TrimSpace(sysHelp["wrappubkey"][1]),
				},
			*/

			&framework.Path{
				Pattern: "wrapping/wrap$",

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleWrappingWrap,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["wrap"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["wrap"][1]),
			},

			&framework.Path{
				Pattern: "wrapping/unwrap$",

				Fields: map[string]*framework.FieldSchema{
					"token": &framework.FieldSchema{
						Type: framework.TypeString,
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleWrappingUnwrap,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["unwrap"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["unwrap"][1]),
			},

			&framework.Path{
				Pattern: "wrapping/lookup$",

				Fields: map[string]*framework.FieldSchema{
					"token": &framework.FieldSchema{
						Type: framework.TypeString,
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleWrappingLookup,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["wraplookup"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["wraplookup"][1]),
			},

			&framework.Path{
				Pattern: "wrapping/rewrap$",

				Fields: map[string]*framework.FieldSchema{
					"token": &framework.FieldSchema{
						Type: framework.TypeString,
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleWrappingRewrap,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["rewrap"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["rewrap"][1]),
			},

			&framework.Path{
				Pattern: "config/auditing/request-headers/(?P<header>.+)",

				Fields: map[string]*framework.FieldSchema{
					"header": &framework.FieldSchema{
						Type: framework.TypeString,
					},
					"hmac": &framework.FieldSchema{
						Type: framework.TypeBool,
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handleAuditedHeaderUpdate,
					logical.DeleteOperation: b.handleAuditedHeaderDelete,
					logical.ReadOperation:   b.handleAuditedHeaderRead,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["audited-headers-name"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["audited-headers-name"][1]),
			},

			&framework.Path{
				Pattern: "config/auditing/request-headers$",

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ReadOperation: b.handleAuditedHeadersRead,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["audited-headers"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["audited-headers"][1]),
			},

			&framework.Path{
				Pattern: "plugins/catalog/$",

				Fields: map[string]*framework.FieldSchema{},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.ListOperation: b.handlePluginCatalogList,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["plugin-catalog"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]),
			},

			&framework.Path{
				Pattern: "plugins/catalog/(?P<name>.+)",

				Fields: map[string]*framework.FieldSchema{
					"name": &framework.FieldSchema{
						Type:        framework.TypeString,
						Description: "The name of the plugin",
					},
					"sha_256": &framework.FieldSchema{
						Type: framework.TypeString,
						Description: `The SHA256 sum of the executable used in the
						command field. This should be HEX encoded.`,
					},
					"command": &framework.FieldSchema{
						Type: framework.TypeString,
						Description: `The command used to start the plugin. The
						executable defined in this command must exist in vault's
						plugin directory.`,
					},
				},

				Callbacks: map[logical.Operation]framework.OperationFunc{
					logical.UpdateOperation: b.handlePluginCatalogUpdate,
					logical.DeleteOperation: b.handlePluginCatalogDelete,
					logical.ReadOperation:   b.handlePluginCatalogRead,
				},

				HelpSynopsis:    strings.TrimSpace(sysHelp["plugin-catalog"][0]),
				HelpDescription: strings.TrimSpace(sysHelp["plugin-catalog"][1]),
			},
		},
	}

	b.Backend.Paths = append(b.Backend.Paths, replicationPaths(b)...)

	b.Backend.Invalidate = b.invalidate

	return b
}

// SystemBackend implements logical.Backend and is used to interact with
// the core of the system. This backend is hardcoded to exist at the "sys"
// prefix. Conceptually it is similar to procfs on Linux.
type SystemBackend struct {
	*framework.Backend
	Core *Core
}

// handleCORSRead returns the current CORS configuration
func (b *SystemBackend) handleCORSRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	corsConf := b.Core.corsConfig

	enabled := corsConf.IsEnabled()

	resp := &logical.Response{
		Data: map[string]interface{}{
			"enabled": enabled,
		},
	}

	if enabled {
		corsConf.RLock()
		resp.Data["allowed_origins"] = corsConf.AllowedOrigins
		corsConf.RUnlock()
	}

	return resp, nil
}

// handleCORSUpdate sets the list of origins that are allowed to make
// cross-origin requests and sets the CORS enabled flag to true
func (b *SystemBackend) handleCORSUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	origins := d.Get("allowed_origins").([]string)

	return nil, b.Core.corsConfig.Enable(origins)
}

// handleCORSDelete clears the allowed origins and sets the CORS enabled flag
// to false
func (b *SystemBackend) handleCORSDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	return nil, b.Core.corsConfig.Disable()
}

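// handleTidyLeases triggers a tidy operation on the expiration manager's
// lease entries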
func (b *SystemBackend) handleTidyLeases(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	err := b.Core.expiration.Tidy()
	if err != nil {
		b.Backend.Logger().Error("sys: failed to tidy leases", "error", err)
		return handleError(err)
	}
	return nil, err
}

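// invalidate is called by the framework when the given storage key changes
// outside of this node; currently only cached policy entries are invalidated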
func (b *SystemBackend) invalidate(key string) {
	if b.Core.logger.IsTrace() {
		b.Core.logger.Trace("sys: invalidating key", "key", key)
	}
	switch {
	case strings.HasPrefix(key, policySubPath):
		b.Core.stateLock.RLock()
		defer b.Core.stateLock.RUnlock()
		if b.Core.policyStore != nil {
			b.Core.policyStore.invalidate(strings.TrimPrefix(key, policySubPath))
		}
	}
}

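// handlePluginCatalogList lists the plugins registered in the plugin catalog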
func (b *SystemBackend) handlePluginCatalogList(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	plugins, err := b.Core.pluginCatalog.List()
	if err != nil {
		return nil, err
	}

	return logical.ListResponse(plugins), nil
}

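// handlePluginCatalogUpdate registers a plugin in the catalog, or overwrites
// an existing entry, from the given name, command, and SHA-256 sum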
func (b *SystemBackend) handlePluginCatalogUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	pluginName := d.Get("name").(string)
	if pluginName == "" {
		return logical.ErrorResponse("missing plugin name"), nil
	}

	sha256 := d.Get("sha_256").(string)
	if sha256 == "" {
		return logical.ErrorResponse("missing SHA-256 value"), nil
	}

	command := d.Get("command").(string)
	if command == "" {
		return logical.ErrorResponse("missing command value"), nil
	}

	sha256Bytes, err := hex.DecodeString(sha256)
	if err != nil {
		return logical.ErrorResponse("Could not decode SHA-256 value from Hex"), err
	}

	err = b.Core.pluginCatalog.Set(pluginName, command, sha256Bytes)
	if err != nil {
		return nil, err
	}

	return nil, nil
}

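// handlePluginCatalogRead returns the catalog entry for the named plugin, or
// an empty response if it is not registered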
func (b *SystemBackend) handlePluginCatalogRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	pluginName := d.Get("name").(string)
	if pluginName == "" {
		return logical.ErrorResponse("missing plugin name"), nil
	}
	plugin, err := b.Core.pluginCatalog.Get(pluginName)
	if err != nil {
		return nil, err
	}
	if plugin == nil {
		return nil, nil
	}

	return &logical.Response{
		Data: map[string]interface{}{
			"plugin": plugin,
		},
	}, nil
}

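// handlePluginCatalogDelete removes the named plugin from the plugin catalog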
func (b *SystemBackend) handlePluginCatalogDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	pluginName := d.Get("name").(string)
	if pluginName == "" {
		return logical.ErrorResponse("missing plugin name"), nil
	}
	err := b.Core.pluginCatalog.Delete(pluginName)
	if err != nil {
		return nil, err
	}

	return nil, nil
}

// handleAuditedHeaderUpdate creates or overwrites a header entry
func (b *SystemBackend) handleAuditedHeaderUpdate(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	header := d.Get("header").(string)
	hmac := d.Get("hmac").(bool)
	if header == "" {
		return logical.ErrorResponse("missing header name"), nil
	}

	headerConfig := b.Core.AuditedHeadersConfig()
	err := headerConfig.add(header, hmac)
	if err != nil {
		return nil, err
	}

	return nil, nil
}

// handleAuditedHeaderDelete deletes the header with the given name
func (b *SystemBackend) handleAuditedHeaderDelete(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	header := d.Get("header").(string)
	if header == "" {
		return logical.ErrorResponse("missing header name"), nil
	}

	headerConfig := b.Core.AuditedHeadersConfig()
	err := headerConfig.remove(header)
	if err != nil {
		return nil, err
	}

	return nil, nil
}

// handleAuditedHeaderRead returns the header configuration for the given header name
func (b *SystemBackend) handleAuditedHeaderRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	header := d.Get("header").(string)
	if header == "" {
		return logical.ErrorResponse("missing header name"), nil
	}

	headerConfig := b.Core.AuditedHeadersConfig()
	settings, ok := headerConfig.Headers[header]
	if !ok {
		return logical.ErrorResponse("Could not find header in config"), nil
	}

	return &logical.Response{
		Data: map[string]interface{}{
			header: settings,
		},
	}, nil
}

// handleAuditedHeadersRead returns the whole audited headers config
func (b *SystemBackend) handleAuditedHeadersRead(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	headerConfig := b.Core.AuditedHeadersConfig()

	return &logical.Response{
		Data: map[string]interface{}{
			"headers": headerConfig.Headers,
		},
	}, nil
}

// handleCapabilities returns the ACL capabilities of the token for a given path
func (b *SystemBackend) handleCapabilities(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	token := d.Get("token").(string)
	if token == "" {
		token = req.ClientToken
	}
	capabilities, err := b.Core.Capabilities(token, d.Get("path").(string))
	if err != nil {
		return nil, err
	}

	return &logical.Response{
		Data: map[string]interface{}{
			"capabilities": capabilities,
		},
	}, nil
}

// handleCapabilitiesAccessor returns the ACL capabilities of the
// token associated with the given accessor for a given path.
func (b *SystemBackend) handleCapabilitiesAccessor(req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
	accessor := d.Get("accessor").(string)
	if accessor == "" {
		return logical.ErrorResponse("missing accessor"), nil
	}

	aEntry, err := b.Core.tokenStore.lookupByAccessor(accessor, false)
	if err != nil {
		return nil, err
	}

	capabilities, err := b.Core.Capabilities(aEntry.TokenID, d.Get("path").(string))
	if err != nil {
		return nil, err
	}

	return &logical.Response{
		Data: map[string]interface{}{
			"capabilities": capabilities,
		},
	}, nil
}

// handleRekeyRetrieve returns backed-up, PGP-encrypted unseal keys from a
// rekey operation
func (b *SystemBackend) handleRekeyRetrieve(
	req *logical.Request,
	data *framework.FieldData,
	recovery bool) (*logical.Response, error) {
	backup, err := b.Core.RekeyRetrieveBackup(recovery)
	if err != nil {
		return nil, fmt.Errorf("unable to look up backed-up keys: %v", err)
	}
	if backup == nil {
		return logical.ErrorResponse("no backed-up keys found"), nil
	}

	keysB64 := map[string][]string{}
	for k, v := range backup.Keys {
		for _, j := range v {
			currB64Keys := keysB64[k]
			if currB64Keys == nil {
				currB64Keys = []string{}
			}
			key, err := hex.DecodeString(j)
			if err != nil {
				return nil, fmt.Errorf("error decoding hex-encoded backup key: %v", err)
			}
			currB64Keys = append(currB64Keys, base64.StdEncoding.EncodeToString(key))
			keysB64[k] = currB64Keys
		}
	}

	// Format the status
	resp := &logical.Response{
		Data: map[string]interface{}{
			"nonce":       backup.Nonce,
			"keys":        backup.Keys,
			"keys_base64": keysB64,
		},
	}

	return resp, nil
}

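// handleRekeyRetrieveBarrier returns backed-up barrier unseal keys from a
// rekey operation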
func (b *SystemBackend) handleRekeyRetrieveBarrier(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	return b.handleRekeyRetrieve(req, data, false)
}

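// handleRekeyRetrieveRecovery returns backed-up recovery keys from a rekey
// operation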
func (b *SystemBackend) handleRekeyRetrieveRecovery(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	return b.handleRekeyRetrieve(req, data, true)
}

// handleRekeyDelete deletes backed-up, PGP-encrypted unseal keys from a rekey
// operation
func (b *SystemBackend) handleRekeyDelete(
	req *logical.Request,
	data *framework.FieldData,
	recovery bool) (*logical.Response, error) {
	err := b.Core.RekeyDeleteBackup(recovery)
	if err != nil {
		return nil, fmt.Errorf("error during deletion of backed-up keys: %v", err)
	}

	return nil, nil
}

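// handleRekeyDeleteBarrier deletes backed-up barrier keys from a rekey
// operation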
func (b *SystemBackend) handleRekeyDeleteBarrier(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	return b.handleRekeyDelete(req, data, false)
}

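// handleRekeyDeleteRecovery deletes backed-up recovery keys from a rekey
// operation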
func (b *SystemBackend) handleRekeyDeleteRecovery(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	return b.handleRekeyDelete(req, data, true)
}

// handleMountTable handles the "mounts" endpoint to provide the mount table
func (b *SystemBackend) handleMountTable(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	b.Core.mountsLock.RLock()
	defer b.Core.mountsLock.RUnlock()

	resp := &logical.Response{
		Data: make(map[string]interface{}),
	}

	for _, entry := range b.Core.mounts.Entries {
		info := map[string]interface{}{
			"type":        entry.Type,
			"description": entry.Description,
			"accessor":    entry.Accessor,
			"config": map[string]interface{}{
				"default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()),
				"max_lease_ttl":     int64(entry.Config.MaxLeaseTTL.Seconds()),
				"force_no_cache":    entry.Config.ForceNoCache,
			},
			"local": entry.Local,
		}

		resp.Data[entry.Path] = info
	}

	return resp, nil
}

// handleMount is used to mount a new path
func (b *SystemBackend) handleMount(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	b.Core.clusterParamsLock.RLock()
	repState := b.Core.replicationState
	b.Core.clusterParamsLock.RUnlock()

	local := data.Get("local").(bool)
	if !local && repState == consts.ReplicationSecondary {
		return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
	}

	// Get all the options
	path := data.Get("path").(string)
	logicalType := data.Get("type").(string)
	description := data.Get("description").(string)

	path = sanitizeMountPath(path)

	var config MountConfig

	var apiConfig struct {
		DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
		MaxLeaseTTL     string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
		ForceNoCache    bool   `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
	}
	configMap := data.Get("config").(map[string]interface{})
	if configMap != nil && len(configMap) != 0 {
		err := mapstructure.Decode(configMap, &apiConfig)
		if err != nil {
			return logical.ErrorResponse(
					"unable to convert given mount config information"),
				logical.ErrInvalidRequest
		}
	}

	switch apiConfig.DefaultLeaseTTL {
	case "":
	case "system":
	default:
		tmpDef, err := parseutil.ParseDurationSecond(apiConfig.DefaultLeaseTTL)
		if err != nil {
			return logical.ErrorResponse(fmt.Sprintf(
					"unable to parse default TTL of %s: %s", apiConfig.DefaultLeaseTTL, err)),
				logical.ErrInvalidRequest
		}
		config.DefaultLeaseTTL = tmpDef
	}

	switch apiConfig.MaxLeaseTTL {
	case "":
	case "system":
	default:
		tmpMax, err := parseutil.ParseDurationSecond(apiConfig.MaxLeaseTTL)
		if err != nil {
			return logical.ErrorResponse(fmt.Sprintf(
					"unable to parse max TTL of %s: %s", apiConfig.MaxLeaseTTL, err)),
				logical.ErrInvalidRequest
		}
		config.MaxLeaseTTL = tmpMax
	}

	if config.MaxLeaseTTL != 0 && config.DefaultLeaseTTL > config.MaxLeaseTTL {
		return logical.ErrorResponse(
				"given default lease TTL greater than given max lease TTL"),
			logical.ErrInvalidRequest
	}

	if config.DefaultLeaseTTL > b.Core.maxLeaseTTL {
		return logical.ErrorResponse(fmt.Sprintf(
				"given default lease TTL greater than system max lease TTL of %d", int(b.Core.maxLeaseTTL.Seconds()))),
			logical.ErrInvalidRequest
	}

	// Copy over the force no cache if set
	if apiConfig.ForceNoCache {
		config.ForceNoCache = true
	}

	if logicalType == "" {
		return logical.ErrorResponse(
				"backend type must be specified as a string"),
			logical.ErrInvalidRequest
	}

	// Create the mount entry
	me := &MountEntry{
		Table:       mountTableType,
		Path:        path,
		Type:        logicalType,
		Description: description,
		Config:      config,
		Local:       local,
	}

	// Attempt mount
	if err := b.Core.mount(me); err != nil {
		b.Backend.Logger().Error("sys: mount failed", "path", me.Path, "error", err)
		return handleError(err)
	}

	return nil, nil
}

// used to intercept an HTTPCodedError so it goes back to callee
|
|
|
|
func handleError(
|
|
|
|
err error) (*logical.Response, error) {
|
|
|
|
switch err.(type) {
|
|
|
|
case logical.HTTPCodedError:
|
|
|
|
return logical.ErrorResponse(err.Error()), err
|
|
|
|
default:
|
|
|
|
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-15 21:42:05 +00:00
|
|
|
// handleUnmount is used to unmount a path
|
2015-03-16 00:35:59 +00:00
|
|
|
func (b *SystemBackend) handleUnmount(
|
2015-03-19 22:11:42 +00:00
|
|
|
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
|
2017-02-16 21:29:30 +00:00
|
|
|
b.Core.clusterParamsLock.RLock()
|
|
|
|
repState := b.Core.replicationState
|
|
|
|
b.Core.clusterParamsLock.RUnlock()
|
|
|
|
|
2015-03-19 22:11:42 +00:00
|
|
|
suffix := strings.TrimPrefix(req.Path, "mounts/")
|
2015-03-15 21:42:05 +00:00
|
|
|
if len(suffix) == 0 {
|
2015-03-15 21:53:41 +00:00
|
|
|
return logical.ErrorResponse("path cannot be blank"), logical.ErrInvalidRequest
|
2015-03-15 21:42:05 +00:00
|
|
|
}
|
|
|
|
|
2016-03-03 18:13:47 +00:00
|
|
|
suffix = sanitizeMountPath(suffix)
|
|
|
|
|
2017-02-16 21:29:30 +00:00
|
|
|
entry := b.Core.router.MatchingMountEntry(suffix)
|
|
|
|
if entry != nil && !entry.Local && repState == consts.ReplicationSecondary {
|
|
|
|
return logical.ErrorResponse("cannot unmount a non-local mount on a replication secondary"), nil
|
|
|
|
}
|
|
|
|
|
2015-03-15 21:42:05 +00:00
|
|
|
// Attempt unmount
|
2016-09-19 17:02:25 +00:00
|
|
|
if existed, err := b.Core.unmount(suffix); existed && err != nil {
|
2016-08-19 20:45:17 +00:00
|
|
|
b.Backend.Logger().Error("sys: unmount failed", "path", suffix, "error", err)
|
2015-08-10 17:27:25 +00:00
|
|
|
return handleError(err)
|
2015-03-15 21:42:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil, nil
|
|
|
|
}

// handleRemount is used to remount a path
func (b *SystemBackend) handleRemount(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	b.Core.clusterParamsLock.RLock()
	repState := b.Core.replicationState
	b.Core.clusterParamsLock.RUnlock()

	// Get the paths
	fromPath := data.Get("from").(string)
	toPath := data.Get("to").(string)
	if fromPath == "" || toPath == "" {
		return logical.ErrorResponse(
				"both 'from' and 'to' path must be specified as a string"),
			logical.ErrInvalidRequest
	}

	fromPath = sanitizeMountPath(fromPath)
	toPath = sanitizeMountPath(toPath)

	entry := b.Core.router.MatchingMountEntry(fromPath)
	if entry != nil && !entry.Local && repState == consts.ReplicationSecondary {
		return logical.ErrorResponse("cannot remount a non-local mount on a replication secondary"), nil
	}

	// Attempt remount
	if err := b.Core.remount(fromPath, toPath); err != nil {
		b.Backend.Logger().Error("sys: remount failed", "from_path", fromPath, "to_path", toPath, "error", err)
		return handleError(err)
	}

	return nil, nil
}

// handleAuthTuneRead is used to get config settings on an auth path
func (b *SystemBackend) handleAuthTuneRead(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	path := data.Get("path").(string)
	if path == "" {
		return logical.ErrorResponse(
				"path must be specified as a string"),
			logical.ErrInvalidRequest
	}
	return b.handleTuneReadCommon("auth/" + path)
}

// handleMountTuneRead is used to get config settings on a backend
func (b *SystemBackend) handleMountTuneRead(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	path := data.Get("path").(string)
	if path == "" {
		return logical.ErrorResponse(
				"path must be specified as a string"),
			logical.ErrInvalidRequest
	}

	// This call will read both logical backend's configuration as well as auth backends'.
	// Retaining this behavior for backward compatibility. If this behavior is not desired,
	// an error can be returned if path has a prefix of "auth/".
	return b.handleTuneReadCommon(path)
}

// handleTuneReadCommon returns the config settings of a path
func (b *SystemBackend) handleTuneReadCommon(path string) (*logical.Response, error) {
	path = sanitizeMountPath(path)

	sysView := b.Core.router.MatchingSystemView(path)
	if sysView == nil {
		b.Backend.Logger().Error("sys: cannot fetch sysview", "path", path)
		return handleError(fmt.Errorf("sys: cannot fetch sysview for path %s", path))
	}

	mountEntry := b.Core.router.MatchingMountEntry(path)
	if mountEntry == nil {
		b.Backend.Logger().Error("sys: cannot fetch mount entry", "path", path)
		return handleError(fmt.Errorf("sys: cannot fetch mount entry for path %s", path))
	}

	resp := &logical.Response{
		Data: map[string]interface{}{
			"default_lease_ttl": int(sysView.DefaultLeaseTTL().Seconds()),
			"max_lease_ttl":     int(sysView.MaxLeaseTTL().Seconds()),
			"force_no_cache":    mountEntry.Config.ForceNoCache,
		},
	}

	return resp, nil
}

// handleAuthTuneWrite is used to set config settings on an auth path
func (b *SystemBackend) handleAuthTuneWrite(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	path := data.Get("path").(string)
	if path == "" {
		return logical.ErrorResponse("path must be specified as a string"),
			logical.ErrInvalidRequest
	}
	return b.handleTuneWriteCommon("auth/"+path, data)
}

// handleMountTuneWrite is used to set config settings on a backend
func (b *SystemBackend) handleMountTuneWrite(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	path := data.Get("path").(string)
	if path == "" {
		return logical.ErrorResponse("path must be specified as a string"),
			logical.ErrInvalidRequest
	}

	// This call will write both logical backend's configuration as well as auth backends'.
	// Retaining this behavior for backward compatibility. If this behavior is not desired,
	// an error can be returned if path has a prefix of "auth/".
	return b.handleTuneWriteCommon(path, data)
}

// handleTuneWriteCommon is used to set config settings on a path
func (b *SystemBackend) handleTuneWriteCommon(
	path string, data *framework.FieldData) (*logical.Response, error) {
	b.Core.clusterParamsLock.RLock()
	repState := b.Core.replicationState
	b.Core.clusterParamsLock.RUnlock()

	path = sanitizeMountPath(path)

	// Prevent protected paths from being changed
	for _, p := range untunableMounts {
		if strings.HasPrefix(path, p) {
			b.Backend.Logger().Error("sys: cannot tune this mount", "path", path)
			return handleError(fmt.Errorf("sys: cannot tune '%s'", path))
		}
	}

	mountEntry := b.Core.router.MatchingMountEntry(path)
	if mountEntry == nil {
		b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path)
		return handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path))
	}
	if mountEntry != nil && !mountEntry.Local && repState == consts.ReplicationSecondary {
		return logical.ErrorResponse("cannot tune a non-local mount on a replication secondary"), nil
	}

	var lock *sync.RWMutex
	switch {
	case strings.HasPrefix(path, "auth/"):
		lock = &b.Core.authLock
	default:
		lock = &b.Core.mountsLock
	}

	// Timing configuration parameters
	{
		var newDefault, newMax *time.Duration
		defTTL := data.Get("default_lease_ttl").(string)
		switch defTTL {
		case "":
		case "system":
			tmpDef := time.Duration(0)
			newDefault = &tmpDef
		default:
			tmpDef, err := parseutil.ParseDurationSecond(defTTL)
			if err != nil {
				return handleError(err)
			}
			newDefault = &tmpDef
		}

		maxTTL := data.Get("max_lease_ttl").(string)
		switch maxTTL {
		case "":
		case "system":
			tmpMax := time.Duration(0)
			newMax = &tmpMax
		default:
			tmpMax, err := parseutil.ParseDurationSecond(maxTTL)
			if err != nil {
				return handleError(err)
			}
			newMax = &tmpMax
		}

		if newDefault != nil || newMax != nil {
			lock.Lock()
			defer lock.Unlock()

			if err := b.tuneMountTTLs(path, mountEntry, newDefault, newMax); err != nil {
				b.Backend.Logger().Error("sys: tuning failed", "path", path, "error", err)
				return handleError(err)
			}
		}
	}

	return nil, nil
}
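
// Illustrative note (not in the original source): the tune handlers above
// accept "default_lease_ttl" and "max_lease_ttl" as strings. An empty string
// leaves the value unchanged, "system" signals use of the system default (a
// zero duration is passed through), and anything else is parsed by
// parseutil.ParseDurationSecond, e.g. a Go-style duration string such as
// "768h".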

// handleLeaseLookup is used to view the metadata for a given LeaseID
func (b *SystemBackend) handleLeaseLookup(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	leaseID := data.Get("lease_id").(string)
	if leaseID == "" {
		return logical.ErrorResponse("lease_id must be specified"),
			logical.ErrInvalidRequest
	}

	leaseTimes, err := b.Core.expiration.FetchLeaseTimes(leaseID)
	if err != nil {
		b.Backend.Logger().Error("sys: error retrieving lease", "lease_id", leaseID, "error", err)
		return handleError(err)
	}
	if leaseTimes == nil {
		return logical.ErrorResponse("invalid lease"), logical.ErrInvalidRequest
	}

	resp := &logical.Response{
		Data: map[string]interface{}{
			"id":           leaseID,
			"issue_time":   leaseTimes.IssueTime,
			"expire_time":  nil,
			"last_renewal": nil,
			"ttl":          int64(0),
		},
	}
	renewable, _ := leaseTimes.renewable()
	resp.Data["renewable"] = renewable

	if !leaseTimes.LastRenewalTime.IsZero() {
		resp.Data["last_renewal"] = leaseTimes.LastRenewalTime
	}
	if !leaseTimes.ExpireTime.IsZero() {
		resp.Data["expire_time"] = leaseTimes.ExpireTime
		resp.Data["ttl"] = leaseTimes.ttl()
	}
	return resp, nil
}

func (b *SystemBackend) handleLeaseLookupList(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	prefix := data.Get("prefix").(string)
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix = prefix + "/"
	}

	keys, err := b.Core.expiration.idView.List(prefix)
	if err != nil {
		b.Backend.Logger().Error("sys: error listing leases", "prefix", prefix, "error", err)
		return handleError(err)
	}
	return logical.ListResponse(keys), nil
}

// handleRenew is used to renew a lease with a given LeaseID
func (b *SystemBackend) handleRenew(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	// Get all the options
	leaseID := data.Get("lease_id").(string)
	if leaseID == "" {
		leaseID = data.Get("url_lease_id").(string)
	}
	if leaseID == "" {
		return logical.ErrorResponse("lease_id must be specified"),
			logical.ErrInvalidRequest
	}
	incrementRaw := data.Get("increment").(int)

	// Convert the increment
	increment := time.Duration(incrementRaw) * time.Second

	// Invoke the expiration manager directly
	resp, err := b.Core.expiration.Renew(leaseID, increment)
	if err != nil {
		b.Backend.Logger().Error("sys: lease renewal failed", "lease_id", leaseID, "error", err)
		return handleError(err)
	}
	return resp, err
}

// handleRevoke is used to revoke a given LeaseID
func (b *SystemBackend) handleRevoke(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	// Get all the options
	leaseID := data.Get("lease_id").(string)
	if leaseID == "" {
		leaseID = data.Get("url_lease_id").(string)
	}
	if leaseID == "" {
		return logical.ErrorResponse("lease_id must be specified"),
			logical.ErrInvalidRequest
	}

	// Invoke the expiration manager directly
	if err := b.Core.expiration.Revoke(leaseID); err != nil {
		b.Backend.Logger().Error("sys: lease revocation failed", "lease_id", leaseID, "error", err)
		return handleError(err)
	}
	return nil, nil
}

// handleRevokePrefix is used to revoke a prefix with many LeaseIDs
func (b *SystemBackend) handleRevokePrefix(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	return b.handleRevokePrefixCommon(req, data, false)
}

// handleRevokeForce is used to revoke a prefix with many LeaseIDs, ignoring errors
func (b *SystemBackend) handleRevokeForce(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	return b.handleRevokePrefixCommon(req, data, true)
}

// handleRevokePrefixCommon is used to revoke a prefix with many LeaseIDs
func (b *SystemBackend) handleRevokePrefixCommon(
	req *logical.Request, data *framework.FieldData, force bool) (*logical.Response, error) {
	// Get all the options
	prefix := data.Get("prefix").(string)

	// Invoke the expiration manager directly
	var err error
	if force {
		err = b.Core.expiration.RevokeForce(prefix)
	} else {
		err = b.Core.expiration.RevokePrefix(prefix)
	}
	if err != nil {
		b.Backend.Logger().Error("sys: revoke prefix failed", "prefix", prefix, "error", err)
		return handleError(err)
	}
	return nil, nil
}
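
// Illustrative note (not in the original source): handleRevokePrefixCommon is
// the shared implementation behind revoke-prefix and revoke-force. With
// force=false it calls RevokePrefix, which fails if any underlying lease
// revocation fails; with force=true it calls RevokeForce, which ignores errors
// returned by the backends so that otherwise-stuck leases can be abandoned.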

// handleAuthTable handles the "auth" endpoint to provide the auth table
func (b *SystemBackend) handleAuthTable(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	b.Core.authLock.RLock()
	defer b.Core.authLock.RUnlock()

	resp := &logical.Response{
		Data: make(map[string]interface{}),
	}
	for _, entry := range b.Core.auth.Entries {
		info := map[string]interface{}{
			"type":        entry.Type,
			"description": entry.Description,
			"accessor":    entry.Accessor,
			"config": map[string]interface{}{
				"default_lease_ttl": int64(entry.Config.DefaultLeaseTTL.Seconds()),
				"max_lease_ttl":     int64(entry.Config.MaxLeaseTTL.Seconds()),
			},
			"local": entry.Local,
		}
		resp.Data[entry.Path] = info
	}
	return resp, nil
}

// handleEnableAuth is used to enable a new credential backend
func (b *SystemBackend) handleEnableAuth(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	b.Core.clusterParamsLock.RLock()
	repState := b.Core.replicationState
	b.Core.clusterParamsLock.RUnlock()

	local := data.Get("local").(bool)
	if !local && repState == consts.ReplicationSecondary {
		return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
	}

	// Get all the options
	path := data.Get("path").(string)
	logicalType := data.Get("type").(string)
	description := data.Get("description").(string)

	if logicalType == "" {
		return logical.ErrorResponse(
				"backend type must be specified as a string"),
			logical.ErrInvalidRequest
	}

	path = sanitizeMountPath(path)

	// Create the mount entry
	me := &MountEntry{
		Table:       credentialTableType,
		Path:        path,
		Type:        logicalType,
		Description: description,
		Local:       local,
	}

	// Attempt enabling
	if err := b.Core.enableCredential(me); err != nil {
		b.Backend.Logger().Error("sys: enable auth mount failed", "path", me.Path, "error", err)
		return handleError(err)
	}
	return nil, nil
}

// handleDisableAuth is used to disable a credential backend
func (b *SystemBackend) handleDisableAuth(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	suffix := strings.TrimPrefix(req.Path, "auth/")
	if len(suffix) == 0 {
		return logical.ErrorResponse("path cannot be blank"), logical.ErrInvalidRequest
	}

	suffix = sanitizeMountPath(suffix)

	// Attempt disable
	if existed, err := b.Core.disableCredential(suffix); existed && err != nil {
		b.Backend.Logger().Error("sys: disable auth mount failed", "path", suffix, "error", err)
		return handleError(err)
	}
	return nil, nil
}

// handlePolicyList handles the "policy" endpoint to provide the enabled policies
func (b *SystemBackend) handlePolicyList(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	// Get all the configured policies
	policies, err := b.Core.policyStore.ListPolicies()

	// Add the special "root" policy
	policies = append(policies, "root")
	resp := logical.ListResponse(policies)

	// Backwards compatibility
	resp.Data["policies"] = resp.Data["keys"]

	return resp, err
}

// handlePolicyRead handles the "policy/<name>" endpoint to read a policy
func (b *SystemBackend) handlePolicyRead(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	name := data.Get("name").(string)

	policy, err := b.Core.policyStore.GetPolicy(name)
	if err != nil {
		return handleError(err)
	}

	if policy == nil {
		return nil, nil
	}

	return &logical.Response{
		Data: map[string]interface{}{
			"name":  name,
			"rules": policy.Raw,
		},
	}, nil
}

// handlePolicySet handles the "policy/<name>" endpoint to set a policy
func (b *SystemBackend) handlePolicySet(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	name := data.Get("name").(string)

	rulesRaw, ok := data.GetOk("rules")
	if !ok {
		return logical.ErrorResponse("'rules' parameter not supplied"), nil
	}

	rules := rulesRaw.(string)
	if rules == "" {
		return logical.ErrorResponse("'rules' parameter empty"), nil
	}

	// Validate the rules parse
	parse, err := Parse(rules)
	if err != nil {
		return handleError(err)
	}

	// Override the name
	parse.Name = strings.ToLower(name)

	// Update the policy
	if err := b.Core.policyStore.SetPolicy(parse); err != nil {
		return handleError(err)
	}
	return nil, nil
}

// handlePolicyDelete handles the "policy/<name>" endpoint to delete a policy
func (b *SystemBackend) handlePolicyDelete(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	name := data.Get("name").(string)

	if err := b.Core.policyStore.DeletePolicy(name); err != nil {
		return handleError(err)
	}
	return nil, nil
}

// handleAuditTable handles the "audit" endpoint to provide the audit table
func (b *SystemBackend) handleAuditTable(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	b.Core.auditLock.RLock()
	defer b.Core.auditLock.RUnlock()

	resp := &logical.Response{
		Data: make(map[string]interface{}),
	}
	for _, entry := range b.Core.audit.Entries {
		info := map[string]interface{}{
			"path":        entry.Path,
			"type":        entry.Type,
			"description": entry.Description,
			"options":     entry.Options,
			"local":       entry.Local,
		}
		resp.Data[entry.Path] = info
	}
	return resp, nil
}

// handleAuditHash is used to fetch the hash of the given input data with the
// specified audit backend's salt
func (b *SystemBackend) handleAuditHash(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	path := data.Get("path").(string)
	input := data.Get("input").(string)
	if input == "" {
		return logical.ErrorResponse("the \"input\" parameter is empty"), nil
	}

	path = sanitizeMountPath(path)

	hash, err := b.Core.auditBroker.GetHash(path, input)
	if err != nil {
		return logical.ErrorResponse(err.Error()), nil
	}

	return &logical.Response{
		Data: map[string]interface{}{
			"hash": hash,
		},
	}, nil
}

// handleEnableAudit is used to enable a new audit backend
func (b *SystemBackend) handleEnableAudit(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	b.Core.clusterParamsLock.RLock()
	repState := b.Core.replicationState
	b.Core.clusterParamsLock.RUnlock()

	local := data.Get("local").(bool)
	if !local && repState == consts.ReplicationSecondary {
		return logical.ErrorResponse("cannot add a non-local mount to a replication secondary"), nil
	}

	// Get all the options
	path := data.Get("path").(string)
	backendType := data.Get("type").(string)
	description := data.Get("description").(string)
	options := data.Get("options").(map[string]interface{})

	optionMap := make(map[string]string)
	for k, v := range options {
		vStr, ok := v.(string)
		if !ok {
			return logical.ErrorResponse("options must be string valued"),
				logical.ErrInvalidRequest
		}
		optionMap[k] = vStr
	}

	// Create the mount entry
	me := &MountEntry{
		Table:       auditTableType,
		Path:        path,
		Type:        backendType,
		Description: description,
		Options:     optionMap,
		Local:       local,
	}

	// Attempt enabling
	if err := b.Core.enableAudit(me); err != nil {
		b.Backend.Logger().Error("sys: enable audit mount failed", "path", me.Path, "error", err)
		return handleError(err)
	}
	return nil, nil
}

// handleDisableAudit is used to disable an audit backend
func (b *SystemBackend) handleDisableAudit(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	path := data.Get("path").(string)

	// Attempt disable
	if existed, err := b.Core.disableAudit(path); existed && err != nil {
		b.Backend.Logger().Error("sys: disable audit mount failed", "path", path, "error", err)
		return handleError(err)
	}
	return nil, nil
}

// handleRawRead is used to read directly from the barrier
func (b *SystemBackend) handleRawRead(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	path := data.Get("path").(string)

	// Prevent access of protected paths
	for _, p := range protectedPaths {
		if strings.HasPrefix(path, p) {
			err := fmt.Sprintf("cannot read '%s'", path)
			return logical.ErrorResponse(err), logical.ErrInvalidRequest
		}
	}

	entry, err := b.Core.barrier.Get(path)
	if err != nil {
		return handleError(err)
	}
	if entry == nil {
		return nil, nil
	}
	resp := &logical.Response{
		Data: map[string]interface{}{
			"value": string(entry.Value),
		},
	}
	return resp, nil
}

// handleRawWrite is used to write directly to the barrier
func (b *SystemBackend) handleRawWrite(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	path := data.Get("path").(string)

	// Prevent access of protected paths
	for _, p := range protectedPaths {
		if strings.HasPrefix(path, p) {
			err := fmt.Sprintf("cannot write '%s'", path)
			return logical.ErrorResponse(err), logical.ErrInvalidRequest
		}
	}

	value := data.Get("value").(string)
	entry := &Entry{
		Key:   path,
		Value: []byte(value),
	}
	if err := b.Core.barrier.Put(entry); err != nil {
		return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
	}
	return nil, nil
}

// handleRawDelete is used to delete directly from the barrier
func (b *SystemBackend) handleRawDelete(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	path := data.Get("path").(string)

	// Prevent access of protected paths
	for _, p := range protectedPaths {
		if strings.HasPrefix(path, p) {
			err := fmt.Sprintf("cannot delete '%s'", path)
			return logical.ErrorResponse(err), logical.ErrInvalidRequest
		}
	}

	if err := b.Core.barrier.Delete(path); err != nil {
		return handleError(err)
	}
	return nil, nil
}

// handleKeyStatus returns status information about the backend key
func (b *SystemBackend) handleKeyStatus(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	// Get the key info
	info, err := b.Core.barrier.ActiveKeyInfo()
	if err != nil {
		return nil, err
	}

	resp := &logical.Response{
		Data: map[string]interface{}{
			"term":         info.Term,
			"install_time": info.InstallTime.Format(time.RFC3339Nano),
		},
	}
	return resp, nil
}

// handleRotate is used to trigger a key rotation
func (b *SystemBackend) handleRotate(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	b.Core.clusterParamsLock.RLock()
	repState := b.Core.replicationState
	b.Core.clusterParamsLock.RUnlock()
	if repState == consts.ReplicationSecondary {
		return logical.ErrorResponse("cannot rotate on a replication secondary"), nil
	}

	// Rotate to the new term
	newTerm, err := b.Core.barrier.Rotate()
	if err != nil {
		b.Backend.Logger().Error("sys: failed to create new encryption key", "error", err)
		return handleError(err)
	}
	b.Backend.Logger().Info("sys: installed new encryption key")

	// In HA mode, we need an upgrade path for the standby instances
	if b.Core.ha != nil {
		// Create the upgrade path to the new term
		if err := b.Core.barrier.CreateUpgrade(newTerm); err != nil {
			b.Backend.Logger().Error("sys: failed to create new upgrade", "term", newTerm, "error", err)
		}

		// Schedule the destroy of the upgrade path
		time.AfterFunc(keyRotateGracePeriod, func() {
			if err := b.Core.barrier.DestroyUpgrade(newTerm); err != nil {
				b.Backend.Logger().Error("sys: failed to destroy upgrade", "term", newTerm, "error", err)
			}
		})
	}

	// Write to the canary path, which will force a synchronous truing during
	// replication
	if err := b.Core.barrier.Put(&Entry{
		Key:   coreKeyringCanaryPath,
		Value: []byte(fmt.Sprintf("new-rotation-term-%d", newTerm)),
	}); err != nil {
		b.Core.logger.Error("core: error saving keyring canary", "error", err)
		return nil, fmt.Errorf("failed to save keyring canary: %v", err)
	}

	return nil, nil
}

func (b *SystemBackend) handleWrappingPubkey(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	x, _ := b.Core.wrappingJWTKey.X.MarshalText()
	y, _ := b.Core.wrappingJWTKey.Y.MarshalText()
	return &logical.Response{
		Data: map[string]interface{}{
			"jwt_x":     string(x),
			"jwt_y":     string(y),
			"jwt_curve": corePrivateKeyTypeP521,
		},
	}, nil
}

func (b *SystemBackend) handleWrappingWrap(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	if req.WrapInfo == nil || req.WrapInfo.TTL == 0 {
		return logical.ErrorResponse("endpoint requires response wrapping to be used"), logical.ErrInvalidRequest
	}

	// N.B.: Do *NOT* allow JWT wrapping tokens to be created through this
	// endpoint. JWTs are signed so if we don't allow users to create wrapping
	// tokens using them we can ensure that an operator can't spoof a legit JWT
	// wrapped token, which makes certain init/rekey/generate-root cases have
	// better properties.
	req.WrapInfo.Format = "uuid"

	return &logical.Response{
		Data: data.Raw,
	}, nil
}

func (b *SystemBackend) handleWrappingUnwrap(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	// If a third party is unwrapping (rather than the calling token being the
	// wrapping token) we detect this so that we can revoke the original
	// wrapping token after reading it
	var thirdParty bool

	token := data.Get("token").(string)
	if token != "" {
		thirdParty = true
	} else {
		token = req.ClientToken
	}

	if thirdParty {
		// Use the token to decrement the use count to avoid a second operation on the token.
		_, err := b.Core.tokenStore.UseTokenByID(token)
		if err != nil {
			return nil, fmt.Errorf("error decrementing wrapping token's use-count: %v", err)
		}

		defer b.Core.tokenStore.Revoke(token)
	}

	cubbyReq := &logical.Request{
		Operation:   logical.ReadOperation,
		Path:        "cubbyhole/response",
		ClientToken: token,
	}
	cubbyResp, err := b.Core.router.Route(cubbyReq)
	if err != nil {
		return nil, fmt.Errorf("error looking up wrapping information: %v", err)
	}
	if cubbyResp == nil {
		return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
	}
	if cubbyResp != nil && cubbyResp.IsError() {
		return cubbyResp, nil
	}
	if cubbyResp.Data == nil {
		return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
	}

	responseRaw := cubbyResp.Data["response"]
	if responseRaw == nil {
		return nil, fmt.Errorf("no response found inside the cubbyhole")
	}
	response, ok := responseRaw.(string)
	if !ok {
		return nil, fmt.Errorf("could not decode response inside the cubbyhole")
	}

	resp := &logical.Response{
		Data: map[string]interface{}{},
	}
	if len(response) == 0 {
		resp.Data[logical.HTTPStatusCode] = 204
	} else {
		resp.Data[logical.HTTPStatusCode] = 200
		resp.Data[logical.HTTPRawBody] = []byte(response)
		resp.Data[logical.HTTPContentType] = "application/json"
	}

	return resp, nil
}
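
// Illustrative note (not in the original source): handleWrappingUnwrap reads
// the wrapped payload back out of the wrapping token's cubbyhole at
// "cubbyhole/response". When a "token" field is supplied, the caller is a
// third party, so the wrapping token's use count is decremented and the token
// is revoked after the read; otherwise the calling token itself is treated as
// the wrapping token. An empty stored response yields HTTP 204, anything else
// is returned verbatim as JSON with HTTP 200.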

func (b *SystemBackend) handleWrappingLookup(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	token := data.Get("token").(string)

	if token == "" {
		return logical.ErrorResponse("missing \"token\" value in input"), logical.ErrInvalidRequest
	}

	cubbyReq := &logical.Request{
		Operation:   logical.ReadOperation,
		Path:        "cubbyhole/wrapinfo",
		ClientToken: token,
	}
	cubbyResp, err := b.Core.router.Route(cubbyReq)
	if err != nil {
		return nil, fmt.Errorf("error looking up wrapping information: %v", err)
	}
	if cubbyResp == nil {
		return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
	}
	if cubbyResp != nil && cubbyResp.IsError() {
		return cubbyResp, nil
	}
	if cubbyResp.Data == nil {
		return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
	}

	creationTTLRaw := cubbyResp.Data["creation_ttl"]
	creationTime := cubbyResp.Data["creation_time"]

	resp := &logical.Response{
		Data: map[string]interface{}{},
	}
	if creationTTLRaw != nil {
		creationTTL, err := creationTTLRaw.(json.Number).Int64()
		if err != nil {
			return nil, fmt.Errorf("error reading creation_ttl value from wrapping information: %v", err)
		}
		resp.Data["creation_ttl"] = time.Duration(creationTTL).Seconds()
	}
	if creationTime != nil {
		// This was JSON marshaled so it's already a string in RFC3339 format
		resp.Data["creation_time"] = cubbyResp.Data["creation_time"]
	}

	return resp, nil
}

func (b *SystemBackend) handleWrappingRewrap(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	// If a third party is rewrapping (rather than the calling token being the
	// wrapping token) we detect this so that we can revoke the original
	// wrapping token after reading it. Right now wrapped tokens can't unwrap
	// themselves, but in case we change it, this will be ready to do the right
	// thing.
	var thirdParty bool

	token := data.Get("token").(string)
	if token != "" {
		thirdParty = true
	} else {
		token = req.ClientToken
	}

	if thirdParty {
		// Use the token to decrement the use count to avoid a second operation on the token.
		_, err := b.Core.tokenStore.UseTokenByID(token)
		if err != nil {
			return nil, fmt.Errorf("error decrementing wrapping token's use-count: %v", err)
		}
		defer b.Core.tokenStore.Revoke(token)
	}

	// Fetch the original TTL
	cubbyReq := &logical.Request{
		Operation:   logical.ReadOperation,
		Path:        "cubbyhole/wrapinfo",
		ClientToken: token,
	}
	cubbyResp, err := b.Core.router.Route(cubbyReq)
	if err != nil {
		return nil, fmt.Errorf("error looking up wrapping information: %v", err)
	}
	if cubbyResp == nil {
		return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
	}
	if cubbyResp != nil && cubbyResp.IsError() {
		return cubbyResp, nil
	}
	if cubbyResp.Data == nil {
		return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
	}

	// Set the creation TTL on the request
	creationTTLRaw := cubbyResp.Data["creation_ttl"]
	if creationTTLRaw == nil {
		return nil, fmt.Errorf("creation_ttl value in wrapping information was nil")
	}
	creationTTL, err := cubbyResp.Data["creation_ttl"].(json.Number).Int64()
	if err != nil {
		return nil, fmt.Errorf("error reading creation_ttl value from wrapping information: %v", err)
	}

	// Fetch the original response and return it as the data for the new response
	cubbyReq = &logical.Request{
		Operation:   logical.ReadOperation,
		Path:        "cubbyhole/response",
		ClientToken: token,
	}
	cubbyResp, err = b.Core.router.Route(cubbyReq)
	if err != nil {
		return nil, fmt.Errorf("error looking up response: %v", err)
	}
	if cubbyResp == nil {
		return logical.ErrorResponse("no information found; wrapping token may be from a previous Vault version"), nil
	}
	if cubbyResp != nil && cubbyResp.IsError() {
		return cubbyResp, nil
	}
	if cubbyResp.Data == nil {
		return logical.ErrorResponse("wrapping information was nil; wrapping token may be from a previous Vault version"), nil
	}

	response := cubbyResp.Data["response"]
	if response == nil {
		return nil, fmt.Errorf("no response found inside the cubbyhole")
	}

	// Return response in "response"; wrapping code will detect the rewrap and
	// slot in instead of nesting
	return &logical.Response{
		Data: map[string]interface{}{
			"response": response,
		},
		WrapInfo: &wrapping.ResponseWrapInfo{
			TTL: time.Duration(creationTTL),
		},
	}, nil
}

func sanitizeMountPath(path string) string {
	if !strings.HasSuffix(path, "/") {
		path += "/"
	}

	if strings.HasPrefix(path, "/") {
		path = path[1:]
	}

	return path
}
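
// Illustrative examples of sanitizeMountPath behavior (not in the original
// source), derived directly from the function above:
//
//	sanitizeMountPath("secret")   // "secret/"
//	sanitizeMountPath("/secret/") // "secret/"
//	sanitizeMountPath("auth/foo") // "auth/foo/"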

const sysHelpRoot = `
The system backend is built-in to Vault and cannot be remounted or
unmounted. It contains the paths that are used to configure Vault itself
as well as perform core operations.
`

// sysHelp is all the help text for the sys backend.
var sysHelp = map[string][2]string{
	"config/cors": {
		"Configures or returns the current configuration of CORS settings.",
		`
This path responds to the following HTTP methods.

    GET /
        Returns the configuration of the CORS setting.

    POST /
        Sets the comma-separated list of origins that can make cross-origin requests.

    DELETE /
        Clears the CORS configuration and disables acceptance of CORS requests.
		`,
	},
	"init": {
		"Initializes or returns the initialization status of the Vault.",
		`
This path responds to the following HTTP methods.

    GET /
        Returns the initialization status of the Vault.

    POST /
        Initializes a new vault.
		`,
	},
	"generate-root": {
		"Reads, generates, or deletes a root token regeneration process.",
		`
This path responds to multiple HTTP methods which change the behavior. Those
HTTP methods are listed below.

    GET /attempt
        Reads the configuration and progress of the current root generation
        attempt.

    POST /attempt
        Initializes a new root generation attempt. Only a single root generation
        attempt can take place at a time. One (and only one) of otp or pgp_key
        are required.

    DELETE /attempt
        Cancels any in-progress root generation attempt. This clears any
        progress made. This must be called to change the OTP or PGP key being
        used.
		`,
	},
	"seal-status": {
		"Returns the seal status of the Vault.",
		`
This path responds to the following HTTP methods.

    GET /
        Returns the seal status of the Vault. This is an unauthenticated
        endpoint.
		`,
	},
	"seal": {
		"Seals the Vault.",
		`
This path responds to the following HTTP methods.

    PUT /
        Seals the Vault.
		`,
	},
	"unseal": {
		"Unseals the Vault.",
		`
This path responds to the following HTTP methods.

    PUT /
        Unseals the Vault.
		`,
	},
	"mounts": {
		"List the currently mounted backends.",
		`
This path responds to the following HTTP methods.

    GET /
        Lists all the mounted secret backends.

    GET /<mount point>
        Get information about the mount at the specified path.

    POST /<mount point>
        Mount a new secret backend to the mount point in the URL.

    POST /<mount point>/tune
        Tune configuration parameters for the given mount point.

    DELETE /<mount point>
        Unmount the specified mount point.
		`,
	},
	"mount": {
		`Mount a new backend at a new path.`,
		`
Mount a backend at a new path. A backend can be mounted multiple times at
multiple paths in order to configure multiple separately configured backends.
Example: you might have an AWS backend for the east coast, and one for the
west coast.
		`,
	},
	"mount_path": {
		`The path to mount to. Example: "aws/east"`,
		"",
	},
	"mount_type": {
		`The type of the backend. Example: "passthrough"`,
		"",
	},
	"mount_desc": {
		`User-friendly description for this mount.`,
		"",
	},
	"mount_config": {
		`Configuration for this mount, such as default_lease_ttl
and max_lease_ttl.`,
	},
	"mount_local": {
		`Mark the mount as a local mount, which is not replicated
and is unaffected by replication.`,
	},
	"tune_default_lease_ttl": {
		`The default lease TTL for this mount.`,
	},
	"tune_max_lease_ttl": {
		`The max lease TTL for this mount.`,
	},
	"remount": {
		"Move the mount point of an already-mounted backend.",
		`
This path responds to the following HTTP methods.

    POST /sys/remount
        Changes the mount point of an already-mounted backend.
		`,
	},
	"auth_tune": {
		"Tune the configuration parameters for an auth path.",
		`Read and write the 'default-lease-ttl' and 'max-lease-ttl' values of
the auth path.`,
	},
	"mount_tune": {
		"Tune backend configuration parameters for this mount.",
		`Read and write the 'default-lease-ttl' and 'max-lease-ttl' values of
the mount.`,
	},
	"renew": {
		"Renew a lease on a secret",
		`
When a secret is read, it may optionally include a lease interval
and a boolean indicating if renew is possible. For secrets that support
lease renewal, this endpoint is used to extend the validity of the
lease and to prevent an automatic revocation.
		`,
	},
	"lease_id": {
		"The lease identifier to renew. This is included with a lease.",
		"",
	},
	"increment": {
		"The desired increment in seconds to the lease",
		"",
	},
	"revoke": {
		"Revoke a leased secret immediately",
		`
When a secret is generated with a lease, it is automatically revoked
at the end of the lease period if not renewed. However, in some cases
you may want to force an immediate revocation. This endpoint can be
used to revoke the secret with the given Lease ID.
		`,
	},
	"revoke-prefix": {
		"Revoke all secrets generated in a given prefix",
		`
Revokes all the secrets generated under a given mount prefix. As
an example, "prod/aws/" might be the AWS logical backend, and due to
a change in the "ops" policy, we may want to invalidate all the secrets
generated. We can do a revoke prefix at "prod/aws/ops" to revoke all
the ops secrets. This does a prefix match on the Lease IDs and revokes
all matching leases.
		`,
	},
	"revoke-prefix-path": {
		`The path to revoke keys under. Example: "prod/aws/ops"`,
		"",
	},
	"revoke-force": {
		"Revoke all secrets generated in a given prefix, ignoring errors.",
		`
See the path help for 'revoke-prefix'; this behaves the same, except that it
ignores errors encountered during revocation. This can be used in certain
recovery situations; for instance, when you want to unmount a backend, but it
is impossible to fix revocation errors and these errors prevent the unmount
from proceeding. This is a DANGEROUS operation as it removes Vault's oversight
of external secrets. Access to this prefix should be tightly controlled.
		`,
	},
	"revoke-force-path": {
		`The path to revoke keys under. Example: "prod/aws/ops"`,
		"",
	},
	"auth-table": {
		"List the currently enabled credential backends.",
		`
This path responds to the following HTTP methods.

    GET /
        List the currently enabled credential backends: the name, the type of
        the backend, and a user-friendly description of the purpose for the
        credential backend.

    POST /<mount point>
        Enable a new auth backend.

    DELETE /<mount point>
        Disable the auth backend at the given mount point.
		`,
	},
	"auth": {
		`Enable a new credential backend with a name.`,
		`
Enable a credential mechanism at a new path. A backend can be mounted multiple times at
multiple paths in order to configure multiple separately configured backends.
Example: you might have an OAuth backend for GitHub, and one for Google Apps.
		`,
	},
	"auth_path": {
		`The path to mount to. Cannot be delimited. Example: "user"`,
		"",
	},
	"auth_type": {
		`The type of the backend. Example: "userpass"`,
		"",
	},
	"auth_desc": {
		`User-friendly description for this credential backend.`,
		"",
	},
	"policy-list": {
		`List the configured access control policies.`,
		`
This path responds to the following HTTP methods.

    GET /
        List the names of the configured access control policies.

    GET /<name>
        Retrieve the rules for the named policy.

    PUT /<name>
        Add or update a policy.

    DELETE /<name>
        Delete the policy with the given name.
		`,
	},
	"policy": {
		`Read, Modify, or Delete an access control policy.`,
		`
Read the rules of an existing policy, create or update the rules of a policy,
or delete a policy.
		`,
	},
	"policy-name": {
		`The name of the policy. Example: "ops"`,
		"",
	},
	"policy-rules": {
		`The rules of the policy. Either given in HCL or JSON format.`,
		"",
	},
	"audit-hash": {
		"The hash of the given string via the given audit backend",
		"",
	},
	"audit-table": {
		"List the currently enabled audit backends.",
		`
This path responds to the following HTTP methods.

    GET /
        List the currently enabled audit backends.

    PUT /<path>
        Enable an audit backend at the given path.

    DELETE /<path>
        Disable the given audit backend.
		`,
	},
	"audit_path": {
		`The name of the backend. Cannot be delimited. Example: "mysql"`,
		"",
	},
	"audit_type": {
		`The type of the backend. Example: "mysql"`,
		"",
	},
	"audit_desc": {
		`User-friendly description for this audit backend.`,
		"",
	},
	"audit_opts": {
		`Configuration options for the audit backend.`,
		"",
	},
	"audit": {
		`Enable or disable audit backends.`,
		`
Enable a new audit backend or disable an existing backend.
		`,
	},

	"key-status": {
		"Provides information about the backend encryption key.",
		`
Provides the current backend encryption key term and installation time.
`,
	},

	"rotate": {
		"Rotates the backend encryption key used to persist data.",
		`
Rotate generates a new encryption key which is used to encrypt all
data going to the storage backend. The old encryption keys are kept so
that data encrypted using those keys can still be decrypted.
`,
	},
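
	// A minimal illustration, assuming the backend is mounted at sys/: the key
	// term can be inspected and then rotated with requests such as
	//
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" $VAULT_ADDR/v1/sys/key-status
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X PUT $VAULT_ADDR/v1/sys/rotate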

	"rekey_backup": {
		"Allows fetching or deleting the backup of the rotated unseal keys.",
		"",
	},

	"capabilities": {
		"Fetches the capabilities of the given token on the given path.",
		`Returns the capabilities of the given token on the path.
The path will be searched for a path match in all the policies associated with the token.`,
	},

	"capabilities_self": {
		"Fetches the capabilities of the client token on the given path.",
		`Returns the capabilities of the client token on the path.
The path will be searched for a path match in all the policies associated with the client token.`,
	},

	"capabilities_accessor": {
		"Fetches the capabilities of the token associated with the given accessor, on the given path.",
		`When there is no access to the token, the token accessor can be used to fetch the token's capabilities
on a given path.`,
	},
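
	// A minimal illustration, assuming the backend is mounted at sys/; the path
	// "secret/foo" is only an example. The three endpoints above take a path
	// plus, respectively, a token, nothing (the client token is used), or a
	// token accessor:
	//
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X POST \
	//        -d '{"token": "<token>", "path": "secret/foo"}' $VAULT_ADDR/v1/sys/capabilities
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X POST \
	//        -d '{"path": "secret/foo"}' $VAULT_ADDR/v1/sys/capabilities-self
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X POST \
	//        -d '{"accessor": "<accessor>", "path": "secret/foo"}' $VAULT_ADDR/v1/sys/capabilities-accessor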

	"tidy_leases": {
		`This endpoint performs cleanup tasks that can be run if certain error
conditions have occurred.`,
		`This endpoint performs cleanup tasks that can be run to clean up the
lease entries after certain error conditions. Usually running this is not
necessary, and is only required if upgrade notes or support personnel suggest
it.`,
	},

	"wrap": {
		"Response-wraps an arbitrary JSON object.",
		`Round trips the given input data into a response-wrapped token.`,
	},

	"wrappubkey": {
		"Returns pubkeys used in some wrapping formats.",
		"Returns pubkeys used in some wrapping formats.",
	},

	"unwrap": {
		"Unwraps a response-wrapped token.",
		`Unwraps a response-wrapped token. Unlike simply reading from cubbyhole/response,
this provides additional validation on the token, and rather than a JSON-escaped
string, the returned response is exactly the same as the contained wrapped response.`,
	},

	"wraplookup": {
		"Looks up the properties of a response-wrapped token.",
		`Returns the creation TTL and creation time of a response-wrapped token.`,
	},

	"rewrap": {
		"Rotates a response-wrapped token.",
		`Rotates a response-wrapped token; the output is a new token with the same
response wrapped inside and the same creation TTL. The original token is revoked.`,
	},
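
	// A minimal illustration of the wrapping lifecycle described above (wrap,
	// look up, unwrap, rewrap), assuming the backend is mounted at sys/ and the
	// wrapped payload is arbitrary example data:
	//
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -H "X-Vault-Wrap-TTL: 60s" -X POST \
	//        -d '{"foo": "bar"}' $VAULT_ADDR/v1/sys/wrapping/wrap
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X POST \
	//        -d '{"token": "<wrapping token>"}' $VAULT_ADDR/v1/sys/wrapping/lookup
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X POST \
	//        -d '{"token": "<wrapping token>"}' $VAULT_ADDR/v1/sys/wrapping/unwrap
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X POST \
	//        -d '{"token": "<wrapping token>"}' $VAULT_ADDR/v1/sys/wrapping/rewrap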

	"audited-headers-name": {
		"Configures the headers sent to the audit logs.",
		`
This path responds to the following HTTP methods.

    GET /<name>
        Returns the setting for the header with the given name.

    POST /<name>
        Enable auditing of the given header.

    DELETE /<name>
        Disable auditing of the given header.
`,
	},

	"audited-headers": {
		"Lists the headers configured to be audited.",
		`Returns a list of headers that have been configured to be audited.`,
	},
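
	// A minimal illustration, assuming the backend is mounted at sys/ and using
	// X-Forwarded-For purely as an example header name:
	//
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" $VAULT_ADDR/v1/sys/config/auditing/request-headers
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X POST \
	//        -d '{"hmac": true}' $VAULT_ADDR/v1/sys/config/auditing/request-headers/X-Forwarded-For
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X DELETE \
	//        $VAULT_ADDR/v1/sys/config/auditing/request-headers/X-Forwarded-For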

	"plugins/catalog": {
		`Configures the plugins known to Vault.`,
		`
This path responds to the following HTTP methods.

    LIST /
        Returns a list of names of configured plugins.

    GET /<name>
        Retrieve the metadata for the named plugin.

    PUT /<name>
        Add or update a plugin.

    DELETE /<name>
        Delete the plugin with the given name.
`,
	},
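
	// A minimal illustration, assuming the backend is mounted at sys/; the
	// plugin name, command, and SHA-256 digest are placeholders:
	//
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X LIST $VAULT_ADDR/v1/sys/plugins/catalog
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X PUT \
	//        -d '{"command": "myplugin", "sha_256": "<hex digest>"}' \
	//        $VAULT_ADDR/v1/sys/plugins/catalog/myplugin
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X DELETE $VAULT_ADDR/v1/sys/plugins/catalog/myplugin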

	"leases": {
		`View or list lease metadata.`,
		`
This path responds to the following HTTP methods.

    PUT /
        Retrieve the metadata for the provided lease ID.

    LIST /<prefix>
        Lists the leases for the named prefix.
`,
	},
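
	// A minimal illustration, assuming the backend is mounted at sys/; the
	// lease ID and prefix are placeholders:
	//
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X PUT \
	//        -d '{"lease_id": "<lease id>"}' $VAULT_ADDR/v1/sys/leases/lookup
	//   curl -H "X-Vault-Token: $VAULT_TOKEN" -X LIST $VAULT_ADDR/v1/sys/leases/lookup/aws/creds/deploy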

	"leases-list-prefix": {
		`The path to list leases under. Example: "aws/creds/deploy"`,
		"",
	},
}