Final sync

Jeff Mitchell 2017-10-23 17:39:21 -04:00
parent d38a699c32
commit a25dae82dd
27 changed files with 482 additions and 84 deletions

View File

@ -53,7 +53,8 @@ func pathLogin(b *backend) *framework.Path {
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathLogin,
logical.UpdateOperation: b.pathLogin,
logical.AliasLookaheadOperation: b.pathLoginAliasLookahead,
},
HelpSynopsis: pathLoginSyn,
@ -61,6 +62,23 @@ func pathLogin(b *backend) *framework.Path {
}
}
func (b *backend) pathLoginAliasLookahead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
appId := data.Get("app_id").(string)
if appId == "" {
return nil, fmt.Errorf("missing app_id")
}
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: appId,
},
},
}, nil
}
func (b *backend) pathLogin(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
appId := data.Get("app_id").(string)
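
For illustration, a minimal sketch of the lookahead pattern this hunk introduces, using a hypothetical backend and field name (everything other than the logical/framework types comes from the diff above): the extra AliasLookaheadOperation callback returns only the Alias, so the core can resolve the entity before the full login handler runs.

package example // hypothetical backend package

import (
	"fmt"

	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/logical/framework"
)

type exampleBackend struct {
	*framework.Backend
}

func pathExampleLogin(b *exampleBackend) *framework.Path {
	return &framework.Path{
		Pattern: "login",
		Fields: map[string]*framework.FieldSchema{
			"name": {Type: framework.TypeString},
		},
		Callbacks: map[logical.Operation]framework.OperationFunc{
			logical.UpdateOperation:         b.handleLogin,
			logical.AliasLookaheadOperation: b.handleLoginAliasLookahead,
		},
	}
}

// handleLoginAliasLookahead returns only the alias name that a successful
// login would produce; no credentials are checked here.
func (b *exampleBackend) handleLoginAliasLookahead(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	name := data.Get("name").(string)
	if name == "" {
		return nil, fmt.Errorf("missing name")
	}
	return &logical.Response{
		Auth: &logical.Auth{
			Alias: &logical.Alias{
				Name: name,
			},
		},
	}, nil
}

// handleLogin is where real credential validation would live.
func (b *exampleBackend) handleLogin(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	return nil, logical.ErrUnsupportedOperation
}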

View File

@ -2,6 +2,7 @@ package approle
import (
"fmt"
"strings"
"time"
"github.com/hashicorp/vault/logical"
@ -23,17 +24,33 @@ func pathLogin(b *backend) *framework.Path {
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathLoginUpdate,
logical.UpdateOperation: b.pathLoginUpdate,
logical.AliasLookaheadOperation: b.pathLoginUpdateAliasLookahead,
},
HelpSynopsis: pathLoginHelpSys,
HelpDescription: pathLoginHelpDesc,
}
}
func (b *backend) pathLoginUpdateAliasLookahead(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
roleID := strings.TrimSpace(data.Get("role_id").(string))
if roleID == "" {
return nil, fmt.Errorf("missing role_id")
}
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: roleID,
},
},
}, nil
}
// Returns the Auth object indicating the authentication and authorization information
// if the credentials provided are validated by the backend.
func (b *backend) pathLoginUpdate(req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
role, roleName, metadata, err := b.validateCredentials(req, data)
role, roleName, metadata, _, err := b.validateCredentials(req, data)
if err != nil || role == nil {
return logical.ErrorResponse(fmt.Sprintf("failed to validate SecretID: %s", err)), nil
}

View File

@ -90,34 +90,35 @@ func (b *backend) validateRoleID(s logical.Storage, roleID string) (*roleStorage
}
// Validates the supplied RoleID and SecretID
func (b *backend) validateCredentials(req *logical.Request, data *framework.FieldData) (*roleStorageEntry, string, map[string]string, error) {
func (b *backend) validateCredentials(req *logical.Request, data *framework.FieldData) (*roleStorageEntry, string, map[string]string, string, error) {
metadata := make(map[string]string)
// RoleID must be supplied during every login
roleID := strings.TrimSpace(data.Get("role_id").(string))
if roleID == "" {
return nil, "", metadata, fmt.Errorf("missing role_id")
return nil, "", metadata, "", fmt.Errorf("missing role_id")
}
// Validate the RoleID and get the Role entry
role, roleName, err := b.validateRoleID(req.Storage, roleID)
if err != nil {
return nil, "", metadata, err
return nil, "", metadata, "", err
}
if role == nil || roleName == "" {
return nil, "", metadata, fmt.Errorf("failed to validate role_id")
return nil, "", metadata, "", fmt.Errorf("failed to validate role_id")
}
// Calculate the TTL boundaries since this reflects the properties of the token issued
if role.TokenTTL, role.TokenMaxTTL, err = b.SanitizeTTL(role.TokenTTL, role.TokenMaxTTL); err != nil {
return nil, "", metadata, err
return nil, "", metadata, "", err
}
var secretID string
if role.BindSecretID {
// If 'bind_secret_id' was set on role, look for the field 'secret_id'
// to be specified and validate it.
secretID := strings.TrimSpace(data.Get("secret_id").(string))
secretID = strings.TrimSpace(data.Get("secret_id").(string))
if secretID == "" {
return nil, "", metadata, fmt.Errorf("missing secret_id")
return nil, "", metadata, "", fmt.Errorf("missing secret_id")
}
// Check if the SecretID supplied is valid. If use limit was specified
@ -125,29 +126,29 @@ func (b *backend) validateCredentials(req *logical.Request, data *framework.Fiel
var valid bool
valid, metadata, err = b.validateBindSecretID(req, roleName, secretID, role.HMACKey, role.BoundCIDRList)
if err != nil {
return nil, "", metadata, err
return nil, "", metadata, "", err
}
if !valid {
return nil, "", metadata, fmt.Errorf("invalid secret_id %q", secretID)
return nil, "", metadata, "", fmt.Errorf("invalid secret_id %q", secretID)
}
}
if role.BoundCIDRList != "" {
// If 'bound_cidr_list' was set, verify the CIDR restrictions
if req.Connection == nil || req.Connection.RemoteAddr == "" {
return nil, "", metadata, fmt.Errorf("failed to get connection information")
return nil, "", metadata, "", fmt.Errorf("failed to get connection information")
}
belongs, err := cidrutil.IPBelongsToCIDRBlocksString(req.Connection.RemoteAddr, role.BoundCIDRList, ",")
if err != nil {
return nil, "", metadata, fmt.Errorf("failed to verify the CIDR restrictions set on the role: %v", err)
return nil, "", metadata, "", fmt.Errorf("failed to verify the CIDR restrictions set on the role: %v", err)
}
if !belongs {
return nil, "", metadata, fmt.Errorf("source address %q unauthorized through CIDR restrictions on the role", req.Connection.RemoteAddr)
return nil, "", metadata, "", fmt.Errorf("source address %q unauthorized through CIDR restrictions on the role", req.Connection.RemoteAddr)
}
}
return role, roleName, metadata, nil
return role, roleName, metadata, secretID, nil
}
// validateBindSecretID is used to determine if the given SecretID is a valid one.

View File

@ -106,7 +106,8 @@ needs to be supplied along with 'identity' parameter.`,
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathLoginUpdate,
logical.UpdateOperation: b.pathLoginUpdate,
logical.AliasLookaheadOperation: b.pathLoginUpdate,
},
HelpSynopsis: pathLoginSyn,
@ -546,6 +547,17 @@ func (b *backend) pathLoginUpdateEc2(
}
}
// If we're just looking up for MFA, return the Alias info
if req.Operation == logical.AliasLookaheadOperation {
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: identityDocParsed.InstanceID,
},
},
}, nil
}
roleName := data.Get("role").(string)
// If roleName is not supplied, a role in the name of the instance's AMI ID will be looked for
@ -1157,6 +1169,18 @@ func (b *backend) pathLoginUpdateIam(
// This could either be a "userID:SessionID" (in the case of an assumed role) or just a "userID"
// (in the case of an IAM user).
callerUniqueId := strings.Split(callerID.UserId, ":")[0]
// If we're just looking up for MFA, return the Alias info
if req.Operation == logical.AliasLookaheadOperation {
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: callerUniqueId,
},
},
}, nil
}
entity, err := parseIamArn(callerID.Arn)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf("error parsing arn %q: %v", callerID.Arn, err)), nil
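
For illustration, a sketch of the second variant this backend uses (the handler and field names below are invented): one callback registered for both UpdateOperation and AliasLookaheadOperation, which short-circuits with just the Alias when only a lookahead was requested.

package example // hypothetical package

import (
	"fmt"

	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/logical/framework"
)

// handleLoginWithLookahead serves both operations from a single callback.
func handleLoginWithLookahead(
	req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
	id := data.Get("instance_id").(string)
	if id == "" {
		return nil, fmt.Errorf("missing instance_id")
	}

	// If we're only resolving the alias (e.g. for MFA), stop here.
	if req.Operation == logical.AliasLookaheadOperation {
		return &logical.Response{
			Auth: &logical.Auth{
				Alias: &logical.Alias{
					Name: id,
				},
			},
		}, nil
	}

	// Full authentication for logical.UpdateOperation would continue here.
	return nil, logical.ErrUnsupportedOperation
}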

View File

@ -34,11 +34,28 @@ func pathLogin(b *backend) *framework.Path {
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathLogin,
logical.UpdateOperation: b.pathLogin,
logical.AliasLookaheadOperation: b.pathLoginAliasLookahead,
},
}
}
func (b *backend) pathLoginAliasLookahead(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
clientCerts := req.Connection.ConnState.PeerCertificates
if len(clientCerts) == 0 {
return nil, fmt.Errorf("no client certificate found")
}
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: clientCerts[0].Subject.CommonName,
},
},
}, nil
}
func (b *backend) pathLogin(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {

View File

@ -23,14 +23,36 @@ func pathLogin(b *backend) *framework.Path {
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathLogin,
logical.UpdateOperation: b.pathLogin,
logical.AliasLookaheadOperation: b.pathLoginAliasLookahead,
},
}
}
func (b *backend) pathLoginAliasLookahead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
token := data.Get("token").(string)
var verifyResp *verifyCredentialsResp
if verifyResponse, resp, err := b.verifyCredentials(req, token); err != nil {
return nil, err
} else if resp != nil {
return resp, nil
} else {
verifyResp = verifyResponse
}
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: *verifyResp.User.Login,
},
},
}, nil
}
func (b *backend) pathLogin(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
token := data.Get("token").(string)
var verifyResp *verifyCredentialsResp

View File

@ -25,7 +25,8 @@ func pathLogin(b *backend) *framework.Path {
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathLogin,
logical.UpdateOperation: b.pathLogin,
logical.AliasLookaheadOperation: b.pathLoginAliasLookahead,
},
HelpSynopsis: pathLoginSyn,
@ -33,6 +34,22 @@ func pathLogin(b *backend) *framework.Path {
}
}
func (b *backend) pathLoginAliasLookahead(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
username := d.Get("username").(string)
if username == "" {
return nil, fmt.Errorf("missing username")
}
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: username,
},
},
}, nil
}
func (b *backend) pathLogin(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
username := d.Get("username").(string)

View File

@ -27,7 +27,8 @@ func pathLogin(b *backend) *framework.Path {
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathLogin,
logical.UpdateOperation: b.pathLogin,
logical.AliasLookaheadOperation: b.pathLoginAliasLookahead,
},
HelpSynopsis: pathLoginSyn,
@ -35,6 +36,22 @@ func pathLogin(b *backend) *framework.Path {
}
}
func (b *backend) pathLoginAliasLookahead(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
username := d.Get("username").(string)
if username == "" {
return nil, fmt.Errorf("missing username")
}
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: username,
},
},
}, nil
}
func (b *backend) pathLogin(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
username := d.Get("username").(string)

View File

@ -37,7 +37,8 @@ func pathLogin(b *backend) *framework.Path {
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathLogin,
logical.UpdateOperation: b.pathLogin,
logical.AliasLookaheadOperation: b.pathLoginAliasLookahead,
},
HelpSynopsis: pathLoginSyn,
@ -45,6 +46,22 @@ func pathLogin(b *backend) *framework.Path {
}
}
func (b *backend) pathLoginAliasLookahead(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
username := d.Get("username").(string)
if username == "" {
return nil, fmt.Errorf("missing username")
}
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: username,
},
},
}, nil
}
func (b *backend) pathLogin(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
username := d.Get("username").(string)

View File

@ -27,7 +27,8 @@ func pathLogin(b *backend) *framework.Path {
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathLogin,
logical.UpdateOperation: b.pathLogin,
logical.AliasLookaheadOperation: b.pathLoginAliasLookahead,
},
HelpSynopsis: pathLoginSyn,
@ -35,6 +36,22 @@ func pathLogin(b *backend) *framework.Path {
}
}
func (b *backend) pathLoginAliasLookahead(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
username := strings.ToLower(d.Get("username").(string))
if username == "" {
return nil, fmt.Errorf("missing username")
}
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: username,
},
},
}, nil
}
func (b *backend) pathLogin(
req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
username := strings.ToLower(d.Get("username").(string))

View File

@ -44,6 +44,10 @@ func Backend() *backend {
"root",
"root/sign-self-issued",
},
SealWrapStorage: []string{
"config/ca_bundle",
},
},
Paths: []*framework.Path{

View File

@ -42,6 +42,12 @@ func Backend(conf *logical.BackendConfig) (*backend, error) {
LocalStorage: []string{
"otp/",
},
SealWrapStorage: []string{
caPrivateKey,
caPrivateKeyStoragePath,
"keys/",
},
},
Paths: []*framework.Path{

View File

@ -22,6 +22,12 @@ func Backend() *backend {
b.Backend = &framework.Backend{
Help: strings.TrimSpace(backendHelp),
PathsSpecial: &logical.Paths{
SealWrapStorage: []string{
"key/",
},
},
Paths: []*framework.Path{
pathListKeys(&b),
pathKeys(&b),

View File

@ -348,7 +348,7 @@ func (b *backend) pathKeyCreate(
// Prepare the url and barcode
urlString := keyObject.String()
// Don't include QR code is size is set to zero
// Don't include QR code if size is set to zero
if qrSize == 0 {
response = &logical.Response{
Data: map[string]interface{}{
@ -358,7 +358,7 @@ func (b *backend) pathKeyCreate(
} else {
barcode, err := keyObject.Image(qrSize, qrSize)
if err != nil {
return logical.ErrorResponse("an error occured while generating a QR code image"), err
return nil, fmt.Errorf("failed to generate QR code image: %v", err)
}
var buff bytes.Buffer

View File

@ -19,6 +19,13 @@ func Factory(conf *logical.BackendConfig) (logical.Backend, error) {
func Backend(conf *logical.BackendConfig) *backend {
var b backend
b.Backend = &framework.Backend{
PathsSpecial: &logical.Paths{
SealWrapStorage: []string{
"archive/",
"policy/",
},
},
Paths: []*framework.Path{
// Rotate/Config needs to come before Keys
// as the handler is greedy
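
For illustration, a minimal sketch of the seal-wrap declaration these hunks add to the pki, ssh, totp, and transit backends (the backend and path names below are invented): storage prefixes listed under SealWrapStorage are marked for an extra layer of seal-based encryption where the seal supports it.

package example // hypothetical package

import (
	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/logical/framework"
)

func newExampleBackend() *framework.Backend {
	return &framework.Backend{
		PathsSpecial: &logical.Paths{
			SealWrapStorage: []string{
				"keys/",              // trailing slash covers everything under the prefix
				"config/private-key", // a single sensitive entry
			},
		},
	}
}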

View File

@ -51,22 +51,20 @@ func (c *MountsCommand) Run(args []string) int {
}
defTTL := "system"
switch {
case mount.Type == "system":
defTTL = "n/a"
case mount.Type == "cubbyhole":
case mount.Type == "system", mount.Type == "cubbyhole", mount.Type == "identity":
defTTL = "n/a"
case mount.Config.DefaultLeaseTTL != 0:
defTTL = strconv.Itoa(mount.Config.DefaultLeaseTTL)
}
maxTTL := "system"
switch {
case mount.Type == "system":
maxTTL = "n/a"
case mount.Type == "cubbyhole":
case mount.Type == "system", mount.Type == "cubbyhole", mount.Type == "identity":
maxTTL = "n/a"
case mount.Config.MaxLeaseTTL != 0:
maxTTL = strconv.Itoa(mount.Config.MaxLeaseTTL)
}
replicatedBehavior := "replicated"
if mount.Local {
replicatedBehavior = "local"

View File

@ -30,7 +30,7 @@ type RekeyCommand struct {
func (c *RekeyCommand) Run(args []string) int {
var init, cancel, status, delete, retrieve, backup, recoveryKey bool
var shares, threshold int
var shares, threshold, storedShares int
var nonce string
var pgpKeys pgpkeys.PubKeyFilesFlag
flags := c.Meta.FlagSet("rekey", meta.FlagSetDefault)
@ -43,6 +43,7 @@ func (c *RekeyCommand) Run(args []string) int {
flags.BoolVar(&recoveryKey, "recovery-key", c.RecoveryKey, "")
flags.IntVar(&shares, "key-shares", 5, "")
flags.IntVar(&threshold, "key-threshold", 3, "")
flags.IntVar(&storedShares, "stored-shares", 0, "")
flags.StringVar(&nonce, "nonce", "", "")
flags.Var(&pgpKeys, "pgp-keys", "")
flags.Usage = func() { c.Ui.Error(c.Help()) }
@ -64,7 +65,7 @@ func (c *RekeyCommand) Run(args []string) int {
// Check if we are running doing any restricted variants
switch {
case init:
return c.initRekey(client, shares, threshold, pgpKeys, backup, recoveryKey)
return c.initRekey(client, shares, threshold, storedShares, pgpKeys, backup, recoveryKey)
case cancel:
return c.cancelRekey(client, recoveryKey)
case status:
@ -194,30 +195,37 @@ func (c *RekeyCommand) Run(args []string) int {
c.Ui.Output(fmt.Sprintf(
"\n"+
"Vault rekeyed with %d keys and a key threshold of %d. Please\n"+
"securely distribute the above keys. When the vault is re-sealed,\n"+
"restarted, or stopped, you must provide at least %d of these keys\n"+
"to unseal it again.\n\n"+
"Vault does not store the master key. Without at least %d keys,\n"+
"your vault will remain permanently sealed.",
"Vault rekeyed with %d keys and a key threshold of %d.\n",
shares,
threshold,
threshold,
threshold,
))
// Print this message if keys are returned
if len(result.Keys) > 0 {
c.Ui.Output(fmt.Sprintf(
"\n"+
"Please securely distribute the above keys. When the vault is re-sealed,\n"+
"restarted, or stopped, you must provide at least %d of these keys\n"+
"to unseal it again.\n\n"+
"Vault does not store the master key. Without at least %[1]d keys,\n"+
"your vault will remain permanently sealed.",
threshold,
))
}
return 0
}
// initRekey is used to start the rekey process
func (c *RekeyCommand) initRekey(client *api.Client,
shares, threshold int,
shares, threshold, storedShares int,
pgpKeys pgpkeys.PubKeyFilesFlag,
backup, recoveryKey bool) int {
// Start the rekey
request := &api.RekeyInitRequest{
SecretShares: shares,
SecretThreshold: threshold,
StoredShares: storedShares,
PGPKeys: pgpKeys,
Backup: backup,
}
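
For illustration, a sketch of driving the same request through the API client rather than the new CLI flag (values are placeholders and assume a reachable Vault configured through the usual environment variables): StoredShares is simply passed along in api.RekeyInitRequest.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Rough equivalent of `vault rekey -init -key-shares=1 -key-threshold=1 -stored-shares=1`.
	status, err := client.Sys().RekeyInit(&api.RekeyInitRequest{
		SecretShares:    1,
		SecretThreshold: 1,
		StoredShares:    1,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("rekey nonce:", status.Nonce)
}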

View File

@ -242,7 +242,7 @@ func (c *ServerCommand) Run(args []string) int {
}()
if seal == nil {
c.Ui.Error(fmt.Sprintf("Could not create seal"))
c.Ui.Error(fmt.Sprintf("Could not create seal; most likely proper Seal configuration information was not set, but no error was generated."))
return 1
}
@ -264,6 +264,7 @@ func (c *ServerCommand) Run(args []string) int {
PluginDirectory: config.PluginDirectory,
EnableRaw: config.EnableRawEndpoint,
}
if dev {
coreConfig.DevToken = devRootTokenID
if devLeasedKV {
@ -576,6 +577,14 @@ CLUSTER_SYNTHESIS_COMPLETE:
core.SetClusterListenerAddrs(clusterAddrs)
core.SetClusterHandler(handler)
err = core.UnsealWithStoredKeys()
if err != nil {
if !errwrap.ContainsType(err, new(vault.NonFatalError)) {
c.Ui.Output(fmt.Sprintf("Error initializing core: %s", err))
return 1
}
}
// Perform service discovery registrations and initialization of
// HTTP server after the verifyOnly check.
@ -668,6 +677,18 @@ CLUSTER_SYNTHESIS_COMPLETE:
go server.Serve(ln)
}
if sealConfigError != nil {
init, err := core.Initialized()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error checking if core is initialized: %v", err))
return 1
}
if init {
c.Ui.Error("Vault is initialized but no Seal key could be loaded")
return 1
}
}
if newCoreError != nil {
c.Ui.Output("==> Warning:\n\nNon-fatal error during initialization; check the logs for more information.")
c.Ui.Output("")

View File

@ -24,7 +24,7 @@ type Config struct {
Storage *Storage `hcl:"-"`
HAStorage *Storage `hcl:"-"`
HSM *HSM `hcl:"-"`
Seal *Seal `hcl:"-"`
CacheSize int `hcl:"cache_size"`
DisableCache bool `hcl:"-"`
@ -115,13 +115,13 @@ func (b *Storage) GoString() string {
return fmt.Sprintf("*%#v", *b)
}
// HSM contains HSM configuration for the server
type HSM struct {
// Seal contains Seal configuration for the server
type Seal struct {
Type string
Config map[string]string
}
func (h *HSM) GoString() string {
func (h *Seal) GoString() string {
return fmt.Sprintf("*%#v", *h)
}
@ -241,9 +241,9 @@ func (c *Config) Merge(c2 *Config) *Config {
result.HAStorage = c2.HAStorage
}
result.HSM = c.HSM
if c2.HSM != nil {
result.HSM = c2.HSM
result.Seal = c.Seal
if c2.Seal != nil {
result.Seal = c2.Seal
}
result.Telemetry = c.Telemetry
@ -394,6 +394,7 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
"backend",
"ha_backend",
"hsm",
"seal",
"listener",
"cache_size",
"disable_cache",
@ -438,11 +439,17 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
}
if o := list.Filter("hsm"); len(o.Items) > 0 {
if err := parseHSMs(&result, o); err != nil {
if err := parseSeal(&result, o, "hsm"); err != nil {
return nil, fmt.Errorf("error parsing 'hsm': %s", err)
}
}
if o := list.Filter("seal"); len(o.Items) > 0 {
if err := parseSeal(&result, o, "seal"); err != nil {
return nil, fmt.Errorf("error parsing 'seal': %s", err)
}
}
if o := list.Filter("listener"); len(o.Items) > 0 {
if err := parseListeners(&result, o); err != nil {
return nil, fmt.Errorf("error parsing 'listener': %s", err)
@ -645,38 +652,57 @@ func parseHAStorage(result *Config, list *ast.ObjectList, name string) error {
return nil
}
func parseHSMs(result *Config, list *ast.ObjectList) error {
func parseSeal(result *Config, list *ast.ObjectList, blockName string) error {
if len(list.Items) > 1 {
return fmt.Errorf("only one 'hsm' block is permitted")
return fmt.Errorf("only one %q block is permitted", blockName)
}
// Get our item
item := list.Items[0]
key := "hsm"
key := blockName
if len(item.Keys) > 0 {
key = item.Keys[0].Token.Value().(string)
}
valid := []string{
"lib",
"slot",
"pin",
"mechanism",
"key_label",
"generate_key",
"regenerate_key",
var valid []string
// Valid parameter for the Seal types
switch key {
case "pkcs11":
valid = []string{
"lib",
"slot",
"pin",
"mechanism",
"hmac_mechanism",
"key_label",
"hmac_key_label",
"generate_key",
"regenerate_key",
"max_parallel",
}
case "awskms":
valid = []string{
"aws_region",
"access_key",
"secret_key",
"kms_key_id",
"max_parallel",
}
default:
return fmt.Errorf("invalid seal type %q", key)
}
if err := checkHCLKeys(item.Val, valid); err != nil {
return multierror.Prefix(err, fmt.Sprintf("hsm.%s:", key))
return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key))
}
var m map[string]string
if err := hcl.DecodeObject(&m, item.Val); err != nil {
return multierror.Prefix(err, fmt.Sprintf("hsm.%s:", key))
return multierror.Prefix(err, fmt.Sprintf("%s.%s:", blockName, key))
}
result.HSM = &HSM{
result.Seal = &Seal{
Type: strings.ToLower(key),
Config: m,
}
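
For illustration, a sketch of the kind of configuration fragment the new parseSeal path accepts for the "awskms" type (the concrete values are placeholders; the key names come from the valid list above). It is wrapped in a Go string only so the example stays in the same language as the rest of the commit.

package main

import "fmt"

// exampleSealStanza is the kind of HCL that ParseConfig would now route
// through parseSeal(result, list, "seal").
const exampleSealStanza = `
seal "awskms" {
  aws_region = "us-east-1"
  kms_key_id = "alias/vault-unseal"
}
`

func main() {
	fmt.Print(exampleSealStanza)
}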

View File

@ -32,12 +32,28 @@ const (
// not to use request forwarding
NoRequestForwardingHeaderName = "X-Vault-No-Request-Forwarding"
// MFAHeaderName represents the HTTP header which carries the credentials
// required to perform MFA on any path.
MFAHeaderName = "X-Vault-MFA"
// canonicalMFAHeaderName is the MFA header value's format in the request
// headers. Do not alter the casing of this string.
canonicalMFAHeaderName = "X-Vault-Mfa"
// PolicyOverrideHeaderName is the header set to request overriding
// soft-mandatory Sentinel policies.
PolicyOverrideHeaderName = "X-Vault-Policy-Override"
// MaxRequestSize is the maximum accepted request size. This is to prevent
// a denial of service attack where no Content-Length is provided and the server
// is fed ever more data until it exhausts memory.
MaxRequestSize = 32 * 1024 * 1024
)
var (
ReplicationStaleReadTimeout = 2 * time.Second
)
// Handler returns an http.Handler for the API. This can be used on
// its own to mount the Vault API within another web server.
func Handler(core *vault.Core) http.Handler {
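
For illustration, a client-side sketch of supplying the new header (the request URL and header value are placeholders; the exact value format depends on how the MFA method is configured). Go's net/http canonicalizes header names, which is why the handler also keeps canonicalMFAHeaderName as "X-Vault-Mfa" for lookups.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "https://vault.example.com/v1/secret/foo", nil)
	if err != nil {
		panic(err)
	}

	// Attach MFA credentials for this single request.
	req.Header.Set("X-Vault-MFA", "my_totp:123456")

	// The canonicalized name is what server-side lookups see.
	fmt.Println(req.Header.Get("X-Vault-Mfa"))
}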

View File

@ -163,6 +163,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -173,6 +175,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -183,6 +187,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -193,6 +199,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -204,6 +212,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -214,6 +224,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -224,6 +236,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -234,6 +248,8 @@ func TestSysMounts_headerAuth(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},

View File

@ -57,6 +57,14 @@ func handleSysInitPut(core *vault.Core, w http.ResponseWriter, r *http.Request)
PGPKeys: req.RecoveryPGPKeys,
}
// N.B. Although the core is capable of handling situations where some keys
// are stored and some aren't, in practice, replication + HSMs makes this
// extremely hard to reason about, to the point that it will probably never
// be supported. The reason is that each HSM needs to encode the master key
// separately, which means the shares must be generated independently,
// which means both that the shares will be different *AND* there would
// need to be a way to actually allow fetching of the generated keys by
// operators.
if core.SealAccess().StoredKeysSupported() {
if barrierConfig.SecretShares != 1 {
respondError(w, http.StatusBadRequest, fmt.Errorf("secret shares must be 1"))

View File

@ -33,6 +33,8 @@ func TestSysMounts(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -43,6 +45,8 @@ func TestSysMounts(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -53,6 +57,8 @@ func TestSysMounts(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -63,6 +69,8 @@ func TestSysMounts(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -74,6 +82,8 @@ func TestSysMounts(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -84,6 +94,8 @@ func TestSysMounts(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -94,6 +106,8 @@ func TestSysMounts(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -104,6 +118,8 @@ func TestSysMounts(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -120,7 +136,7 @@ func TestSysMounts(t *testing.T) {
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
t.Fatalf("bad: expected: %#v\nactual: %#v\n", expected, actual)
}
}
@ -154,6 +170,8 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -164,6 +182,8 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -174,6 +194,8 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -184,6 +206,8 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -194,6 +218,8 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -205,6 +231,8 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -215,6 +243,8 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -225,6 +255,8 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -235,6 +267,8 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -245,6 +279,8 @@ func TestSysMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -261,7 +297,7 @@ func TestSysMount(t *testing.T) {
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: %#v", actual)
t.Fatalf("bad: expected: %#v\nactual: %#v\n", expected, actual)
}
}
@ -317,6 +353,8 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -327,6 +365,8 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -337,6 +377,8 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -347,6 +389,8 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -357,6 +401,8 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -368,6 +414,8 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -378,6 +426,8 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -388,6 +438,8 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -398,6 +450,8 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -408,6 +462,8 @@ func TestSysRemount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -461,6 +517,8 @@ func TestSysUnmount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -471,6 +529,8 @@ func TestSysUnmount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -481,6 +541,8 @@ func TestSysUnmount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -491,6 +553,8 @@ func TestSysUnmount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -502,6 +566,8 @@ func TestSysUnmount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -512,6 +578,8 @@ func TestSysUnmount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -522,6 +590,8 @@ func TestSysUnmount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -532,6 +602,8 @@ func TestSysUnmount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -582,6 +654,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -592,6 +666,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -602,6 +678,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -612,6 +690,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -622,6 +702,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -633,6 +715,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -643,6 +727,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -653,6 +739,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -663,6 +751,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -673,6 +763,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -744,6 +836,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("259196400"),
"max_lease_ttl": json.Number("259200000"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -754,6 +848,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -764,6 +860,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -774,6 +872,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -784,6 +884,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -795,6 +897,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("259196400"),
"max_lease_ttl": json.Number("259200000"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -805,6 +909,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -815,6 +921,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},
@ -825,6 +933,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": true,
},
@ -835,6 +945,8 @@ func TestSysTuneMount(t *testing.T) {
"default_lease_ttl": json.Number("0"),
"max_lease_ttl": json.Number("0"),
"force_no_cache": false,
"plugin_name": "",
"seal_wrap": false,
},
"local": false,
},

View File

@ -79,7 +79,7 @@ func TestSysWritePolicy(t *testing.T) {
resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{
"rules": `path "*" { capabilities = ["read"] }`,
})
testResponseStatus(t, resp, 204)
testResponseStatus(t, resp, 200)
resp = testHttpGet(t, token, addr+"/v1/sys/policy")
@ -120,7 +120,7 @@ func TestSysDeletePolicy(t *testing.T) {
resp := testHttpPost(t, token, addr+"/v1/sys/policy/foo", map[string]interface{}{
"rules": `path "*" { capabilities = ["read"] }`,
})
testResponseStatus(t, resp, 204)
testResponseStatus(t, resp, 200)
resp = testHttpDelete(t, token, addr+"/v1/sys/policy/foo")
testResponseStatus(t, resp, 204)

View File

@ -113,12 +113,13 @@ func handleSysRekeyInitPut(core *vault.Core, recovery bool, w http.ResponseWrite
return
}
// Right now we don't support this, but the rest of the code is ready for
// when we do, hence the check below for this to be false if
// StoredShares is greater than zero
if core.SealAccess().StoredKeysSupported() && !recovery {
respondError(w, http.StatusBadRequest, fmt.Errorf("rekeying of barrier not supported when stored key support is available"))
return
// If the seal supports recovery keys and stored keys, then we allow rekeying the barrier key
// iff the secret shares, secret threshold, and stored shares are set to 1.
if !recovery && core.SealAccess().RecoveryKeySupported() && core.SealAccess().StoredKeysSupported() {
if req.SecretShares != 1 || req.SecretThreshold != 1 || req.StoredShares != 1 {
respondError(w, http.StatusBadRequest, fmt.Errorf("secret shares, secret threshold, and stored shares must be set to 1"))
return
}
}
if len(req.PGPKeys) > 0 && len(req.PGPKeys) != req.SecretShares-req.StoredShares {

View File

@ -13,7 +13,7 @@ import (
// Test to check if the API errors out when wrong number of PGP keys are
// supplied for rekey
func TestSysRekeyInit_pgpKeysEntriesForRekey(t *testing.T) {
func TestSysRekey_Init_pgpKeysEntriesForRekey(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
@ -27,7 +27,7 @@ func TestSysRekeyInit_pgpKeysEntriesForRekey(t *testing.T) {
testResponseStatus(t, resp, 400)
}
func TestSysRekeyInit_Status(t *testing.T) {
func TestSysRekey_Init_Status(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
@ -56,12 +56,13 @@ func TestSysRekeyInit_Status(t *testing.T) {
}
}
func TestSysRekeyInit_Setup(t *testing.T) {
func TestSysRekey_Init_Setup(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
// Start rekey
resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
"secret_shares": 5,
"secret_threshold": 3,
@ -88,6 +89,7 @@ func TestSysRekeyInit_Setup(t *testing.T) {
t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
}
// Get rekey status
resp = testHttpGet(t, token, addr+"/v1/sys/rekey/init")
actual = map[string]interface{}{}
@ -114,7 +116,7 @@ func TestSysRekeyInit_Setup(t *testing.T) {
}
}
func TestSysRekeyInit_Cancel(t *testing.T) {
func TestSysRekey_Init_Cancel(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()

View File

@ -281,7 +281,7 @@ func TestSysSeal_Permissions(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if resp != nil {
if resp == nil || resp.IsError() {
t.Fatalf("bad: %#v", resp)
}
@ -319,7 +319,7 @@ func TestSysSeal_Permissions(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if resp != nil {
if resp == nil || resp.IsError() {
t.Fatalf("bad: %#v", resp)
}
@ -340,7 +340,7 @@ func TestSysSeal_Permissions(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if resp != nil {
if resp == nil || resp.IsError() {
t.Fatalf("bad: %#v", resp)
}
@ -361,7 +361,7 @@ func TestSysSeal_Permissions(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if resp != nil {
if resp == nil || resp.IsError() {
t.Fatalf("bad: %#v", resp)
}