Merge branch 'master' into partnerships-add-docs

Andy Manoske 2018-09-26 19:17:26 -07:00 committed by GitHub
commit ab1494389c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
936 changed files with 43688 additions and 18803 deletions


@ -6,6 +6,10 @@ if [ "$remote" = "enterprise" ]; then
exit 0
fi
if [ "$remote" = "ent" ]; then
exit 0
fi
if [ -f version/version_ent.go ]; then
echo "Found enterprise version file while pushing to oss remote"
exit 1


@ -23,16 +23,14 @@ matrix:
- go: tip
cache:
yarn: true
directories:
- ui/node_modules
before_install:
- nvm install 8
- nvm use 8
# Repo for Yarn
- sudo apt-key adv --fetch-keys http://dl.yarnpkg.com/debian/pubkey.gpg
- echo "deb http://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
- sudo apt-get update -qq
- sudo apt-get install -y -qq yarn
- curl -o- -L https://yarnpkg.com/install.sh | bash -s -- --version 1.9.4
- export PATH="$HOME/.yarn/bin:$PATH"
branches:
only:


@ -1,4 +1,50 @@
## 0.11.1 (September 5th, 2018)
## 0.11.2 (Unreleased)
FEATURES:
* AWS Secret Engine Root Credential Rotation: The credential used by the AWS
secret engine can now be rotated, to ensure that only Vault knows the
credentials it is using. [GH-5140]
* Storage Backend Migrator: A new `operator migrate` command allows offline
migration of data between two storage backends.
BUG FIXES:
* core: Re-add deprecated capabilities information for now [GH-5360]
* core: Fix handling of cyclic token relationships [GH-4803]
* storage/mysql: Fix locking on MariaDB [GH-5343]
* replication: Fix DR API when using a token [GH-5398]
IMPROVEMENTS:
* auth/aws: The identity alias name can now be configured to be either the IAM
unique ID of the IAM principal or the ARN of the caller identity [GH-5247]
* cli: Format TTLs for non-secret responses [GH-5367]
* identity: Support operating on entities and groups by their names [GH-5355]
* plugins: Add `env` parameter when registering plugins to the catalog to allow
operators to include environment variables during plugin execution. [GH-5359]
## 0.11.1.1 (September 17th, 2018) (Enterprise Only)
BUG FIXES:
* agent: Fix auth handler-based wrapping of output tokens [GH-5316]
* core: Properly store the replication checkpoint file if it's larger than the
storage engine's per-item limit
* core: Improve WAL deletion rate
* core: Fix token creation on performance standby nodes
* core: Fix unwrapping inside a namespace
* core: Always forward tidy operations from performance standby nodes
IMPROVEMENTS:
* auth/aws: add support for key/value pairs or JSON values for
`iam_request_headers` with IAM auth method [GH-5320]
* auth/aws, secret/aws: Throttling errors from the AWS API will now be
reported as 502 errors by Vault, along with the original error [GH-5270]
* replication: Start fetching during a sync from where it previously errored
## 0.11.1 (September 6th, 2018)
SECURITY:
@ -26,10 +72,12 @@ IMPROVEMENTS:
BUG FIXES:
* core: Ensure we use a background context when stepping down [GH-5290]
* core: Properly check error return from random byte reading [GH-5277]
* core: Re-add `sys/` top-route injection for now [GH-5241]
* core: Properly store the replication checkpoint file if it's larger than the
storage engine's per-item limit
* core: Policies stored in minified JSON would return an error [GH-5229]
* core: Evaluate templated policies in capabilities check [GH-5250]
* identity: Update MemDB with identity group alias while loading groups [GH-5289]
* secrets/database: Fix nil pointer when revoking some leases [GH-5262]
* secrets/pki: Fix sign-verbatim losing extra Subject attributes [GH-5245]
* secrets/pki: Remove certificates from store when tidying revoked


@ -92,7 +92,7 @@ vet:
# source files.
prep: fmtcheck
@sh -c "'$(CURDIR)/scripts/goversioncheck.sh' '$(GO_VERSION_MIN)'"
go generate $(go list ./... | grep -v /vendor/)
@go generate $(go list ./... | grep -v /vendor/)
@if [ -d .git/hooks ]; then cp .hooks/* .git/hooks/; fi
# bootstrap the build by downloading additional tools
@ -143,10 +143,13 @@ proto:
protoc helper/forwarding/types.proto --go_out=plugins=grpc:../../..
protoc logical/*.proto --go_out=plugins=grpc:../../..
protoc physical/types.proto --go_out=plugins=grpc:../../..
protoc helper/identity/mfa/types.proto --go_out=plugins=grpc:../../..
protoc helper/identity/types.proto --go_out=plugins=grpc:../../..
protoc builtin/logical/database/dbplugin/*.proto --go_out=plugins=grpc:../../..
protoc logical/plugin/pb/*.proto --go_out=plugins=grpc:../../..
sed -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/protobuf:"/sentinel:"" protobuf:"/' helper/identity/types.pb.go helper/storagepacker/types.pb.go logical/plugin/pb/backend.pb.go
sed -i '1s;^;// +build !enterprise\n;' physical/types.pb.go
sed -i '1s;^;// +build !enterprise\n;' helper/identity/mfa/types.pb.go
sed -i -e 's/Idp/IDP/' -e 's/Url/URL/' -e 's/Id/ID/' -e 's/IDentity/Identity/' -e 's/EntityId/EntityID/' -e 's/Api/API/' -e 's/Qr/QR/' -e 's/Totp/TOTP/' -e 's/Mfa/MFA/' -e 's/Pingid/PingID/' -e 's/protobuf:"/sentinel:"" protobuf:"/' -e 's/namespaceId/namespaceID/' -e 's/Ttl/TTL/' -e 's/BoundCidrs/BoundCIDRs/' helper/identity/types.pb.go helper/storagepacker/types.pb.go logical/plugin/pb/backend.pb.go logical/identity.pb.go
fmtcheck:
@true


@ -8,6 +8,8 @@ import (
"net/http"
"net/url"
"github.com/hashicorp/vault/helper/consts"
retryablehttp "github.com/hashicorp/go-retryablehttp"
)
@ -124,7 +126,7 @@ func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) {
}
if len(r.ClientToken) != 0 {
req.Header.Set("X-Vault-Token", r.ClientToken)
req.Header.Set(consts.AuthHeaderName, r.ClientToken)
}
if len(r.WrapTTL) != 0 {


@ -50,5 +50,15 @@ func (c *Sys) Capabilities(token, path string) ([]string, error) {
return nil, err
}
if len(res) == 0 {
_, ok := secret.Data["capabilities"]
if ok {
err = mapstructure.Decode(secret.Data["capabilities"], &res)
if err != nil {
return nil, err
}
}
}
return res, nil
}
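For context, a minimal sketch of calling the updated Sys().Capabilities helper from the Go API client. This is not part of the commit, the token and path are placeholders, and the new fallback that decodes secret.Data["capabilities"] when the top-level list is empty stays transparent to callers.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder token and path, for illustration only.
	caps, err := client.Sys().Capabilities("some-client-token", "secret/foo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(caps) // e.g. [read list]
}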


@ -9,6 +9,7 @@ import (
"github.com/SermoDigital/jose/jws"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
"github.com/mitchellh/copystructure"
@ -113,20 +114,26 @@ func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config
errString = in.OuterErr.Error()
}
ns, err := namespace.FromContext(ctx)
if err != nil {
return err
}
reqEntry := &AuditRequestEntry{
Type: "request",
Error: errString,
Auth: AuditAuth{
ClientToken: auth.ClientToken,
Accessor: auth.Accessor,
DisplayName: auth.DisplayName,
Policies: auth.Policies,
TokenPolicies: auth.TokenPolicies,
IdentityPolicies: auth.IdentityPolicies,
Metadata: auth.Metadata,
EntityID: auth.EntityID,
RemainingUses: req.ClientTokenRemainingUses,
ClientToken: auth.ClientToken,
Accessor: auth.Accessor,
DisplayName: auth.DisplayName,
Policies: auth.Policies,
TokenPolicies: auth.TokenPolicies,
IdentityPolicies: auth.IdentityPolicies,
ExternalNamespacePolicies: auth.ExternalNamespacePolicies,
Metadata: auth.Metadata,
EntityID: auth.EntityID,
RemainingUses: req.ClientTokenRemainingUses,
},
Request: AuditRequest{
@ -134,12 +141,16 @@ func (f *AuditFormatter) FormatRequest(ctx context.Context, w io.Writer, config
ClientToken: req.ClientToken,
ClientTokenAccessor: req.ClientTokenAccessor,
Operation: req.Operation,
Path: req.Path,
Data: req.Data,
PolicyOverride: req.PolicyOverride,
RemoteAddr: getRemoteAddr(req),
ReplicationCluster: req.ReplicationCluster,
Headers: req.Headers,
Namespace: AuditNamespace{
ID: ns.ID,
Path: ns.Path,
},
Path: req.Path,
Data: req.Data,
PolicyOverride: req.PolicyOverride,
RemoteAddr: getRemoteAddr(req),
ReplicationCluster: req.ReplicationCluster,
Headers: req.Headers,
},
}
@ -276,17 +287,23 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config
errString = in.OuterErr.Error()
}
ns, err := namespace.FromContext(ctx)
if err != nil {
return err
}
var respAuth *AuditAuth
if resp.Auth != nil {
respAuth = &AuditAuth{
ClientToken: resp.Auth.ClientToken,
Accessor: resp.Auth.Accessor,
DisplayName: resp.Auth.DisplayName,
Policies: resp.Auth.Policies,
TokenPolicies: resp.Auth.TokenPolicies,
IdentityPolicies: resp.Auth.IdentityPolicies,
Metadata: resp.Auth.Metadata,
NumUses: resp.Auth.NumUses,
ClientToken: resp.Auth.ClientToken,
Accessor: resp.Auth.Accessor,
DisplayName: resp.Auth.DisplayName,
Policies: resp.Auth.Policies,
TokenPolicies: resp.Auth.TokenPolicies,
IdentityPolicies: resp.Auth.IdentityPolicies,
ExternalNamespacePolicies: resp.Auth.ExternalNamespacePolicies,
Metadata: resp.Auth.Metadata,
NumUses: resp.Auth.NumUses,
}
}
@ -317,15 +334,16 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config
Type: "response",
Error: errString,
Auth: AuditAuth{
DisplayName: auth.DisplayName,
Policies: auth.Policies,
TokenPolicies: auth.TokenPolicies,
IdentityPolicies: auth.IdentityPolicies,
Metadata: auth.Metadata,
ClientToken: auth.ClientToken,
Accessor: auth.Accessor,
RemainingUses: req.ClientTokenRemainingUses,
EntityID: auth.EntityID,
DisplayName: auth.DisplayName,
Policies: auth.Policies,
TokenPolicies: auth.TokenPolicies,
IdentityPolicies: auth.IdentityPolicies,
ExternalNamespacePolicies: auth.ExternalNamespacePolicies,
Metadata: auth.Metadata,
ClientToken: auth.ClientToken,
Accessor: auth.Accessor,
RemainingUses: req.ClientTokenRemainingUses,
EntityID: auth.EntityID,
},
Request: AuditRequest{
@ -333,12 +351,16 @@ func (f *AuditFormatter) FormatResponse(ctx context.Context, w io.Writer, config
ClientToken: req.ClientToken,
ClientTokenAccessor: req.ClientTokenAccessor,
Operation: req.Operation,
Path: req.Path,
Data: req.Data,
PolicyOverride: req.PolicyOverride,
RemoteAddr: getRemoteAddr(req),
ReplicationCluster: req.ReplicationCluster,
Headers: req.Headers,
Namespace: AuditNamespace{
ID: ns.ID,
Path: ns.Path,
},
Path: req.Path,
Data: req.Data,
PolicyOverride: req.PolicyOverride,
RemoteAddr: getRemoteAddr(req),
ReplicationCluster: req.ReplicationCluster,
Headers: req.Headers,
},
Response: AuditResponse{
@ -386,6 +408,7 @@ type AuditRequest struct {
Operation logical.Operation `json:"operation"`
ClientToken string `json:"client_token"`
ClientTokenAccessor string `json:"client_token_accessor"`
Namespace AuditNamespace `json:"namespace"`
Path string `json:"path"`
Data map[string]interface{} `json:"data"`
PolicyOverride bool `json:"policy_override"`
@ -403,16 +426,17 @@ type AuditResponse struct {
}
type AuditAuth struct {
ClientToken string `json:"client_token"`
Accessor string `json:"accessor"`
DisplayName string `json:"display_name"`
Policies []string `json:"policies"`
TokenPolicies []string `json:"token_policies,omitempty"`
IdentityPolicies []string `json:"identity_policies,omitempty"`
Metadata map[string]string `json:"metadata"`
NumUses int `json:"num_uses,omitempty"`
RemainingUses int `json:"remaining_uses,omitempty"`
EntityID string `json:"entity_id"`
ClientToken string `json:"client_token"`
Accessor string `json:"accessor"`
DisplayName string `json:"display_name"`
Policies []string `json:"policies"`
TokenPolicies []string `json:"token_policies,omitempty"`
IdentityPolicies []string `json:"identity_policies,omitempty"`
ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies,omitempty"`
Metadata map[string]string `json:"metadata"`
NumUses int `json:"num_uses,omitempty"`
RemainingUses int `json:"remaining_uses,omitempty"`
EntityID string `json:"entity_id"`
}
type AuditSecret struct {
@ -428,6 +452,11 @@ type AuditResponseWrapInfo struct {
WrappedAccessor string `json:"wrapped_accessor,omitempty"`
}
type AuditNamespace struct {
ID string `json:"id"`
Path string `json:"path"`
}
// getRemoteAddr safely gets the remote address avoiding a nil pointer
func getRemoteAddr(req *logical.Request) string {
if req != nil && req.Connection != nil {


@ -13,6 +13,7 @@ import (
"fmt"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
)
@ -91,7 +92,7 @@ func TestFormatJSON_formatRequest(t *testing.T) {
Request: tc.Req,
OuterErr: tc.Err,
}
if err := formatter.FormatRequest(context.Background(), &buf, config, in); err != nil {
if err := formatter.FormatRequest(namespace.RootContext(nil), &buf, config, in); err != nil {
t.Fatalf("bad: %s\nerr: %s", name, err)
}
@ -104,6 +105,7 @@ func TestFormatJSON_formatRequest(t *testing.T) {
if err := jsonutil.DecodeJSON([]byte(expectedResultStr), &expectedjson); err != nil {
t.Fatalf("bad json: %s", err)
}
expectedjson.Request.Namespace = AuditNamespace{ID: "root"}
var actualjson = new(AuditRequestEntry)
if err := jsonutil.DecodeJSON([]byte(buf.String())[len(tc.Prefix):], &actualjson); err != nil {


@ -11,6 +11,7 @@ import (
"fmt"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/salt"
"github.com/hashicorp/vault/logical"
)
@ -52,7 +53,7 @@ func TestFormatJSONx_formatRequest(t *testing.T) {
errors.New("this is an error"),
"",
"",
fmt.Sprintf(`<json:object name="auth"><json:string name="accessor">bar</json:string><json:string name="client_token">%s</json:string><json:string name="display_name">testtoken</json:string><json:string name="entity_id"></json:string><json:null name="metadata" /><json:array name="policies"><json:string>root</json:string></json:array></json:object><json:string name="error">this is an error</json:string><json:object name="request"><json:string name="client_token"></json:string><json:string name="client_token_accessor"></json:string><json:null name="data" /><json:object name="headers"><json:array name="foo"><json:string>bar</json:string></json:array></json:object><json:string name="id"></json:string><json:string name="operation">update</json:string><json:string name="path">/foo</json:string><json:boolean name="policy_override">false</json:boolean><json:string name="remote_address">127.0.0.1</json:string><json:number name="wrap_ttl">60</json:number></json:object><json:string name="type">request</json:string>`,
fmt.Sprintf(`<json:object name="auth"><json:string name="accessor">bar</json:string><json:string name="client_token">%s</json:string><json:string name="display_name">testtoken</json:string><json:string name="entity_id"></json:string><json:null name="metadata" /><json:array name="policies"><json:string>root</json:string></json:array></json:object><json:string name="error">this is an error</json:string><json:object name="request"><json:string name="client_token"></json:string><json:string name="client_token_accessor"></json:string><json:null name="data" /><json:object name="headers"><json:array name="foo"><json:string>bar</json:string></json:array></json:object><json:string name="id"></json:string><json:object name="namespace"><json:string name="id">root</json:string><json:string name="path"></json:string></json:object><json:string name="operation">update</json:string><json:string name="path">/foo</json:string><json:boolean name="policy_override">false</json:boolean><json:string name="remote_address">127.0.0.1</json:string><json:number name="wrap_ttl">60</json:number></json:object><json:string name="type">request</json:string>`,
fooSalted),
},
"auth, request with prefix": {
@ -73,7 +74,7 @@ func TestFormatJSONx_formatRequest(t *testing.T) {
errors.New("this is an error"),
"",
"@cee: ",
fmt.Sprintf(`<json:object name="auth"><json:string name="accessor">bar</json:string><json:string name="client_token">%s</json:string><json:string name="display_name">testtoken</json:string><json:string name="entity_id"></json:string><json:null name="metadata" /><json:array name="policies"><json:string>root</json:string></json:array></json:object><json:string name="error">this is an error</json:string><json:object name="request"><json:string name="client_token"></json:string><json:string name="client_token_accessor"></json:string><json:null name="data" /><json:object name="headers"><json:array name="foo"><json:string>bar</json:string></json:array></json:object><json:string name="id"></json:string><json:string name="operation">update</json:string><json:string name="path">/foo</json:string><json:boolean name="policy_override">false</json:boolean><json:string name="remote_address">127.0.0.1</json:string><json:number name="wrap_ttl">60</json:number></json:object><json:string name="type">request</json:string>`,
fmt.Sprintf(`<json:object name="auth"><json:string name="accessor">bar</json:string><json:string name="client_token">%s</json:string><json:string name="display_name">testtoken</json:string><json:string name="entity_id"></json:string><json:null name="metadata" /><json:array name="policies"><json:string>root</json:string></json:array></json:object><json:string name="error">this is an error</json:string><json:object name="request"><json:string name="client_token"></json:string><json:string name="client_token_accessor"></json:string><json:null name="data" /><json:object name="headers"><json:array name="foo"><json:string>bar</json:string></json:array></json:object><json:string name="id"></json:string><json:object name="namespace"><json:string name="id">root</json:string><json:string name="path"></json:string></json:object><json:string name="operation">update</json:string><json:string name="path">/foo</json:string><json:boolean name="policy_override">false</json:boolean><json:string name="remote_address">127.0.0.1</json:string><json:number name="wrap_ttl">60</json:number></json:object><json:string name="type">request</json:string>`,
fooSalted),
},
}
@ -95,7 +96,7 @@ func TestFormatJSONx_formatRequest(t *testing.T) {
Request: tc.Req,
OuterErr: tc.Err,
}
if err := formatter.FormatRequest(context.Background(), &buf, config, in); err != nil {
if err := formatter.FormatRequest(namespace.RootContext(nil), &buf, config, in); err != nil {
t.Fatalf("bad: %s\nerr: %s", name, err)
}


@ -33,10 +33,10 @@ func Factory(ctx context.Context, conf *audit.BackendConfig) (audit.Backend, err
}
// normalize path if configured for stdout
if strings.ToLower(path) == "stdout" {
if strings.EqualFold(path, "stdout") {
path = "stdout"
}
if strings.ToLower(path) == "discard" {
if strings.EqualFold(path, "discard") {
path = "discard"
}
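The change above, and the matching ones later in this commit, replaces the strings.ToLower(a) == strings.ToLower(b) pattern with strings.EqualFold, which compares case-insensitively (using Unicode case folding) without allocating lowercased copies. A self-contained illustration of the two forms, not part of the commit:

package main

import (
	"fmt"
	"strings"
)

func main() {
	path := "STDOUT"

	// Old pattern: allocates two lowercased strings before comparing.
	oldMatch := strings.ToLower(path) == "stdout"

	// New pattern: the same case-insensitive comparison without the allocations.
	newMatch := strings.EqualFold(path, "stdout")

	fmt.Println(oldMatch, newMatch) // true true
}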


@ -8,6 +8,7 @@ import (
"time"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@ -27,6 +28,11 @@ func pathTidySecretID(b *backend) *framework.Path {
// tidySecretID is used to delete entries in the whitelist that are expired.
func (b *backend) tidySecretID(ctx context.Context, req *logical.Request) (*logical.Response, error) {
// If we are a performance standby forward the request to the active node
if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) {
return nil, logical.ErrReadOnly
}
if !atomic.CompareAndSwapUint32(b.tidySecretIDCASGuard, 0, 1) {
resp := &logical.Response{}
resp.AddWarning("Tidy operation already in progress.")


@ -9,6 +9,7 @@ import (
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/vault/helper/awsutil"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@ -115,6 +116,7 @@ func Backend(conf *logical.BackendConfig) (*backend, error) {
pathRoleTag(b),
pathConfigClient(b),
pathConfigCertificate(b),
pathConfigIdentity(b),
pathConfigSts(b),
pathListSts(b),
pathConfigTidyRoletagBlacklist(b),
@ -233,14 +235,14 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag
}
iamClient, err := b.clientIAM(ctx, s, region.ID(), entity.AccountNumber)
if err != nil {
return "", err
return "", awsutil.AppendLogicalError(err)
}
switch entity.Type {
case "user":
userInfo, err := iamClient.GetUser(&iam.GetUserInput{UserName: &entity.FriendlyName})
if err != nil {
return "", err
return "", awsutil.AppendLogicalError(err)
}
if userInfo == nil {
return "", fmt.Errorf("got nil result from GetUser")
@ -249,7 +251,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag
case "role":
roleInfo, err := iamClient.GetRole(&iam.GetRoleInput{RoleName: &entity.FriendlyName})
if err != nil {
return "", err
return "", awsutil.AppendLogicalError(err)
}
if roleInfo == nil {
return "", fmt.Errorf("got nil result from GetRole")
@ -258,7 +260,7 @@ func (b *backend) resolveArnToRealUniqueId(ctx context.Context, s logical.Storag
case "instance-profile":
profileInfo, err := iamClient.GetInstanceProfile(&iam.GetInstanceProfileInput{InstanceProfileName: &entity.FriendlyName})
if err != nil {
return "", err
return "", awsutil.AppendLogicalError(err)
}
if profileInfo == nil {
return "", fmt.Errorf("got nil result from GetInstanceProfile")


@ -20,6 +20,10 @@ import (
logicaltest "github.com/hashicorp/vault/logical/testing"
)
const testVaultHeaderValue = "VaultAcceptanceTesting"
const testValidRoleName = "valid-role"
const testInvalidRoleName = "invalid-role"
func TestBackend_CreateParseVerifyRoleTag(t *testing.T) {
// create a backend
config := logical.TestBackendConfig()
@ -1499,20 +1503,18 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
// Test setup largely done
// At this point, we're going to:
// 1. Configure the client to require our test header value
// 2. Configure two different roles:
// 2. Configure identity to use the ARN for the alias
// 3. Configure two different roles:
// a. One bound to our test user
// b. One bound to a garbage ARN
// 3. Pass in a request that doesn't have the signed header, ensure
// 4. Pass in a request that doesn't have the signed header, ensure
// we're not allowed to login
// 4. Pass in a request that has a validly signed header, but the wrong
// 5. Pass in a request that has a validly signed header, but the wrong
// value, ensure it doesn't allow login
// 5. Pass in a request that has a validly signed request, ensure
// 6. Pass in a request that has a validly signed request, ensure
// it allows us to login to our role
// 6. Pass in a request that has a validly signed request, asking for
// 7. Pass in a request that has a validly signed request, asking for
// the other role, ensure it fails
const testVaultHeaderValue = "VaultAcceptanceTesting"
const testValidRoleName = "valid-role"
const testInvalidRoleName = "invalid-role"
clientConfigData := map[string]interface{}{
"iam_server_id_header_value": testVaultHeaderValue,
@ -1528,6 +1530,23 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
t.Fatal(err)
}
configIdentityData := map[string]interface{}{
"iam_alias": identityAliasIAMFullArn,
}
configIdentityRequest := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/identity",
Storage: storage,
Data: configIdentityData,
}
resp, err := b.HandleRequest(context.Background(), configIdentityRequest)
if err != nil {
t.Fatal(err)
}
if resp != nil && resp.IsError() {
t.Fatalf("received error response when configuring identity: %#v", resp)
}
// configuring the valid role we'll be able to login to
roleData := map[string]interface{}{
"bound_iam_principal_arn": []string{entity.canonicalArn(), "arn:aws:iam::123456789012:role/FakeRoleArn1*"}, // Fake ARN MUST be wildcard terminated because we're resolving unique IDs, and the wildcard termination prevents unique ID resolution
@ -1540,7 +1559,7 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
Storage: storage,
Data: roleData,
}
resp, err := b.HandleRequest(context.Background(), roleRequest)
resp, err = b.HandleRequest(context.Background(), roleRequest)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("bad: failed to create role: resp:%#v\nerr:%v", resp, err)
}
@ -1658,6 +1677,12 @@ func TestBackendAcc_LoginWithCallerIdentity(t *testing.T) {
if resp == nil || resp.Auth == nil || resp.IsError() {
t.Fatalf("bad: expected valid login: resp:%#v", resp)
}
if resp.Auth.Alias == nil {
t.Fatalf("bad: nil auth Alias")
}
if resp.Auth.Alias.Name != *testIdentity.Arn {
t.Fatalf("bad: expected identity alias of %q, got %q instead", *testIdentity.Arn, resp.Auth.Alias.Name)
}
renewReq := generateRenewRequest(storage, resp.Auth)
// dump a fake ARN into the metadata to ensure that we ONLY look


@ -8,6 +8,7 @@ import (
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/errwrap"
@ -19,27 +20,9 @@ type CLIHandler struct{}
// Generates the necessary data to send to the Vault server for generating a token
// This is useful for other API clients to use
func GenerateLoginData(accessKey, secretKey, sessionToken, headerValue string) (map[string]interface{}, error) {
func GenerateLoginData(creds *credentials.Credentials, headerValue string) (map[string]interface{}, error) {
loginData := make(map[string]interface{})
credConfig := &awsutil.CredentialsConfig{
AccessKey: accessKey,
SecretKey: secretKey,
SessionToken: sessionToken,
}
creds, err := credConfig.GenerateCredentialChain()
if err != nil {
return nil, err
}
if creds == nil {
return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
}
_, err = creds.Get()
if err != nil {
return nil, errwrap.Wrapf("failed to retrieve credentials from credential chain: {{err}}", err)
}
// Use the credentials we've found to construct an STS session
stsSession, err := session.NewSessionWithOptions(session.Options{
Config: aws.Config{Credentials: creds},
@ -91,7 +74,12 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
headerValue = ""
}
loginData, err := GenerateLoginData(m["aws_access_key_id"], m["aws_secret_access_key"], m["aws_security_token"], headerValue)
creds, err := RetrieveCreds(m["aws_access_key_id"], m["aws_secret_access_key"], m["aws_security_token"])
if err != nil {
return nil, err
}
loginData, err := GenerateLoginData(creds, headerValue)
if err != nil {
return nil, err
}
@ -112,6 +100,27 @@ func (h *CLIHandler) Auth(c *api.Client, m map[string]string) (*api.Secret, erro
return secret, nil
}
func RetrieveCreds(accessKey, secretKey, sessionToken string) (*credentials.Credentials, error) {
credConfig := &awsutil.CredentialsConfig{
AccessKey: accessKey,
SecretKey: secretKey,
SessionToken: sessionToken,
}
creds, err := credConfig.GenerateCredentialChain()
if err != nil {
return nil, err
}
if creds == nil {
return nil, fmt.Errorf("could not compile valid credential providers from static config, environment, shared, or instance metadata")
}
_, err = creds.Get()
if err != nil {
return nil, errwrap.Wrapf("failed to retrieve credentials from credential chain: {{err}}", err)
}
return creds, nil
}
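A hedged usage sketch of the refactored helpers above: credential resolution now lives in RetrieveCreds, and GenerateLoginData takes the resulting *credentials.Credentials instead of raw keys. The import path assumes the backend's usual location in this repository, and the header value is a placeholder.

package main

import (
	"fmt"
	"log"

	awsauth "github.com/hashicorp/vault/builtin/credential/aws"
)

func main() {
	// Empty key, secret, and session token make RetrieveCreds fall back to the
	// default AWS credential chain (environment, shared config, instance metadata).
	creds, err := awsauth.RetrieveCreds("", "", "")
	if err != nil {
		log.Fatal(err)
	}

	// "vault.example.com" stands in for a configured
	// iam_server_id_header_value; pass "" when none is required.
	loginData, err := awsauth.GenerateLoginData(creds, "vault.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("prepared %d parameters for auth/aws/login\n", len(loginData))
}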
func (h *CLIHandler) Help() string {
help := `
Usage: vault login -method=aws [CONFIG K=V...]


@ -0,0 +1,98 @@
package awsauth
import (
"context"
"fmt"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathConfigIdentity(b *backend) *framework.Path {
return &framework.Path{
Pattern: "config/identity$",
Fields: map[string]*framework.FieldSchema{
"iam_alias": &framework.FieldSchema{
Type: framework.TypeString,
Default: identityAliasIAMUniqueID,
Description: fmt.Sprintf("Configure how the AWS auth method generates entity aliases when using IAM auth. Valid values are %q and %q", identityAliasIAMUniqueID, identityAliasIAMFullArn),
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.ReadOperation: pathConfigIdentityRead,
logical.UpdateOperation: pathConfigIdentityUpdate,
},
HelpSynopsis: pathConfigIdentityHelpSyn,
HelpDescription: pathConfigIdentityHelpDesc,
}
}
func pathConfigIdentityRead(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
entry, err := req.Storage.Get(ctx, "config/identity")
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var result identityConfig
if err := entry.DecodeJSON(&result); err != nil {
return nil, err
}
return &logical.Response{
Data: map[string]interface{}{
"iam_alias": result.IAMAlias,
},
}, nil
}
func pathConfigIdentityUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
var configEntry identityConfig
iamAliasRaw, ok := data.GetOk("iam_alias")
if ok {
iamAlias := iamAliasRaw.(string)
allowedIAMAliasValues := []string{identityAliasIAMUniqueID, identityAliasIAMFullArn}
if !strutil.StrListContains(allowedIAMAliasValues, iamAlias) {
return logical.ErrorResponse(fmt.Sprintf("iam_alias of %q not in set of allowed values: %v", iamAlias, allowedIAMAliasValues)), nil
}
configEntry.IAMAlias = iamAlias
entry, err := logical.StorageEntryJSON("config/identity", configEntry)
if err != nil {
return nil, err
}
if err := req.Storage.Put(ctx, entry); err != nil {
return nil, err
}
}
return nil, nil
}
type identityConfig struct {
IAMAlias string `json:"iam_alias"`
}
const identityAliasIAMUniqueID = "unique_id"
const identityAliasIAMFullArn = "full_arn"
const pathConfigIdentityHelpSyn = `
Configure the way the AWS auth method interacts with the identity store
`
const pathConfigIdentityHelpDesc = `
The AWS auth backend defaults to aliasing an IAM principal's unique ID to the
identity store. This path allows users to change how Vault configures the
mapping to Identity aliases for more flexibility.
You can set the iam_alias parameter to one of the following values:
* 'unique_id': This retains Vault's default behavior
* 'full_arn': This maps the full authenticated ARN to the identity alias, e.g.,
"arn:aws:sts::<account_id>:assumed-role/<role_name>/<role_session_name>
This is useful where you have an identity provder that sets role_session_name
to a known value of a person, such as a username or email address, and allows
you to map those roles back to entries in your identity store.
`
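A hedged sketch of switching the alias mode described above through the Go API client; the default auth/aws mount path and the environment-based client configuration are assumptions, not part of the commit.

package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set and the AWS auth method is
	// mounted at the default path "aws".
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Alias IAM logins by the full caller ARN instead of the IAM unique ID.
	_, err = client.Logical().Write("auth/aws/config/identity", map[string]interface{}{
		"iam_alias": "full_arn",
	})
	if err != nil {
		log.Fatal(err)
	}
}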


@ -0,0 +1,90 @@
package awsauth
import (
"context"
"testing"
"github.com/hashicorp/vault/logical"
)
func TestBackend_pathConfigIdentity(t *testing.T) {
config := logical.TestBackendConfig()
storage := &logical.InmemStorage{}
config.StorageView = storage
b, err := Backend(config)
if err != nil {
t.Fatal(err)
}
err = b.Setup(context.Background(), config)
if err != nil {
t.Fatal(err)
}
resp, err := b.HandleRequest(context.Background(), &logical.Request{
Operation: logical.ReadOperation,
Path: "config/identity",
Storage: storage,
})
if err != nil {
t.Fatal(err)
}
if resp != nil {
if resp.IsError() {
t.Fatalf("failed to read identity config entry")
} else if resp.Data["iam_alias"] != nil && resp.Data["iam_alias"] != "" {
t.Fatalf("returned alias is non-empty: %q", resp.Data["alias"])
}
}
data := map[string]interface{}{
"iam_alias": "invalid",
}
resp, err = b.HandleRequest(context.Background(), &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/identity",
Data: data,
Storage: storage,
})
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatalf("nil response from invalid config/identity request")
}
if !resp.IsError() {
t.Fatalf("received non-error response from invalid config/identity request: %#v", resp)
}
data["iam_alias"] = identityAliasIAMFullArn
resp, err = b.HandleRequest(context.Background(), &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/identity",
Data: data,
Storage: storage,
})
if err != nil {
t.Fatal(err)
}
if resp != nil && resp.IsError() {
t.Fatalf("received error response from valid config/identity request: %#v", resp)
}
resp, err = b.HandleRequest(context.Background(), &logical.Request{
Operation: logical.ReadOperation,
Path: "config/identity",
Storage: storage,
})
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatalf("nil response received from config/identity when data expected")
} else if resp.IsError() {
t.Fatalf("error response received from reading config/identity: %#v", resp)
} else if resp.Data["iam_alias"] != identityAliasIAMFullArn {
t.Fatalf("bad: expected response with iam_alias value of %q; got %#v", identityAliasIAMFullArn, resp)
}
}


@ -5,14 +5,12 @@ import (
"crypto/subtle"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"regexp"
"strings"
"time"
@ -24,6 +22,7 @@ import (
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/awsutil"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/strutil"
"github.com/hashicorp/vault/logical"
@ -89,10 +88,11 @@ when using iam auth_type.`,
This must match the request body included in the signature.`,
},
"iam_request_headers": {
Type: framework.TypeString,
Description: `Base64-encoded JSON representation of the request headers when auth_type is
iam. This must at a minimum include the headers over
which AWS has included a signature.`,
Type: framework.TypeHeader,
Description: `Key/value pairs of headers for use in the
sts:GetCallerIdentity HTTP request headers when auth_type is iam. Can be either
a Base64-encoded, JSON-serialized string, or a JSON object of key/value pairs.
This must at a minimum include the headers over which AWS has included a signature.`,
},
"identity": {
Type: framework.TypeString,
@ -132,7 +132,7 @@ func (b *backend) instanceIamRoleARN(iamClient *iam.IAM, instanceProfileName str
InstanceProfileName: aws.String(instanceProfileName),
})
if err != nil {
return "", err
return "", awsutil.AppendLogicalError(err)
}
if profile == nil {
return "", fmt.Errorf("nil output while getting instance profile details")
@ -168,7 +168,8 @@ func (b *backend) validateInstance(ctx context.Context, s logical.Storage, insta
},
})
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("error fetching description for instance ID %q: {{err}}", instanceID), err)
errW := errwrap.Wrapf(fmt.Sprintf("error fetching description for instance ID %q: {{err}}", instanceID), err)
return nil, errwrap.Wrap(errW, awsutil.CheckAWSError(err))
}
if status == nil {
return nil, fmt.Errorf("nil output from describe instances")
@ -202,7 +203,7 @@ func validateMetadata(clientNonce, pendingTime string, storedIdentity *whitelist
}
// If reauthentication is disabled or if the nonce supplied matches a
// predefied nonce which indicates reauthentication to be disabled,
// predefined nonce which indicates reauthentication to be disabled,
// authentication will not succeed.
if storedIdentity.DisallowReauthentication ||
subtle.ConstantTimeCompare([]byte(reauthenticationDisabledNonce), []byte(clientNonce)) == 1 {
@ -1113,6 +1114,19 @@ func (b *backend) pathLoginRenewEc2(ctx context.Context, req *logical.Request, d
}
func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
identityConfigEntryRaw, err := req.Storage.Get(ctx, "config/identity")
if err != nil {
return nil, errwrap.Wrapf("failed to retrieve identity config: {{err}}", err)
}
var identityConfigEntry identityConfig
if identityConfigEntryRaw == nil {
identityConfigEntry.IAMAlias = identityAliasIAMUniqueID
} else {
if err = identityConfigEntryRaw.DecodeJSON(&identityConfigEntry); err != nil {
return nil, errwrap.Wrapf("failed to parse stored config/identity: {{err}}", err)
}
}
method := data.Get("iam_http_request_method").(string)
if method == "" {
return logical.ErrorResponse("missing iam_http_request_method"), nil
@ -1149,17 +1163,10 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request,
}
body := string(bodyRaw)
headersB64 := data.Get("iam_request_headers").(string)
if headersB64 == "" {
headers := data.Get("iam_request_headers").(http.Header)
if len(headers) == 0 {
return logical.ErrorResponse("missing iam_request_headers"), nil
}
headers, err := parseIamRequestHeaders(headersB64)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf("Error parsing iam_request_headers: %v", err)), nil
}
if headers == nil {
return logical.ErrorResponse("nil response when parsing iam_request_headers"), nil
}
config, err := b.lockedClientConfigEntry(ctx, req.Storage)
if err != nil {
@ -1187,13 +1194,20 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request,
// This could either be a "userID:SessionID" (in the case of an assumed role) or just a "userID"
// (in the case of an IAM user).
callerUniqueId := strings.Split(callerID.UserId, ":")[0]
identityAlias := ""
switch identityConfigEntry.IAMAlias {
case identityAliasIAMUniqueID:
identityAlias = callerUniqueId
case identityAliasIAMFullArn:
identityAlias = callerID.Arn
}
// If we're just looking up for MFA, return the Alias info
if req.Operation == logical.AliasLookaheadOperation {
return &logical.Response{
Auth: &logical.Auth{
Alias: &logical.Alias{
Name: callerUniqueId,
Name: identityAlias,
},
},
}, nil
@ -1316,7 +1330,7 @@ func (b *backend) pathLoginUpdateIam(ctx context.Context, req *logical.Request,
MaxTTL: roleEntry.MaxTTL,
},
Alias: &logical.Alias{
Name: callerUniqueId,
Name: identityAlias,
},
},
}
@ -1394,7 +1408,7 @@ func parseIamArn(iamArn string) (*iamEntity, error) {
func validateVaultHeaderValue(headers http.Header, requestUrl *url.URL, requiredHeaderValue string) error {
providedValue := ""
for k, v := range headers {
if strings.ToLower(iamServerIdHeader) == strings.ToLower(k) {
if strings.EqualFold(iamServerIdHeader, k) {
providedValue = strings.Join(v, ",")
break
}
@ -1491,41 +1505,6 @@ func parseGetCallerIdentityResponse(response string) (GetCallerIdentityResponse,
return result, err
}
func parseIamRequestHeaders(headersB64 string) (http.Header, error) {
headersJson, err := base64.StdEncoding.DecodeString(headersB64)
if err != nil {
return nil, fmt.Errorf("failed to base64 decode iam_request_headers")
}
var headersDecoded map[string]interface{}
err = jsonutil.DecodeJSON(headersJson, &headersDecoded)
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("failed to JSON decode iam_request_headers %q: {{err}}", headersJson), err)
}
headers := make(http.Header)
for k, v := range headersDecoded {
switch typedValue := v.(type) {
case string:
headers.Add(k, typedValue)
case json.Number:
headers.Add(k, typedValue.String())
case []interface{}:
for _, individualVal := range typedValue {
switch possibleStrVal := individualVal.(type) {
case string:
headers.Add(k, possibleStrVal)
case json.Number:
headers.Add(k, possibleStrVal.String())
default:
return nil, fmt.Errorf("header %q contains value %q that has type %s, not string", k, individualVal, reflect.TypeOf(individualVal))
}
}
default:
return nil, fmt.Errorf("header %q value %q has type %s, not string or []interface", k, typedValue, reflect.TypeOf(v))
}
}
return headers, nil
}
func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, body string, headers http.Header) (*GetCallerIdentityResult, error) {
// NOTE: We need to ensure we're calling STS, instead of acting as an unintended network proxy
// The protection against this is that this method will only call the endpoint specified in the
@ -1536,6 +1515,7 @@ func submitCallerIdentityRequest(method, endpoint string, parsedUrl *url.URL, bo
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
response, err := client.Do(request)
if err != nil {
return nil, errwrap.Wrapf("error making request: {{err}}", err)


@ -1,13 +1,18 @@
package awsauth
import (
"encoding/base64"
"encoding/json"
"context"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/vault/logical"
)
func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) {
@ -39,16 +44,16 @@ func TestBackend_pathLogin_getCallerIdentityResponse(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if parsed_arn := parsedUserResponse.GetCallerIdentityResult[0].Arn; parsed_arn != expectedUserArn {
t.Errorf("expected to parse arn %#v, got %#v", expectedUserArn, parsed_arn)
if parsedArn := parsedUserResponse.GetCallerIdentityResult[0].Arn; parsedArn != expectedUserArn {
t.Errorf("expected to parse arn %#v, got %#v", expectedUserArn, parsedArn)
}
parsedRoleResponse, err := parseGetCallerIdentityResponse(responseFromAssumedRole)
if err != nil {
t.Fatal(err)
}
if parsed_arn := parsedRoleResponse.GetCallerIdentityResult[0].Arn; parsed_arn != expectedRoleArn {
t.Errorf("expected to parn arn %#v; got %#v", expectedRoleArn, parsed_arn)
if parsedArn := parsedRoleResponse.GetCallerIdentityResult[0].Arn; parsedArn != expectedRoleArn {
t.Errorf("expected to parn arn %#v; got %#v", expectedRoleArn, parsedArn)
}
_, err = parseGetCallerIdentityResponse("SomeRandomGibberish")
@ -113,7 +118,7 @@ func TestBackend_pathLogin_parseIamArn(t *testing.T) {
func TestBackend_validateVaultHeaderValue(t *testing.T) {
const canaryHeaderValue = "Vault-Server"
requestUrl, err := url.Parse("https://sts.amazonaws.com/")
requestURL, err := url.Parse("https://sts.amazonaws.com/")
if err != nil {
t.Fatalf("error parsing test URL: %v", err)
}
@ -143,68 +148,259 @@ func TestBackend_validateVaultHeaderValue(t *testing.T) {
"Authorization": []string{"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request", "SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"},
}
err = validateVaultHeaderValue(postHeadersMissing, requestUrl, canaryHeaderValue)
err = validateVaultHeaderValue(postHeadersMissing, requestURL, canaryHeaderValue)
if err == nil {
t.Error("validated POST request with missing Vault header")
}
err = validateVaultHeaderValue(postHeadersInvalid, requestUrl, canaryHeaderValue)
err = validateVaultHeaderValue(postHeadersInvalid, requestURL, canaryHeaderValue)
if err == nil {
t.Error("validated POST request with invalid Vault header value")
}
err = validateVaultHeaderValue(postHeadersUnsigned, requestUrl, canaryHeaderValue)
err = validateVaultHeaderValue(postHeadersUnsigned, requestURL, canaryHeaderValue)
if err == nil {
t.Error("validated POST request with unsigned Vault header")
}
err = validateVaultHeaderValue(postHeadersValid, requestUrl, canaryHeaderValue)
err = validateVaultHeaderValue(postHeadersValid, requestURL, canaryHeaderValue)
if err != nil {
t.Errorf("did NOT validate valid POST request: %v", err)
}
err = validateVaultHeaderValue(postHeadersSplit, requestUrl, canaryHeaderValue)
err = validateVaultHeaderValue(postHeadersSplit, requestURL, canaryHeaderValue)
if err != nil {
t.Errorf("did NOT validate valid POST request with split Authorization header: %v", err)
}
}
func TestBackend_pathLogin_parseIamRequestHeaders(t *testing.T) {
testIamParser := func(headers interface{}, expectedHeaders http.Header) error {
headersJson, err := json.Marshal(headers)
if err != nil {
return fmt.Errorf("unable to JSON encode headers: %v", err)
}
headersB64 := base64.StdEncoding.EncodeToString(headersJson)
parsedHeaders, err := parseIamRequestHeaders(headersB64)
if err != nil {
return fmt.Errorf("error parsing encoded headers: %v", err)
}
if parsedHeaders == nil {
return fmt.Errorf("nil result from parsing headers")
}
if !reflect.DeepEqual(parsedHeaders, expectedHeaders) {
return fmt.Errorf("parsed headers not equal to input headers")
}
return nil
}
headersGoStyle := http.Header{
"Header1": []string{"Value1"},
"Header2": []string{"Value2"},
}
headersMixedType := map[string]interface{}{
"Header1": "Value1",
"Header2": []string{"Value2"},
}
err := testIamParser(headersGoStyle, headersGoStyle)
// TestBackend_pathLogin_IAMHeaders tests login with iam_request_headers,
// supporting both base64 encoded string and JSON headers
func TestBackend_pathLogin_IAMHeaders(t *testing.T) {
storage := &logical.InmemStorage{}
config := logical.TestBackendConfig()
config.StorageView = storage
b, err := Backend(config)
if err != nil {
t.Errorf("error parsing go-style headers: %v", err)
t.Fatal(err)
}
err = testIamParser(headersMixedType, headersGoStyle)
err = b.Setup(context.Background(), config)
if err != nil {
t.Errorf("error parsing mixed-style headers: %v", err)
t.Fatal(err)
}
// sets up a test server to stand in for STS service
ts := setupIAMTestServer()
defer ts.Close()
clientConfigData := map[string]interface{}{
"iam_server_id_header_value": testVaultHeaderValue,
"sts_endpoint": ts.URL,
}
clientRequest := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/client",
Storage: storage,
Data: clientConfigData,
}
_, err = b.HandleRequest(context.Background(), clientRequest)
if err != nil {
t.Fatal(err)
}
// create a role entry
roleEntry := &awsRoleEntry{
Version: currentRoleStorageVersion,
AuthType: iamAuthType,
}
if err := b.nonLockedSetAWSRole(context.Background(), storage, testValidRoleName, roleEntry); err != nil {
t.Fatalf("failed to set entry: %s", err)
}
// create a baseline loginData map structure, including iam_request_headers
// already base64-encoded. This is the "Default" loginData used for all tests.
// Each subtest can override the map's iam_request_headers entry.
loginData, err := defaultLoginData()
if err != nil {
t.Fatal(err)
}
// expected errors for certain tests
missingHeaderErr := errors.New("error validating X-Vault-AWS-IAM-Server-ID header: missing header \"X-Vault-AWS-IAM-Server-ID\"")
parsingErr := errors.New("error making upstream request: error parsing STS response")
testCases := []struct {
Name string
Header interface{}
ExpectErr error
}{
{
Name: "Default",
},
{
Name: "Map-complete",
Header: map[string]interface{}{
"Content-Length": "43",
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
"User-Agent": "aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
"X-Amz-Date": "20180910T203328Z",
"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
"Authorization": "AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4",
},
},
{
Name: "Map-incomplete",
Header: map[string]interface{}{
"Content-Length": "43",
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
"User-Agent": "aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
"X-Amz-Date": "20180910T203328Z",
"Authorization": "AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4",
},
ExpectErr: missingHeaderErr,
},
{
Name: "JSON-complete",
Header: `{
"Content-Length":"43",
"Content-Type":"application/x-www-form-urlencoded; charset=utf-8",
"User-Agent":"aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
"X-Amz-Date":"20180910T203328Z",
"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
"Authorization":"AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=cdef5819b2e97f1ff0f3e898fd2621aa03af00a4ec3e019122c20e5482534bf4"
}`,
},
{
Name: "JSON-incomplete",
Header: `{
"Content-Length":"43",
"Content-Type":"application/x-www-form-urlencoded; charset=utf-8",
"User-Agent":"aws-sdk-go/1.14.24 (go1.11; darwin; amd64)",
"X-Amz-Date":"20180910T203328Z",
"X-Vault-Aws-Iam-Server-Id": "VaultAcceptanceTesting",
"Authorization":"AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180910/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id"
}`,
ExpectErr: parsingErr,
},
{
Name: "Base64-complete",
Header: base64Complete(),
},
{
Name: "Base64-incomplete-missing-header",
Header: base64MissingVaultID(),
ExpectErr: missingHeaderErr,
},
{
Name: "Base64-incomplete-missing-auth-sig",
Header: base64MissingAuthField(),
ExpectErr: parsingErr,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
if tc.Header != nil {
loginData["iam_request_headers"] = tc.Header
}
loginRequest := &logical.Request{
Operation: logical.UpdateOperation,
Path: "login",
Storage: storage,
Data: loginData,
}
resp, err := b.HandleRequest(context.Background(), loginRequest)
if err != nil || resp == nil || resp.IsError() {
if tc.ExpectErr != nil && tc.ExpectErr.Error() == resp.Error().Error() {
return
}
t.Errorf("un expected failed login:\nresp: %#v\n\nerr: %v", resp, err)
}
})
}
}
func defaultLoginData() (map[string]interface{}, error) {
awsSession, err := session.NewSession()
if err != nil {
return nil, fmt.Errorf("failed to create session: %s", err)
}
stsService := sts.New(awsSession)
stsInputParams := &sts.GetCallerIdentityInput{}
stsRequestValid, _ := stsService.GetCallerIdentityRequest(stsInputParams)
stsRequestValid.HTTPRequest.Header.Add(iamServerIdHeader, testVaultHeaderValue)
stsRequestValid.HTTPRequest.Header.Add("Authorization", fmt.Sprintf("%s,%s,%s",
"AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/iam/aws4_request",
"SignedHeaders=content-type;host;x-amz-date;x-vault-aws-iam-server-id",
"Signature=5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"))
stsRequestValid.Sign()
return buildCallerIdentityLoginData(stsRequestValid.HTTPRequest, testValidRoleName)
}
// setupIAMTestServer configures an httptest server to intercept and respond to the
// IAM login path's invocation of submitCallerIdentityRequest (which does not
// use the AWS SDK), which receives the mocked response responseFromUser
// containing user information matching the role.
func setupIAMTestServer() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
responseString := `<GetCallerIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<GetCallerIdentityResult>
<Arn>arn:aws:iam::123456789012:user/valid-role</Arn>
<UserId>ASOMETHINGSOMETHINGSOMETHING</UserId>
<Account>123456789012</Account>
</GetCallerIdentityResult>
<ResponseMetadata>
<RequestId>7f4fc40c-853a-11e6-8848-8d035d01eb87</RequestId>
</ResponseMetadata>
</GetCallerIdentityResponse>`
auth := r.Header.Get("Authorization")
parts := strings.Split(auth, ",")
for i, s := range parts {
s = strings.TrimSpace(s)
key := strings.Split(s, "=")
parts[i] = key[0]
}
// verify the "Authorization" header contains all the expected parts
expectedAuthParts := []string{"AWS4-HMAC-SHA256 Credential", "SignedHeaders", "Signature"}
var matchingCount int
for _, v := range parts {
for _, z := range expectedAuthParts {
if z == v {
matchingCount++
}
}
}
if matchingCount != len(expectedAuthParts) {
responseString = "missing auth parts"
}
fmt.Fprintln(w, responseString)
}))
}
// base64Complete returns a base64 encoded auth header as expected
func base64Complete() string {
min := `{"Authorization":["AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180907/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=97086b0531854844099fc52733fa2c88a2bfb54b2689600c6e249358a8353b52"],"Content-Length":["43"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.14.24 (go1.11; darwin; amd64)"],"X-Amz-Date":["20180907T222145Z"],"X-Vault-Aws-Iam-Server-Id":["VaultAcceptanceTesting"]}`
return min
}
// base64MissingVaultID returns a base64 encoded auth header, that omits the
// Vault ID header
func base64MissingVaultID() string {
min := `{"Authorization":["AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180907/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id, Signature=97086b0531854844099fc52733fa2c88a2bfb54b2689600c6e249358a8353b52"],"Content-Length":["43"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.14.24 (go1.11; darwin; amd64)"],"X-Amz-Date":["20180907T222145Z"]}`
return min
}
// base64MissingAuthField returns a base64 encoded Auth header, that omits the
// "Signature" part
func base64MissingAuthField() string {
min := `{"Authorization":["AWS4-HMAC-SHA256 Credential=AKIAJPQ466AIIQW4LPSQ/20180907/us-east-1/sts/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-date;x-vault-aws-iam-server-id"],"Content-Length":["43"],"Content-Type":["application/x-www-form-urlencoded; charset=utf-8"],"User-Agent":["aws-sdk-go/1.14.24 (go1.11; darwin; amd64)"],"X-Amz-Date":["20180907T222145Z"],"X-Vault-Aws-Iam-Server-Id":["VaultAcceptanceTesting"]}`
return min
}


@ -8,6 +8,7 @@ import (
"time"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@ -35,6 +36,11 @@ expiration, before it is removed from the backend storage.`,
// tidyWhitelistIdentity is used to delete entries in the whitelist that are expired.
func (b *backend) tidyWhitelistIdentity(ctx context.Context, req *logical.Request, safetyBuffer int) (*logical.Response, error) {
// If we are a performance standby forward the request to the active node
if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) {
return nil, logical.ErrReadOnly
}
if !atomic.CompareAndSwapUint32(b.tidyWhitelistCASGuard, 0, 1) {
resp := &logical.Response{}
resp.AddWarning("Tidy operation already in progress.")


@ -8,6 +8,7 @@ import (
"time"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@ -35,6 +36,11 @@ expiration, before it is removed from the backend storage.`,
// tidyBlacklistRoleTag is used to clean-up the entries in the role tag blacklist.
func (b *backend) tidyBlacklistRoleTag(ctx context.Context, req *logical.Request, safetyBuffer int) (*logical.Response, error) {
// If we are a performance standby forward the request to the active node
if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) {
return nil, logical.ErrReadOnly
}
if !atomic.CompareAndSwapUint32(b.tidyBlacklistCASGuard, 0, 1) {
resp := &logical.Response{}
resp.AddWarning("Tidy operation already in progress.")


@ -196,7 +196,7 @@ func (b *backend) verifyCredentials(ctx context.Context, req *logical.Request, t
}
for _, o := range allOrgs {
if strings.ToLower(*o.Login) == strings.ToLower(config.Organization) {
if strings.EqualFold(*o.Login, config.Organization) {
org = o
break
}


@ -62,7 +62,7 @@ func (b *backend) Group(ctx context.Context, s logical.Storage, n string) (*Grou
return nil, "", err
}
for _, groupName := range entries {
if strings.ToLower(groupName) == strings.ToLower(n) {
if strings.EqualFold(groupName, n) {
entry, err = s.Get(ctx, "group/"+groupName)
if err != nil {
return nil, "", err


@ -6,6 +6,8 @@ import (
"sync"
"time"
"github.com/aws/aws-sdk-go/service/iam/iamiface"
"github.com/aws/aws-sdk-go/service/sts/stsiface"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@ -33,7 +35,8 @@ func Backend() *backend {
},
Paths: []*framework.Path{
pathConfigRoot(),
pathConfigRoot(&b),
pathConfigRotateRoot(&b),
pathConfigLease(&b),
pathRoles(&b),
pathListRoles(&b),
@ -57,6 +60,14 @@ type backend struct {
// Mutex to protect access to reading and writing policies
roleMutex sync.RWMutex
// Mutex to protect access to iam/sts clients and client configs
clientMutex sync.RWMutex
// iamClient and stsClient hold configured iam and sts clients for reuse, and
// to enable mocking with AWS iface for tests
iamClient iamiface.IAMAPI
stsClient stsiface.STSAPI
}
const backendHelp = `
@ -68,3 +79,59 @@ After mounting this backend, credentials to generate IAM keys must
be configured with the "root" path and policies must be written using
the "roles/" endpoints before any access keys can be generated.
`
// clientIAM returns the configured IAM client. If nil, it constructs a new one
// and returns it, setting it as the internal variable
func (b *backend) clientIAM(ctx context.Context, s logical.Storage) (iamiface.IAMAPI, error) {
b.clientMutex.RLock()
if b.iamClient != nil {
b.clientMutex.RUnlock()
return b.iamClient, nil
}
// Upgrade the lock for writing
b.clientMutex.RUnlock()
b.clientMutex.Lock()
defer b.clientMutex.Unlock()
// check client again, in the event that a client was being created while we
// waited for Lock()
if b.iamClient != nil {
return b.iamClient, nil
}
iamClient, err := nonCachedClientIAM(ctx, s)
if err != nil {
return nil, err
}
b.iamClient = iamClient
return b.iamClient, nil
}
func (b *backend) clientSTS(ctx context.Context, s logical.Storage) (stsiface.STSAPI, error) {
b.clientMutex.RLock()
if b.stsClient != nil {
b.clientMutex.RUnlock()
return b.stsClient, nil
}
// Upgrade the lock for writing
b.clientMutex.RUnlock()
b.clientMutex.Lock()
defer b.clientMutex.Unlock()
// check client again, in the event that a client was being created while we
// waited for Lock()
if b.stsClient != nil {
return b.stsClient, nil
}
stsClient, err := nonCachedClientSTS(ctx, s)
if err != nil {
return nil, err
}
b.stsClient = stsClient
return b.stsClient, nil
}
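Both accessors above follow the same shape: take the read lock for the fast path, release it, take the write lock, and re-check before building, because another goroutine may have constructed the client while this one waited. A stripped-down sketch of that caching pattern with illustrative names:

package main

import (
    "fmt"
    "sync"
)

// cache mimics the backend's client caching: an RWMutex guards a lazily
// built value that every later call reuses.
type cache struct {
    mu     sync.RWMutex
    client *string // stand-in for an expensive-to-build SDK client
}

func (c *cache) get(build func() *string) *string {
    c.mu.RLock()
    if c.client != nil { // fast path: already built
        client := c.client
        c.mu.RUnlock()
        return client
    }
    c.mu.RUnlock()

    // Upgrade to the write lock, then re-check: another goroutine may have
    // built the client while we waited for Lock().
    c.mu.Lock()
    defer c.mu.Unlock()
    if c.client != nil {
        return c.client
    }
    c.client = build()
    return c.client
}

func main() {
    c := &cache{}
    fmt.Println(*c.get(func() *string { s := "built once"; return &s }))
    fmt.Println(*c.get(func() *string { s := "never built again"; return &s }))
}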

View file

@ -4,8 +4,10 @@ import (
"context"
"fmt"
"log"
"net/http"
"os"
"reflect"
"sync"
"testing"
"time"
@ -16,19 +18,32 @@ import (
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/iam/iamiface"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/logical"
logicaltest "github.com/hashicorp/vault/logical/testing"
"github.com/mitchellh/mapstructure"
)
var initSetup sync.Once
type mockIAMClient struct {
iamiface.IAMAPI
}
func (m *mockIAMClient) CreateUser(input *iam.CreateUserInput) (*iam.CreateUserOutput, error) {
return nil, awserr.New("Throttling", "", nil)
}
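Embedding iamiface.IAMAPI means the mock satisfies the whole SDK interface while overriding only CreateUser; any other method call would hit the nil embedded interface and panic, which is acceptable for a test that exercises a single call. A minimal sketch of the same embedding trick, using an invented interface for brevity:

package main

import (
    "errors"
    "fmt"
)

// wideAPI stands in for a large SDK interface such as iamiface.IAMAPI.
type wideAPI interface {
    CreateUser(name string) (string, error)
    DeleteUser(name string) error
    // ...the real SDK interface has many more methods...
}

// mockAPI embeds the interface, so it satisfies wideAPI without implementing
// every method, and overrides only the call the test exercises.
type mockAPI struct {
    wideAPI
}

func (m *mockAPI) CreateUser(name string) (string, error) {
    return "", errors.New("Throttling")
}

func main() {
    var svc wideAPI = &mockAPI{}
    _, err := svc.CreateUser("example")
    fmt.Println(err) // Throttling
    // svc.DeleteUser("example") would panic: the embedded interface is nil.
}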
func getBackend(t *testing.T) logical.Backend {
be, _ := Factory(context.Background(), logical.TestBackendConfig())
return be
}
func TestBackend_basic(t *testing.T) {
t.Parallel()
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
PreCheck: func() { testAccPreCheck(t) },
@ -42,13 +57,21 @@ func TestBackend_basic(t *testing.T) {
}
func TestBackend_basicSTS(t *testing.T) {
t.Parallel()
awsAccountID, err := getAccountID()
if err != nil {
t.Logf("Unable to retrive user via sts:GetCallerIdentity: %#v", err)
t.Skip("Could not determine AWS account ID from sts:GetCallerIdentity for acceptance tests, skipping")
}
roleName := generateUniqueName(t.Name())
userName := generateUniqueName(t.Name())
accessKey := &awsAccessKey{}
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
PreCheck: func() {
testAccPreCheck(t)
createUser(t, accessKey)
createRole(t)
createUser(t, userName, accessKey)
createRole(t, roleName, awsAccountID)
// Sleep for a while because AWS is eventually consistent
// Both the createUser and createRole depend on this
log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
@ -57,20 +80,22 @@ func TestBackend_basicSTS(t *testing.T) {
Backend: getBackend(t),
Steps: []logicaltest.TestStep{
testAccStepConfigWithCreds(t, accessKey),
testAccStepRotateRoot(accessKey),
testAccStepWritePolicy(t, "test", testDynamoPolicy),
testAccStepRead(t, "sts", "test", []credentialTestFunc{listDynamoTablesTest}),
testAccStepWriteArnPolicyRef(t, "test", ec2PolicyArn),
testAccStepReadSTSWithArnPolicy(t, "test"),
testAccStepWriteArnRoleRef(t, testRoleName),
testAccStepRead(t, "sts", testRoleName, []credentialTestFunc{describeInstancesTest}),
testAccStepWriteArnRoleRef(t, "test2", roleName, awsAccountID),
testAccStepRead(t, "sts", "test2", []credentialTestFunc{describeInstancesTest}),
},
Teardown: func() error {
return teardown(accessKey)
return teardown(accessKey, roleName, userName)
},
})
}
func TestBackend_policyCrud(t *testing.T) {
t.Parallel()
compacted, err := compactJSON(testDynamoPolicy)
if err != nil {
t.Fatalf("bad: %s", err)
@ -89,24 +114,72 @@ func TestBackend_policyCrud(t *testing.T) {
})
}
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" {
log.Println("[INFO] Test: Using us-west-2 as test region")
os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
func TestBackend_throttled(t *testing.T) {
t.Parallel()
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b := Backend()
if err := b.Setup(context.Background(), config); err != nil {
t.Fatal(err)
}
if v := os.Getenv("AWS_ACCOUNT_ID"); v == "" {
accountId, err := getAccountId()
if err != nil {
t.Logf("Unable to retrive user via iam:GetUser: %#v", err)
t.Skip("AWS_ACCOUNT_ID not explicitly set and could not be read from iam:GetUser for acceptance tests, skipping")
}
log.Printf("[INFO] Test: Used %s as AWS_ACCOUNT_ID", accountId)
os.Setenv("AWS_ACCOUNT_ID", accountId)
connData := map[string]interface{}{
"credential_type": "iam_user",
}
confReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "roles/something",
Storage: config.StorageView,
Data: connData,
}
resp, err := b.HandleRequest(context.Background(), confReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("failed to write configuration: resp:%#v err:%s", resp, err)
}
// Mock the IAM API call to return a throttled response to the CreateUser API
// call
b.iamClient = &mockIAMClient{}
credReq := &logical.Request{
Operation: logical.UpdateOperation,
Path: "creds/something",
Storage: config.StorageView,
}
credResp, err := b.HandleRequest(context.Background(), credReq)
if err == nil {
t.Fatalf("failed to trigger expected throttling error condition: resp:%#v", credResp)
}
rErr := credResp.Error()
expected := "Error creating IAM user: Throttling: "
if rErr.Error() != expected {
t.Fatalf("error message did not match, expected (%s), got (%s)", expected, rErr.Error())
}
// verify the error we got back is returned with a http.StatusBadGateway
code, err := logical.RespondErrorCommon(credReq, credResp, err)
if err == nil {
t.Fatal("expected error after running req/resp/err through RespondErrorCommon, got nil")
}
if code != http.StatusBadGateway {
t.Fatalf("expected HTTP status 'bad gateway', got: (%d)", code)
}
}
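The http.StatusBadGateway assertion checks the behaviour introduced in this change, where AWS throttling errors surface as 502s via the awsutil.CheckAWSError calls further down in this diff. A hedged sketch of detecting a throttling code with the SDK's awserr package and mapping it to a status; the helper and the exact set of codes are illustrative, not the backend's implementation.

package main

import (
    "fmt"
    "net/http"

    "github.com/aws/aws-sdk-go/aws/awserr"
)

// statusForAWSError is an illustrative helper, not the backend's actual code:
// it treats AWS throttling codes as upstream failures (502) so callers can
// tell a rate limit apart from an internal error (500).
func statusForAWSError(err error) int {
    if aerr, ok := err.(awserr.Error); ok {
        switch aerr.Code() {
        case "Throttling", "ThrottlingException", "RequestLimitExceeded":
            return http.StatusBadGateway
        }
    }
    return http.StatusInternalServerError
}

func main() {
    err := awserr.New("Throttling", "rate exceeded", nil)
    fmt.Println(statusForAWSError(err)) // 502
}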
func getAccountId() (string, error) {
func testAccPreCheck(t *testing.T) {
initSetup.Do(func() {
if v := os.Getenv("AWS_DEFAULT_REGION"); v == "" {
log.Println("[INFO] Test: Using us-west-2 as test region")
os.Setenv("AWS_DEFAULT_REGION", "us-west-2")
}
})
}
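Wrapping the region default in sync.Once makes the environment mutation a one-time setup step even though the acceptance tests now call t.Parallel(). A tiny illustration of the idiom:

package main

import (
    "fmt"
    "sync"
)

var initSetup sync.Once

func preCheck() {
    initSetup.Do(func() {
        fmt.Println("one-time environment setup") // runs once, no matter how many callers
    })
}

func main() {
    for i := 0; i < 3; i++ {
        preCheck()
    }
}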
func getAccountID() (string, error) {
awsConfig := &aws.Config{
Region: aws.String("us-east-1"),
HTTPClient: cleanhttp.DefaultClient(),
@ -126,9 +199,7 @@ func getAccountId() (string, error) {
return *res.Account, nil
}
const testRoleName = "Vault-Acceptance-Test-AWS-Assume-Role"
func createRole(t *testing.T) {
func createRole(t *testing.T, roleName, awsAccountID string) {
const testRoleAssumePolicy = `{
"Version": "2012-10-17",
"Statement": [
@ -147,15 +218,15 @@ func createRole(t *testing.T) {
HTTPClient: cleanhttp.DefaultClient(),
}
svc := iam.New(session.New(awsConfig))
trustPolicy := fmt.Sprintf(testRoleAssumePolicy, os.Getenv("AWS_ACCOUNT_ID"))
trustPolicy := fmt.Sprintf(testRoleAssumePolicy, awsAccountID)
params := &iam.CreateRoleInput{
AssumeRolePolicyDocument: aws.String(trustPolicy),
RoleName: aws.String(testRoleName),
RoleName: aws.String(roleName),
Path: aws.String("/"),
}
log.Printf("[INFO] AWS CreateRole: %s", testRoleName)
log.Printf("[INFO] AWS CreateRole: %s", roleName)
_, err := svc.CreateRole(params)
if err != nil {
@ -164,7 +235,7 @@ func createRole(t *testing.T) {
attachment := &iam.AttachRolePolicyInput{
PolicyArn: aws.String(ec2PolicyArn),
RoleName: aws.String(testRoleName), // Required
RoleName: aws.String(roleName), // Required
}
_, err = svc.AttachRolePolicy(attachment)
@ -173,9 +244,7 @@ func createRole(t *testing.T) {
}
}
const testUserName = "Vault-Acceptance-Test-AWS-FederationToken"
func createUser(t *testing.T, accessKey *awsAccessKey) {
func createUser(t *testing.T, userName string, accessKey *awsAccessKey) {
// The sequence of user creation actions is carefully chosen to minimize
// impact of stolen IAM user credentials
// 1. Create user, without any permissions or credentials. At this point,
@ -212,9 +281,9 @@ func createUser(t *testing.T, accessKey *awsAccessKey) {
svc := iam.New(session.New(awsConfig))
createUserInput := &iam.CreateUserInput{
UserName: aws.String(testUserName),
UserName: aws.String(userName),
}
log.Printf("[INFO] AWS CreateUser: %s", testUserName)
log.Printf("[INFO] AWS CreateUser: %s", userName)
_, err := svc.CreateUser(createUserInput)
if err != nil {
t.Fatalf("AWS CreateUser failed: %v", err)
@ -223,7 +292,7 @@ func createUser(t *testing.T, accessKey *awsAccessKey) {
putPolicyInput := &iam.PutUserPolicyInput{
PolicyDocument: aws.String(timebombPolicy),
PolicyName: aws.String("SelfDestructionTimebomb"),
UserName: aws.String(testUserName),
UserName: aws.String(userName),
}
_, err = svc.PutUserPolicy(putPolicyInput)
if err != nil {
@ -232,7 +301,7 @@ func createUser(t *testing.T, accessKey *awsAccessKey) {
attachUserPolicyInput := &iam.AttachUserPolicyInput{
PolicyArn: aws.String("arn:aws:iam::aws:policy/AdministratorAccess"),
UserName: aws.String(testUserName),
UserName: aws.String(userName),
}
_, err = svc.AttachUserPolicy(attachUserPolicyInput)
if err != nil {
@ -240,7 +309,7 @@ func createUser(t *testing.T, accessKey *awsAccessKey) {
}
createAccessKeyInput := &iam.CreateAccessKeyInput{
UserName: aws.String(testUserName),
UserName: aws.String(userName),
}
createAccessKeyOutput, err := svc.CreateAccessKey(createAccessKeyInput)
if err != nil {
@ -251,11 +320,11 @@ func createUser(t *testing.T, accessKey *awsAccessKey) {
}
genAccessKey := createAccessKeyOutput.AccessKey
accessKey.AccessKeyId = *genAccessKey.AccessKeyId
accessKey.AccessKeyID = *genAccessKey.AccessKeyId
accessKey.SecretAccessKey = *genAccessKey.SecretAccessKey
}
func deleteTestRole() error {
func deleteTestRole(roleName string) error {
awsConfig := &aws.Config{
Region: aws.String("us-east-1"),
HTTPClient: cleanhttp.DefaultClient(),
@ -264,7 +333,7 @@ func deleteTestRole() error {
attachment := &iam.DetachRolePolicyInput{
PolicyArn: aws.String(ec2PolicyArn),
RoleName: aws.String(testRoleName), // Required
RoleName: aws.String(roleName), // Required
}
_, err := svc.DetachRolePolicy(attachment)
if err != nil {
@ -273,10 +342,10 @@ func deleteTestRole() error {
}
params := &iam.DeleteRoleInput{
RoleName: aws.String(testRoleName),
RoleName: aws.String(roleName),
}
log.Printf("[INFO] AWS DeleteRole: %s", testRoleName)
log.Printf("[INFO] AWS DeleteRole: %s", roleName)
_, err = svc.DeleteRole(params)
if err != nil {
@ -286,9 +355,9 @@ func deleteTestRole() error {
return nil
}
func teardown(accessKey *awsAccessKey) error {
func teardown(accessKey *awsAccessKey, roleName, userName string) error {
if err := deleteTestRole(); err != nil {
if err := deleteTestRole(roleName); err != nil {
return err
}
awsConfig := &aws.Config{
@ -299,7 +368,7 @@ func teardown(accessKey *awsAccessKey) error {
userDetachment := &iam.DetachUserPolicyInput{
PolicyArn: aws.String("arn:aws:iam::aws:policy/AdministratorAccess"),
UserName: aws.String(testUserName),
UserName: aws.String(userName),
}
_, err := svc.DetachUserPolicy(userDetachment)
if err != nil {
@ -308,8 +377,8 @@ func teardown(accessKey *awsAccessKey) error {
}
deleteAccessKeyInput := &iam.DeleteAccessKeyInput{
AccessKeyId: aws.String(accessKey.AccessKeyId),
UserName: aws.String(testUserName),
AccessKeyId: aws.String(accessKey.AccessKeyID),
UserName: aws.String(userName),
}
_, err = svc.DeleteAccessKey(deleteAccessKeyInput)
if err != nil {
@ -319,7 +388,7 @@ func teardown(accessKey *awsAccessKey) error {
deleteUserPolicyInput := &iam.DeleteUserPolicyInput{
PolicyName: aws.String("SelfDestructionTimebomb"),
UserName: aws.String(testUserName),
UserName: aws.String(userName),
}
_, err = svc.DeleteUserPolicy(deleteUserPolicyInput)
if err != nil {
@ -327,9 +396,9 @@ func teardown(accessKey *awsAccessKey) error {
return err
}
deleteUserInput := &iam.DeleteUserInput{
UserName: aws.String(testUserName),
UserName: aws.String(userName),
}
log.Printf("[INFO] AWS DeleteUser: %s", testUserName)
log.Printf("[INFO] AWS DeleteUser: %s", userName)
_, err = svc.DeleteUser(deleteUserInput)
if err != nil {
log.Printf("[WARN] AWS DeleteUser failed: %v", err)
@ -361,13 +430,51 @@ func testAccStepConfigWithCreds(t *testing.T, accessKey *awsAccessKey) logicalte
// In particular, they get evaluated before accessKey gets set by CreateUser
// and thus would fail. By moving to a closure in a PreFlight, we ensure that
// the creds get evaluated lazily after they've been properly set
req.Data["access_key"] = accessKey.AccessKeyId
req.Data["access_key"] = accessKey.AccessKeyID
req.Data["secret_key"] = accessKey.SecretAccessKey
return nil
},
}
}
func testAccStepRotateRoot(oldAccessKey *awsAccessKey) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "config/rotate-root",
Check: func(resp *logical.Response) error {
if resp == nil {
return fmt.Errorf("received nil response from config/rotate-root")
}
newAccessKeyID := resp.Data["access_key"].(string)
if newAccessKeyID == oldAccessKey.AccessKeyID {
return fmt.Errorf("rotate-root didn't rotate access key")
}
awsConfig := &aws.Config{
Region: aws.String("us-east-1"),
HTTPClient: cleanhttp.DefaultClient(),
Credentials: credentials.NewStaticCredentials(oldAccessKey.AccessKeyID, oldAccessKey.SecretAccessKey, ""),
}
// sigh....
oldAccessKey.AccessKeyID = newAccessKeyID
log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
time.Sleep(10 * time.Second)
svc := sts.New(session.New(awsConfig))
params := &sts.GetCallerIdentityInput{}
_, err := svc.GetCallerIdentity(params)
if err == nil {
return fmt.Errorf("bad: old credentials succeeded after rotate")
}
if aerr, ok := err.(awserr.Error); ok {
if aerr.Code() != "InvalidClientTokenId" {
return fmt.Errorf("Unknown error returned from AWS: %#v", aerr)
}
return nil
}
return err
},
}
}
func testAccStepRead(t *testing.T, path, name string, credentialTests []credentialTestFunc) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.ReadOperation,
@ -594,6 +701,7 @@ func testAccStepWriteArnPolicyRef(t *testing.T, name string, arn string) logical
}
func TestBackend_basicPolicyArnRef(t *testing.T) {
t.Parallel()
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
PreCheck: func() { testAccPreCheck(t) },
@ -607,6 +715,7 @@ func TestBackend_basicPolicyArnRef(t *testing.T) {
}
func TestBackend_iamUserManagedInlinePolicies(t *testing.T) {
t.Parallel()
compacted, err := compactJSON(testDynamoPolicy)
if err != nil {
t.Fatalf("bad: %#v", err)
@ -637,6 +746,8 @@ func TestBackend_iamUserManagedInlinePolicies(t *testing.T) {
}
func TestBackend_AssumedRoleWithPolicyDoc(t *testing.T) {
t.Parallel()
roleName := generateUniqueName(t.Name())
// This looks a bit curious. The policy document and the role document act
// as a logical intersection of policies. The role allows ec2:Describe*
// (among other permissions). This policy allows everything BUT
@ -653,16 +764,21 @@ func TestBackend_AssumedRoleWithPolicyDoc(t *testing.T) {
}]
}
`
awsAccountID, err := getAccountID()
if err != nil {
t.Logf("Unable to retrive user via sts:GetCallerIdentity: %#v", err)
t.Skip("Could not determine AWS account ID from sts:GetCallerIdentity for acceptance tests, skipping")
}
roleData := map[string]interface{}{
"policy_document": allowAllButDescribeAzs,
"role_arns": []string{fmt.Sprintf("arn:aws:iam::%s:role/%s", os.Getenv("AWS_ACCOUNT_ID"), testRoleName)},
"role_arns": []string{fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountID, roleName)},
"credential_type": assumedRoleCred,
}
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
PreCheck: func() {
testAccPreCheck(t)
createRole(t)
createRole(t, roleName, awsAccountID)
// Sleep for a while because AWS is eventually consistent
log.Println("[WARN] Sleeping for 10 seconds waiting for AWS...")
time.Sleep(10 * time.Second)
@ -674,11 +790,14 @@ func TestBackend_AssumedRoleWithPolicyDoc(t *testing.T) {
testAccStepRead(t, "sts", "test", []credentialTestFunc{describeInstancesTest, describeAzsTestUnauthorized}),
testAccStepRead(t, "creds", "test", []credentialTestFunc{describeInstancesTest, describeAzsTestUnauthorized}),
},
Teardown: deleteTestRole,
Teardown: func() error {
return deleteTestRole(roleName)
},
})
}
func TestBackend_policyArnCrud(t *testing.T) {
t.Parallel()
logicaltest.Test(t, logicaltest.TestCase{
AcceptanceTest: true,
Backend: getBackend(t),
@ -720,18 +839,22 @@ func testAccStepReadArnPolicy(t *testing.T, name string, value string) logicalte
}
}
func testAccStepWriteArnRoleRef(t *testing.T, roleName string) logicaltest.TestStep {
func testAccStepWriteArnRoleRef(t *testing.T, vaultRoleName, awsRoleName, awsAccountID string) logicaltest.TestStep {
return logicaltest.TestStep{
Operation: logical.UpdateOperation,
Path: "roles/" + roleName,
Path: "roles/" + vaultRoleName,
Data: map[string]interface{}{
"arn": fmt.Sprintf("arn:aws:iam::%s:role/%s", os.Getenv("AWS_ACCOUNT_ID"), roleName),
"arn": fmt.Sprintf("arn:aws:iam::%s:role/%s", awsAccountID, awsRoleName),
},
}
}
func generateUniqueName(prefix string) string {
return testhelpers.RandomWithPrefix(prefix)
}
type awsAccessKey struct {
AccessKeyId string
AccessKeyID string
SecretAccessKey string
}

View file

@ -15,6 +15,7 @@ import (
"github.com/hashicorp/vault/logical"
)
// NOTE: The caller is required to ensure that b.clientMutex is at least read locked
func getRootConfig(ctx context.Context, s logical.Storage, clientType string) (*aws.Config, error) {
credsConfig := &awsutil.CredentialsConfig{}
var endpoint string
@ -68,7 +69,7 @@ func getRootConfig(ctx context.Context, s logical.Storage, clientType string) (*
}, nil
}
func clientIAM(ctx context.Context, s logical.Storage) (*iam.IAM, error) {
func nonCachedClientIAM(ctx context.Context, s logical.Storage) (*iam.IAM, error) {
awsConfig, err := getRootConfig(ctx, s, "iam")
if err != nil {
return nil, err
@ -82,7 +83,7 @@ func clientIAM(ctx context.Context, s logical.Storage) (*iam.IAM, error) {
return client, nil
}
func clientSTS(ctx context.Context, s logical.Storage) (*sts.STS, error) {
func nonCachedClientSTS(ctx context.Context, s logical.Storage) (*sts.STS, error) {
awsConfig, err := getRootConfig(ctx, s, "sts")
if err != nil {
return nil, err

View file

@ -8,7 +8,7 @@ import (
"github.com/hashicorp/vault/logical/framework"
)
func pathConfigRoot() *framework.Path {
func pathConfigRoot(b *backend) *framework.Path {
return &framework.Path{
Pattern: "config/root",
Fields: map[string]*framework.FieldSchema{
@ -42,7 +42,7 @@ func pathConfigRoot() *framework.Path {
},
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: pathConfigRootWrite,
logical.UpdateOperation: b.pathConfigRootWrite,
},
HelpSynopsis: pathConfigRootHelpSyn,
@ -50,12 +50,15 @@ func pathConfigRoot() *framework.Path {
}
}
func pathConfigRootWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
func (b *backend) pathConfigRootWrite(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
region := data.Get("region").(string)
iamendpoint := data.Get("iam_endpoint").(string)
stsendpoint := data.Get("sts_endpoint").(string)
maxretries := data.Get("max_retries").(int)
b.clientMutex.Lock()
defer b.clientMutex.Unlock()
entry, err := logical.StorageEntryJSON("config/root", rootConfig{
AccessKey: data.Get("access_key").(string),
SecretKey: data.Get("secret_key").(string),
@ -72,6 +75,11 @@ func pathConfigRootWrite(ctx context.Context, req *logical.Request, data *framew
return nil, err
}
// clear possible cached IAM / STS clients after successfully updating
// config/root
b.iamClient = nil
b.stsClient = nil
return nil, nil
}

View file

@ -0,0 +1,124 @@
package aws
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
func pathConfigRotateRoot(b *backend) *framework.Path {
return &framework.Path{
Pattern: "config/rotate-root",
Callbacks: map[logical.Operation]framework.OperationFunc{
logical.UpdateOperation: b.pathConfigRotateRootUpdate,
},
HelpSynopsis: pathConfigRotateRootHelpSyn,
HelpDescription: pathConfigRotateRootHelpDesc,
}
}
func (b *backend) pathConfigRotateRootUpdate(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
// have to get the client config first because that takes out a read lock
client, err := b.clientIAM(ctx, req.Storage)
if err != nil {
return nil, err
}
if client == nil {
return nil, fmt.Errorf("nil IAM client")
}
b.clientMutex.Lock()
defer b.clientMutex.Unlock()
rawRootConfig, err := req.Storage.Get(ctx, "config/root")
if err != nil {
return nil, err
}
if rawRootConfig == nil {
return nil, fmt.Errorf("no configuration found for config/root")
}
var config rootConfig
if err := rawRootConfig.DecodeJSON(&config); err != nil {
return nil, errwrap.Wrapf("error reading root configuration: {{err}}", err)
}
if config.AccessKey == "" || config.SecretKey == "" {
return logical.ErrorResponse("Cannot call config/rotate-root when either access_key or secret_key is empty"), nil
}
var getUserInput iam.GetUserInput // empty input means get current user
getUserRes, err := client.GetUser(&getUserInput)
if err != nil {
return nil, errwrap.Wrapf("error calling GetUser: {{err}}", err)
}
if getUserRes == nil {
return nil, fmt.Errorf("nil response from GetUser")
}
if getUserRes.User == nil {
return nil, fmt.Errorf("nil user returned from GetUser")
}
if getUserRes.User.UserName == nil {
return nil, fmt.Errorf("nil UserName returned from GetUser")
}
createAccessKeyInput := iam.CreateAccessKeyInput{
UserName: getUserRes.User.UserName,
}
createAccessKeyRes, err := client.CreateAccessKey(&createAccessKeyInput)
if err != nil {
return nil, errwrap.Wrapf("error calling CreateAccessKey: {{err}}", err)
}
if createAccessKeyRes.AccessKey == nil {
return nil, fmt.Errorf("nil response from CreateAccessKey")
}
if createAccessKeyRes.AccessKey.AccessKeyId == nil || createAccessKeyRes.AccessKey.SecretAccessKey == nil {
return nil, fmt.Errorf("nil AccessKeyId or SecretAccessKey returned from CreateAccessKey")
}
oldAccessKey := config.AccessKey
config.AccessKey = *createAccessKeyRes.AccessKey.AccessKeyId
config.SecretKey = *createAccessKeyRes.AccessKey.SecretAccessKey
newEntry, err := logical.StorageEntryJSON("config/root", config)
if err != nil {
return nil, errwrap.Wrapf("error generating new config/root JSON: {{err}}", err)
}
if err := req.Storage.Put(ctx, newEntry); err != nil {
return nil, errwrap.Wrapf("error saving new config/root: {{err}}", err)
}
b.iamClient = nil
b.stsClient = nil
deleteAccessKeyInput := iam.DeleteAccessKeyInput{
AccessKeyId: aws.String(oldAccessKey),
UserName: getUserRes.User.UserName,
}
_, err = client.DeleteAccessKey(&deleteAccessKeyInput)
if err != nil {
return nil, errwrap.Wrapf("error deleting old access key: {{err}}", err)
}
return &logical.Response{
Data: map[string]interface{}{
"access_key": config.AccessKey,
},
}, nil
}
const pathConfigRotateRootHelpSyn = `
Request to rotate the AWS credentials used by Vault
`
const pathConfigRotateRootHelpDesc = `
This path attempts to rotate the AWS credentials used by Vault for this mount.
It is only valid if Vault has been configured to use AWS IAM credentials via the
config/root endpoint.
`
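As the help text notes, rotation only makes sense once config/root holds static credentials. A hedged usage sketch with Vault's Go API client, assuming the engine is mounted at aws/ and that VAULT_ADDR and VAULT_TOKEN are set; per the handler above, only the new access key ID is returned and the new secret key never leaves Vault.

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/hashicorp/vault/api"
)

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        log.Fatal(err)
    }
    client.SetToken(os.Getenv("VAULT_TOKEN"))

    // Ask Vault to rotate its own root credentials; it creates a new access
    // key for its IAM user, stores it, and deletes the old one.
    secret, err := client.Logical().Write("aws/config/rotate-root", nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("new access key ID:", secret.Data["access_key"])
}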

View file

@ -111,7 +111,7 @@ func (b *backend) pathCredsRead(ctx context.Context, req *logical.Request, d *fr
}
}
func pathUserRollback(ctx context.Context, req *logical.Request, _kind string, data interface{}) error {
func (b *backend) pathUserRollback(ctx context.Context, req *logical.Request, _kind string, data interface{}) error {
var entry walUser
if err := mapstructure.Decode(data, &entry); err != nil {
return err
@ -119,7 +119,7 @@ func pathUserRollback(ctx context.Context, req *logical.Request, _kind string, d
username := entry.UserName
// Get the client
client, err := clientIAM(ctx, req.Storage)
client, err := b.clientIAM(ctx, req.Storage)
if err != nil {
return err
}

View file

@ -9,11 +9,11 @@ import (
"github.com/hashicorp/vault/logical/framework"
)
var walRollbackMap = map[string]framework.WALRollbackFunc{
"user": pathUserRollback,
}
func (b *backend) walRollback(ctx context.Context, req *logical.Request, kind string, data interface{}) error {
walRollbackMap := map[string]framework.WALRollbackFunc{
"user": b.pathUserRollback,
}
if !b.System().LocalMount() && b.System().ReplicationState().HasState(consts.ReplicationPerformancePrimary) {
return nil
}
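Moving walRollbackMap inside the method turns each entry into a method value bound to the receiver, which is how pathUserRollback gains access to the backend's cached IAM client. Method values close over their receiver, as this short sketch shows; the names mirror the diff but the signatures are simplified for illustration.

package main

import "fmt"

type backend struct{ name string }

func (b *backend) pathUserRollback(data string) error {
    // b is captured by the method value, so the rollback sees backend state.
    fmt.Printf("backend %q rolling back %s\n", b.name, data)
    return nil
}

func main() {
    b := &backend{name: "aws"}
    walRollbackMap := map[string]func(string) error{
        "user": b.pathUserRollback, // method value bound to b
    }
    _ = walRollbackMap["user"]("vault-testacc-user")
}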

View file

@ -11,15 +11,16 @@ import (
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/awsutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
const SecretAccessKeyType = "access_keys"
const secretAccessKeyType = "access_keys"
func secretAccessKeys(b *backend) *framework.Secret {
return &framework.Secret{
Type: SecretAccessKeyType,
Type: secretAccessKeyType,
Fields: map[string]*framework.FieldSchema{
"access_key": &framework.FieldSchema{
Type: framework.TypeString,
@ -37,7 +38,7 @@ func secretAccessKeys(b *backend) *framework.Secret {
},
Renew: b.secretAccessKeysRenew,
Revoke: secretAccessKeysRevoke,
Revoke: b.secretAccessKeysRevoke,
}
}
@ -67,14 +68,14 @@ func genUsername(displayName, policyName, userType string) (ret string, warning
func (b *backend) secretTokenCreate(ctx context.Context, s logical.Storage,
displayName, policyName, policy string,
lifeTimeInSeconds int64) (*logical.Response, error) {
STSClient, err := clientSTS(ctx, s)
stsClient, err := b.clientSTS(ctx, s)
if err != nil {
return logical.ErrorResponse(err.Error()), nil
}
username, usernameWarning := genUsername(displayName, policyName, "sts")
tokenResp, err := STSClient.GetFederationToken(
tokenResp, err := stsClient.GetFederationToken(
&sts.GetFederationTokenInput{
Name: aws.String(username),
Policy: aws.String(policy),
@ -83,10 +84,10 @@ func (b *backend) secretTokenCreate(ctx context.Context, s logical.Storage,
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error generating STS keys: %s", err)), nil
"Error generating STS keys: %s", err)), awsutil.CheckAWSError(err)
}
resp := b.Secret(SecretAccessKeyType).Response(map[string]interface{}{
resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
"access_key": *tokenResp.Credentials.AccessKeyId,
"secret_key": *tokenResp.Credentials.SecretAccessKey,
"security_token": *tokenResp.Credentials.SessionToken,
@ -112,7 +113,7 @@ func (b *backend) secretTokenCreate(ctx context.Context, s logical.Storage,
func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
displayName, roleName, roleArn, policy string,
lifeTimeInSeconds int64) (*logical.Response, error) {
STSClient, err := clientSTS(ctx, s)
stsClient, err := b.clientSTS(ctx, s)
if err != nil {
return logical.ErrorResponse(err.Error()), nil
}
@ -127,14 +128,14 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage,
if policy != "" {
assumeRoleInput.SetPolicy(policy)
}
tokenResp, err := STSClient.AssumeRole(assumeRoleInput)
tokenResp, err := stsClient.AssumeRole(assumeRoleInput)
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error assuming role: %s", err)), nil
"Error assuming role: %s", err)), awsutil.CheckAWSError(err)
}
resp := b.Secret(SecretAccessKeyType).Response(map[string]interface{}{
resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
"access_key": *tokenResp.Credentials.AccessKeyId,
"secret_key": *tokenResp.Credentials.SecretAccessKey,
"security_token": *tokenResp.Credentials.SessionToken,
@ -161,7 +162,7 @@ func (b *backend) secretAccessKeysCreate(
ctx context.Context,
s logical.Storage,
displayName, policyName string, role *awsRoleEntry) (*logical.Response, error) {
client, err := clientIAM(ctx, s)
iamClient, err := b.clientIAM(ctx, s)
if err != nil {
return logical.ErrorResponse(err.Error()), nil
}
@ -172,7 +173,7 @@ func (b *backend) secretAccessKeysCreate(
// the user is created because if we switch the order then the WAL put
// can fail, which would put us in an awkward position: we have a user
// we need to rollback but can't put the WAL entry to do the rollback.
walId, err := framework.PutWAL(ctx, s, "user", &walUser{
walID, err := framework.PutWAL(ctx, s, "user", &walUser{
UserName: username,
})
if err != nil {
@ -180,57 +181,57 @@ func (b *backend) secretAccessKeysCreate(
}
// Create the user
_, err = client.CreateUser(&iam.CreateUserInput{
_, err = iamClient.CreateUser(&iam.CreateUserInput{
UserName: aws.String(username),
})
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error creating IAM user: %s", err)), nil
"Error creating IAM user: %s", err)), awsutil.CheckAWSError(err)
}
for _, arn := range role.PolicyArns {
// Attach existing policy against user
_, err = client.AttachUserPolicy(&iam.AttachUserPolicyInput{
_, err = iamClient.AttachUserPolicy(&iam.AttachUserPolicyInput{
UserName: aws.String(username),
PolicyArn: aws.String(arn),
})
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error attaching user policy: %s", err)), nil
"Error attaching user policy: %s", err)), awsutil.CheckAWSError(err)
}
}
if role.PolicyDocument != "" {
// Add new inline user policy against user
_, err = client.PutUserPolicy(&iam.PutUserPolicyInput{
_, err = iamClient.PutUserPolicy(&iam.PutUserPolicyInput{
UserName: aws.String(username),
PolicyName: aws.String(policyName),
PolicyDocument: aws.String(role.PolicyDocument),
})
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error putting user policy: %s", err)), nil
"Error putting user policy: %s", err)), awsutil.CheckAWSError(err)
}
}
// Create the keys
keyResp, err := client.CreateAccessKey(&iam.CreateAccessKeyInput{
keyResp, err := iamClient.CreateAccessKey(&iam.CreateAccessKeyInput{
UserName: aws.String(username),
})
if err != nil {
return logical.ErrorResponse(fmt.Sprintf(
"Error creating access keys: %s", err)), nil
"Error creating access keys: %s", err)), awsutil.CheckAWSError(err)
}
// Remove the WAL entry, we succeeded! If we fail, we don't return
// the secret because it'll get rolled back anyways, so we have to return
// an error here.
if err := framework.DeleteWAL(ctx, s, walId); err != nil {
if err := framework.DeleteWAL(ctx, s, walID); err != nil {
return nil, errwrap.Wrapf("failed to commit WAL entry: {{err}}", err)
}
// Return the info!
resp := b.Secret(SecretAccessKeyType).Response(map[string]interface{}{
resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{
"access_key": *keyResp.AccessKey.AccessKeyId,
"secret_key": *keyResp.AccessKey.SecretAccessKey,
"security_token": nil,
@ -281,7 +282,7 @@ func (b *backend) secretAccessKeysRenew(ctx context.Context, req *logical.Reques
return resp, nil
}
func secretAccessKeysRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
func (b *backend) secretAccessKeysRevoke(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
// STS cleans up after itself so we can skip this if is_sts internal data
// element set to true. If is_sts is not set, assumes old version
@ -309,7 +310,7 @@ func secretAccessKeysRevoke(ctx context.Context, req *logical.Request, d *framew
}
// Use the user rollback mechanism to delete this user
err := pathUserRollback(ctx, req, "user", map[string]interface{}{
err := b.pathUserRollback(ctx, req, "user", map[string]interface{}{
"username": username,
})
if err != nil {

View file

@ -14,6 +14,7 @@ import (
"github.com/go-test/deep"
"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/pluginutil"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
@ -58,7 +59,7 @@ func preparePostgresTestContainer(t *testing.T, s logical.Storage, b logical.Bac
// exponential backoff-retry
if err = pool.Retry(func() error {
// This will cause a validation to run
resp, err := b.HandleRequest(context.Background(), &logical.Request{
resp, err := b.HandleRequest(namespace.TestContext(), &logical.Request{
Storage: s,
Operation: logical.UpdateOperation,
Path: "config/postgresql",
@ -100,7 +101,7 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
sys := vault.TestDynamicSystemView(cores[0].Core)
vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", "TestBackend_PluginMain")
vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", "TestBackend_PluginMain", []string{}, "")
return cluster, sys
}
@ -227,7 +228,7 @@ func TestBackend_config_connection(t *testing.T) {
t.Fatal("expected not exists")
}
resp, err = b.HandleRequest(context.Background(), configReq)
resp, err = b.HandleRequest(namespace.TestContext(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v\n", err, resp)
}
@ -242,7 +243,7 @@ func TestBackend_config_connection(t *testing.T) {
"root_credentials_rotate_statements": []string{},
}
configReq.Operation = logical.ReadOperation
resp, err = b.HandleRequest(context.Background(), configReq)
resp, err = b.HandleRequest(namespace.TestContext(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -279,7 +280,7 @@ func TestBackend_config_connection(t *testing.T) {
t.Fatal("expected exists")
}
resp, err = b.HandleRequest(context.Background(), configReq)
resp, err = b.HandleRequest(namespace.TestContext(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v\n", err, resp)
}
@ -294,7 +295,7 @@ func TestBackend_config_connection(t *testing.T) {
"root_credentials_rotate_statements": []string{},
}
configReq.Operation = logical.ReadOperation
resp, err = b.HandleRequest(context.Background(), configReq)
resp, err = b.HandleRequest(namespace.TestContext(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -320,7 +321,7 @@ func TestBackend_config_connection(t *testing.T) {
Data: configData,
}
resp, err = b.HandleRequest(context.Background(), configReq)
resp, err = b.HandleRequest(namespace.TestContext(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v\n", err, resp)
}
@ -335,7 +336,7 @@ func TestBackend_config_connection(t *testing.T) {
"root_credentials_rotate_statements": []string{},
}
configReq.Operation = logical.ReadOperation
resp, err = b.HandleRequest(context.Background(), configReq)
resp, err = b.HandleRequest(namespace.TestContext(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -351,7 +352,7 @@ func TestBackend_config_connection(t *testing.T) {
Storage: config.StorageView,
Path: "config/",
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil {
t.Fatal(err)
}
@ -381,7 +382,7 @@ func TestBackend_BadConnectionString(t *testing.T) {
respCheck := func(req *logical.Request) {
t.Helper()
resp, err := b.HandleRequest(context.Background(), req)
resp, err := b.HandleRequest(namespace.TestContext(), req)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -440,7 +441,7 @@ func TestBackend_basic(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err := b.HandleRequest(context.Background(), req)
resp, err := b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -457,7 +458,7 @@ func TestBackend_basic(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -469,7 +470,7 @@ func TestBackend_basic(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err := b.HandleRequest(context.Background(), req)
credsResp, err := b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
@ -486,7 +487,7 @@ func TestBackend_basic(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -498,7 +499,7 @@ func TestBackend_basic(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err = b.HandleRequest(context.Background(), req)
credsResp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
@ -519,7 +520,7 @@ func TestBackend_basic(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -533,7 +534,7 @@ func TestBackend_basic(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err = b.HandleRequest(context.Background(), req)
credsResp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
@ -546,7 +547,7 @@ func TestBackend_basic(t *testing.T) {
}
// Revoke creds
resp, err = b.HandleRequest(context.Background(), &logical.Request{
resp, err = b.HandleRequest(namespace.TestContext(), &logical.Request{
Operation: logical.RevokeOperation,
Storage: config.StorageView,
Secret: &logical.Secret{
@ -575,7 +576,7 @@ func TestBackend_basic(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err = b.HandleRequest(context.Background(), req)
credsResp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
@ -589,13 +590,13 @@ func TestBackend_basic(t *testing.T) {
Path: "roles/plugin-role-test",
Storage: config.StorageView,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
// Revoke creds
resp, err = b.HandleRequest(context.Background(), &logical.Request{
resp, err = b.HandleRequest(namespace.TestContext(), &logical.Request{
Operation: logical.RevokeOperation,
Storage: config.StorageView,
Secret: &logical.Secret{
@ -647,7 +648,7 @@ func TestBackend_connectionCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err := b.HandleRequest(context.Background(), req)
resp, err := b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -666,7 +667,7 @@ func TestBackend_connectionCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -685,7 +686,7 @@ func TestBackend_connectionCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -694,7 +695,7 @@ func TestBackend_connectionCrud(t *testing.T) {
}
req.Operation = logical.ReadOperation
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -706,7 +707,7 @@ func TestBackend_connectionCrud(t *testing.T) {
req.Operation = logical.UpdateOperation
connURL = strings.Replace(connURL, "postgres:secret", "{{username}}:{{password}}", -1)
data["connection_url"] = connURL
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -722,7 +723,7 @@ func TestBackend_connectionCrud(t *testing.T) {
"root_credentials_rotate_statements": []string(nil),
}
req.Operation = logical.ReadOperation
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -740,7 +741,7 @@ func TestBackend_connectionCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -753,7 +754,7 @@ func TestBackend_connectionCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err := b.HandleRequest(context.Background(), req)
credsResp, err := b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
@ -774,14 +775,14 @@ func TestBackend_connectionCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
// Read connection
req.Operation = logical.ReadOperation
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -824,7 +825,7 @@ func TestBackend_roleCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err := b.HandleRequest(context.Background(), req)
resp, err := b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -844,7 +845,7 @@ func TestBackend_roleCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -868,7 +869,7 @@ func TestBackend_roleCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -912,7 +913,7 @@ func TestBackend_roleCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%v resp:%#v\n", err, resp)
}
@ -936,7 +937,7 @@ func TestBackend_roleCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1050,7 +1051,7 @@ func TestBackend_roleCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1063,7 +1064,7 @@ func TestBackend_roleCrud(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1101,7 +1102,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err := b.HandleRequest(context.Background(), req)
resp, err := b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1119,7 +1120,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1136,7 +1137,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1149,7 +1150,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err := b.HandleRequest(context.Background(), req)
credsResp, err := b.HandleRequest(namespace.TestContext(), req)
if err != logical.ErrPermissionDenied {
t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err)
}
@ -1166,7 +1167,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1179,7 +1180,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err = b.HandleRequest(context.Background(), req)
credsResp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
@ -1200,7 +1201,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1213,7 +1214,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err = b.HandleRequest(context.Background(), req)
credsResp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
@ -1234,7 +1235,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1247,7 +1248,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err = b.HandleRequest(context.Background(), req)
credsResp, err = b.HandleRequest(namespace.TestContext(), req)
if err != logical.ErrPermissionDenied {
t.Fatalf("expected error to be:%s got:%#v\n", logical.ErrPermissionDenied, err)
}
@ -1260,7 +1261,7 @@ func TestBackend_allowedRoles(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err = b.HandleRequest(context.Background(), req)
credsResp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
@ -1303,7 +1304,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err := b.HandleRequest(context.Background(), req)
resp, err := b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1320,7 +1321,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@ -1332,7 +1333,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err := b.HandleRequest(context.Background(), req)
credsResp, err := b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
@ -1344,7 +1345,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
resp, err = b.HandleRequest(context.Background(), req)
resp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
@ -1365,7 +1366,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) {
Storage: config.StorageView,
Data: data,
}
credsResp, err = b.HandleRequest(context.Background(), req)
credsResp, err = b.HandleRequest(namespace.TestContext(), req)
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}

View file

@ -1,12 +1,14 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: builtin/logical/database/dbplugin/database.proto
package dbplugin // import "github.com/hashicorp/vault/builtin/logical/database/dbplugin"
package dbplugin
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
math "math"
)
import (
context "golang.org/x/net/context"
@ -37,16 +39,17 @@ func (m *InitializeRequest) Reset() { *m = InitializeRequest{} }
func (m *InitializeRequest) String() string { return proto.CompactTextString(m) }
func (*InitializeRequest) ProtoMessage() {}
func (*InitializeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{0}
return fileDescriptor_7bf7b4c7fef2f66e, []int{0}
}
func (m *InitializeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_InitializeRequest.Unmarshal(m, b)
}
func (m *InitializeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_InitializeRequest.Marshal(b, m, deterministic)
}
func (dst *InitializeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_InitializeRequest.Merge(dst, src)
func (m *InitializeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_InitializeRequest.Merge(m, src)
}
func (m *InitializeRequest) XXX_Size() int {
return xxx_messageInfo_InitializeRequest.Size(m)
@ -83,16 +86,17 @@ func (m *InitRequest) Reset() { *m = InitRequest{} }
func (m *InitRequest) String() string { return proto.CompactTextString(m) }
func (*InitRequest) ProtoMessage() {}
func (*InitRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{1}
return fileDescriptor_7bf7b4c7fef2f66e, []int{1}
}
func (m *InitRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_InitRequest.Unmarshal(m, b)
}
func (m *InitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_InitRequest.Marshal(b, m, deterministic)
}
func (dst *InitRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_InitRequest.Merge(dst, src)
func (m *InitRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_InitRequest.Merge(m, src)
}
func (m *InitRequest) XXX_Size() int {
return xxx_messageInfo_InitRequest.Size(m)
@ -130,16 +134,17 @@ func (m *CreateUserRequest) Reset() { *m = CreateUserRequest{} }
func (m *CreateUserRequest) String() string { return proto.CompactTextString(m) }
func (*CreateUserRequest) ProtoMessage() {}
func (*CreateUserRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{2}
return fileDescriptor_7bf7b4c7fef2f66e, []int{2}
}
func (m *CreateUserRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateUserRequest.Unmarshal(m, b)
}
func (m *CreateUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateUserRequest.Marshal(b, m, deterministic)
}
func (dst *CreateUserRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateUserRequest.Merge(dst, src)
func (m *CreateUserRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateUserRequest.Merge(m, src)
}
func (m *CreateUserRequest) XXX_Size() int {
return xxx_messageInfo_CreateUserRequest.Size(m)
@ -184,16 +189,17 @@ func (m *RenewUserRequest) Reset() { *m = RenewUserRequest{} }
func (m *RenewUserRequest) String() string { return proto.CompactTextString(m) }
func (*RenewUserRequest) ProtoMessage() {}
func (*RenewUserRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{3}
return fileDescriptor_7bf7b4c7fef2f66e, []int{3}
}
func (m *RenewUserRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RenewUserRequest.Unmarshal(m, b)
}
func (m *RenewUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RenewUserRequest.Marshal(b, m, deterministic)
}
func (dst *RenewUserRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_RenewUserRequest.Merge(dst, src)
func (m *RenewUserRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_RenewUserRequest.Merge(m, src)
}
func (m *RenewUserRequest) XXX_Size() int {
return xxx_messageInfo_RenewUserRequest.Size(m)
@ -237,16 +243,17 @@ func (m *RevokeUserRequest) Reset() { *m = RevokeUserRequest{} }
func (m *RevokeUserRequest) String() string { return proto.CompactTextString(m) }
func (*RevokeUserRequest) ProtoMessage() {}
func (*RevokeUserRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{4}
return fileDescriptor_7bf7b4c7fef2f66e, []int{4}
}
func (m *RevokeUserRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RevokeUserRequest.Unmarshal(m, b)
}
func (m *RevokeUserRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RevokeUserRequest.Marshal(b, m, deterministic)
}
func (dst *RevokeUserRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_RevokeUserRequest.Merge(dst, src)
func (m *RevokeUserRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_RevokeUserRequest.Merge(m, src)
}
func (m *RevokeUserRequest) XXX_Size() int {
return xxx_messageInfo_RevokeUserRequest.Size(m)
@ -282,16 +289,17 @@ func (m *RotateRootCredentialsRequest) Reset() { *m = RotateRootCredenti
func (m *RotateRootCredentialsRequest) String() string { return proto.CompactTextString(m) }
func (*RotateRootCredentialsRequest) ProtoMessage() {}
func (*RotateRootCredentialsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{5}
return fileDescriptor_7bf7b4c7fef2f66e, []int{5}
}
func (m *RotateRootCredentialsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RotateRootCredentialsRequest.Unmarshal(m, b)
}
func (m *RotateRootCredentialsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RotateRootCredentialsRequest.Marshal(b, m, deterministic)
}
func (dst *RotateRootCredentialsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_RotateRootCredentialsRequest.Merge(dst, src)
func (m *RotateRootCredentialsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_RotateRootCredentialsRequest.Merge(m, src)
}
func (m *RotateRootCredentialsRequest) XXX_Size() int {
return xxx_messageInfo_RotateRootCredentialsRequest.Size(m)
@ -331,16 +339,17 @@ func (m *Statements) Reset() { *m = Statements{} }
func (m *Statements) String() string { return proto.CompactTextString(m) }
func (*Statements) ProtoMessage() {}
func (*Statements) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{6}
return fileDescriptor_7bf7b4c7fef2f66e, []int{6}
}
func (m *Statements) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Statements.Unmarshal(m, b)
}
func (m *Statements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Statements.Marshal(b, m, deterministic)
}
func (dst *Statements) XXX_Merge(src proto.Message) {
xxx_messageInfo_Statements.Merge(dst, src)
func (m *Statements) XXX_Merge(src proto.Message) {
xxx_messageInfo_Statements.Merge(m, src)
}
func (m *Statements) XXX_Size() int {
return xxx_messageInfo_Statements.Size(m)
@ -423,16 +432,17 @@ func (m *UsernameConfig) Reset() { *m = UsernameConfig{} }
func (m *UsernameConfig) String() string { return proto.CompactTextString(m) }
func (*UsernameConfig) ProtoMessage() {}
func (*UsernameConfig) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{7}
return fileDescriptor_7bf7b4c7fef2f66e, []int{7}
}
func (m *UsernameConfig) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UsernameConfig.Unmarshal(m, b)
}
func (m *UsernameConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UsernameConfig.Marshal(b, m, deterministic)
}
func (dst *UsernameConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_UsernameConfig.Merge(dst, src)
func (m *UsernameConfig) XXX_Merge(src proto.Message) {
xxx_messageInfo_UsernameConfig.Merge(m, src)
}
func (m *UsernameConfig) XXX_Size() int {
return xxx_messageInfo_UsernameConfig.Size(m)
@ -468,16 +478,17 @@ func (m *InitResponse) Reset() { *m = InitResponse{} }
func (m *InitResponse) String() string { return proto.CompactTextString(m) }
func (*InitResponse) ProtoMessage() {}
func (*InitResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{8}
return fileDescriptor_7bf7b4c7fef2f66e, []int{8}
}
func (m *InitResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_InitResponse.Unmarshal(m, b)
}
func (m *InitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_InitResponse.Marshal(b, m, deterministic)
}
func (dst *InitResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_InitResponse.Merge(dst, src)
func (m *InitResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_InitResponse.Merge(m, src)
}
func (m *InitResponse) XXX_Size() int {
return xxx_messageInfo_InitResponse.Size(m)
@ -507,16 +518,17 @@ func (m *CreateUserResponse) Reset() { *m = CreateUserResponse{} }
func (m *CreateUserResponse) String() string { return proto.CompactTextString(m) }
func (*CreateUserResponse) ProtoMessage() {}
func (*CreateUserResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{9}
return fileDescriptor_7bf7b4c7fef2f66e, []int{9}
}
func (m *CreateUserResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateUserResponse.Unmarshal(m, b)
}
func (m *CreateUserResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateUserResponse.Marshal(b, m, deterministic)
}
func (dst *CreateUserResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateUserResponse.Merge(dst, src)
func (m *CreateUserResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateUserResponse.Merge(m, src)
}
func (m *CreateUserResponse) XXX_Size() int {
return xxx_messageInfo_CreateUserResponse.Size(m)
@ -552,16 +564,17 @@ func (m *TypeResponse) Reset() { *m = TypeResponse{} }
func (m *TypeResponse) String() string { return proto.CompactTextString(m) }
func (*TypeResponse) ProtoMessage() {}
func (*TypeResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{10}
return fileDescriptor_7bf7b4c7fef2f66e, []int{10}
}
func (m *TypeResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TypeResponse.Unmarshal(m, b)
}
func (m *TypeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TypeResponse.Marshal(b, m, deterministic)
}
func (dst *TypeResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_TypeResponse.Merge(dst, src)
func (m *TypeResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_TypeResponse.Merge(m, src)
}
func (m *TypeResponse) XXX_Size() int {
return xxx_messageInfo_TypeResponse.Size(m)
@ -590,16 +603,17 @@ func (m *RotateRootCredentialsResponse) Reset() { *m = RotateRootCredent
func (m *RotateRootCredentialsResponse) String() string { return proto.CompactTextString(m) }
func (*RotateRootCredentialsResponse) ProtoMessage() {}
func (*RotateRootCredentialsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{11}
return fileDescriptor_7bf7b4c7fef2f66e, []int{11}
}
func (m *RotateRootCredentialsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RotateRootCredentialsResponse.Unmarshal(m, b)
}
func (m *RotateRootCredentialsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RotateRootCredentialsResponse.Marshal(b, m, deterministic)
}
func (dst *RotateRootCredentialsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_RotateRootCredentialsResponse.Merge(dst, src)
func (m *RotateRootCredentialsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_RotateRootCredentialsResponse.Merge(m, src)
}
func (m *RotateRootCredentialsResponse) XXX_Size() int {
return xxx_messageInfo_RotateRootCredentialsResponse.Size(m)
@ -627,16 +641,17 @@ func (m *Empty) Reset() { *m = Empty{} }
func (m *Empty) String() string { return proto.CompactTextString(m) }
func (*Empty) ProtoMessage() {}
func (*Empty) Descriptor() ([]byte, []int) {
return fileDescriptor_database_4b48b7d6479beb92, []int{12}
return fileDescriptor_7bf7b4c7fef2f66e, []int{12}
}
func (m *Empty) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Empty.Unmarshal(m, b)
}
func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
}
func (dst *Empty) XXX_Merge(src proto.Message) {
xxx_messageInfo_Empty.Merge(dst, src)
func (m *Empty) XXX_Merge(src proto.Message) {
xxx_messageInfo_Empty.Merge(m, src)
}
func (m *Empty) XXX_Size() int {
return xxx_messageInfo_Empty.Size(m)
@ -968,10 +983,10 @@ var _Database_serviceDesc = grpc.ServiceDesc{
}
func init() {
proto.RegisterFile("builtin/logical/database/dbplugin/database.proto", fileDescriptor_database_4b48b7d6479beb92)
proto.RegisterFile("builtin/logical/database/dbplugin/database.proto", fileDescriptor_7bf7b4c7fef2f66e)
}
var fileDescriptor_database_4b48b7d6479beb92 = []byte{
var fileDescriptor_7bf7b4c7fef2f66e = []byte{
// 724 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xd1, 0x4e, 0xdb, 0x4a,
0x10, 0x95, 0x93, 0x00, 0xc9, 0x80, 0x80, 0xec, 0x05, 0x64, 0xf9, 0x72, 0x6f, 0x91, 0x1f, 0x28,

View file

@ -10,6 +10,7 @@ import (
log "github.com/hashicorp/go-hclog"
plugin "github.com/hashicorp/go-plugin"
"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/pluginutil"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
@ -93,8 +94,8 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
cores := cluster.Cores
sys := vault.TestDynamicSystemView(cores[0].Core)
vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", "TestPlugin_GRPC_Main")
vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin-netRPC", "TestPlugin_NetRPC_Main")
vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin", "TestPlugin_GRPC_Main", []string{}, "")
vault.TestAddTestPlugin(t, cores[0].Core, "test-plugin-netRPC", "TestPlugin_NetRPC_Main", []string{}, "")
return cluster, sys
}
@ -147,7 +148,7 @@ func TestPlugin_Init(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.Cleanup()
dbRaw, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, log.NewNullLogger())
dbRaw, err := dbplugin.PluginFactory(namespace.TestContext(), "test-plugin", sys, log.NewNullLogger())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -171,7 +172,7 @@ func TestPlugin_CreateUser(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.Cleanup()
db, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, log.NewNullLogger())
db, err := dbplugin.PluginFactory(namespace.TestContext(), "test-plugin", sys, log.NewNullLogger())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -211,7 +212,7 @@ func TestPlugin_RenewUser(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.Cleanup()
db, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, log.NewNullLogger())
db, err := dbplugin.PluginFactory(namespace.TestContext(), "test-plugin", sys, log.NewNullLogger())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -245,7 +246,7 @@ func TestPlugin_RevokeUser(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.Cleanup()
db, err := dbplugin.PluginFactory(context.Background(), "test-plugin", sys, log.NewNullLogger())
db, err := dbplugin.PluginFactory(namespace.TestContext(), "test-plugin", sys, log.NewNullLogger())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -287,7 +288,7 @@ func TestPlugin_NetRPC_Init(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.Cleanup()
dbRaw, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, log.NewNullLogger())
dbRaw, err := dbplugin.PluginFactory(namespace.TestContext(), "test-plugin-netRPC", sys, log.NewNullLogger())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -311,7 +312,7 @@ func TestPlugin_NetRPC_CreateUser(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.Cleanup()
db, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, log.NewNullLogger())
db, err := dbplugin.PluginFactory(namespace.TestContext(), "test-plugin-netRPC", sys, log.NewNullLogger())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -351,7 +352,7 @@ func TestPlugin_NetRPC_RenewUser(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.Cleanup()
db, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, log.NewNullLogger())
db, err := dbplugin.PluginFactory(namespace.TestContext(), "test-plugin-netRPC", sys, log.NewNullLogger())
if err != nil {
t.Fatalf("err: %s", err)
}
@ -385,7 +386,7 @@ func TestPlugin_NetRPC_RevokeUser(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.Cleanup()
db, err := dbplugin.PluginFactory(context.Background(), "test-plugin-netRPC", sys, log.NewNullLogger())
db, err := dbplugin.PluginFactory(namespace.TestContext(), "test-plugin-netRPC", sys, log.NewNullLogger())
if err != nil {
t.Fatalf("err: %s", err)
}

View file

@ -501,7 +501,7 @@ func parseOtherSANs(others []string) (map[string][]string, error) {
if len(splitType) != 2 {
return nil, fmt.Errorf("expected a colon in other SAN %q", other)
}
if strings.ToLower(splitType[0]) != "utf8" {
if !strings.EqualFold(splitType[0], "utf8") {
return nil, fmt.Errorf("only utf8 other SANs are supported; found non-supported type in other SAN %q", other)
}
result[splitOther[0]] = append(result[splitOther[0]], splitType[1])

View file

@ -9,6 +9,7 @@ import (
"time"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@ -56,6 +57,11 @@ Defaults to 72 hours.`,
}
func (b *backend) pathTidyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
// If we are a performance standby forward the request to the active node
if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) {
return nil, logical.ErrReadOnly
}
safetyBuffer := d.Get("safety_buffer").(int)
tidyCertStore := d.Get("tidy_cert_store").(bool)
tidyRevokedCerts := d.Get("tidy_revoked_certs").(bool)

View file

@ -19,6 +19,11 @@ func (b *backend) pathRestore() *framework.Path {
Type: framework.TypeString,
Description: "If set, this will be the name of the restored key.",
},
"force": &framework.FieldSchema{
Type: framework.TypeBool,
Description: "If set and a key by the given name exists, force the restore operation and override the key.",
Default: false,
},
},
Callbacks: map[logical.Operation]framework.OperationFunc{
@ -32,11 +37,12 @@ func (b *backend) pathRestore() *framework.Path {
func (b *backend) pathRestoreUpdate(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
backupB64 := d.Get("backup").(string)
force := d.Get("force").(bool)
if backupB64 == "" {
return logical.ErrorResponse("'backup' must be supplied"), nil
}
return nil, b.lm.RestorePolicy(ctx, req.Storage, d.Get("name").(string), backupB64)
return nil, b.lm.RestorePolicy(ctx, req.Storage, d.Get("name").(string), backupB64, force)
}
const pathRestoreHelpSyn = `Restore the named key`
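As a usage sketch (not part of this change): with the secrets engine mounted at the default "transit/" path, the new force flag can be passed through the Go API client; the backup value below is a placeholder for a blob previously read from the backup endpoint.

package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Assumes VAULT_ADDR and VAULT_TOKEN are set in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	backup := "..." // placeholder: base64 backup previously read from transit/backup/<name>
	// force=true overrides an existing key of the same name; without it the
	// restore fails if the key already exists.
	if _, err := client.Logical().Write("transit/restore", map[string]interface{}{
		"backup": backup,
		"force":  true,
	}); err != nil {
		log.Fatal(err)
	}
}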

View file

@ -0,0 +1,195 @@
package transit
import (
"context"
"fmt"
"testing"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/logical"
)
func TestTransit_Restore(t *testing.T) {
// Test setup:
// - Create a key
// - Configure it to be exportable, allowing deletion, and backups
// - Capture backup
// - Delete key
// - Run test cases
//
// Each test case should start with no key present. If the 'Seed' parameter is
// in the struct, we'll start by restoring it (without force) to run that test
// as if the key already existed
keyType := "aes256-gcm96"
b, s := createBackendWithStorage(t)
keyName := testhelpers.RandomWithPrefix("my-key")
// Create a key
keyReq := &logical.Request{
Path: "keys/" + keyName,
Operation: logical.UpdateOperation,
Storage: s,
Data: map[string]interface{}{
"type": keyType,
"exportable": true,
},
}
resp, err := b.HandleRequest(context.Background(), keyReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("resp: %#v\nerr: %v", resp, err)
}
// Configure the key to allow its deletion and backup
configReq := &logical.Request{
Path: fmt.Sprintf("keys/%s/config", keyName),
Operation: logical.UpdateOperation,
Storage: s,
Data: map[string]interface{}{
"deletion_allowed": true,
"allow_plaintext_backup": true,
},
}
resp, err = b.HandleRequest(context.Background(), configReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("resp: %#v\nerr: %v", resp, err)
}
// Take a backup of the key
backupReq := &logical.Request{
Path: "backup/" + keyName,
Operation: logical.ReadOperation,
Storage: s,
}
resp, err = b.HandleRequest(context.Background(), backupReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("resp: %#v\nerr: %v", resp, err)
}
backupKey := resp.Data["backup"].(string)
if backupKey == "" {
t.Fatal("failed to get a backup")
}
// Delete the key to start test cases with clean slate
keyReq.Operation = logical.DeleteOperation
resp, err = b.HandleRequest(context.Background(), keyReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("resp: %#v\nerr: %v", resp, err)
}
// helper func to get a pointer value for a boolean
boolPtr := func(b bool) *bool {
return &b
}
keyExistsError := fmt.Errorf("key \"%s\" already exists", keyName)
testCases := []struct {
Name string
// Seed determines if we start the test by restoring the initial backup we
// took, to test a restore operation based on the key existing or not
Seed bool
// Force is a pointer to differentiate between default false and given false
Force *bool
// The error we expect, if any
ExpectedErr error
}{
{
// key does not already exist
Name: "Default restore",
},
{
// key already exists
Name: "Restore-without-force",
Seed: true,
ExpectedErr: keyExistsError,
},
{
// key already exists, use force to force a restore
Name: "Restore-with-force",
Seed: true,
Force: boolPtr(true),
},
{
// using force shouldn't matter if the key doesn't exist
Name: "Restore-with-force-no-seed",
Force: boolPtr(true),
},
{
// using force shouldn't matter if the key doesn't exist
Name: "Restore-force-false",
Force: boolPtr(false),
},
{
// using false force should still error
Name: "Restore-force-false",
Seed: true,
Force: boolPtr(false),
ExpectedErr: keyExistsError,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
var resp *logical.Response
var err error
if tc.Seed {
// restore our key to test a pre-existing key
seedRestoreReq := &logical.Request{
Path: "restore",
Operation: logical.UpdateOperation,
Storage: s,
Data: map[string]interface{}{
"backup": backupKey,
},
}
resp, err := b.HandleRequest(context.Background(), seedRestoreReq)
if resp != nil && resp.IsError() {
t.Fatalf("resp: %#v\nerr: %v", resp, err)
}
if err != nil && tc.ExpectedErr == nil {
t.Fatalf("did not expect an error in SeedKey restore: %s", err)
}
}
restoreReq := &logical.Request{
Path: "restore",
Operation: logical.UpdateOperation,
Storage: s,
Data: map[string]interface{}{
"backup": backupKey,
},
}
if tc.Force != nil {
restoreReq.Data["force"] = *tc.Force
}
resp, err = b.HandleRequest(context.Background(), restoreReq)
if resp != nil && resp.IsError() {
t.Fatalf("resp: %#v\nerr: %v", resp, err)
}
if err == nil && tc.ExpectedErr != nil {
t.Fatalf("expected an error, but got none")
}
if err != nil && tc.ExpectedErr == nil {
t.Fatalf("unexpected error:%s", err)
}
if err != nil && tc.ExpectedErr != nil {
if err.Error() != tc.ExpectedErr.Error() {
t.Fatalf("expected error: (%s), got: (%s)", tc.ExpectedErr.Error(), err.Error())
}
}
// cleanup / delete key after each run
keyReq.Operation = logical.DeleteOperation
resp, err = b.HandleRequest(context.Background(), keyReq)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("resp: %#v\nerr: %v", resp, err)
}
})
}
}

View file

@ -89,7 +89,7 @@ func testConfig(t *testing.T) (*logical.BackendConfig, func()) {
os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMain")
vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestBackend_PluginMain", []string{}, "")
return config, func() {
cluster.Cleanup()

View file

@ -281,7 +281,6 @@ func (c *AgentCommand) Run(args []string) int {
authConfig := &auth.AuthConfig{
Logger: c.logger.Named(fmt.Sprintf("auth.%s", config.AutoAuth.Method.Type)),
MountPath: config.AutoAuth.Method.MountPath,
WrapTTL: config.AutoAuth.Method.WrapTTL,
Config: config.AutoAuth.Method.Config,
}
switch config.AutoAuth.Method.Type {
@ -324,8 +323,9 @@ func (c *AgentCommand) Run(args []string) int {
})
ah := auth.NewAuthHandler(&auth.AuthHandlerConfig{
Logger: c.logger.Named("auth.handler"),
Client: c.client,
Logger: c.logger.Named("auth.handler"),
Client: c.client,
WrapTTL: config.AutoAuth.Method.WrapTTL,
})
// Start things running

View file

@ -1,9 +1,7 @@
package agent
import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"os"
"strings"
@ -29,14 +27,11 @@ import (
)
const (
envVarRunAccTests = "VAULT_ACC"
envVarAccessKey = "ALICLOUD_TEST_ACCESS_KEY"
envVarSecretKey = "ALICLOUD_TEST_SECRET_KEY"
envVarRoleArn = "ALICLOUD_TEST_ROLE_ARN"
envVarAlicloudAccessKey = "ALICLOUD_TEST_ACCESS_KEY"
envVarAlicloudSecretKey = "ALICLOUD_TEST_SECRET_KEY"
envVarAlicloudRoleArn = "ALICLOUD_TEST_ROLE_ARN"
)
var runAcceptanceTests = os.Getenv(envVarRunAccTests) == "1"
func TestAliCloudEndToEnd(t *testing.T) {
if !runAcceptanceTests {
t.SkipNow()
@ -66,7 +61,7 @@ func TestAliCloudEndToEnd(t *testing.T) {
}
if _, err := client.Logical().Write("auth/alicloud/role/test", map[string]interface{}{
"arn": os.Getenv(envVarRoleArn),
"arn": os.Getenv(envVarAlicloudRoleArn),
}); err != nil {
t.Fatal(err)
}
@ -91,9 +86,9 @@ func TestAliCloudEndToEnd(t *testing.T) {
Logger: logger.Named("auth.alicloud"),
MountPath: "auth/alicloud",
Config: map[string]interface{}{
"role": "test",
"region": "us-west-1",
"cred_check_freq_seconds": 1,
"role": "test",
"region": "us-west-1",
"credential_poll_interval": 1,
},
})
if err != nil {
@ -162,22 +157,10 @@ func TestAliCloudEndToEnd(t *testing.T) {
}
}
func readToken(fileName string) (*logical.HTTPWrapInfo, error) {
b, err := ioutil.ReadFile(fileName)
if err != nil {
return nil, err
}
wrapper := &logical.HTTPWrapInfo{}
if err := json.NewDecoder(bytes.NewReader(b)).Decode(wrapper); err != nil {
return nil, err
}
return wrapper, nil
}
func setAliCloudEnvCreds() error {
config := sdk.NewConfig()
config.Scheme = "https"
client, err := sts.NewClientWithOptions("us-west-1", config, credentials.NewAccessKeyCredential(os.Getenv(envVarAccessKey), os.Getenv(envVarSecretKey)))
client, err := sts.NewClientWithOptions("us-west-1", config, credentials.NewAccessKeyCredential(os.Getenv(envVarAlicloudAccessKey), os.Getenv(envVarAlicloudSecretKey)))
if err != nil {
return err
}
@ -186,7 +169,7 @@ func setAliCloudEnvCreds() error {
return err
}
assumeRoleReq := sts.CreateAssumeRoleRequest()
assumeRoleReq.RoleArn = os.Getenv(envVarRoleArn)
assumeRoleReq.RoleArn = os.Getenv(envVarAlicloudRoleArn)
assumeRoleReq.RoleSessionName = strings.Replace(roleSessionName, "-", "", -1)
assumeRoleResp, err := client.AssumeRole(assumeRoleReq)
if err != nil {

View file

@ -60,9 +60,11 @@ func NewAliCloudAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
// Check for an optional custom frequency at which we should poll for creds.
credCheckFreqSec := defaultCredCheckFreqSeconds
if checkFreqRaw, ok := conf.Config["cred_check_freq_seconds"]; ok {
if checkFreqRaw, ok := conf.Config["credential_poll_interval"]; ok {
if credFreq, ok := checkFreqRaw.(int); ok {
credCheckFreqSec = credFreq
} else {
return nil, errors.New("could not convert 'credential_poll_interval' config value to int")
}
}

View file

@ -7,13 +7,17 @@ import (
"fmt"
"io/ioutil"
"net/http"
"reflect"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/hashicorp/errwrap"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-hclog"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/api"
awsauth "github.com/hashicorp/vault/builtin/credential/aws"
"github.com/hashicorp/vault/builtin/credential/aws"
"github.com/hashicorp/vault/command/agent/auth"
)
@ -21,18 +25,36 @@ const (
typeEC2 = "ec2"
typeIAM = "iam"
identityEndpoint = "http://169.254.169.254/latest/dynamic/instance-identity"
/*
IAM creds can be inferred from instance metadata or the container
identity service, and those creds expire at varying intervals with
new creds becoming available at likewise varying intervals. Let's
default to polling once a minute so all changes can be picked up
rather quickly. This is configurable, however.
*/
defaultCredentialPollInterval = 60
)
type awsMethod struct {
logger hclog.Logger
authType string
nonce string
mountPath string
role string
headerValue string
accessKey string
secretKey string
sessionToken string
logger hclog.Logger
authType string
nonce string
mountPath string
role string
headerValue string
// These are used to share the latest creds safely across goroutines.
credLock sync.Mutex
lastCreds *credentials.Credentials
// Notifies the outer environment that it should call Authenticate again.
credsFound chan struct{}
// Detects that the outer environment is closing.
stopCh chan struct{}
}
func NewAWSAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
@ -44,8 +66,10 @@ func NewAWSAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
}
a := &awsMethod{
logger: conf.Logger,
mountPath: conf.MountPath,
logger: conf.Logger,
mountPath: conf.MountPath,
credsFound: make(chan struct{}),
stopCh: make(chan struct{}),
}
typeRaw, ok := conf.Config["type"]
@ -75,25 +99,28 @@ func NewAWSAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
return nil, errors.New("'type' value is invalid")
}
accessKey := ""
accessKeyRaw, ok := conf.Config["access_key"]
if ok {
a.accessKey, ok = accessKeyRaw.(string)
accessKey, ok = accessKeyRaw.(string)
if !ok {
return nil, errors.New("could not convert 'access_key' value into string")
}
}
secretKey := ""
secretKeyRaw, ok := conf.Config["secret_key"]
if ok {
a.secretKey, ok = secretKeyRaw.(string)
secretKey, ok = secretKeyRaw.(string)
if !ok {
return nil, errors.New("could not convert 'secret_key' value into string")
}
}
sessionToken := ""
sessionTokenRaw, ok := conf.Config["session_token"]
if ok {
a.sessionToken, ok = sessionTokenRaw.(string)
sessionToken, ok = sessionTokenRaw.(string)
if !ok {
return nil, errors.New("could not convert 'session_token' value into string")
}
@ -107,6 +134,29 @@ func NewAWSAuthMethod(conf *auth.AuthConfig) (auth.AuthMethod, error) {
}
}
if a.authType == typeIAM {
// Check for an optional custom frequency at which we should poll for creds.
credentialPollIntervalSec := defaultCredentialPollInterval
if credentialPollIntervalRaw, ok := conf.Config["credential_poll_interval"]; ok {
if credentialPollInterval, ok := credentialPollIntervalRaw.(int); ok {
credentialPollIntervalSec = credentialPollInterval
} else {
return nil, errors.New("could not convert 'credential_poll_interval' into int")
}
}
// Do an initial population of the creds because we want to err right away if we can't
// even get a first set.
creds, err := awsauth.RetrieveCreds(accessKey, secretKey, sessionToken)
if err != nil {
return nil, err
}
a.lastCreds = creds
go a.pollForCreds(accessKey, secretKey, sessionToken, credentialPollIntervalSec)
}
return a, nil
}
@ -173,18 +223,22 @@ func (a *awsMethod) Authenticate(ctx context.Context, client *api.Client) (retTo
// Add the reauthentication value, if we have one
if a.nonce == "" {
uuid, err := uuid.GenerateUUID()
uid, err := uuid.GenerateUUID()
if err != nil {
retErr = errwrap.Wrapf("error generating uuid for reauthentication value: {{err}}", err)
return
}
a.nonce = uuid
a.nonce = uid
}
data["nonce"] = a.nonce
default:
// This is typeIAM.
a.credLock.Lock()
defer a.credLock.Unlock()
var err error
data, err = awsauth.GenerateLoginData(a.accessKey, a.secretKey, a.sessionToken, a.headerValue)
data, err = awsauth.GenerateLoginData(a.lastCreds, a.headerValue)
if err != nil {
retErr = errwrap.Wrapf("error creating login value: {{err}}", err)
return
@ -197,11 +251,60 @@ func (a *awsMethod) Authenticate(ctx context.Context, client *api.Client) (retTo
}
func (a *awsMethod) NewCreds() chan struct{} {
return nil
return a.credsFound
}
func (a *awsMethod) CredSuccess() {
}
func (a *awsMethod) CredSuccess() {}
func (a *awsMethod) Shutdown() {
close(a.credsFound)
close(a.stopCh)
}
func (a *awsMethod) pollForCreds(accessKey, secretKey, sessionToken string, frequencySeconds int) {
ticker := time.NewTicker(time.Duration(frequencySeconds) * time.Second)
defer ticker.Stop()
for {
select {
case <-a.stopCh:
a.logger.Trace("shutdown triggered, stopping aws auth handler")
return
case <-ticker.C:
if err := a.checkCreds(accessKey, secretKey, sessionToken); err != nil {
a.logger.Warn("unable to retrieve current creds, retaining last creds", err)
}
}
}
}
func (a *awsMethod) checkCreds(accessKey, secretKey, sessionToken string) error {
a.credLock.Lock()
defer a.credLock.Unlock()
a.logger.Trace("checking for new credentials")
currentCreds, err := awsauth.RetrieveCreds(accessKey, secretKey, sessionToken)
if err != nil {
return err
}
currentVal, err := currentCreds.Get()
if err != nil {
return err
}
lastVal, err := a.lastCreds.Get()
if err != nil {
return err
}
// These will always have different pointers regardless of whether their
// values are identical, hence the use of DeepEqual.
if !a.lastCreds.IsExpired() && reflect.DeepEqual(currentVal, lastVal) {
a.logger.Trace("credentials are unchanged and still valid")
return nil
}
a.lastCreds = currentCreds
a.logger.Trace("new credentials detected, triggering Authenticate")
a.credsFound <- struct{}{}
return nil
}

View file

@ -0,0 +1,218 @@
package agent
import (
"context"
"fmt"
"io/ioutil"
"os"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/api"
vaultaws "github.com/hashicorp/vault/builtin/credential/aws"
"github.com/hashicorp/vault/command/agent/auth"
agentaws "github.com/hashicorp/vault/command/agent/auth/aws"
"github.com/hashicorp/vault/command/agent/sink"
"github.com/hashicorp/vault/command/agent/sink/file"
"github.com/hashicorp/vault/helper/logging"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
)
const (
// These are the access key and secret that should be used when calling "AssumeRole"
// for the given AWS_TEST_ROLE_ARN.
envVarAwsTestAccessKey = "AWS_TEST_ACCESS_KEY"
envVarAwsTestSecretKey = "AWS_TEST_SECRET_KEY"
envVarAwsTestRoleArn = "AWS_TEST_ROLE_ARN"
// The AWS SDK doesn't export its standard env vars so they're captured here.
// These are used for the duration of the test to make sure the agent is able to
// pick up creds from the env.
//
// To run this test, do not set these. Only the above ones need to be set.
envVarAwsAccessKey = "AWS_ACCESS_KEY_ID"
envVarAwsSecretKey = "AWS_SECRET_ACCESS_KEY"
envVarAwsSessionToken = "AWS_SESSION_TOKEN"
)
func TestAWSEndToEnd(t *testing.T) {
if !runAcceptanceTests {
t.SkipNow()
}
logger := logging.NewVaultLogger(hclog.Trace)
coreConfig := &vault.CoreConfig{
Logger: logger,
CredentialBackends: map[string]logical.Factory{
"aws": vaultaws.Factory,
},
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
defer cluster.Cleanup()
vault.TestWaitActive(t, cluster.Cores[0].Core)
client := cluster.Cores[0].Client
// Setup Vault
if err := client.Sys().EnableAuthWithOptions("aws", &api.EnableAuthOptions{
Type: "aws",
}); err != nil {
t.Fatal(err)
}
if _, err := client.Logical().Write("auth/aws/role/test", map[string]interface{}{
"auth_type": "iam",
"policies": "default",
// Retain through the account number of the given arn and wildcard the rest.
"bound_iam_principal_arn": os.Getenv(envVarAwsTestRoleArn)[:25] + "*",
}); err != nil {
fmt.Println(err)
t.Fatal(err)
}
ctx, cancelFunc := context.WithCancel(context.Background())
timer := time.AfterFunc(30*time.Second, func() {
cancelFunc()
})
defer timer.Stop()
// We're going to feed aws auth creds via env variables.
if err := setAwsEnvCreds(); err != nil {
t.Fatal(err)
}
defer func() {
if err := unsetAwsEnvCreds(); err != nil {
t.Fatal(err)
}
}()
am, err := agentaws.NewAWSAuthMethod(&auth.AuthConfig{
Logger: logger.Named("auth.aws"),
MountPath: "auth/aws",
Config: map[string]interface{}{
"role": "test",
"type": "iam",
"credential_poll_interval": 1,
},
})
if err != nil {
t.Fatal(err)
}
ahConfig := &auth.AuthHandlerConfig{
Logger: logger.Named("auth.handler"),
Client: client,
}
ah := auth.NewAuthHandler(ahConfig)
go ah.Run(ctx, am)
defer func() {
<-ah.DoneCh
}()
tmpFile, err := ioutil.TempFile("", "auth.tokensink.test.")
if err != nil {
t.Fatal(err)
}
tokenSinkFileName := tmpFile.Name()
tmpFile.Close()
os.Remove(tokenSinkFileName)
t.Logf("output: %s", tokenSinkFileName)
config := &sink.SinkConfig{
Logger: logger.Named("sink.file"),
Config: map[string]interface{}{
"path": tokenSinkFileName,
},
WrapTTL: 10 * time.Second,
}
fs, err := file.NewFileSink(config)
if err != nil {
t.Fatal(err)
}
config.Sink = fs
ss := sink.NewSinkServer(&sink.SinkServerConfig{
Logger: logger.Named("sink.server"),
Client: client,
})
go ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config})
defer func() {
<-ss.DoneCh
}()
if stat, err := os.Lstat(tokenSinkFileName); err == nil {
t.Fatalf("expected err but got %s", stat)
} else if !os.IsNotExist(err) {
t.Fatal("expected notexist err")
}
// Wait 2 seconds for the env variables to be detected and an auth to be generated.
time.Sleep(time.Second * 2)
token, err := readToken(tokenSinkFileName)
if err != nil {
t.Fatal(err)
}
if token.Token == "" {
t.Fatal("expected token but didn't receive it")
}
}
func setAwsEnvCreds() error {
cfg := &aws.Config{
Credentials: credentials.NewStaticCredentials(os.Getenv(envVarAwsTestAccessKey), os.Getenv(envVarAwsTestSecretKey), ""),
}
sess, err := session.NewSession(cfg)
if err != nil {
return err
}
client := sts.New(sess)
roleArn := os.Getenv(envVarAwsTestRoleArn)
uid, err := uuid.GenerateUUID()
if err != nil {
return err
}
input := &sts.AssumeRoleInput{
RoleArn: &roleArn,
RoleSessionName: &uid,
}
output, err := client.AssumeRole(input)
if err != nil {
return err
}
if err := os.Setenv(envVarAwsAccessKey, *output.Credentials.AccessKeyId); err != nil {
return err
}
if err := os.Setenv(envVarAwsSecretKey, *output.Credentials.SecretAccessKey); err != nil {
return err
}
return os.Setenv(envVarAwsSessionToken, *output.Credentials.SessionToken)
}
func unsetAwsEnvCreds() error {
if err := os.Unsetenv(envVarAwsAccessKey); err != nil {
return err
}
if err := os.Unsetenv(envVarAwsSecretKey); err != nil {
return err
}
return os.Unsetenv(envVarAwsSessionToken)
}

View file

@ -1,16 +1,25 @@
package agent
import (
"bytes"
"crypto/ecdsa"
"crypto/x509"
"encoding/json"
"encoding/pem"
"io/ioutil"
"os"
"testing"
"time"
"github.com/hashicorp/vault/logical"
jose "gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
)
const envVarRunAccTests = "VAULT_ACC"
var runAcceptanceTests = os.Getenv(envVarRunAccTests) == "1"
func GetTestJWT(t *testing.T) (string, *ecdsa.PrivateKey) {
t.Helper()
cl := jwt.Claims{
@ -51,6 +60,18 @@ func GetTestJWT(t *testing.T) (string, *ecdsa.PrivateKey) {
return raw, key
}
func readToken(fileName string) (*logical.HTTPWrapInfo, error) {
b, err := ioutil.ReadFile(fileName)
if err != nil {
return nil, err
}
wrapper := &logical.HTTPWrapInfo{}
if err := json.NewDecoder(bytes.NewReader(b)).Decode(wrapper); err != nil {
return nil, err
}
return wrapper, nil
}
const (
TestECDSAPrivKey string = `-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIKfldwWLPYsHjRL9EVTsjSbzTtcGRu6icohNfIqcb6A+oAoGCCqGSM49

View file

@ -7,6 +7,7 @@ import (
"syscall"
ad "github.com/hashicorp/vault-plugin-secrets-ad/plugin"
alicloud "github.com/hashicorp/vault-plugin-secrets-alicloud"
azure "github.com/hashicorp/vault-plugin-secrets-azure"
gcp "github.com/hashicorp/vault-plugin-secrets-gcp/plugin"
kv "github.com/hashicorp/vault-plugin-secrets-kv"
@ -121,6 +122,7 @@ var (
logicalBackends = map[string]logical.Factory{
"ad": ad.Factory,
"alicloud": alicloud.Factory,
"aws": aws.Factory,
"azure": azure.Factory,
"cassandra": cassandra.Factory,
@ -373,6 +375,13 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) {
BaseCommand: getBaseCommand(),
}, nil
},
"operator migrate": func() (cli.Command, error) {
return &OperatorMigrateCommand{
BaseCommand: getBaseCommand(),
PhysicalBackends: physicalBackends,
ShutdownCh: MakeShutdownCh(),
}, nil
},
"operator rekey": func() (cli.Command, error) {
return &OperatorRekeyCommand{
BaseCommand: getBaseCommand(),

View file

@ -131,8 +131,7 @@ func (t TableFormatter) Output(ui cli.Ui, secret *api.Secret, data interface{})
case []string:
return t.OutputList(ui, nil, data)
case map[string]interface{}:
t.OutputMap(ui, data.(map[string]interface{}))
return nil
return t.OutputMap(ui, data.(map[string]interface{}))
default:
return errors.New("cannot use the table formatter for this type")
}
@ -246,11 +245,7 @@ func (t TableFormatter) OutputSecret(ui cli.Ui, secret *api.Secret) error {
v := secret.Data[k]
// If the field "looks" like a TTL, print it as a time duration instead.
if k == "period" || strings.HasSuffix(k, "_period") ||
k == "ttl" || strings.HasSuffix(k, "_ttl") ||
k == "duration" || strings.HasSuffix(k, "_duration") ||
k == "lease_max" || k == "ttl_max" {
if looksLikeDuration(k) {
v = humanDurationInt(v)
}
@ -273,7 +268,7 @@ func (t TableFormatter) OutputSecret(ui cli.Ui, secret *api.Secret) error {
return nil
}
func (t TableFormatter) OutputMap(ui cli.Ui, data map[string]interface{}) {
func (t TableFormatter) OutputMap(ui cli.Ui, data map[string]interface{}) error {
out := make([]string, 0, len(data)+1)
if len(data) > 0 {
keys := make([]string, 0, len(data))
@ -283,14 +278,21 @@ func (t TableFormatter) OutputMap(ui cli.Ui, data map[string]interface{}) {
sort.Strings(keys)
for _, k := range keys {
out = append(out, fmt.Sprintf("%s %s %v", k, hopeDelim, data[k]))
v := data[k]
// If the field "looks" like a TTL, print it as a time duration instead.
if looksLikeDuration(k) {
v = humanDurationInt(v)
}
out = append(out, fmt.Sprintf("%s %s %v", k, hopeDelim, v))
}
}
// If we got this far and still don't have any data, there's nothing to print,
// sorry.
if len(out) == 0 {
return
return nil
}
// Prepend the header
@ -299,6 +301,7 @@ func (t TableFormatter) OutputMap(ui cli.Ui, data map[string]interface{}) {
ui.Output(tableOutput(out, &columnize.Config{
Delim: hopeDelim,
}))
return nil
}
// OutputSealStatus will print *api.SealStatusResponse in the CLI according to the format provided
@ -378,3 +381,13 @@ func OutputSealStatus(ui cli.Ui, client *api.Client, status *api.SealStatusRespo
ui.Output(tableOutput(out, nil))
return 0
}
// looksLikeDuration checks if the given key "k" looks like a duration value.
// This is used to pretty-format duration values in responses, especially from
// plugins.
func looksLikeDuration(k string) bool {
return k == "period" || strings.HasSuffix(k, "_period") ||
k == "ttl" || strings.HasSuffix(k, "_ttl") ||
k == "duration" || strings.HasSuffix(k, "_duration") ||
k == "lease_max" || k == "ttl_max"
}
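Illustrative only (inferred from the checks above), the helper classifies keys like this:

// looksLikeDuration("ttl")        -> true  (exact match)
// looksLikeDuration("token_ttl")  -> true  (suffix "_ttl")
// looksLikeDuration("lease_max")  -> true  (special-cased key)
// looksLikeDuration("policies")   -> false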

View file

@ -10,6 +10,7 @@ import (
credToken "github.com/hashicorp/vault/builtin/credential/token"
credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
"github.com/hashicorp/vault/command/token"
"github.com/hashicorp/vault/vault"
)
func testLoginCommand(tb testing.TB) (*cli.MockUi, *LoginCommand) {
@ -78,7 +79,7 @@ func TestLoginCommand_Run(t *testing.T) {
t.Fatal(err)
}
if l, exp := len(storedToken), 36; l != exp {
if l, exp := len(storedToken), vault.TokenLength; l != exp {
t.Errorf("expected token to be %d characters, was %d: %q", exp, l, storedToken)
}
})
@ -205,7 +206,7 @@ func TestLoginCommand_Run(t *testing.T) {
// Verify only the token was printed
token := ui.OutputWriter.String()
if l, exp := len(token), 36; l != exp {
if l, exp := len(token), vault.TokenLength; l != exp {
t.Errorf("expected token to be %d characters, was %d: %q", exp, l, token)
}

View file

@ -3,14 +3,15 @@
package command
import (
"encoding/base64"
"io"
"os"
"regexp"
"strings"
"testing"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/xor"
"github.com/hashicorp/vault/vault"
"github.com/mitchellh/cli"
)
@ -40,7 +41,7 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
"-init",
"-otp", "not-a-valid-otp",
},
"illegal base64 data at input",
"OTP string is wrong length",
2,
},
{
@ -122,8 +123,8 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
t.Run("decode", func(t *testing.T) {
t.Parallel()
encoded := "L9MaZ/4mQanpOV6QeWd84g=="
otp := "dIeeezkjpDUv3fy7MYPOLQ=="
encoded := "Bxg9JQQqOCNKBRICNwMIRzo2J3cWCBRi"
otp := "3JhHkONiyiaNYj14nnD9xZQS"
client, closer := testVaultServer(t)
defer closer()
@ -150,7 +151,7 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
w.Close()
os.Stdout = old
expected := "5b54841c-c705-e59c-c6e4-a22b48e4b2cf"
expected := "4RUmoevJ3lsLni9sTXcNnRE1"
combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
if combined != expected {
t.Errorf("expected %q to be %q", combined, expected)
@ -160,7 +161,7 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
t.Run("cancel", func(t *testing.T) {
t.Parallel()
otp := "dIeeezkjpDUv3fy7MYPOLQ=="
otp := "3JhHkONiyiaNYj14nnD9xZQS"
client, closer := testVaultServer(t)
defer closer()
@ -199,7 +200,7 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
t.Run("init_otp", func(t *testing.T) {
t.Parallel()
otp := "dIeeezkjpDUv3fy7MYPOLQ=="
otp := "3JhHkONiyiaNYj14nnD9xZQS"
client, closer := testVaultServer(t)
defer closer()
@ -296,17 +297,16 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
t.Run("provide_arg", func(t *testing.T) {
t.Parallel()
otp := "dIeeezkjpDUv3fy7MYPOLQ=="
client, keys, closer := testVaultServerUnseal(t)
defer closer()
// Initialize a generation
status, err := client.Sys().GenerateRootInit(otp, "")
status, err := client.Sys().GenerateRootInit("", "")
if err != nil {
t.Fatal(err)
}
nonce := status.Nonce
otp := status.OTP
// Supply the first n-1 unseal keys
for _, key := range keys[:len(keys)-1] {
@ -340,16 +340,17 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
t.Fatalf("no match: %#v", match)
}
tokenBytes, err := xor.XORBase64(match[0][1], otp)
if err != nil {
t.Fatal(err)
}
token, err := uuid.FormatUUID(tokenBytes)
tokenBytes, err := base64.RawStdEncoding.DecodeString(match[0][1])
if err != nil {
t.Fatal(err)
}
if l, exp := len(token), 36; l != exp {
token, err := xor.XORBytes(tokenBytes, []byte(otp))
if err != nil {
t.Fatal(err)
}
if l, exp := len(token), vault.TokenLength; l != exp {
t.Errorf("expected %d to be %d: %s", l, exp, token)
}
})
@ -357,17 +358,16 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
t.Run("provide_stdin", func(t *testing.T) {
t.Parallel()
otp := "dIeeezkjpDUv3fy7MYPOLQ=="
client, keys, closer := testVaultServerUnseal(t)
defer closer()
// Initialize a generation
status, err := client.Sys().GenerateRootInit(otp, "")
status, err := client.Sys().GenerateRootInit("", "")
if err != nil {
t.Fatal(err)
}
nonce := status.Nonce
otp := status.OTP
// Supply the first n-1 unseal keys
for _, key := range keys[:len(keys)-1] {
@ -415,16 +415,28 @@ func TestOperatorGenerateRootCommand_Run(t *testing.T) {
t.Fatalf("no match: %#v", match)
}
tokenBytes, err := xor.XORBase64(match[0][1], otp)
if err != nil {
t.Fatal(err)
}
token, err := uuid.FormatUUID(tokenBytes)
// encodedOTP := base64.RawStdEncoding.EncodeToString([]byte(otp))
// tokenBytes, err := xor.XORBase64(match[0][1], encodedOTP)
// if err != nil {
// t.Fatal(err)
// }
// token, err := uuid.FormatUUID(tokenBytes)
// if err != nil {
// t.Fatal(err)
// }
tokenBytes, err := base64.RawStdEncoding.DecodeString(match[0][1])
if err != nil {
t.Fatal(err)
}
if l, exp := len(token), 36; l != exp {
token, err := xor.XORBytes(tokenBytes, []byte(otp))
if err != nil {
t.Fatal(err)
}
if l, exp := len(token), vault.TokenLength; l != exp {
t.Errorf("expected %d to be %d: %s", l, exp, token)
}
})

View file

@ -12,6 +12,7 @@ import (
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/pgpkeys"
"github.com/hashicorp/vault/vault"
"github.com/mitchellh/cli"
)
@ -332,7 +333,7 @@ func TestOperatorInitCommand_Run(t *testing.T) {
root := match[0][1]
decryptedRoot := testPGPDecrypt(t, pgpkeys.TestPrivKey1, root)
if l, exp := len(decryptedRoot), 36; l != exp {
if l, exp := len(decryptedRoot), vault.TokenLength; l != exp {
t.Errorf("expected %d to be %d", l, exp)
}
})

command/operator_migrate.go Normal file
View file

@ -0,0 +1,331 @@
package command
import (
"context"
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
"time"
"github.com/hashicorp/errwrap"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/physical"
"github.com/mitchellh/cli"
"github.com/pkg/errors"
"github.com/posener/complete"
)
var _ cli.Command = (*OperatorMigrateCommand)(nil)
var _ cli.CommandAutocomplete = (*OperatorMigrateCommand)(nil)
var errAbort = errors.New("Migration aborted")
type OperatorMigrateCommand struct {
*BaseCommand
PhysicalBackends map[string]physical.Factory
flagConfig string
flagStart string
flagReset bool
logger log.Logger
ShutdownCh chan struct{}
}
type migratorConfig struct {
StorageSource *server.Storage `hcl:"-"`
StorageDestination *server.Storage `hcl:"-"`
}
func (c *OperatorMigrateCommand) Synopsis() string {
return "Migrates Vault data between storage backends"
}
func (c *OperatorMigrateCommand) Help() string {
helpText := `
Usage: vault operator migrate [options]
This command starts a storage backend migration process to copy all data
from one backend to another. This operates directly on encrypted data and
does not require a Vault server, nor any unsealing.
Start a migration with a configuration file:
$ vault operator migrate -config=migrate.hcl
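  A minimal example configuration (the "file" backend and paths shown here are
  illustrative; any two supported storage backends may be used):

      storage_source "file" {
        path = "/path/to/source/data"
      }

      storage_destination "file" {
        path = "/path/to/destination/data"
      }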
For more information, please see the documentation.
` + c.Flags().Help()
return strings.TrimSpace(helpText)
}
func (c *OperatorMigrateCommand) Flags() *FlagSets {
set := NewFlagSets(c.UI)
f := set.NewFlagSet("Command Options")
f.StringVar(&StringVar{
Name: "config",
Target: &c.flagConfig,
Completion: complete.PredictOr(
complete.PredictFiles("*.hcl"),
),
Usage: "Path to a configuration file. This configuration file should " +
"contain only migrator directives.",
})
f.StringVar(&StringVar{
Name: "start",
Target: &c.flagStart,
Usage: "Only copy keys lexicographically at or after this value.",
})
f.BoolVar(&BoolVar{
Name: "reset",
Target: &c.flagReset,
Usage: "Reset the migration lock. No migration will occur.",
})
return set
}
func (c *OperatorMigrateCommand) AutocompleteArgs() complete.Predictor {
return nil
}
func (c *OperatorMigrateCommand) AutocompleteFlags() complete.Flags {
return c.Flags().Completions()
}
func (c *OperatorMigrateCommand) Run(args []string) int {
c.logger = logging.NewVaultLogger(log.Info)
f := c.Flags()
if err := f.Parse(args); err != nil {
c.UI.Error(err.Error())
return 1
}
if c.flagConfig == "" {
c.UI.Error("Must specify exactly one config path using -config")
return 1
}
config, err := c.loadMigratorConfig(c.flagConfig)
if err != nil {
c.UI.Error(fmt.Sprintf("Error loading configuration from %s: %s", c.flagConfig, err))
return 1
}
if err := c.migrate(config); err != nil {
if err == errAbort {
return 0
}
c.UI.Error(fmt.Sprintf("Error migrating: %s", err))
return 2
}
if c.flagReset {
c.UI.Output("Success! Migration lock reset (if it was set).")
} else {
c.UI.Output("Success! All of the keys have been migrated.")
}
return 0
}
// migrate attempts to instantiate the source and destination backends,
// and then invoke the migration at the root of the keyspace.
func (c *OperatorMigrateCommand) migrate(config *migratorConfig) error {
from, err := c.newBackend(config.StorageSource.Type, config.StorageSource.Config)
if err != nil {
return errwrap.Wrapf("error mounting 'storage_source': {{err}}", err)
}
if c.flagReset {
if err := SetMigration(from, false); err != nil {
return errwrap.Wrapf("error reseting migration lock: {{err}}", err)
}
return nil
}
to, err := c.newBackend(config.StorageDestination.Type, config.StorageDestination.Config)
if err != nil {
return errwrap.Wrapf("error mounting 'storage_destination': {{err}}", err)
}
migrationStatus, err := CheckMigration(from)
if err != nil {
return errors.New("error checking migration status")
}
if migrationStatus != nil {
return fmt.Errorf("Storage migration in progress (started: %s).", migrationStatus.Start.Format(time.RFC3339))
}
if err := SetMigration(from, true); err != nil {
return errwrap.Wrapf("error setting migration lock: {{err}}", err)
}
defer SetMigration(from, false)
ctx, cancelFunc := context.WithCancel(context.Background())
doneCh := make(chan error)
go func() {
doneCh <- c.migrateAll(ctx, from, to)
}()
select {
case err := <-doneCh:
return err
case <-c.ShutdownCh:
c.UI.Output("==> Migration shutdown triggered\n")
cancelFunc()
<-doneCh
return errAbort
}
return nil
}
// migrateAll copies all keys in lexicographic order.
func (c *OperatorMigrateCommand) migrateAll(ctx context.Context, from physical.Backend, to physical.Backend) error {
return dfsScan(ctx, from, func(ctx context.Context, path string) error {
if path < c.flagStart || path == migrationLock {
return nil
}
entry, err := from.Get(ctx, path)
if err != nil {
return errwrap.Wrapf("error reading entry: {{err}}", err)
}
if entry == nil {
return nil
}
if err := to.Put(ctx, entry); err != nil {
return errwrap.Wrapf("error writing entry: {{err}}", err)
}
c.logger.Info("copied key: " + path)
return nil
})
}
func (c *OperatorMigrateCommand) newBackend(kind string, conf map[string]string) (physical.Backend, error) {
factory, ok := c.PhysicalBackends[kind]
if !ok {
return nil, fmt.Errorf("no Vault storage backend named: %+q", kind)
}
return factory(conf, c.logger)
}
// loadMigratorConfig loads the configuration at the given path
func (c *OperatorMigrateCommand) loadMigratorConfig(path string) (*migratorConfig, error) {
fi, err := os.Stat(path)
if err != nil {
return nil, err
}
if fi.IsDir() {
return nil, fmt.Errorf("location is a directory, not a file")
}
d, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
obj, err := hcl.ParseBytes(d)
if err != nil {
return nil, err
}
var result migratorConfig
if err := hcl.DecodeObject(&result, obj); err != nil {
return nil, err
}
list, ok := obj.Node.(*ast.ObjectList)
if !ok {
return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
}
// Look for storage_* stanzas
for _, stanza := range []string{"storage_source", "storage_destination"} {
o := list.Filter(stanza)
if len(o.Items) != 1 {
return nil, fmt.Errorf("exactly one '%s' block is required", stanza)
}
if err := parseStorage(&result, o, stanza); err != nil {
return nil, errwrap.Wrapf("error parsing '%s': {{err}}", err)
}
}
return &result, nil
}
// parseStorage reuses the existing storage parsing that's part of the main Vault
// config processing, but only keeps the storage result.
func parseStorage(result *migratorConfig, list *ast.ObjectList, name string) error {
tmpConfig := new(server.Config)
if err := server.ParseStorage(tmpConfig, list, name); err != nil {
return err
}
switch name {
case "storage_source":
result.StorageSource = tmpConfig.Storage
case "storage_destination":
result.StorageDestination = tmpConfig.Storage
default:
return fmt.Errorf("unknown storage name: %s", name)
}
return nil
}
// dfsScan will invoke cb with every key from source.
// Keys will be traversed in lexicographic, depth-first order.
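// For example (illustrative): given the keys "a", "b/x" and "b/y", the callback
// fires in the order "a", "b/x", "b/y"; prefix entries such as "b/" are expanded
// in place when they are reached rather than queued up front.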
func dfsScan(ctx context.Context, source physical.Backend, cb func(ctx context.Context, path string) error) error {
dfs := []string{""}
for l := len(dfs); l > 0; l = len(dfs) {
key := dfs[len(dfs)-1]
if key == "" || strings.HasSuffix(key, "/") {
children, err := source.List(ctx, key)
if err != nil {
return errwrap.Wrapf("failed to scan for children: {{err}}", err)
}
sort.Strings(children)
// remove List-triggering key and add children in reverse order
dfs = dfs[:len(dfs)-1]
for i := len(children) - 1; i >= 0; i-- {
dfs = append(dfs, key+children[i])
}
} else {
err := cb(ctx, key)
if err != nil {
return err
}
dfs = dfs[:len(dfs)-1]
}
select {
case <-ctx.Done():
return nil
default:
}
}
return nil
}

View file

@ -0,0 +1,304 @@
package command
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/go-test/deep"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/helper/base62"
"github.com/hashicorp/vault/helper/testhelpers"
"github.com/hashicorp/vault/physical"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func TestMigration(t *testing.T) {
t.Run("Default", func(t *testing.T) {
data := generateData()
fromFactory := physicalBackends["file"]
folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
defer os.RemoveAll(folder)
confFrom := map[string]string{
"path": folder,
}
from, err := fromFactory(confFrom, nil)
if err != nil {
t.Fatal(err)
}
if err := storeData(from, data); err != nil {
t.Fatal(err)
}
toFactory := physicalBackends["inmem"]
confTo := map[string]string{}
to, err := toFactory(confTo, nil)
if err != nil {
t.Fatal(err)
}
cmd := OperatorMigrateCommand{
logger: log.NewNullLogger(),
}
if err := cmd.migrateAll(context.Background(), from, to); err != nil {
t.Fatal(err)
}
if err := compareStoredData(to, data, ""); err != nil {
t.Fatal(err)
}
})
t.Run("Start option", func(t *testing.T) {
data := generateData()
fromFactory := physicalBackends["inmem"]
confFrom := map[string]string{}
from, err := fromFactory(confFrom, nil)
if err != nil {
t.Fatal(err)
}
if err := storeData(from, data); err != nil {
t.Fatal(err)
}
toFactory := physicalBackends["file"]
folder := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
defer os.RemoveAll(folder)
confTo := map[string]string{
"path": folder,
}
to, err := toFactory(confTo, nil)
if err != nil {
t.Fatal(err)
}
const start = "m"
cmd := OperatorMigrateCommand{
logger: log.NewNullLogger(),
flagStart: start,
}
if err := cmd.migrateAll(context.Background(), from, to); err != nil {
t.Fatal(err)
}
if err := compareStoredData(to, data, start); err != nil {
t.Fatal(err)
}
})
t.Run("Config parsing", func(t *testing.T) {
cmd := new(OperatorMigrateCommand)
cfgName := filepath.Join(os.TempDir(), testhelpers.RandomWithPrefix("migrator"))
ioutil.WriteFile(cfgName, []byte(`
storage_source "src_type" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}`), 0644)
defer os.Remove(cfgName)
expCfg := &migratorConfig{
StorageSource: &server.Storage{
Type: "src_type",
Config: map[string]string{
"path": "src_path",
},
},
StorageDestination: &server.Storage{
Type: "dest_type",
Config: map[string]string{
"path": "dest_path",
},
},
}
cfg, err := cmd.loadMigratorConfig(cfgName)
if err != nil {
t.Fatal(err)
}
if diff := deep.Equal(cfg, expCfg); diff != nil {
t.Fatal(diff)
}
verifyBad := func(cfg string) {
ioutil.WriteFile(cfgName, []byte(cfg), 0644)
_, err := cmd.loadMigratorConfig(cfgName)
if err == nil {
t.Fatalf("expected error but none received from: %v", cfg)
}
}
// missing source
verifyBad(`
storage_destination "dest_type" {
path = "dest_path"
}`)
// missing destination
verifyBad(`
storage_source "src_type" {
path = "src_path"
}`)
// duplicate source
verifyBad(`
storage_source "src_type" {
path = "src_path"
}
storage_source "src_type2" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}`)
// duplicate destination
verifyBad(`
storage_source "src_type" {
path = "src_path"
}
storage_destination "dest_type" {
path = "dest_path"
}
storage_destination "dest_type2" {
path = "dest_path"
}`)
})
t.Run("DFS Scan", func(t *testing.T) {
s, _ := physicalBackends["inmem"](map[string]string{}, nil)
data := generateData()
data["cc"] = []byte{}
data["c/d/e/f"] = []byte{}
data["c/d/e/g"] = []byte{}
data["c"] = []byte{}
storeData(s, data)
l := randomLister{s}
var out []string
dfsScan(context.Background(), l, func(ctx context.Context, path string) error {
out = append(out, path)
return nil
})
var keys []string
for key := range data {
keys = append(keys, key)
}
sort.Strings(keys)
if !reflect.DeepEqual(keys, out) {
t.Fatalf("expected equal: %v, %v", keys, out)
}
})
}
// randomLister wraps a physical backend, providing a List method
// that returns results in a random order.
type randomLister struct {
b physical.Backend
}
func (l randomLister) List(ctx context.Context, path string) ([]string, error) {
result, err := l.b.List(ctx, path)
if err != nil {
return nil, err
}
rand.Shuffle(len(result), func(i, j int) {
result[i], result[j] = result[j], result[i]
})
return result, err
}
func (l randomLister) Get(ctx context.Context, path string) (*physical.Entry, error) {
return l.b.Get(ctx, path)
}
func (l randomLister) Put(ctx context.Context, entry *physical.Entry) error {
return l.b.Put(ctx, entry)
}
func (l randomLister) Delete(ctx context.Context, path string) error {
return l.b.Delete(ctx, path)
}
// generateData creates a map of 500 random keys and values
func generateData() map[string][]byte {
result := make(map[string][]byte)
for i := 0; i < 500; i++ {
segments := make([]string, rand.Intn(8)+1)
for j := 0; j < len(segments); j++ {
s, _ := base62.Random(6, false)
segments[j] = s
}
data := make([]byte, 100)
rand.Read(data)
result[strings.Join(segments, "/")] = data
}
return result
}
func storeData(s physical.Backend, ref map[string][]byte) error {
for k, v := range ref {
entry := physical.Entry{
Key: k,
Value: v,
}
err := s.Put(context.Background(), &entry)
if err != nil {
return err
}
}
return nil
}
func compareStoredData(s physical.Backend, ref map[string][]byte, start string) error {
for k, v := range ref {
entry, err := s.Get(context.Background(), k)
if err != nil {
return err
}
if k >= start {
if entry == nil {
return fmt.Errorf("key not found: %s", k)
}
if !bytes.Equal(v, entry.Value) {
return fmt.Errorf("values differ for key: %s", k)
}
} else {
if entry != nil {
return fmt.Errorf("found key the should have been skipped by start option: %s", k)
}
}
}
return nil
}

View file

@ -6,6 +6,7 @@ import (
"strings"
"github.com/hashicorp/hcl/hcl/printer"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/vault"
"github.com/mitchellh/cli"
homedir "github.com/mitchellh/go-homedir"
@ -85,8 +86,9 @@ func (c *PolicyFmtCommand) Run(args []string) int {
return 1
}
// Actually parse the policy
if _, err := vault.ParseACLPolicy(string(b)); err != nil {
// Actually parse the policy. We always use the root namespace here because
// we don't want to modify the results.
if _, err := vault.ParseACLPolicy(namespace.RootNamespace, string(b)); err != nil {
c.UI.Error(err.Error())
return 1
}

View file

@ -35,9 +35,12 @@ import (
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/command/server"
serverseal "github.com/hashicorp/vault/command/server/seal"
"github.com/hashicorp/vault/helper/gated-writer"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/helper/mlock"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/reload"
vaulthttp "github.com/hashicorp/vault/http"
@ -50,6 +53,8 @@ import (
var _ cli.Command = (*ServerCommand)(nil)
var _ cli.CommandAutocomplete = (*ServerCommand)(nil)
const migrationLock = "core/migration"
type ServerCommand struct {
*BaseCommand
@ -458,12 +463,43 @@ func (c *ServerCommand) Run(args []string) int {
return 1
}
migrationStatus, err := CheckMigration(backend)
if err != nil {
c.UI.Error("Error checking migration status")
return 1
}
if migrationStatus != nil {
startTime := migrationStatus.Start.Format(time.RFC3339)
c.UI.Error(wrapAtLength(fmt.Sprintf("Storage migration in progress (started: %s). "+
"Use 'vault operator migrate -reset' to force clear the migration lock.", startTime)))
return 1
}
infoKeys := make([]string, 0, 10)
info := make(map[string]string)
info["log level"] = c.flagLogLevel
infoKeys = append(infoKeys, "log level")
var seal vault.Seal = vault.NewDefaultSeal()
sealType := "shamir"
if config.Seal != nil || os.Getenv("VAULT_SEAL_TYPE") != "" {
if config.Seal == nil {
sealType = os.Getenv("VAULT_SEAL_TYPE")
} else {
sealType = config.Seal.Type
}
}
sealLogger := c.logger.Named(sealType)
allLoggers = append(allLoggers, sealLogger)
seal, sealConfigError := serverseal.ConfigureSeal(config, &infoKeys, &info, sealLogger, vault.NewDefaultSeal())
if sealConfigError != nil {
if !errwrap.ContainsType(sealConfigError, new(logical.KeyNotFoundError)) {
c.UI.Error(fmt.Sprintf(
"Error parsing Seal configuration: %s", sealConfigError))
return 1
}
}
// Ensure that the seal finalizer is called, even if using verify-only
defer func() {
@ -481,24 +517,26 @@ func (c *ServerCommand) Run(args []string) int {
}
coreConfig := &vault.CoreConfig{
Physical: backend,
RedirectAddr: config.Storage.RedirectAddr,
HAPhysical: nil,
Seal: seal,
AuditBackends: c.AuditBackends,
CredentialBackends: c.CredentialBackends,
LogicalBackends: c.LogicalBackends,
Logger: c.logger,
DisableCache: config.DisableCache,
DisableMlock: config.DisableMlock,
MaxLeaseTTL: config.MaxLeaseTTL,
DefaultLeaseTTL: config.DefaultLeaseTTL,
ClusterName: config.ClusterName,
CacheSize: config.CacheSize,
PluginDirectory: config.PluginDirectory,
EnableUI: config.EnableUI,
EnableRaw: config.EnableRawEndpoint,
AllLoggers: allLoggers,
Physical: backend,
RedirectAddr: config.Storage.RedirectAddr,
HAPhysical: nil,
Seal: seal,
AuditBackends: c.AuditBackends,
CredentialBackends: c.CredentialBackends,
LogicalBackends: c.LogicalBackends,
Logger: c.logger,
DisableCache: config.DisableCache,
DisableMlock: config.DisableMlock,
MaxLeaseTTL: config.MaxLeaseTTL,
DefaultLeaseTTL: config.DefaultLeaseTTL,
ClusterName: config.ClusterName,
CacheSize: config.CacheSize,
PluginDirectory: config.PluginDirectory,
EnableUI: config.EnableUI,
EnableRaw: config.EnableRawEndpoint,
DisableSealWrap: config.DisableSealWrap,
DisablePerformanceStandby: config.DisablePerformanceStandby,
AllLoggers: allLoggers,
}
if c.flagDev {
coreConfig.DevToken = c.flagDevRootTokenID
@ -522,6 +560,10 @@ func (c *ServerCommand) Run(args []string) int {
return c.enableThreeNodeDevCluster(coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
}
if c.flagDevFourCluster {
return c.enableFourClusterDev(coreConfig, info, infoKeys, c.flagDevListenAddr, os.Getenv("VAULT_DEV_TEMP_DIR"))
}
var disableClustering bool
// Initialize the separate HA storage backend, if it exists
@ -887,7 +929,7 @@ CLUSTER_SYNTHESIS_COMPLETE:
return false
}
if err := sd.RunServiceDiscovery(c.WaitGroup, c.ShutdownCh, coreConfig.RedirectAddr, activeFunc, core.Sealed); err != nil {
if err := sd.RunServiceDiscovery(c.WaitGroup, c.ShutdownCh, coreConfig.RedirectAddr, activeFunc, core.Sealed, core.PerfStandby); err != nil {
c.UI.Error(fmt.Sprintf("Error initializing service discovery: %v", err))
return 1
}
@ -1014,6 +1056,18 @@ CLUSTER_SYNTHESIS_COMPLETE:
go server.Serve(ln.Listener)
}
if sealConfigError != nil {
init, err := core.Initialized(context.Background())
if err != nil {
c.UI.Error(fmt.Sprintf("Error checking if core is initialized: %v", err))
return 1
}
if init {
c.UI.Error("Vault is initialized but no Seal key could be loaded")
return 1
}
}
if newCoreError != nil {
c.UI.Warn(wrapAtLength(
"WARNING! A non-fatal error occurred during initialization. Please " +
@ -1126,6 +1180,8 @@ CLUSTER_SYNTHESIS_COMPLETE:
}
func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig) (*vault.InitResult, error) {
ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
var recoveryConfig *vault.SealConfig
barrierConfig := &vault.SealConfig{
SecretShares: 1,
@ -1143,8 +1199,6 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
barrierConfig.StoredShares = 1
}
ctx := context.Background()
// Initialize it with a basic single key
init, err := core.Initialize(ctx, &vault.InitParams{
BarrierConfig: barrierConfig,
@ -1210,7 +1264,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
"no_default_policy": true,
},
}
resp, err := core.HandleRequest(context.Background(), req)
resp, err := core.HandleRequest(ctx, req)
if err != nil {
return nil, errwrap.Wrapf(fmt.Sprintf("failed to create root token with ID %q: {{err}}", coreConfig.DevToken), err)
}
@ -1226,7 +1280,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
req.ID = "dev-revoke-init-root"
req.Path = "auth/token/revoke-self"
req.Data = nil
resp, err = core.HandleRequest(context.Background(), req)
resp, err = core.HandleRequest(ctx, req)
if err != nil {
return nil, errwrap.Wrapf("failed to revoke initial root token: {{err}}", err)
}
@ -1253,7 +1307,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, coreConfig *vault.CoreConfig
},
},
}
resp, err := core.HandleRequest(context.Background(), req)
resp, err := core.HandleRequest(ctx, req)
if err != nil {
return nil, errwrap.Wrapf("error upgrading default K/V store: {{err}}", err)
}
@ -1317,6 +1371,8 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m
testCluster.Start()
ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
if base.DevToken != "" {
req := &logical.Request{
ID: "dev-gen-root",
@ -1330,7 +1386,7 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m
"no_default_policy": true,
},
}
resp, err := testCluster.Cores[0].HandleRequest(context.Background(), req)
resp, err := testCluster.Cores[0].HandleRequest(ctx, req)
if err != nil {
c.UI.Error(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err))
return 1
@ -1349,7 +1405,7 @@ func (c *ServerCommand) enableThreeNodeDevCluster(base *vault.CoreConfig, info m
req.ID = "dev-revoke-init-root"
req.Path = "auth/token/revoke-self"
req.Data = nil
resp, err = testCluster.Cores[0].HandleRequest(context.Background(), req)
resp, err = testCluster.Cores[0].HandleRequest(ctx, req)
if err != nil {
c.UI.Output(fmt.Sprintf("failed to revoke initial root token: %s", err))
return 1
@ -1482,7 +1538,8 @@ func (c *ServerCommand) addPlugin(path, token string, core *vault.Core) error {
"command": name,
},
}
if _, err := core.HandleRequest(context.Background(), req); err != nil {
ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
if _, err := core.HandleRequest(ctx, req); err != nil {
return err
}
@ -1732,6 +1789,51 @@ func (c *ServerCommand) removePidFile(pidPath string) error {
return os.Remove(pidPath)
}
type MigrationStatus struct {
Start time.Time `json:"start"`
}
func CheckMigration(b physical.Backend) (*MigrationStatus, error) {
entry, err := b.Get(context.Background(), migrationLock)
if err != nil {
return nil, err
}
if entry == nil {
return nil, nil
}
var status MigrationStatus
if err := jsonutil.DecodeJSON(entry.Value, &status); err != nil {
return nil, err
}
return &status, nil
}
func SetMigration(b physical.Backend, active bool) error {
if !active {
return b.Delete(context.Background(), migrationLock)
}
status := MigrationStatus{
Start: time.Now(),
}
enc, err := jsonutil.EncodeJSON(status)
if err != nil {
return err
}
entry := &physical.Entry{
Key: migrationLock,
Value: enc,
}
return b.Put(context.Background(), entry)
}
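Together, CheckMigration and SetMigration give the server and the new operator migrate command a storage-backed lock: the migrator writes the core/migration entry before copying data and removes it afterwards, while the startup check above refuses to run while the entry exists. A rough sketch, inside this package, of how a migration run might bracket its work; copyAll is a hypothetical stand-in for the real copy loop, and which backend the real command locks may differ.

func migrateSketch(from, to physical.Backend) error {
	// Record that a migration is in progress so a concurrently started
	// server will refuse to come up against this storage.
	if err := SetMigration(from, true); err != nil {
		return err
	}
	// copyAll is illustrative only: walk every key in `from` and Put it into `to`.
	if err := copyAll(from, to); err != nil {
		return err
	}
	// Clear the lock on success; 'vault operator migrate -reset' clears a
	// stale entry left behind by an interrupted run.
	return SetMigration(from, false)
}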
type grpclogFaker struct {
logger log.Logger
log bool

View file

@ -46,7 +46,7 @@ type Config struct {
DefaultLeaseTTLRaw interface{} `hcl:"default_lease_ttl"`
DefaultMaxRequestDuration time.Duration `hcl:"-"`
DefaultMaxRequestDurationRaw interface{} `hcl:"default_max_request_time"`
DefaultMaxRequestDurationRaw interface{} `hcl:"default_max_request_duration"`
ClusterName string `hcl:"cluster_name"`
ClusterCipherSuites string `hcl:"cluster_cipher_suites"`
@ -458,12 +458,12 @@ func ParseConfig(d string, logger log.Logger) (*Config, error) {
// Look for storage but still support old backend
if o := list.Filter("storage"); len(o.Items) > 0 {
if err := parseStorage(&result, o, "storage"); err != nil {
if err := ParseStorage(&result, o, "storage"); err != nil {
return nil, errwrap.Wrapf("error parsing 'storage': {{err}}", err)
}
} else {
if o := list.Filter("backend"); len(o.Items) > 0 {
if err := parseStorage(&result, o, "backend"); err != nil {
if err := ParseStorage(&result, o, "backend"); err != nil {
return nil, errwrap.Wrapf("error parsing 'backend': {{err}}", err)
}
}
@ -583,7 +583,7 @@ func isTemporaryFile(name string) bool {
(strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs
}
func parseStorage(result *Config, list *ast.ObjectList, name string) error {
func ParseStorage(result *Config, list *ast.ObjectList, name string) error {
if len(list.Items) > 1 {
return fmt.Errorf("only one %q block is permitted", name)
}
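Exporting ParseStorage (and keeping the block-name parameter) lets other commands, such as the new operator migrate command, reuse the server's storage-stanza parsing rather than duplicating it. A hedged sketch of standalone use; the actual call site may differ:

package migrate

import (
	"fmt"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/vault/command/server"
)

func parseStorageStanza(hclText string) (*server.Config, error) {
	root, err := hcl.Parse(hclText)
	if err != nil {
		return nil, err
	}
	list, ok := root.Node.(*ast.ObjectList)
	if !ok {
		return nil, fmt.Errorf("file doesn't contain a root object")
	}
	var cfg server.Config
	if o := list.Filter("storage"); len(o.Items) > 0 {
		if err := server.ParseStorage(&cfg, o, "storage"); err != nil {
			return nil, err
		}
	}
	return &cfg, nil
}

For example, parseStorageStanza(`storage "inmem" {}`) yields a Config whose Storage field describes the inmem backend.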

View file

@ -0,0 +1,15 @@
package seal
import (
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/vault"
)
var (
ConfigureSeal func(*server.Config, *[]string, *map[string]string, log.Logger, vault.Seal) (vault.Seal, error) = configureSeal
)
func configureSeal(config *server.Config, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (seal vault.Seal, err error) {
return inseal, nil
}
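ConfigureSeal is deliberately a package-level function variable rather than a plain function: builds that ship additional seal types can reassign it at init time, and the call site in server.go above picks up the override without any code changes. A sketch of what such an override might look like, inside the same seal package and typically guarded by a build tag (the auto-unseal logic itself is elided):

func init() {
	ConfigureSeal = func(config *server.Config, infoKeys *[]string, info *map[string]string, logger log.Logger, inseal vault.Seal) (vault.Seal, error) {
		// Inspect config.Seal here and, when an auto-unseal mechanism is
		// configured, construct and return it (recording its details in
		// info/infoKeys); otherwise fall back to the supplied default.
		return inseal, nil
	}
}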

View file

@ -0,0 +1,313 @@
package command
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"fmt"
"io/ioutil"
"math/big"
mathrand "math/rand"
"net"
"path/filepath"
"sort"
"strings"
"time"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/testhelpers"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/vault/version"
testing "github.com/mitchellh/go-testing-interface"
"github.com/pkg/errors"
)
func (c *ServerCommand) enableFourClusterDev(base *vault.CoreConfig, info map[string]string, infoKeys []string, devListenAddress, tempDir string) int {
var err error
ctx := namespace.RootContext(nil)
clusters := map[string]*vault.TestCluster{}
if base.DevToken == "" {
base.DevToken = "root"
}
base.EnableRaw = true
// Set the license duration far enough into the future that we aren't bombarded with expiration warnings during testing
base.DevLicenseDuration = 6 * time.Hour
// Set a base temp dir
if tempDir == "" {
tempDir, err = ioutil.TempDir("", "vault-test-cluster-")
if err != nil {
c.UI.Error(fmt.Sprintf("failed to create top-level temp dir: %s", err))
return 1
}
}
caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
c.UI.Error(fmt.Sprintf("Failed to generate CA key: %s", err))
return 1
}
certIPs := []net.IP{
net.IPv6loopback,
net.ParseIP("127.0.0.1"),
}
caCertTemplate := &x509.Certificate{
Subject: pkix.Name{
CommonName: "localhost",
},
DNSNames: []string{"localhost"},
IPAddresses: certIPs,
KeyUsage: x509.KeyUsage(x509.KeyUsageCertSign | x509.KeyUsageCRLSign),
SerialNumber: big.NewInt(mathrand.Int63()),
NotBefore: time.Now().Add(-30 * time.Second),
NotAfter: time.Now().Add(262980 * time.Hour),
BasicConstraintsValid: true,
IsCA: true,
}
caBytes, err := x509.CreateCertificate(rand.Reader, caCertTemplate, caCertTemplate, caKey.Public(), caKey)
if err != nil {
c.UI.Error(fmt.Sprintf("Failed to generate certificate: %s", err))
return 1
}
getCluster := func(name string) error {
factory := c.PhysicalBackends["inmem_transactional_ha"]
backend, err := factory(nil, c.logger)
if err != nil {
c.UI.Error(fmt.Sprintf("Error initializing storage of type %s: %s", "inmem_transactional_ha", err))
return errors.New("")
}
base.Physical = backend
base.Seal = vault.NewDefaultSeal()
testCluster := vault.NewTestCluster(&testing.RuntimeT{}, base, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
//BaseListenAddress: c.flagDevListenAddr,
Logger: c.logger.Named(name),
TempDir: fmt.Sprintf("%s/%s", tempDir, name),
CAKey: caKey,
CACert: caBytes,
})
clusters[name] = testCluster
for i, core := range testCluster.Cores {
info[fmt.Sprintf("%s node %d redirect address", name, i)] = fmt.Sprintf("https://%s", core.Listeners[0].Address.String())
infoKeys = append(infoKeys, fmt.Sprintf("%s node %d redirect address", name, i))
core.Server.Handler = vaulthttp.Handler(&vault.HandlerProperties{
Core: core.Core,
})
core.SetClusterHandler(core.Server.Handler)
}
testCluster.Start()
req := &logical.Request{
ID: "dev-gen-root",
Operation: logical.UpdateOperation,
ClientToken: testCluster.RootToken,
Path: "auth/token/create",
Data: map[string]interface{}{
"id": base.DevToken,
"policies": []string{"root"},
"no_parent": true,
"no_default_policy": true,
},
}
resp, err := testCluster.Cores[0].HandleRequest(ctx, req)
if err != nil {
c.UI.Error(fmt.Sprintf("failed to create root token with ID %s: %s", base.DevToken, err))
return errors.New("")
}
if resp == nil {
c.UI.Error(fmt.Sprintf("nil response when creating root token with ID %s", base.DevToken))
return errors.New("")
}
if resp.Auth == nil {
c.UI.Error(fmt.Sprintf("nil auth when creating root token with ID %s", base.DevToken))
return errors.New("")
}
testCluster.RootToken = resp.Auth.ClientToken
req.ID = "dev-revoke-init-root"
req.Path = "auth/token/revoke-self"
req.Data = nil
resp, err = testCluster.Cores[0].HandleRequest(ctx, req)
if err != nil {
c.UI.Output(fmt.Sprintf("failed to revoke initial root token: %s", err))
return errors.New("")
}
for _, core := range testCluster.Cores {
core.Client.SetToken(base.DevToken)
}
return nil
}
err = getCluster("perf-pri")
if err != nil {
return 1
}
err = getCluster("perf-pri-dr")
if err != nil {
return 1
}
err = getCluster("perf-sec")
if err != nil {
return 1
}
err = getCluster("perf-sec-dr")
if err != nil {
return 1
}
clusterCleanup := func() {
for name, cluster := range clusters {
cluster.Cleanup()
// Shutdown will wait until after Vault is sealed, which means the
// request forwarding listeners will also be closed (and also
// waited for).
for _, core := range cluster.Cores {
if err := core.Shutdown(); err != nil {
c.UI.Error(fmt.Sprintf("Error with cluster %s core shutdown: %s", name, err))
}
}
}
}
defer c.cleanupGuard.Do(clusterCleanup)
info["cluster parameters path"] = tempDir
infoKeys = append(infoKeys, "cluster parameters path")
verInfo := version.GetVersion()
info["version"] = verInfo.FullVersionNumber(false)
infoKeys = append(infoKeys, "version")
if verInfo.Revision != "" {
info["version sha"] = strings.Trim(verInfo.Revision, "'")
infoKeys = append(infoKeys, "version sha")
}
infoKeys = append(infoKeys, "cgo")
info["cgo"] = "disabled"
if version.CgoEnabled {
info["cgo"] = "enabled"
}
// Server configuration output
padding := 40
sort.Strings(infoKeys)
c.UI.Output("==> Vault server configuration:\n")
for _, k := range infoKeys {
c.UI.Output(fmt.Sprintf(
"%s%s: %s",
strings.Repeat(" ", padding-len(k)),
strings.Title(k),
info[k]))
}
c.UI.Output("")
// Set the token
tokenHelper, err := c.TokenHelper()
if err != nil {
c.UI.Error(fmt.Sprintf("Error getting token helper: %s", err))
return 1
}
if err := tokenHelper.Store(base.DevToken); err != nil {
c.UI.Error(fmt.Sprintf("Error storing in token helper: %s", err))
return 1
}
if err := ioutil.WriteFile(filepath.Join(tempDir, "root_token"), []byte(base.DevToken), 0755); err != nil {
c.UI.Error(fmt.Sprintf("Error writing token to tempfile: %s", err))
return 1
}
c.UI.Output(fmt.Sprintf(
"\nRoot Token: %s\n", base.DevToken,
))
for i, key := range clusters["perf-pri"].BarrierKeys {
c.UI.Output(fmt.Sprintf(
"Unseal Key %d: %s",
i+1, base64.StdEncoding.EncodeToString(key),
))
}
c.UI.Output(fmt.Sprintf(
"\nUseful env vars:\n"+
"export VAULT_TOKEN=%s\n"+
"export VAULT_CACERT=%s/perf-pri/ca_cert.pem\n",
base.DevToken,
tempDir,
))
c.UI.Output(fmt.Sprintf("Addresses of initial active nodes:"))
clusterNames := []string{}
for name := range clusters {
clusterNames = append(clusterNames, name)
}
sort.Strings(clusterNames)
for _, name := range clusterNames {
c.UI.Output(fmt.Sprintf(
"%s:\n"+
"export VAULT_ADDR=%s\n",
name,
clusters[name].Cores[0].Client.Address(),
))
}
// Output the header that the server has started
c.UI.Output("==> Vault clusters started! Log data will stream in below:\n")
// Inform any tests that the server is ready
select {
case c.startedCh <- struct{}{}:
default:
}
// Release the log gate.
c.logGate.Flush()
testhelpers.SetupFourClusterReplication(&testing.RuntimeT{},
clusters["perf-pri"],
clusters["perf-sec"],
clusters["perf-pri-dr"],
clusters["perf-sec-dr"],
)
// Wait for shutdown
shutdownTriggered := false
for !shutdownTriggered {
select {
case <-c.ShutdownCh:
c.UI.Output("==> Vault shutdown triggered")
// Stop the listeners so that we don't process further client requests.
c.cleanupGuard.Do(clusterCleanup)
shutdownTriggered = true
case <-c.SighupCh:
c.UI.Output("==> Vault reload triggered")
for name, cluster := range clusters {
for _, core := range cluster.Cores {
if err := c.Reload(core.ReloadFuncsLock, core.ReloadFuncs, nil); err != nil {
c.UI.Error(fmt.Sprintf("Error(s) were encountered during reload of cluster %s cores: %s", name, err))
}
}
}
}
}
return 0
}

View file

@ -20,10 +20,8 @@ import (
"time"
"github.com/hashicorp/vault/physical"
physInmem "github.com/hashicorp/vault/physical/inmem"
"github.com/mitchellh/cli"
physConsul "github.com/hashicorp/vault/physical/consul"
physFile "github.com/hashicorp/vault/physical/file"
)
func testRandomPort(tb testing.TB) int {
@ -56,31 +54,23 @@ func testBaseHCL(tb testing.TB) string {
}
const (
consulHCL = `
backend "consul" {
prefix = "foo/"
inmemHCL = `
backend "inmem_ha" {
advertise_addr = "http://127.0.0.1:8200"
disable_registration = "true"
}
`
haConsulHCL = `
ha_backend "consul" {
prefix = "bar/"
haInmemHCL = `
ha_backend "inmem_ha" {
redirect_addr = "http://127.0.0.1:8200"
disable_registration = "true"
}
`
badHAConsulHCL = `
ha_backend "file" {
path = "/dev/null"
}
badHAInmemHCL = `
ha_backend "inmem" {}
`
reloadHCL = `
backend "file" {
path = "/dev/null"
}
backend "inmem" {}
disable_mlock = true
listener "tcp" {
address = "127.0.0.1:8203"
@ -101,8 +91,8 @@ func testServerCommand(tb testing.TB) (*cli.MockUi, *ServerCommand) {
ShutdownCh: MakeShutdownCh(),
SighupCh: MakeSighupCh(),
PhysicalBackends: map[string]physical.Factory{
"file": physFile.NewFileBackend,
"consul": physConsul.NewConsulBackend,
"inmem": physInmem.NewInmem,
"inmem_ha": physInmem.NewInmemHA,
},
// These prevent us from random sleep guessing...
@ -216,19 +206,19 @@ func TestServer(t *testing.T) {
}{
{
"common_ha",
testBaseHCL(t) + consulHCL,
testBaseHCL(t) + inmemHCL,
"(HA available)",
0,
},
{
"separate_ha",
testBaseHCL(t) + consulHCL + haConsulHCL,
testBaseHCL(t) + inmemHCL + haInmemHCL,
"HA Storage:",
0,
},
{
"bad_separate_ha",
testBaseHCL(t) + consulHCL + badHAConsulHCL,
testBaseHCL(t) + inmemHCL + badHAInmemHCL,
"Specified HA storage does not support HA",
1,
},

View file

@ -67,10 +67,13 @@ func (c *TokenCapabilitiesCommand) Run(args []string) int {
token := ""
path := ""
args = f.Args()
switch {
case len(args) == 1:
switch len(args) {
case 0:
c.UI.Error(fmt.Sprintf("Not enough arguments (expected 1-2, got 0)"))
return 1
case 1:
path = args[0]
case len(args) == 2:
case 2:
token, path = args[0], args[1]
default:
c.UI.Error(fmt.Sprintf("Too many arguments (expected 1-2, got %d)", len(args)))

helper/awsutil/error.go (new file)
View file

@ -0,0 +1,31 @@
package awsutil
import (
awsRequest "github.com/aws/aws-sdk-go/aws/request"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/logical"
)
// CheckAWSError will examine an error and convert to a logical error if
// appropriate. If no appropriate error is found, return nil
func CheckAWSError(err error) error {
// IsErrorThrottle will check if the error returned is one that matches
// known request limiting errors:
// https://github.com/aws/aws-sdk-go/blob/488d634b5a699b9118ac2befb5135922b4a77210/aws/request/retryer.go#L35
if awsRequest.IsErrorThrottle(err) {
return logical.ErrUpstreamRateLimited
}
return nil
}
// AppendLogicalError checks if the given error is a known AWS error we modify,
// and if so then returns a go-multierror, appending the original and the
// logical error.
// If the error is not an AWS error, or not an error we wish to modify, then
// return the original error.
func AppendLogicalError(err error) error {
if awserr := CheckAWSError(err); awserr != nil {
err = multierror.Append(err, awserr)
}
return err
}
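The intent is that AWS-backed auth and secret plugins wrap their SDK calls with AppendLogicalError so that throttling surfaces as logical.ErrUpstreamRateLimited (which Vault reports as a 502) while every other error passes through unchanged. A minimal sketch of a call site, assuming an already-configured IAM client:

package example

import (
	"github.com/aws/aws-sdk-go/service/iam"
	"github.com/aws/aws-sdk-go/service/iam/iamiface"
	"github.com/hashicorp/vault/helper/awsutil"
)

func createUser(client iamiface.IAMAPI, name string) (*iam.CreateUserOutput, error) {
	out, err := client.CreateUser(&iam.CreateUserInput{UserName: &name})
	if err != nil {
		// On a throttling response the returned multierror also carries
		// logical.ErrUpstreamRateLimited; everything else is returned as-is.
		return nil, awsutil.AppendLogicalError(err)
	}
	return out, nil
}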

View file

@ -0,0 +1,89 @@
package awsutil
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws/awserr"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/logical"
)
func Test_CheckAWSError(t *testing.T) {
testCases := []struct {
Name string
Err error
Expected error
}{
{
Name: "Something not checked",
Err: fmt.Errorf("something"),
},
{
Name: "Upstream throttle error",
Err: awserr.New("Throttling", "", nil),
Expected: logical.ErrUpstreamRateLimited,
},
{
Name: "Upstream RequestLimitExceeded",
Err: awserr.New("RequestLimitExceeded", "Request rate limited", nil),
Expected: logical.ErrUpstreamRateLimited,
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
err := CheckAWSError(tc.Err)
if err == nil && tc.Expected != nil {
t.Fatalf("expected non-nil error (%#v), got nil", tc.Expected)
}
if err != nil && tc.Expected == nil {
t.Fatalf("expected nil error, got (%#v)", err)
}
if err != tc.Expected {
t.Fatalf("expected error (%#v), got (%#v)", tc.Expected, err)
}
})
}
}
func Test_AppendLogicalError(t *testing.T) {
awsErr := awserr.New("Throttling", "", nil)
testCases := []struct {
Name string
Err error
Expected error
}{
{
Name: "Something not checked",
Err: fmt.Errorf("something"),
Expected: fmt.Errorf("something"),
},
{
Name: "Upstream throttle error",
Err: awsErr,
Expected: multierror.Append(awsErr, logical.ErrUpstreamRateLimited),
},
{
Name: "Nil",
},
}
for _, tc := range testCases {
t.Run(tc.Name, func(t *testing.T) {
err := AppendLogicalError(tc.Err)
if err == nil && tc.Expected != nil {
t.Fatalf("expected non-nil error (%#v), got nil", tc.Expected)
}
if err != nil && tc.Expected == nil {
t.Fatalf("expected nil error, got (%#v)", err)
}
if err == nil && tc.Expected == nil {
return
}
if err.Error() != tc.Expected.Error() {
t.Fatalf("expected error (%#v), got (%#v)", tc.Expected.Error(), err.Error())
}
})
}
}

View file

@ -9,6 +9,7 @@ import (
"github.com/golang/snappy"
"github.com/hashicorp/errwrap"
"github.com/pierrec/lz4"
)
const (
@ -17,41 +18,39 @@ const (
// The value of this constant should not be a first character of any
// valid JSON string.
// Byte value used as canary when using Gzip format
CompressionTypeGzip = "gzip"
CompressionCanaryGzip byte = 'G'
// Byte value used as canary when using Lzw format
CompressionCanaryLzw byte = 'L'
CompressionTypeLZW = "lzw"
CompressionCanaryLZW byte = 'L'
// Byte value used as canary when using Snappy format
CompressionTypeSnappy = "snappy"
CompressionCanarySnappy byte = 'S'
CompressionTypeLzw = "lzw"
CompressionTypeGzip = "gzip"
CompressionTypeSnappy = "snappy"
CompressionTypeLZ4 = "lz4"
CompressionCanaryLZ4 byte = '4'
)
// SnappyReadCloser embeds the snappy reader which implements the io.Reader
// interface. The decompress procedure in this utility expects an
// io.ReadCloser. This type implements the io.Closer interface to retain the
// generic way of decompression.
type SnappyReadCloser struct {
*snappy.Reader
type CompressUtilReadCloser struct {
io.Reader
}
// Close is a noop method implemented only to satisfy the io.Closer interface
func (s *SnappyReadCloser) Close() error {
func (c *CompressUtilReadCloser) Close() error {
return nil
}
// CompressionConfig is used to select a compression type to be performed by
// Compress and Decompress utilities.
// Supported types are:
// * CompressionTypeLzw
// * CompressionTypeLZW
// * CompressionTypeGzip
// * CompressionTypeSnappy
// * CompressionTypeLZ4
//
// When using CompressionTypeGzip, the compression levels can also be chosen:
// * gzip.DefaultCompression
@ -82,10 +81,10 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
// Write the canary into the buffer and create writer to compress the
// input data based on the configured type
switch config.Type {
case CompressionTypeLzw:
buf.Write([]byte{CompressionCanaryLzw})
case CompressionTypeLZW:
buf.Write([]byte{CompressionCanaryLZW})
writer = lzw.NewWriter(&buf, lzw.LSB, 8)
case CompressionTypeGzip:
buf.Write([]byte{CompressionCanaryGzip})
@ -100,9 +99,15 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
config.GzipCompressionLevel = gzip.DefaultCompression
}
writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
case CompressionTypeSnappy:
buf.Write([]byte{CompressionCanarySnappy})
writer = snappy.NewBufferedWriter(&buf)
case CompressionTypeLZ4:
buf.Write([]byte{CompressionCanaryLZ4})
writer = lz4.NewWriter(&buf)
default:
return nil, fmt.Errorf("unsupported compression type")
}
@ -142,30 +147,40 @@ func Decompress(data []byte) ([]byte, bool, error) {
return nil, false, fmt.Errorf("'data' being decompressed is empty")
}
switch {
canary := data[0]
cData := data[1:]
switch canary {
// If the first byte matches the canary byte, remove the canary
// byte and try to decompress the data that is after the canary.
case data[0] == CompressionCanaryGzip:
case CompressionCanaryGzip:
if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary")
}
data = data[1:]
reader, err = gzip.NewReader(bytes.NewReader(data))
case data[0] == CompressionCanaryLzw:
if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary")
}
data = data[1:]
reader = lzw.NewReader(bytes.NewReader(data), lzw.LSB, 8)
reader, err = gzip.NewReader(bytes.NewReader(cData))
case data[0] == CompressionCanarySnappy:
case CompressionCanaryLZW:
if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary")
}
data = data[1:]
reader = &SnappyReadCloser{
Reader: snappy.NewReader(bytes.NewReader(data)),
reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8)
case CompressionCanarySnappy:
if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary")
}
reader = &CompressUtilReadCloser{
Reader: snappy.NewReader(bytes.NewReader(cData)),
}
case CompressionCanaryLZ4:
if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary")
}
reader = &CompressUtilReadCloser{
Reader: lz4.NewReader(bytes.NewReader(cData)),
}
default:
// If the first byte doesn't match the canary byte, it means
// that the content was not compressed at all. Indicate the

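The single canary byte that Compress prepends is what keeps Decompress self-describing: the first byte selects the decoder (or, if it matches no canary, signals that the payload was never compressed) and everything after it is the compressed stream. A small round-trip sketch using the newly added LZ4 type:

package example

import (
	"fmt"

	"github.com/hashicorp/vault/helper/compressutil"
)

func roundTrip(data []byte) ([]byte, error) {
	compressed, err := compressutil.Compress(data, &compressutil.CompressionConfig{
		Type: compressutil.CompressionTypeLZ4,
	})
	if err != nil {
		return nil, err
	}
	// compressed[0] is CompressionCanaryLZ4 ('4'); the LZ4 frame starts at byte 1.
	plain, notCompressed, err := compressutil.Decompress(compressed)
	if err != nil {
		return nil, err
	}
	if notCompressed {
		return nil, fmt.Errorf("input was not recognized as compressed")
	}
	return plain, nil
}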
View file

@ -3,65 +3,90 @@ package compressutil
import (
"bytes"
"compress/gzip"
"encoding/json"
"testing"
)
func TestCompressUtil_CompressSnappy(t *testing.T) {
input := map[string]interface{}{
"sample": "data",
"verification": "process",
func TestCompressUtil_CompressDecompress(t *testing.T) {
t.Parallel()
tests := []struct {
compressionType string
compressionConfig CompressionConfig
canary byte
}{
{"GZIP default implicit",
CompressionConfig{Type: CompressionTypeGzip},
CompressionCanaryGzip,
},
{"GZIP default explicit",
CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.DefaultCompression},
CompressionCanaryGzip,
},
{"GZIP best speed",
CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.BestSpeed},
CompressionCanaryGzip,
},
{"GZIP best compression",
CompressionConfig{Type: CompressionTypeGzip, GzipCompressionLevel: gzip.BestCompression},
CompressionCanaryGzip,
},
{"Snappy",
CompressionConfig{Type: CompressionTypeSnappy},
CompressionCanarySnappy,
},
{"LZ4",
CompressionConfig{Type: CompressionTypeLZ4},
CompressionCanaryLZ4,
},
{"LZW",
CompressionConfig{Type: CompressionTypeLZW},
CompressionCanaryLZW,
},
}
// Encode input into JSON
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
if err := enc.Encode(input); err != nil {
t.Fatal(err)
}
inputJSONBytes := buf.Bytes()
inputJSONBytes := []byte(`{"sample":"data","verification":"process"}`)
// Set Snappy compression in the configuration
compressionConfig := &CompressionConfig{
Type: CompressionTypeSnappy,
}
for _, test := range tests {
// Compress the input
compressedJSONBytes, err := Compress(inputJSONBytes, &test.compressionConfig)
if err != nil {
t.Fatalf("compress error (%s): %s", test.compressionType, err)
}
if len(compressedJSONBytes) == 0 {
t.Fatalf("failed to compress data in %s format", test.compressionType)
}
// Compress the input
compressedJSONBytes, err := Compress(inputJSONBytes, compressionConfig)
if err != nil {
t.Fatal(err)
}
// Check the presence of the canary
if compressedJSONBytes[0] != test.canary {
t.Fatalf("bad (%s): compression canary: expected: %d actual: %d", test.compressionType, test.canary, compressedJSONBytes[0])
}
decompressedJSONBytes, wasNotCompressed, err := Decompress(compressedJSONBytes)
if err != nil {
t.Fatal(err)
}
decompressedJSONBytes, wasNotCompressed, err := Decompress(compressedJSONBytes)
if err != nil {
t.Fatalf("decompress error (%s): %s", test.compressionType, err)
}
// Check if the input for decompress was not compressed in the first place
if wasNotCompressed {
t.Fatalf("bad: expected compressed bytes")
}
// Check if the input for decompress was not compressed in the first place
if wasNotCompressed {
t.Fatalf("bad (%s): expected compressed bytes", test.compressionType)
}
// Compare the value after decompression
if string(inputJSONBytes) != string(decompressedJSONBytes) {
t.Fatalf("bad: decompressed value;\nexpected: %q\nactual: %q", string(inputJSONBytes), string(decompressedJSONBytes))
if len(decompressedJSONBytes) == 0 {
t.Fatalf("bad (%s): expected decompressed bytes", test.compressionType)
}
// Compare the value after decompression
if !bytes.Equal(inputJSONBytes, decompressedJSONBytes) {
t.Fatalf("bad (%s): decompressed value;\nexpected: %q\nactual: %q", test.compressionType, string(inputJSONBytes), string(decompressedJSONBytes))
}
}
}
func TestCompressUtil_CompressDecompress(t *testing.T) {
input := map[string]interface{}{
"sample": "data",
"verification": "process",
}
func TestCompressUtil_InvalidConfigurations(t *testing.T) {
t.Parallel()
// Encode input into JSON
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
if err := enc.Encode(input); err != nil {
t.Fatal(err)
}
inputJSONBytes := []byte(`{"sample":"data","verification":"process"}`)
inputJSONBytes := buf.Bytes()
// Test nil configuration
if _, err := Compress(inputJSONBytes, nil); err == nil {
t.Fatal("expected an error")
@ -71,163 +96,4 @@ func TestCompressUtil_CompressDecompress(t *testing.T) {
if _, err := Compress(inputJSONBytes, &CompressionConfig{}); err == nil {
t.Fatal("expected an error")
}
// Compress input using lzw format
compressedJSONBytes, err := Compress(inputJSONBytes, &CompressionConfig{
Type: CompressionTypeLzw,
})
if err != nil {
t.Fatal("expected an error")
}
if len(compressedJSONBytes) == 0 {
t.Fatal("failed to compress data in lzw format")
}
// Check the presence of the canary
if compressedJSONBytes[0] != CompressionCanaryLzw {
t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryLzw, compressedJSONBytes[0])
}
// Decompress the input and check the output
decompressedJSONBytes, uncompressed, err := Decompress(compressedJSONBytes)
if err != nil {
t.Fatal(err)
}
if uncompressed {
t.Fatal("failed to recognize compressed data")
}
if len(decompressedJSONBytes) == 0 {
t.Fatal("failed to decompress lzw formatted data")
}
if string(inputJSONBytes) != string(decompressedJSONBytes) {
t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
}
// Compress input using Gzip format, assume DefaultCompression
compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
Type: CompressionTypeGzip,
})
if err != nil {
t.Fatal("expected an error")
}
if len(compressedJSONBytes) == 0 {
t.Fatal("failed to compress data in lzw format")
}
// Check the presence of the canary
if compressedJSONBytes[0] != CompressionCanaryGzip {
t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryGzip, compressedJSONBytes[0])
}
// Decompress the input and check the output
decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
if err != nil {
t.Fatal(err)
}
if uncompressed {
t.Fatal("failed to recognize compressed data")
}
if len(decompressedJSONBytes) == 0 {
t.Fatal("failed to decompress lzw formatted data")
}
if string(inputJSONBytes) != string(decompressedJSONBytes) {
t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
}
// Compress input using Gzip format: DefaultCompression
compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
Type: CompressionTypeGzip,
GzipCompressionLevel: gzip.DefaultCompression,
})
if err != nil {
t.Fatal("expected an error")
}
if len(compressedJSONBytes) == 0 {
t.Fatal("failed to compress data in lzw format")
}
// Check the presence of the canary
if compressedJSONBytes[0] != CompressionCanaryGzip {
t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryGzip, compressedJSONBytes[0])
}
// Decompress the input and check the output
decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
if err != nil {
t.Fatal(err)
}
if uncompressed {
t.Fatal("failed to recognize compressed data")
}
if len(decompressedJSONBytes) == 0 {
t.Fatal("failed to decompress lzw formatted data")
}
if string(inputJSONBytes) != string(decompressedJSONBytes) {
t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
}
// Compress input using Gzip format, BestCompression
compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
Type: CompressionTypeGzip,
GzipCompressionLevel: gzip.BestCompression,
})
if err != nil {
t.Fatal("expected an error")
}
if len(compressedJSONBytes) == 0 {
t.Fatal("failed to compress data in lzw format")
}
// Check the presence of the canary
if compressedJSONBytes[0] != CompressionCanaryGzip {
t.Fatalf("bad: compression canary: expected: %d actual: %d", CompressionCanaryGzip, compressedJSONBytes[0])
}
// Decompress the input and check the output
decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
if err != nil {
t.Fatal(err)
}
if uncompressed {
t.Fatal("failed to recognize compressed data")
}
if len(decompressedJSONBytes) == 0 {
t.Fatal("failed to decompress lzw formatted data")
}
if string(inputJSONBytes) != string(decompressedJSONBytes) {
t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
}
// Compress input using Gzip format, BestSpeed
compressedJSONBytes, err = Compress(inputJSONBytes, &CompressionConfig{
Type: CompressionTypeGzip,
GzipCompressionLevel: gzip.BestSpeed,
})
if err != nil {
t.Fatal("expected an error")
}
if len(compressedJSONBytes) == 0 {
t.Fatal("failed to compress data in lzw format")
}
// Check the presence of the canary
if compressedJSONBytes[0] != CompressionCanaryGzip {
t.Fatalf("bad: compression canary: expected: %d actual: %d",
CompressionCanaryGzip, compressedJSONBytes[0])
}
// Decompress the input and check the output
decompressedJSONBytes, uncompressed, err = Decompress(compressedJSONBytes)
if err != nil {
t.Fatal(err)
}
if uncompressed {
t.Fatal("failed to recognize compressed data")
}
if len(decompressedJSONBytes) == 0 {
t.Fatal("failed to decompress lzw formatted data")
}
if string(inputJSONBytes) != string(decompressedJSONBytes) {
t.Fatalf("bad: mismatch: inputJSONBytes: %s\n decompressedJSONBytes: %s", string(inputJSONBytes), string(decompressedJSONBytes))
}
}

View file

@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: helper/forwarding/types.proto
package forwarding // import "github.com/hashicorp/vault/helper/forwarding"
package forwarding
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -38,16 +40,17 @@ func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) {
return fileDescriptor_types_7ccf0973261c4726, []int{0}
return fileDescriptor_e38697de88a2f47c, []int{0}
}
func (m *Request) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Request.Unmarshal(m, b)
}
func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Request.Marshal(b, m, deterministic)
}
func (dst *Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_Request.Merge(dst, src)
func (m *Request) XXX_Merge(src proto.Message) {
xxx_messageInfo_Request.Merge(m, src)
}
func (m *Request) XXX_Size() int {
return xxx_messageInfo_Request.Size(m)
@ -129,16 +132,17 @@ func (m *URL) Reset() { *m = URL{} }
func (m *URL) String() string { return proto.CompactTextString(m) }
func (*URL) ProtoMessage() {}
func (*URL) Descriptor() ([]byte, []int) {
return fileDescriptor_types_7ccf0973261c4726, []int{1}
return fileDescriptor_e38697de88a2f47c, []int{1}
}
func (m *URL) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_URL.Unmarshal(m, b)
}
func (m *URL) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_URL.Marshal(b, m, deterministic)
}
func (dst *URL) XXX_Merge(src proto.Message) {
xxx_messageInfo_URL.Merge(dst, src)
func (m *URL) XXX_Merge(src proto.Message) {
xxx_messageInfo_URL.Merge(m, src)
}
func (m *URL) XXX_Size() int {
return xxx_messageInfo_URL.Size(m)
@ -209,16 +213,17 @@ func (m *HeaderEntry) Reset() { *m = HeaderEntry{} }
func (m *HeaderEntry) String() string { return proto.CompactTextString(m) }
func (*HeaderEntry) ProtoMessage() {}
func (*HeaderEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_types_7ccf0973261c4726, []int{2}
return fileDescriptor_e38697de88a2f47c, []int{2}
}
func (m *HeaderEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_HeaderEntry.Unmarshal(m, b)
}
func (m *HeaderEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_HeaderEntry.Marshal(b, m, deterministic)
}
func (dst *HeaderEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_HeaderEntry.Merge(dst, src)
func (m *HeaderEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_HeaderEntry.Merge(m, src)
}
func (m *HeaderEntry) XXX_Size() int {
return xxx_messageInfo_HeaderEntry.Size(m)
@ -245,6 +250,7 @@ type Response struct {
// Added in 0.6.2 to ensure that the content-type is set appropriately, as
// well as any other information
HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries,proto3" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
LastRemoteWal uint64 `protobuf:"varint,5,opt,name=last_remote_wal,json=lastRemoteWal,proto3" json:"last_remote_wal,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -254,16 +260,17 @@ func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) {
return fileDescriptor_types_7ccf0973261c4726, []int{3}
return fileDescriptor_e38697de88a2f47c, []int{3}
}
func (m *Response) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Response.Unmarshal(m, b)
}
func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Response.Marshal(b, m, deterministic)
}
func (dst *Response) XXX_Merge(src proto.Message) {
xxx_messageInfo_Response.Merge(dst, src)
func (m *Response) XXX_Merge(src proto.Message) {
xxx_messageInfo_Response.Merge(m, src)
}
func (m *Response) XXX_Size() int {
return xxx_messageInfo_Response.Size(m)
@ -295,6 +302,13 @@ func (m *Response) GetHeaderEntries() map[string]*HeaderEntry {
return nil
}
func (m *Response) GetLastRemoteWal() uint64 {
if m != nil {
return m.LastRemoteWal
}
return 0
}
func init() {
proto.RegisterType((*Request)(nil), "forwarding.Request")
proto.RegisterMapType((map[string]*HeaderEntry)(nil), "forwarding.Request.HeaderEntriesEntry")
@ -304,40 +318,40 @@ func init() {
proto.RegisterMapType((map[string]*HeaderEntry)(nil), "forwarding.Response.HeaderEntriesEntry")
}
func init() {
proto.RegisterFile("helper/forwarding/types.proto", fileDescriptor_types_7ccf0973261c4726)
}
func init() { proto.RegisterFile("helper/forwarding/types.proto", fileDescriptor_e38697de88a2f47c) }
var fileDescriptor_types_7ccf0973261c4726 = []byte{
// 475 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0x4f, 0x8f, 0xd3, 0x3e,
0x10, 0x55, 0x9a, 0x6e, 0xff, 0x4c, 0x77, 0x7f, 0xbf, 0xc5, 0x07, 0x30, 0x8b, 0x10, 0xa1, 0x12,
0x10, 0x09, 0x48, 0xa4, 0x72, 0x41, 0xdc, 0x60, 0x85, 0xc4, 0x61, 0x41, 0x60, 0x69, 0x0f, 0x70,
0x89, 0xdc, 0x78, 0x5a, 0x47, 0x34, 0x75, 0x6a, 0x3b, 0x5b, 0xe5, 0x63, 0xf1, 0x9d, 0x38, 0xf1,
0x29, 0x90, 0x9d, 0xb0, 0x0d, 0x5a, 0x21, 0x4e, 0x9c, 0x32, 0xef, 0xbd, 0xc9, 0x78, 0xde, 0x8c,
0x0d, 0xf7, 0x25, 0x6e, 0x2a, 0xd4, 0xe9, 0x4a, 0xe9, 0x3d, 0xd7, 0xa2, 0xd8, 0xae, 0x53, 0xdb,
0x54, 0x68, 0x92, 0x4a, 0x2b, 0xab, 0x08, 0x1c, 0xf8, 0xf9, 0xf7, 0x01, 0x8c, 0x19, 0xee, 0x6a,
0x34, 0x96, 0xdc, 0x86, 0x51, 0x89, 0x56, 0x2a, 0x41, 0x07, 0x51, 0x10, 0x4f, 0x59, 0x87, 0xc8,
0x43, 0x08, 0x6b, 0xbd, 0xa1, 0x61, 0x14, 0xc4, 0xb3, 0xc5, 0xff, 0xc9, 0xe1, 0xef, 0xe4, 0x92,
0x5d, 0x30, 0xa7, 0x91, 0xf7, 0xf0, 0x9f, 0x44, 0x2e, 0x50, 0x67, 0xb8, 0xb5, 0xba, 0x40, 0x43,
0x87, 0x51, 0x18, 0xcf, 0x16, 0x8f, 0xfb, 0xd9, 0xdd, 0x39, 0xc9, 0x3b, 0x9f, 0xf9, 0xb6, 0x4d,
0x74, 0x9f, 0x86, 0x9d, 0xc8, 0x3e, 0x47, 0x08, 0x0c, 0x97, 0x4a, 0x34, 0xf4, 0x28, 0x0a, 0xe2,
0x63, 0xe6, 0x63, 0xc7, 0x49, 0x65, 0x2c, 0x1d, 0xf9, 0xde, 0x7c, 0x4c, 0x1e, 0xc0, 0x4c, 0x63,
0xa9, 0x2c, 0x66, 0x5c, 0x08, 0x4d, 0xc7, 0x5e, 0x82, 0x96, 0x7a, 0x2d, 0x84, 0x26, 0x4f, 0xe1,
0x56, 0x85, 0xa8, 0xb3, 0x1c, 0xb5, 0x2d, 0x56, 0x45, 0xce, 0x2d, 0x1a, 0x3a, 0x89, 0xc2, 0xf8,
0x98, 0x9d, 0x3a, 0xe1, 0xbc, 0xc7, 0x9f, 0x7d, 0x06, 0x72, 0xb3, 0x35, 0x72, 0x0a, 0xe1, 0x57,
0x6c, 0x68, 0xe0, 0x6b, 0xbb, 0x90, 0x3c, 0x87, 0xa3, 0x2b, 0xbe, 0xa9, 0xd1, 0x8f, 0x69, 0xb6,
0xb8, 0xd3, 0xf7, 0x78, 0x28, 0xd0, 0xb0, 0x36, 0xeb, 0xd5, 0xe0, 0x65, 0x30, 0xff, 0x16, 0x40,
0x78, 0xc9, 0x2e, 0xdc, 0x88, 0x4d, 0x2e, 0xb1, 0xc4, 0xae, 0x5e, 0x87, 0x1c, 0xaf, 0x2a, 0xbe,
0xeb, 0x6a, 0x4e, 0x59, 0x87, 0xae, 0x4d, 0x0f, 0x7b, 0xa6, 0x09, 0x0c, 0x2b, 0x6e, 0xa5, 0x1f,
0xce, 0x94, 0xf9, 0x98, 0xdc, 0x85, 0x89, 0xe6, 0xfb, 0xcc, 0xf3, 0xed, 0x80, 0xc6, 0x9a, 0xef,
0x3f, 0x3a, 0xe9, 0x1e, 0x4c, 0x9d, 0xb4, 0xab, 0x51, 0x37, 0x74, 0xe2, 0x35, 0x97, 0xfb, 0xc9,
0x61, 0x72, 0x06, 0x93, 0x95, 0xe6, 0xeb, 0x12, 0xb7, 0x96, 0x4e, 0x5b, 0xed, 0x17, 0x9e, 0x3f,
0x82, 0x59, 0xcf, 0x8d, 0x6b, 0xd1, 0xfb, 0x31, 0x34, 0x88, 0x42, 0xd7, 0x62, 0x8b, 0xe6, 0x3f,
0x02, 0x98, 0x30, 0x34, 0x95, 0xda, 0x1a, 0x74, 0x0b, 0x31, 0x96, 0xdb, 0xda, 0x64, 0xb9, 0x12,
0xad, 0x99, 0x13, 0x06, 0x2d, 0x75, 0xae, 0x04, 0x5e, 0x6f, 0x36, 0xec, 0x6d, 0xf6, 0xc3, 0x1f,
0x2e, 0xcf, 0x93, 0xdf, 0x2f, 0x4f, 0x7b, 0xc4, 0xdf, 0x6f, 0xcf, 0x3f, 0xdc, 0xe3, 0x9b, 0xe4,
0xcb, 0xb3, 0x75, 0x61, 0x65, 0xbd, 0x4c, 0x72, 0x55, 0xa6, 0x92, 0x1b, 0x59, 0xe4, 0x4a, 0x57,
0xe9, 0x15, 0xaf, 0x37, 0x36, 0xbd, 0xf1, 0xec, 0x96, 0x23, 0xff, 0xe2, 0x5e, 0xfc, 0x0c, 0x00,
0x00, 0xff, 0xff, 0x03, 0xfa, 0xd9, 0x51, 0x92, 0x03, 0x00, 0x00,
var fileDescriptor_e38697de88a2f47c = []byte{
// 497 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0xc1, 0x6e, 0xd3, 0x40,
0x10, 0x95, 0xe3, 0xb4, 0x49, 0x26, 0x0d, 0x2d, 0x7b, 0x80, 0xa5, 0x08, 0x61, 0x22, 0x51, 0x22,
0x01, 0x8e, 0x14, 0x2e, 0x88, 0x1b, 0x54, 0x48, 0x1c, 0x0a, 0x82, 0x95, 0x2a, 0x04, 0x17, 0x6b,
0xe3, 0x9d, 0x64, 0x2d, 0xec, 0xac, 0xb3, 0xbb, 0x6e, 0xe4, 0xdf, 0xe0, 0x4f, 0xf8, 0x27, 0x3e,
0x04, 0xed, 0xda, 0x34, 0x46, 0x15, 0x12, 0x17, 0x4e, 0x99, 0xf7, 0xde, 0x64, 0x3c, 0x6f, 0x66,
0x16, 0x1e, 0x48, 0xcc, 0x4b, 0xd4, 0xf3, 0x95, 0xd2, 0x3b, 0xae, 0x45, 0xb6, 0x59, 0xcf, 0x6d,
0x5d, 0xa2, 0x89, 0x4b, 0xad, 0xac, 0x22, 0xb0, 0xe7, 0xa7, 0x3f, 0x7b, 0x30, 0x60, 0xb8, 0xad,
0xd0, 0x58, 0x72, 0x07, 0x0e, 0x0b, 0xb4, 0x52, 0x09, 0xda, 0x8b, 0x82, 0xd9, 0x88, 0xb5, 0x88,
0x3c, 0x82, 0xb0, 0xd2, 0x39, 0x0d, 0xa3, 0x60, 0x36, 0x5e, 0x1c, 0xc7, 0xfb, 0x7f, 0xc7, 0x97,
0xec, 0x82, 0x39, 0x8d, 0xbc, 0x87, 0x5b, 0x12, 0xb9, 0x40, 0x9d, 0xe0, 0xc6, 0xea, 0x0c, 0x0d,
0xed, 0x47, 0xe1, 0x6c, 0xbc, 0x38, 0xeb, 0x66, 0xb7, 0xdf, 0x89, 0xdf, 0xf9, 0xcc, 0xb7, 0x4d,
0xa2, 0xfb, 0xa9, 0xd9, 0x44, 0x76, 0x39, 0x42, 0xa0, 0xbf, 0x54, 0xa2, 0xa6, 0x07, 0x51, 0x30,
0x3b, 0x62, 0x3e, 0x76, 0x9c, 0x54, 0xc6, 0xd2, 0x43, 0xdf, 0x9b, 0x8f, 0xc9, 0x43, 0x18, 0x6b,
0x2c, 0x94, 0xc5, 0x84, 0x0b, 0xa1, 0xe9, 0xc0, 0x4b, 0xd0, 0x50, 0xaf, 0x85, 0xd0, 0xe4, 0x29,
0xdc, 0x2e, 0x11, 0x75, 0x92, 0xa2, 0xb6, 0xd9, 0x2a, 0x4b, 0xb9, 0x45, 0x43, 0x87, 0x51, 0x38,
0x3b, 0x62, 0x27, 0x4e, 0x38, 0xef, 0xf0, 0xa7, 0x5f, 0x80, 0xdc, 0x6c, 0x8d, 0x9c, 0x40, 0xf8,
0x0d, 0x6b, 0x1a, 0xf8, 0xda, 0x2e, 0x24, 0xcf, 0xe1, 0xe0, 0x8a, 0xe7, 0x15, 0xfa, 0x31, 0x8d,
0x17, 0x77, 0xbb, 0x1e, 0xf7, 0x05, 0x6a, 0xd6, 0x64, 0xbd, 0xea, 0xbd, 0x0c, 0xa6, 0x3f, 0x02,
0x08, 0x2f, 0xd9, 0x85, 0x1b, 0xb1, 0x49, 0x25, 0x16, 0xd8, 0xd6, 0x6b, 0x91, 0xe3, 0x55, 0xc9,
0xb7, 0x6d, 0xcd, 0x11, 0x6b, 0xd1, 0xb5, 0xe9, 0x7e, 0xc7, 0x34, 0x81, 0x7e, 0xc9, 0xad, 0xf4,
0xc3, 0x19, 0x31, 0x1f, 0x93, 0x7b, 0x30, 0xd4, 0x7c, 0x97, 0x78, 0xbe, 0x19, 0xd0, 0x40, 0xf3,
0xdd, 0x47, 0x27, 0xdd, 0x87, 0x91, 0x93, 0xb6, 0x15, 0xea, 0x9a, 0x0e, 0xbd, 0xe6, 0x72, 0x3f,
0x39, 0x4c, 0x4e, 0x61, 0xb8, 0xd2, 0x7c, 0x5d, 0xe0, 0xc6, 0xd2, 0x51, 0xa3, 0xfd, 0xc6, 0xd3,
0xc7, 0x30, 0xee, 0xb8, 0x71, 0x2d, 0x7a, 0x3f, 0x86, 0x06, 0x51, 0xe8, 0x5a, 0x6c, 0xd0, 0xf4,
0x7b, 0x0f, 0x86, 0x0c, 0x4d, 0xa9, 0x36, 0x06, 0xdd, 0x42, 0x8c, 0xe5, 0xb6, 0x32, 0x49, 0xaa,
0x44, 0x63, 0x66, 0xc2, 0xa0, 0xa1, 0xce, 0x95, 0xc0, 0xeb, 0xcd, 0x86, 0x9d, 0xcd, 0x7e, 0xf8,
0xcb, 0xf1, 0x3c, 0xf9, 0xf3, 0x78, 0x9a, 0x4f, 0xfc, 0xc3, 0xf5, 0x9c, 0xc1, 0x71, 0xce, 0x8d,
0x4d, 0xda, 0xd3, 0xd8, 0xf1, 0xdc, 0xcf, 0xaa, 0xcf, 0x26, 0x8e, 0x66, 0x9e, 0xfd, 0xcc, 0xf3,
0xff, 0xb8, 0xef, 0x37, 0xf1, 0xd7, 0x67, 0xeb, 0xcc, 0xca, 0x6a, 0x19, 0xa7, 0xaa, 0x98, 0x4b,
0x6e, 0x64, 0x96, 0x2a, 0x5d, 0xce, 0xaf, 0x78, 0x95, 0xdb, 0xf9, 0x8d, 0xe7, 0xb9, 0x3c, 0xf4,
0x2f, 0xf3, 0xc5, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xbd, 0xb1, 0xfc, 0xba, 0x03, 0x00,
0x00,
}

View file

@ -45,4 +45,5 @@ message Response {
// Added in 0.6.2 to ensure that the content-type is set appropriately, as
// well as any other information
map<string, HeaderEntry> header_entries = 4;
uint64 last_remote_wal = 5;
}

View file

@ -39,7 +39,7 @@ func GenerateForwardedHTTPRequest(req *http.Request, addr string) (*http.Request
newBody, err = jsonutil.EncodeJSON(fq)
case "json_compress":
newBody, err = jsonutil.EncodeJSONAndCompress(fq, &compressutil.CompressionConfig{
Type: compressutil.CompressionTypeLzw,
Type: compressutil.CompressionTypeLZW,
})
case "proto3":
fallthrough

View file

@ -0,0 +1,70 @@
// +build !enterprise
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: helper/identity/mfa/types.proto
package mfa
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Secret struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Secret) Reset() { *m = Secret{} }
func (m *Secret) String() string { return proto.CompactTextString(m) }
func (*Secret) ProtoMessage() {}
func (*Secret) Descriptor() ([]byte, []int) {
return fileDescriptor_2eb73493aac0ba29, []int{0}
}
func (m *Secret) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Secret.Unmarshal(m, b)
}
func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Secret.Marshal(b, m, deterministic)
}
func (m *Secret) XXX_Merge(src proto.Message) {
xxx_messageInfo_Secret.Merge(m, src)
}
func (m *Secret) XXX_Size() int {
return xxx_messageInfo_Secret.Size(m)
}
func (m *Secret) XXX_DiscardUnknown() {
xxx_messageInfo_Secret.DiscardUnknown(m)
}
var xxx_messageInfo_Secret proto.InternalMessageInfo
func init() {
proto.RegisterType((*Secret)(nil), "mfa.Secret")
}
func init() { proto.RegisterFile("helper/identity/mfa/types.proto", fileDescriptor_2eb73493aac0ba29) }
var fileDescriptor_2eb73493aac0ba29 = []byte{
// 111 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcf, 0x48, 0xcd, 0x29,
0x48, 0x2d, 0xd2, 0xcf, 0x4c, 0x49, 0xcd, 0x2b, 0xc9, 0x2c, 0xa9, 0xd4, 0xcf, 0x4d, 0x4b, 0xd4,
0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0xce, 0x4d, 0x4b,
0x54, 0xe2, 0xe0, 0x62, 0x0b, 0x4e, 0x4d, 0x2e, 0x4a, 0x2d, 0x71, 0x32, 0x88, 0xd2, 0x4b, 0xcf,
0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0xcf, 0x48, 0x2c, 0xce, 0xc8, 0x4c, 0xce,
0x2f, 0x2a, 0xd0, 0x2f, 0x4b, 0x2c, 0xcd, 0x29, 0xd1, 0xc7, 0x62, 0x58, 0x12, 0x1b, 0xd8, 0x1c,
0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xc9, 0x73, 0x5e, 0x6a, 0x00, 0x00, 0x00,
}

View file

@ -0,0 +1,7 @@
syntax = "proto3";
option go_package = "github.com/hashicorp/vault/helper/identity/mfa";
package mfa;
message Secret {}

View file

@ -4,6 +4,8 @@ import (
"errors"
"fmt"
"strings"
"github.com/hashicorp/vault/helper/namespace"
)
var (
@ -18,6 +20,7 @@ type PopulateStringInput struct {
String string
Entity *Entity
Groups []*Group
Namespace *namespace.Namespace
}
func PopulateString(p *PopulateStringInput) (bool, string, error) {
@ -58,7 +61,7 @@ func PopulateString(p *PopulateStringInput) (bool, string, error) {
case 2:
subst = true
if !p.ValidityCheckOnly {
tmplStr, err := performTemplating(strings.TrimSpace(splitPiece[0]), p.Entity, p.Groups)
tmplStr, err := performTemplating(p.Namespace, strings.TrimSpace(splitPiece[0]), p.Entity, p.Groups)
if err != nil {
return false, "", err
}
@ -73,7 +76,7 @@ func PopulateString(p *PopulateStringInput) (bool, string, error) {
return subst, b.String(), nil
}
func performTemplating(input string, entity *Entity, groups []*Group) (string, error) {
func performTemplating(ns *namespace.Namespace, input string, entity *Entity, groups []*Group) (string, error) {
performAliasTemplating := func(trimmed string, alias *Alias) (string, error) {
switch {
case trimmed == "id":
@ -151,9 +154,15 @@ func performTemplating(input string, entity *Entity, groups []*Group) (string, e
}
var found *Group
for _, group := range groups {
compare := group.Name
var compare string
if ids {
compare = group.ID
} else {
if ns != nil && group.NamespaceID == ns.ID {
compare = group.Name
} else {
continue
}
}
if compare == accessorSplit[0] {

View file

@ -3,6 +3,8 @@ package identity
import (
"errors"
"testing"
"github.com/hashicorp/vault/helper/namespace"
)
func TestPopulate_Basic(t *testing.T) {
@ -165,9 +167,10 @@ func TestPopulate_Basic(t *testing.T) {
var groups []*Group
if test.groupName != "" {
groups = append(groups, &Group{
ID: "groupID",
Name: test.groupName,
Metadata: test.groupMetadata,
ID: "groupID",
Name: test.groupName,
Metadata: test.groupMetadata,
NamespaceID: namespace.RootNamespace.ID,
})
}
subst, out, err := PopulateString(&PopulateStringInput{
@ -175,6 +178,7 @@ func TestPopulate_Basic(t *testing.T) {
String: test.input,
Entity: entity,
Groups: groups,
Namespace: namespace.RootNamespace,
})
if err != nil {
if test.err == nil {

View file

@ -1,12 +1,15 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: helper/identity/types.proto
package identity // import "github.com/hashicorp/vault/helper/identity"
package identity
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
mfa "github.com/hashicorp/vault/helper/identity/mfa"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -56,7 +59,11 @@ type Group struct {
// Memberships of the internal groups can be managed over the API whereas
// the memberships on the external group --for which a corresponding alias
// will be set-- will be managed automatically.
Type string `sentinel:"" protobuf:"bytes,12,opt,name=type,proto3" json:"type,omitempty"`
Type string `sentinel:"" protobuf:"bytes,12,opt,name=type,proto3" json:"type,omitempty"`
// NamespaceID is the identifier of the namespace to which this group
// belongs. Do not return this value over the API when reading the
// group.
NamespaceID string `sentinel:"" protobuf:"bytes,13,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -66,16 +73,17 @@ func (m *Group) Reset() { *m = Group{} }
func (m *Group) String() string { return proto.CompactTextString(m) }
func (*Group) ProtoMessage() {}
func (*Group) Descriptor() ([]byte, []int) {
return fileDescriptor_types_0360db4a8e77dd9b, []int{0}
return fileDescriptor_319efdc71a5d7416, []int{0}
}
func (m *Group) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Group.Unmarshal(m, b)
}
func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Group.Marshal(b, m, deterministic)
}
func (dst *Group) XXX_Merge(src proto.Message) {
xxx_messageInfo_Group.Merge(dst, src)
func (m *Group) XXX_Merge(src proto.Message) {
xxx_messageInfo_Group.Merge(m, src)
}
func (m *Group) XXX_Size() int {
return xxx_messageInfo_Group.Size(m)
@ -170,6 +178,13 @@ func (m *Group) GetType() string {
return ""
}
func (m *Group) GetNamespaceID() string {
if m != nil {
return m.NamespaceID
}
return ""
}
// Entity represents an entity that gets persisted and indexed.
// Entity is fundamentally composed of zero or many aliases.
type Entity struct {
@ -211,9 +226,16 @@ type Entity struct {
// the entities belonging to a particular bucket during invalidation of the
// storage key.
BucketKeyHash string `sentinel:"" protobuf:"bytes,9,opt,name=bucket_key_hash,json=bucketKeyHash,proto3" json:"bucket_key_hash,omitempty"`
// MFASecrets holds the MFA secrets indexed by the identifier of the MFA
// method configuration.
MFASecrets map[string]*mfa.Secret `sentinel:"" protobuf:"bytes,10,rep,name=mfa_secrets,json=mfaSecrets,proto3" json:"mfa_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Disabled indicates whether tokens associated with the account should not
// be able to be used
Disabled bool `sentinel:"" protobuf:"varint,11,opt,name=disabled,proto3" json:"disabled,omitempty"`
Disabled bool `sentinel:"" protobuf:"varint,11,opt,name=disabled,proto3" json:"disabled,omitempty"`
// NamespaceID is the identifier of the namespace to which this entity
// belongs. Do not return this value over the API when reading the
// entity.
NamespaceID string `sentinel:"" protobuf:"bytes,12,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -223,16 +245,17 @@ func (m *Entity) Reset() { *m = Entity{} }
func (m *Entity) String() string { return proto.CompactTextString(m) }
func (*Entity) ProtoMessage() {}
func (*Entity) Descriptor() ([]byte, []int) {
return fileDescriptor_types_0360db4a8e77dd9b, []int{1}
return fileDescriptor_319efdc71a5d7416, []int{1}
}
func (m *Entity) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Entity.Unmarshal(m, b)
}
func (m *Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Entity.Marshal(b, m, deterministic)
}
func (dst *Entity) XXX_Merge(src proto.Message) {
xxx_messageInfo_Entity.Merge(dst, src)
func (m *Entity) XXX_Merge(src proto.Message) {
xxx_messageInfo_Entity.Merge(m, src)
}
func (m *Entity) XXX_Size() int {
return xxx_messageInfo_Entity.Size(m)
@ -306,6 +329,13 @@ func (m *Entity) GetBucketKeyHash() string {
return ""
}
func (m *Entity) GetMFASecrets() map[string]*mfa.Secret {
if m != nil {
return m.MFASecrets
}
return nil
}
func (m *Entity) GetDisabled() bool {
if m != nil {
return m.Disabled
@ -313,6 +343,13 @@ func (m *Entity) GetDisabled() bool {
return false
}
func (m *Entity) GetNamespaceID() string {
if m != nil {
return m.NamespaceID
}
return ""
}
// Alias represents the alias that gets stored inside of the
// entity object in storage and also represents in an in-memory index of an
// alias object.
@ -349,25 +386,29 @@ type Alias struct {
LastUpdateTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,9,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
// MergedFromCanonicalIDs is the FIFO history of merging activity
MergedFromCanonicalIDs []string `sentinel:"" protobuf:"bytes,10,rep,name=merged_from_canonical_ids,json=mergedFromCanonicalIds,proto3" json:"merged_from_canonical_ids,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
// NamespaceID is the identifier of the namespace to which this alias
// belongs.
NamespaceID string `sentinel:"" protobuf:"bytes,11,opt,name=namespace_id,json=namespaceID,proto3" json:"namespace_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Alias) Reset() { *m = Alias{} }
func (m *Alias) String() string { return proto.CompactTextString(m) }
func (*Alias) ProtoMessage() {}
func (*Alias) Descriptor() ([]byte, []int) {
return fileDescriptor_types_0360db4a8e77dd9b, []int{2}
return fileDescriptor_319efdc71a5d7416, []int{2}
}
func (m *Alias) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Alias.Unmarshal(m, b)
}
func (m *Alias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Alias.Marshal(b, m, deterministic)
}
func (dst *Alias) XXX_Merge(src proto.Message) {
xxx_messageInfo_Alias.Merge(dst, src)
func (m *Alias) XXX_Merge(src proto.Message) {
xxx_messageInfo_Alias.Merge(m, src)
}
func (m *Alias) XXX_Size() int {
return xxx_messageInfo_Alias.Size(m)
@ -448,58 +489,308 @@ func (m *Alias) GetMergedFromCanonicalIDs() []string {
return nil
}
func (m *Alias) GetNamespaceID() string {
if m != nil {
return m.NamespaceID
}
return ""
}
// Deprecated. Retained for backwards compatibility.
type EntityStorageEntry struct {
Personas []*PersonaIndexEntry `sentinel:"" protobuf:"bytes,1,rep,name=personas,proto3" json:"personas,omitempty"`
ID string `sentinel:"" protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
Name string `sentinel:"" protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
CreationTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,5,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
LastUpdateTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,6,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
MergedEntityIDs []string `sentinel:"" protobuf:"bytes,7,rep,name=merged_entity_ids,json=mergedEntityIDs,proto3" json:"merged_entity_ids,omitempty"`
Policies []string `sentinel:"" protobuf:"bytes,8,rep,name=policies,proto3" json:"policies,omitempty"`
BucketKeyHash string `sentinel:"" protobuf:"bytes,9,opt,name=bucket_key_hash,json=bucketKeyHash,proto3" json:"bucket_key_hash,omitempty"`
MFASecrets map[string]*mfa.Secret `sentinel:"" protobuf:"bytes,10,rep,name=mfa_secrets,json=mfaSecrets,proto3" json:"mfa_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EntityStorageEntry) Reset() { *m = EntityStorageEntry{} }
func (m *EntityStorageEntry) String() string { return proto.CompactTextString(m) }
func (*EntityStorageEntry) ProtoMessage() {}
func (*EntityStorageEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_319efdc71a5d7416, []int{3}
}
func (m *EntityStorageEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EntityStorageEntry.Unmarshal(m, b)
}
func (m *EntityStorageEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EntityStorageEntry.Marshal(b, m, deterministic)
}
func (m *EntityStorageEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_EntityStorageEntry.Merge(m, src)
}
func (m *EntityStorageEntry) XXX_Size() int {
return xxx_messageInfo_EntityStorageEntry.Size(m)
}
func (m *EntityStorageEntry) XXX_DiscardUnknown() {
xxx_messageInfo_EntityStorageEntry.DiscardUnknown(m)
}
var xxx_messageInfo_EntityStorageEntry proto.InternalMessageInfo
func (m *EntityStorageEntry) GetPersonas() []*PersonaIndexEntry {
if m != nil {
return m.Personas
}
return nil
}
func (m *EntityStorageEntry) GetID() string {
if m != nil {
return m.ID
}
return ""
}
func (m *EntityStorageEntry) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *EntityStorageEntry) GetMetadata() map[string]string {
if m != nil {
return m.Metadata
}
return nil
}
func (m *EntityStorageEntry) GetCreationTime() *timestamp.Timestamp {
if m != nil {
return m.CreationTime
}
return nil
}
func (m *EntityStorageEntry) GetLastUpdateTime() *timestamp.Timestamp {
if m != nil {
return m.LastUpdateTime
}
return nil
}
func (m *EntityStorageEntry) GetMergedEntityIDs() []string {
if m != nil {
return m.MergedEntityIDs
}
return nil
}
func (m *EntityStorageEntry) GetPolicies() []string {
if m != nil {
return m.Policies
}
return nil
}
func (m *EntityStorageEntry) GetBucketKeyHash() string {
if m != nil {
return m.BucketKeyHash
}
return ""
}
func (m *EntityStorageEntry) GetMFASecrets() map[string]*mfa.Secret {
if m != nil {
return m.MFASecrets
}
return nil
}
// Deprecated. Retained for backwards compatibility.
type PersonaIndexEntry struct {
ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
EntityID string `sentinel:"" protobuf:"bytes,2,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
MountType string `sentinel:"" protobuf:"bytes,3,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"`
MountAccessor string `sentinel:"" protobuf:"bytes,4,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"`
MountPath string `sentinel:"" protobuf:"bytes,5,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty"`
Metadata map[string]string `sentinel:"" protobuf:"bytes,6,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Name string `sentinel:"" protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
CreationTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,8,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"`
LastUpdateTime *timestamp.Timestamp `sentinel:"" protobuf:"bytes,9,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"`
MergedFromEntityIDs []string `sentinel:"" protobuf:"bytes,10,rep,name=merged_from_entity_ids,json=mergedFromEntityIDs,proto3" json:"merged_from_entity_ids,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PersonaIndexEntry) Reset() { *m = PersonaIndexEntry{} }
func (m *PersonaIndexEntry) String() string { return proto.CompactTextString(m) }
func (*PersonaIndexEntry) ProtoMessage() {}
func (*PersonaIndexEntry) Descriptor() ([]byte, []int) {
return fileDescriptor_319efdc71a5d7416, []int{4}
}
func (m *PersonaIndexEntry) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PersonaIndexEntry.Unmarshal(m, b)
}
func (m *PersonaIndexEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PersonaIndexEntry.Marshal(b, m, deterministic)
}
func (m *PersonaIndexEntry) XXX_Merge(src proto.Message) {
xxx_messageInfo_PersonaIndexEntry.Merge(m, src)
}
func (m *PersonaIndexEntry) XXX_Size() int {
return xxx_messageInfo_PersonaIndexEntry.Size(m)
}
func (m *PersonaIndexEntry) XXX_DiscardUnknown() {
xxx_messageInfo_PersonaIndexEntry.DiscardUnknown(m)
}
var xxx_messageInfo_PersonaIndexEntry proto.InternalMessageInfo
func (m *PersonaIndexEntry) GetID() string {
if m != nil {
return m.ID
}
return ""
}
func (m *PersonaIndexEntry) GetEntityID() string {
if m != nil {
return m.EntityID
}
return ""
}
func (m *PersonaIndexEntry) GetMountType() string {
if m != nil {
return m.MountType
}
return ""
}
func (m *PersonaIndexEntry) GetMountAccessor() string {
if m != nil {
return m.MountAccessor
}
return ""
}
func (m *PersonaIndexEntry) GetMountPath() string {
if m != nil {
return m.MountPath
}
return ""
}
func (m *PersonaIndexEntry) GetMetadata() map[string]string {
if m != nil {
return m.Metadata
}
return nil
}
func (m *PersonaIndexEntry) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *PersonaIndexEntry) GetCreationTime() *timestamp.Timestamp {
if m != nil {
return m.CreationTime
}
return nil
}
func (m *PersonaIndexEntry) GetLastUpdateTime() *timestamp.Timestamp {
if m != nil {
return m.LastUpdateTime
}
return nil
}
func (m *PersonaIndexEntry) GetMergedFromEntityIDs() []string {
if m != nil {
return m.MergedFromEntityIDs
}
return nil
}
func init() {
proto.RegisterType((*Group)(nil), "identity.Group")
proto.RegisterMapType((map[string]string)(nil), "identity.Group.MetadataEntry")
proto.RegisterType((*Entity)(nil), "identity.Entity")
proto.RegisterMapType((map[string]string)(nil), "identity.Entity.MetadataEntry")
proto.RegisterMapType((map[string]*mfa.Secret)(nil), "identity.Entity.MFASecretsEntry")
proto.RegisterType((*Alias)(nil), "identity.Alias")
proto.RegisterMapType((map[string]string)(nil), "identity.Alias.MetadataEntry")
proto.RegisterType((*EntityStorageEntry)(nil), "identity.EntityStorageEntry")
proto.RegisterMapType((map[string]string)(nil), "identity.EntityStorageEntry.MetadataEntry")
proto.RegisterMapType((map[string]*mfa.Secret)(nil), "identity.EntityStorageEntry.MFASecretsEntry")
proto.RegisterType((*PersonaIndexEntry)(nil), "identity.PersonaIndexEntry")
proto.RegisterMapType((map[string]string)(nil), "identity.PersonaIndexEntry.MetadataEntry")
}
func init() { proto.RegisterFile("helper/identity/types.proto", fileDescriptor_types_0360db4a8e77dd9b) }
func init() { proto.RegisterFile("helper/identity/types.proto", fileDescriptor_319efdc71a5d7416) }
var fileDescriptor_types_0360db4a8e77dd9b = []byte{
// 656 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0x5d, 0x6f, 0xd3, 0x3c,
0x14, 0xc7, 0xd5, 0xa6, 0x2f, 0xe9, 0x69, 0xd7, 0xed, 0xb1, 0x1e, 0xa1, 0x50, 0x34, 0xe8, 0x26,
0x0d, 0x95, 0x09, 0x25, 0xd2, 0xb8, 0x61, 0xe3, 0x02, 0x0d, 0x18, 0x30, 0x21, 0x24, 0x14, 0x8d,
0x1b, 0x6e, 0x22, 0x37, 0xf1, 0x1a, 0x6b, 0x49, 0x1c, 0xc5, 0xce, 0x44, 0xbe, 0x0e, 0x5f, 0x8d,
0x6b, 0xbe, 0x03, 0xf2, 0x71, 0xd3, 0x96, 0x75, 0xbc, 0x4c, 0xdb, 0x9d, 0xfd, 0x3f, 0xc7, 0xc7,
0xf6, 0xf9, 0xff, 0xe2, 0xc0, 0x83, 0x98, 0x25, 0x39, 0x2b, 0x3c, 0x1e, 0xb1, 0x4c, 0x71, 0x55,
0x79, 0xaa, 0xca, 0x99, 0x74, 0xf3, 0x42, 0x28, 0x41, 0xec, 0x5a, 0x1d, 0x3d, 0x9a, 0x09, 0x31,
0x4b, 0x98, 0x87, 0xfa, 0xb4, 0x3c, 0xf7, 0x14, 0x4f, 0x99, 0x54, 0x34, 0xcd, 0x4d, 0xea, 0xee,
0xb7, 0x16, 0xb4, 0xdf, 0x15, 0xa2, 0xcc, 0xc9, 0x10, 0x9a, 0x3c, 0x72, 0x1a, 0xe3, 0xc6, 0xa4,
0xe7, 0x37, 0x79, 0x44, 0x08, 0xb4, 0x32, 0x9a, 0x32, 0xa7, 0x89, 0x0a, 0x8e, 0xc9, 0x08, 0xec,
0x5c, 0x24, 0x3c, 0xe4, 0x4c, 0x3a, 0xd6, 0xd8, 0x9a, 0xf4, 0xfc, 0xc5, 0x9c, 0x4c, 0x60, 0x2b,
0xa7, 0x05, 0xcb, 0x54, 0x30, 0xd3, 0xf5, 0x02, 0x1e, 0x49, 0xa7, 0x85, 0x39, 0x43, 0xa3, 0xe3,
0x36, 0xa7, 0x91, 0x24, 0xfb, 0xf0, 0x5f, 0xca, 0xd2, 0x29, 0x2b, 0x02, 0x73, 0x4a, 0x4c, 0x6d,
0x63, 0xea, 0xa6, 0x09, 0x9c, 0xa0, 0xae, 0x73, 0x0f, 0xc1, 0x4e, 0x99, 0xa2, 0x11, 0x55, 0xd4,
0xe9, 0x8c, 0xad, 0x49, 0xff, 0x60, 0xdb, 0xad, 0x6f, 0xe7, 0x62, 0x45, 0xf7, 0xe3, 0x3c, 0x7e,
0x92, 0xa9, 0xa2, 0xf2, 0x17, 0xe9, 0xe4, 0x25, 0x6c, 0x84, 0x05, 0xa3, 0x8a, 0x8b, 0x2c, 0xd0,
0xd7, 0x76, 0xba, 0xe3, 0xc6, 0xa4, 0x7f, 0x30, 0x72, 0x4d, 0x4f, 0xdc, 0xba, 0x27, 0xee, 0x59,
0xdd, 0x13, 0x7f, 0x50, 0x2f, 0xd0, 0x12, 0x79, 0x03, 0x5b, 0x09, 0x95, 0x2a, 0x28, 0xf3, 0x88,
0x2a, 0x66, 0x6a, 0xd8, 0x7f, 0xad, 0x31, 0xd4, 0x6b, 0x3e, 0xe3, 0x12, 0xac, 0xb2, 0x03, 0x83,
0x54, 0x44, 0xfc, 0xbc, 0x0a, 0x78, 0x16, 0xb1, 0xaf, 0x4e, 0x6f, 0xdc, 0x98, 0xb4, 0xfc, 0xbe,
0xd1, 0x4e, 0xb5, 0x44, 0x1e, 0xc3, 0xe6, 0xb4, 0x0c, 0x2f, 0x98, 0x0a, 0x2e, 0x58, 0x15, 0xc4,
0x54, 0xc6, 0x0e, 0x60, 0xd7, 0x37, 0x8c, 0xfc, 0x81, 0x55, 0xef, 0xa9, 0x8c, 0xc9, 0x1e, 0xb4,
0x69, 0xc2, 0xa9, 0x74, 0xfa, 0x78, 0x8a, 0xcd, 0x65, 0x27, 0x8e, 0xb5, 0xec, 0x9b, 0xa8, 0x76,
0x4e, 0xd3, 0xe0, 0x0c, 0x8c, 0x73, 0x7a, 0x3c, 0x7a, 0x01, 0x1b, 0xbf, 0xf4, 0x89, 0x6c, 0x81,
0x75, 0xc1, 0xaa, 0xb9, 0xdf, 0x7a, 0x48, 0xfe, 0x87, 0xf6, 0x25, 0x4d, 0xca, 0xda, 0x71, 0x33,
0x39, 0x6a, 0x3e, 0x6f, 0xec, 0x7e, 0xb7, 0xa0, 0x63, 0x2c, 0x21, 0x4f, 0xa0, 0x8b, 0x9b, 0x30,
0xe9, 0x34, 0xd0, 0x8e, 0xb5, 0x43, 0xd4, 0xf1, 0x39, 0x50, 0xcd, 0x35, 0xa0, 0xac, 0x15, 0xa0,
0x8e, 0x56, 0xec, 0x6d, 0x61, 0xbd, 0x87, 0xcb, 0x7a, 0x66, 0xcb, 0x7f, 0xf7, 0xb7, 0x7d, 0x07,
0xfe, 0x76, 0x6e, 0xec, 0x2f, 0xd2, 0x5c, 0xcc, 0x58, 0xb4, 0x4a, 0x73, 0xb7, 0xa6, 0x59, 0x07,
0x96, 0x34, 0xaf, 0x7e, 0x3f, 0xf6, 0x95, 0xef, 0xe7, 0x1a, 0x08, 0x7a, 0xd7, 0x41, 0x30, 0x02,
0x3b, 0xe2, 0x92, 0x4e, 0x13, 0x16, 0x21, 0x07, 0xb6, 0xbf, 0x98, 0xdf, 0xce, 0xe5, 0x1f, 0x16,
0xb4, 0xd1, 0xc2, 0xb5, 0xa7, 0x60, 0x07, 0x06, 0x21, 0xcd, 0x44, 0xc6, 0x43, 0x9a, 0x04, 0x0b,
0x4f, 0xfb, 0x0b, 0xed, 0x34, 0x22, 0xdb, 0x00, 0xa9, 0x28, 0x33, 0x15, 0x20, 0x79, 0xc6, 0xe2,
0x1e, 0x2a, 0x67, 0x55, 0xce, 0xc8, 0x1e, 0x0c, 0x4d, 0x98, 0x86, 0x21, 0x93, 0x52, 0x14, 0x4e,
0xcb, 0xdc, 0x0d, 0xd5, 0xe3, 0xb9, 0xb8, 0xac, 0x92, 0x53, 0x15, 0xa3, 0x9f, 0x75, 0x95, 0x4f,
0x54, 0xc5, 0x7f, 0x7e, 0x0c, 0xf0, 0xe8, 0xbf, 0x85, 0xa5, 0x86, 0xaf, 0xbb, 0x02, 0xdf, 0x1a,
0x40, 0xf6, 0x1d, 0x00, 0xd4, 0xbb, 0x31, 0x40, 0x87, 0x70, 0x7f, 0x0e, 0xd0, 0x79, 0x21, 0xd2,
0x60, 0xb5, 0xd3, 0xd2, 0x01, 0xa4, 0xe4, 0x9e, 0x49, 0x78, 0x5b, 0x88, 0xf4, 0xf5, 0xb2, 0xe9,
0xf2, 0x56, 0x7e, 0xbf, 0x7a, 0xfa, 0x65, 0x7f, 0xc6, 0x55, 0x5c, 0x4e, 0xdd, 0x50, 0xa4, 0x9e,
0x06, 0x8e, 0x87, 0xa2, 0xc8, 0xbd, 0x4b, 0x5a, 0x26, 0xca, 0xbb, 0xf2, 0x7f, 0x99, 0x76, 0xf0,
0x26, 0xcf, 0x7e, 0x06, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x89, 0x41, 0x55, 0x79, 0x06, 0x00, 0x00,
var fileDescriptor_319efdc71a5d7416 = []byte{
// 861 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x96, 0xcf, 0x8f, 0xdb, 0x44,
0x14, 0xc7, 0x95, 0x1f, 0x4e, 0xec, 0xe7, 0xfc, 0xd8, 0x0e, 0xa8, 0x32, 0x59, 0x95, 0x66, 0x2b,
0x15, 0xa5, 0xab, 0xca, 0x91, 0xb6, 0x07, 0x68, 0x39, 0xa0, 0x05, 0xb6, 0x10, 0x50, 0xa5, 0xca,
0x2d, 0x17, 0x2e, 0xd6, 0xc4, 0x9e, 0x24, 0xa3, 0xb5, 0x3d, 0x96, 0x67, 0x5c, 0x91, 0xff, 0x80,
0x23, 0x17, 0xfe, 0x24, 0xfe, 0x28, 0x6e, 0x68, 0x66, 0x6c, 0xc7, 0x8d, 0xd3, 0xa5, 0x2b, 0x22,
0x04, 0x52, 0x6f, 0xf6, 0x77, 0xde, 0xbc, 0x3c, 0xbf, 0xf7, 0x79, 0x5f, 0x05, 0x4e, 0x37, 0x24,
0x4a, 0x49, 0x36, 0xa7, 0x21, 0x49, 0x04, 0x15, 0xdb, 0xb9, 0xd8, 0xa6, 0x84, 0xbb, 0x69, 0xc6,
0x04, 0x43, 0x66, 0xa9, 0x4e, 0xee, 0xaf, 0x19, 0x5b, 0x47, 0x64, 0xae, 0xf4, 0x65, 0xbe, 0x9a,
0x0b, 0x1a, 0x13, 0x2e, 0x70, 0x9c, 0xea, 0xd0, 0xc9, 0xfd, 0xfd, 0x3c, 0xf1, 0x0a, 0xd7, 0x73,
0x3d, 0xf8, 0xa3, 0x0b, 0xc6, 0x77, 0x19, 0xcb, 0x53, 0x34, 0x82, 0x36, 0x0d, 0x9d, 0xd6, 0xb4,
0x35, 0xb3, 0xbc, 0x36, 0x0d, 0x11, 0x82, 0x6e, 0x82, 0x63, 0xe2, 0xb4, 0x95, 0xa2, 0x9e, 0xd1,
0x04, 0xcc, 0x94, 0x45, 0x34, 0xa0, 0x84, 0x3b, 0x9d, 0x69, 0x67, 0x66, 0x79, 0xd5, 0x3b, 0x9a,
0xc1, 0x49, 0x8a, 0x33, 0x92, 0x08, 0x7f, 0x2d, 0xf3, 0xf9, 0x34, 0xe4, 0x4e, 0x57, 0xc5, 0x8c,
0xb4, 0xae, 0x7e, 0x66, 0x11, 0x72, 0x74, 0x0e, 0x77, 0x62, 0x12, 0x2f, 0x49, 0xe6, 0xeb, 0xa2,
0x54, 0xa8, 0xa1, 0x42, 0xc7, 0xfa, 0xe0, 0x4a, 0xe9, 0x32, 0xf6, 0x29, 0x98, 0x31, 0x11, 0x38,
0xc4, 0x02, 0x3b, 0xbd, 0x69, 0x67, 0x66, 0x5f, 0xdc, 0x73, 0xcb, 0x8f, 0x71, 0x55, 0x46, 0xf7,
0x45, 0x71, 0x7e, 0x95, 0x88, 0x6c, 0xeb, 0x55, 0xe1, 0xe8, 0x2b, 0x18, 0x06, 0x19, 0xc1, 0x82,
0xb2, 0xc4, 0x97, 0x7d, 0x71, 0xfa, 0xd3, 0xd6, 0xcc, 0xbe, 0x98, 0xb8, 0xba, 0x69, 0x6e, 0xd9,
0x34, 0xf7, 0x75, 0xd9, 0x34, 0x6f, 0x50, 0x5e, 0x90, 0x12, 0xfa, 0x16, 0x4e, 0x22, 0xcc, 0x85,
0x9f, 0xa7, 0x21, 0x16, 0x44, 0xe7, 0x30, 0xff, 0x36, 0xc7, 0x48, 0xde, 0xf9, 0x49, 0x5d, 0x51,
0x59, 0xce, 0x60, 0x10, 0xb3, 0x90, 0xae, 0xb6, 0x3e, 0x4d, 0x42, 0xf2, 0x8b, 0x63, 0x4d, 0x5b,
0xb3, 0xae, 0x67, 0x6b, 0x6d, 0x21, 0x25, 0xf4, 0x19, 0x8c, 0x97, 0x79, 0x70, 0x4d, 0x84, 0x7f,
0x4d, 0xb6, 0xfe, 0x06, 0xf3, 0x8d, 0x03, 0xaa, 0xeb, 0x43, 0x2d, 0xff, 0x48, 0xb6, 0xdf, 0x63,
0xbe, 0x41, 0x0f, 0xc1, 0xc0, 0x11, 0xc5, 0xdc, 0xb1, 0x55, 0x15, 0xe3, 0x5d, 0x27, 0x2e, 0xa5,
0xec, 0xe9, 0x53, 0x39, 0x39, 0x39, 0x62, 0x67, 0xa0, 0x27, 0x27, 0x9f, 0x65, 0x15, 0x72, 0x82,
0x3c, 0xc5, 0x01, 0xf1, 0x69, 0xe8, 0x0c, 0xd5, 0x99, 0x5d, 0x69, 0x8b, 0x70, 0xf2, 0x25, 0x0c,
0xdf, 0x6a, 0x25, 0x3a, 0x81, 0xce, 0x35, 0xd9, 0x16, 0x48, 0xc8, 0x47, 0xf4, 0x31, 0x18, 0x6f,
0x70, 0x94, 0x97, 0x50, 0xe8, 0x97, 0x67, 0xed, 0x2f, 0x5a, 0x0f, 0x7e, 0x37, 0xa0, 0xa7, 0xa7,
0x86, 0x1e, 0x41, 0x5f, 0xd5, 0x41, 0xb8, 0xd3, 0x52, 0x13, 0x6b, 0xd4, 0x59, 0x9e, 0x17, 0xcc,
0xb5, 0x1b, 0xcc, 0x75, 0x6a, 0xcc, 0x3d, 0xab, 0x11, 0xd0, 0x55, 0xf9, 0x3e, 0xdd, 0xe5, 0xd3,
0x3f, 0xf9, 0xfe, 0x08, 0x18, 0x47, 0x40, 0xa0, 0x77, 0x6b, 0x04, 0x14, 0xf0, 0xd9, 0x9a, 0x84,
0x75, 0xe0, 0xfb, 0x25, 0xf0, 0xf2, 0x60, 0x07, 0x7c, 0x7d, 0xc5, 0xcc, 0xbd, 0x15, 0x3b, 0xc0,
0x89, 0x75, 0x88, 0x93, 0x4b, 0xb0, 0xe3, 0x15, 0xf6, 0x39, 0x09, 0x32, 0x22, 0xb8, 0x03, 0xaa,
0x6b, 0xd3, 0x66, 0xd7, 0x56, 0xf8, 0x95, 0x0e, 0xd1, 0x7d, 0x83, 0xb8, 0x12, 0x64, 0x19, 0x21,
0xe5, 0x78, 0x19, 0x91, 0x50, 0xd1, 0x66, 0x7a, 0xd5, 0x7b, 0x83, 0xa5, 0xc1, 0x71, 0x59, 0x9a,
0xfc, 0x00, 0xe3, 0xbd, 0xd2, 0x0e, 0x5c, 0x3f, 0xab, 0x5f, 0xb7, 0x2f, 0x6c, 0x37, 0x5e, 0x61,
0x57, 0xdf, 0xa9, 0x73, 0xf9, 0x5b, 0x17, 0x0c, 0x05, 0x5d, 0xc3, 0xdf, 0xce, 0x60, 0x10, 0xe0,
0x84, 0x25, 0x34, 0xc0, 0x91, 0x5f, 0x51, 0x68, 0x57, 0xda, 0x22, 0x44, 0xf7, 0x00, 0x62, 0x96,
0x27, 0xc2, 0x57, 0xeb, 0xa4, 0xa1, 0xb4, 0x94, 0xf2, 0x5a, 0xee, 0xd4, 0x43, 0x18, 0xe9, 0x63,
0x1c, 0x04, 0x84, 0x73, 0x96, 0x39, 0x5d, 0x3d, 0x0d, 0xa5, 0x5e, 0x16, 0xe2, 0x2e, 0x4b, 0x8a,
0xc5, 0x46, 0x11, 0x58, 0x66, 0x79, 0x89, 0xc5, 0xe6, 0x66, 0x87, 0x53, 0xa5, 0xbf, 0x13, 0xef,
0x72, 0x5d, 0xfa, 0xb5, 0x75, 0x69, 0x20, 0x6f, 0x1e, 0x01, 0x79, 0xeb, 0xd6, 0xc8, 0x3f, 0x85,
0x4f, 0x0a, 0xe4, 0x57, 0x19, 0x8b, 0xfd, 0x7a, 0xa7, 0x35, 0x90, 0x96, 0x77, 0x57, 0x07, 0x3c,
0xcf, 0x58, 0xfc, 0xcd, 0xae, 0xe9, 0xbc, 0x81, 0x97, 0x7d, 0x64, 0xab, 0xfa, 0xd5, 0x00, 0xa4,
0x37, 0xe0, 0x95, 0x60, 0x19, 0x5e, 0x13, 0x9d, 0xe2, 0x73, 0x30, 0x53, 0x92, 0x71, 0x96, 0xe0,
0xd2, 0xb7, 0x4e, 0x77, 0x73, 0x78, 0xa9, 0x4f, 0x94, 0x5d, 0x17, 0x53, 0x28, 0x83, 0xdf, 0xcb,
0xc4, 0x9e, 0x37, 0x4c, 0xec, 0x7c, 0x7f, 0x1d, 0xeb, 0xc5, 0x7c, 0x30, 0xb4, 0xb7, 0x0d, 0xed,
0xc5, 0x21, 0x43, 0x7b, 0x7c, 0x73, 0x07, 0xdf, 0x6d, 0x6e, 0xff, 0x1d, 0x77, 0xfa, 0xb3, 0x03,
0x77, 0x1a, 0x68, 0x35, 0x9c, 0xea, 0x14, 0xac, 0xaa, 0xcd, 0x45, 0x3d, 0x26, 0x29, 0xfa, 0xfb,
0xef, 0x78, 0xd4, 0x55, 0xc3, 0xa3, 0x1e, 0xdd, 0xb0, 0x1b, 0xff, 0x47, 0xbf, 0x7a, 0x02, 0x77,
0xeb, 0x7e, 0x55, 0xc3, 0x5a, 0x9b, 0xd5, 0x47, 0x3b, 0xb3, 0xaa, 0xd0, 0xfe, 0x47, 0x1c, 0x7d,
0xfd, 0xf8, 0xe7, 0xf3, 0x35, 0x15, 0x9b, 0x7c, 0xe9, 0x06, 0x2c, 0x9e, 0x4b, 0xf6, 0x69, 0xc0,
0xb2, 0x74, 0xfe, 0x06, 0xe7, 0x91, 0x98, 0xef, 0xfd, 0x6f, 0x5f, 0xf6, 0xd4, 0x37, 0x3c, 0xf9,
0x2b, 0x00, 0x00, 0xff, 0xff, 0x66, 0xa2, 0xa4, 0x7e, 0x19, 0x0c, 0x00, 0x00,
}

View file

@ -5,6 +5,7 @@ option go_package = "github.com/hashicorp/vault/helper/identity";
package identity;
import "google/protobuf/timestamp.proto";
import "helper/identity/mfa/types.proto";
// Group represents an identity group.
message Group {
@ -56,14 +57,12 @@ message Group {
// will be set-- will be managed automatically.
string type = 12;
// **Enterprise only**
// NamespaceID is the identifier of the namespace to which this group
// NamespaceID is the identifier of the namespace to which this group
// belongs to. Do not return this value over the API when reading the
// group.
//string namespace_id = 13;
string namespace_id = 13;
}
// Entity represents an entity that gets persisted and indexed.
// Entity is fundamentally composed of zero or many aliases.
message Entity {
@ -114,20 +113,18 @@ message Entity {
// storage key.
string bucket_key_hash = 9;
// **Enterprise only**
// MFASecrets holds the MFA secrets indexed by the identifier of the MFA
// method configuration.
//map<string, mfa.Secret> mfa_secrets = 10;
map<string, mfa.Secret> mfa_secrets = 10;
// Disabled indicates whether tokens associated with the account should not
// be able to be used
bool disabled = 11;
// **Enterprise only**
// NamespaceID is the identifier of the namespace to which this entity
// belongs to. Do not return this value over the API when reading the
// entity.
//string namespace_id = 12;
string namespace_id = 12;
}
// Alias represents the alias that gets stored inside of the
@ -175,4 +172,36 @@ message Alias {
// MergedFromCanonicalIDs is the FIFO history of merging activity
repeated string merged_from_canonical_ids = 10;
// NamespaceID is the identifier of the namespace to which this alias
// belongs.
string namespace_id = 11;
}
// Deprecated. Retained for backwards compatibility.
message EntityStorageEntry {
repeated PersonaIndexEntry personas = 1;
string id = 2;
string name = 3;
map<string, string> metadata = 4;
google.protobuf.Timestamp creation_time = 5;
google.protobuf.Timestamp last_update_time = 6;
repeated string merged_entity_ids = 7;
repeated string policies = 8;
string bucket_key_hash = 9;
map<string, mfa.Secret> mfa_secrets = 10;
}
// Deprecated. Retained for backwards compatibility.
message PersonaIndexEntry {
string id = 1;
string entity_id = 2;
string mount_type = 3;
string mount_accessor = 4;
string mount_path = 5;
map<string, string> metadata = 6;
string name = 7;
google.protobuf.Timestamp creation_time = 8;
google.protobuf.Timestamp last_update_time = 9;
repeated string merged_from_entity_ids = 10;
}
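The new namespace_id fields above surface as a NamespaceID string on the generated Group, Entity, and Alias structs. For illustration only, a minimal sketch of populating them; the ID, name, and namespace values are placeholders, not values taken from this changeset.

package main

import (
	"fmt"

	identity "github.com/hashicorp/vault/helper/identity"
)

func main() {
	// Placeholder values throughout; NamespaceID is the new field and, per the
	// comments above, is not meant to be returned over the API.
	g := &identity.Group{
		ID:          "group-1234",
		Name:        "engineering",
		Type:        "internal",
		NamespaceID: "ns_abc123",
	}
	a := &identity.Alias{
		ID:          "alias-5678",
		NamespaceID: g.NamespaceID, // aliases now carry their namespace as well
	}
	fmt.Println(g.GetNamespaceID(), a.GetNamespaceID())
}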

View file

@ -79,7 +79,7 @@ func (lm *LockManager) InvalidatePolicy(name string) {
// RestorePolicy acquires an exclusive lock on the policy name and restores the
// given policy along with the archive.
func (lm *LockManager) RestorePolicy(ctx context.Context, storage logical.Storage, name, backup string) error {
func (lm *LockManager) RestorePolicy(ctx context.Context, storage logical.Storage, name, backup string, force bool) error {
backupBytes, err := base64.StdEncoding.DecodeString(backup)
if err != nil {
return err
@ -103,26 +103,48 @@ func (lm *LockManager) RestorePolicy(ctx context.Context, storage logical.Storag
lock.Lock()
defer lock.Unlock()
// If the policy is in cache, error out. Anywhere that would put it in the
// cache will also be protected by the mutex above, so we don't need to
// re-check the cache later.
_, ok := lm.cache.Load(name)
if ok {
return fmt.Errorf(fmt.Sprintf("key %q already exists", name))
// If the policy is in cache and 'force' is not specified, error out. Anywhere
// that would put it in the cache will also be protected by the mutex above,
// so we don't need to re-check the cache later.
pRaw, ok := lm.cache.Load(name)
if ok && !force {
return fmt.Errorf("key %q already exists", name)
}
// If the policy exists in storage, error out
p, err := lm.getPolicyFromStorage(ctx, storage, name)
if err != nil {
return err
// Conditionally look up the policy from storage, depending on the use of
// 'force' and whether the policy was found in cache.
//
// - If it was not found in cache and we are not using 'force', look for it in
// storage. If found, error out.
//
// - If it was found in cache and we are using 'force', pRaw will not be nil
// and we do not look the policy up from storage
//
// - If it was found in cache and we are not using 'force', we should have
// returned above with an error
var p *Policy
if pRaw == nil {
p, err = lm.getPolicyFromStorage(ctx, storage, name)
if err != nil {
return err
}
if p != nil && !force {
return fmt.Errorf("key %q already exists", name)
}
}
// If both pRaw and p above are nil and 'force' is specified, we don't need to
// grab policy locks as we have ensured it doesn't already exist, so there
// will be no races as nothing else has this pointer. If 'force' was not used,
// an error would have been returned by now if the policy already existed
if pRaw != nil {
p = pRaw.(*Policy)
}
if p != nil {
return fmt.Errorf(fmt.Sprintf("key %q already exists", name))
p.l.Lock()
defer p.l.Unlock()
}
// We don't need to grab policy locks as we have ensured it doesn't already
// exist, so there will be no races as nothing else has this pointer.
// Restore the archived keys
if keyData.ArchivedKeys != nil {
err = keyData.Policy.storeArchive(ctx, storage, keyData.ArchivedKeys)
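A hedged sketch of how the new force parameter is meant to be exercised by a caller; the lock manager, storage backend, key name, and backup payload below are assumed to already exist and are simplified placeholders.

package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/vault/helper/keysutil"
	"github.com/hashicorp/vault/logical"
)

// restoreKey illustrates the intent of the new signature: with force set to
// true the restore proceeds even if a key with the same name is already in the
// cache or in storage; with force set to false it fails with
// `key "my-key" already exists`.
func restoreKey(ctx context.Context, lm *keysutil.LockManager, storage logical.Storage, backupB64 string, force bool) error {
	if err := lm.RestorePolicy(ctx, storage, "my-key", backupB64, force); err != nil {
		return fmt.Errorf("restore failed: %v", err)
	}
	return nil
}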

12
helper/license/feature.go Normal file
View file

@ -0,0 +1,12 @@
// +build !enterprise
package license
// Features is a bitmask of feature flags
type Features uint
const FeatureNone Features = 0
func (f Features) HasFeature(flag Features) bool {
return false
}
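This OSS stub always reports a feature as absent, so enterprise-only branches are skipped in non-enterprise builds. A small illustrative sketch; real feature flag constants are assumed to exist only in enterprise builds, so only FeatureNone is used here.

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/license"
)

func main() {
	var features license.Features // zero value, equivalent to license.FeatureNone
	// In this stub HasFeature always returns false, regardless of the flag.
	if features.HasFeature(license.FeatureNone) {
		fmt.Println("enterprise feature enabled")
	} else {
		fmt.Println("feature not available in this build")
	}
}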

View file

@ -3,7 +3,6 @@ package namespace
import (
"context"
"errors"
"net/http"
"strings"
)
@ -27,10 +26,6 @@ var (
}
)
var AdjustRequest = func(r *http.Request) (*http.Request, int) {
return r.WithContext(ContextWithNamespace(r.Context(), RootNamespace)), 0
}
func (n *Namespace) HasParent(possibleParent *Namespace) bool {
switch {
case n.Path == "":
@ -105,3 +100,15 @@ func Canonicalize(nsPath string) string {
return nsPath
}
func SplitIDFromString(input string) (string, string) {
idx := strings.LastIndex(input, ".")
if idx == -1 {
return input, ""
}
if idx == len(input)-1 {
return input, ""
}
return input[:idx], input[idx+1:]
}
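SplitIDFromString peels a trailing local identifier off a namespaced ID at the last '.', returning the input unchanged when there is no usable suffix. A quick illustration, with outputs inferred from the implementation above (the ID strings are placeholders):

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/namespace"
)

func main() {
	fmt.Println(namespace.SplitIDFromString("hvs1234.abcd")) // "hvs1234" "abcd"
	fmt.Println(namespace.SplitIDFromString("hvs1234"))      // "hvs1234" ""  (no '.')
	fmt.Println(namespace.SplitIDFromString("hvs1234."))     // "hvs1234." "" (trailing '.')
}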

View file

@ -43,6 +43,7 @@ type PluginRunner struct {
Name string `json:"name" structs:"name"`
Command string `json:"command" structs:"command"`
Args []string `json:"args" structs:"args"`
Env []string `json:"env" structs:"env"`
Sha256 []byte `json:"sha256" structs:"sha256"`
Builtin bool `json:"builtin" structs:"builtin"`
BuiltinFactory func() (interface{}, error) `json:"-" structs:"-"`
@ -65,6 +66,10 @@ func (r *PluginRunner) RunMetadataMode(ctx context.Context, wrapper RunnerUtil,
func (r *PluginRunner) runCommon(ctx context.Context, wrapper RunnerUtil, pluginMap map[string]plugin.Plugin, hs plugin.HandshakeConfig, env []string, logger log.Logger, isMetadataMode bool) (*plugin.Client, error) {
cmd := exec.Command(r.Command, r.Args...)
// `env` should always go last to avoid overwriting internal values that might
// have been provided externally.
cmd.Env = append(cmd.Env, r.Env...)
cmd.Env = append(cmd.Env, env...)
// Add the mlock setting to the ENV of the plugin
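A hedged sketch of the new Env field in use; the plugin name, command path, and variables are placeholders, and a real entry would come from the plugin catalog where the new env parameter is stored. Per the comment above, the internally supplied env slice is appended after r.Env so that externally provided values cannot override internal ones.

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/pluginutil"
)

func main() {
	runner := &pluginutil.PluginRunner{
		Name:    "my-secrets-plugin",
		Command: "/opt/vault/plugins/my-secrets-plugin",
		Args:    []string{"--some-flag=true"},
		Env:     []string{"HTTPS_PROXY=https://proxy.internal:3128"},
	}
	fmt.Println(runner.Name, runner.Env)
}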

View file

@ -1,12 +1,14 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: helper/storagepacker/types.proto
package storagepacker // import "github.com/hashicorp/vault/helper/storagepacker"
package storagepacker
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import any "github.com/golang/protobuf/ptypes/any"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -31,16 +33,17 @@ func (m *Item) Reset() { *m = Item{} }
func (m *Item) String() string { return proto.CompactTextString(m) }
func (*Item) ProtoMessage() {}
func (*Item) Descriptor() ([]byte, []int) {
return fileDescriptor_types_6092d91e5958c3db, []int{0}
return fileDescriptor_c0e98c66c4f51b7f, []int{0}
}
func (m *Item) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Item.Unmarshal(m, b)
}
func (m *Item) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Item.Marshal(b, m, deterministic)
}
func (dst *Item) XXX_Merge(src proto.Message) {
xxx_messageInfo_Item.Merge(dst, src)
func (m *Item) XXX_Merge(src proto.Message) {
xxx_messageInfo_Item.Merge(m, src)
}
func (m *Item) XXX_Size() int {
return xxx_messageInfo_Item.Size(m)
@ -77,16 +80,17 @@ func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
func (*Bucket) Descriptor() ([]byte, []int) {
return fileDescriptor_types_6092d91e5958c3db, []int{1}
return fileDescriptor_c0e98c66c4f51b7f, []int{1}
}
func (m *Bucket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Bucket.Unmarshal(m, b)
}
func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
}
func (dst *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(dst, src)
func (m *Bucket) XXX_Merge(src proto.Message) {
xxx_messageInfo_Bucket.Merge(m, src)
}
func (m *Bucket) XXX_Size() int {
return xxx_messageInfo_Bucket.Size(m)
@ -116,11 +120,9 @@ func init() {
proto.RegisterType((*Bucket)(nil), "storagepacker.Bucket")
}
func init() {
proto.RegisterFile("helper/storagepacker/types.proto", fileDescriptor_types_6092d91e5958c3db)
}
func init() { proto.RegisterFile("helper/storagepacker/types.proto", fileDescriptor_c0e98c66c4f51b7f) }
var fileDescriptor_types_6092d91e5958c3db = []byte{
var fileDescriptor_c0e98c66c4f51b7f = []byte{
// 219 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0x41, 0x4b, 0xc3, 0x40,
0x10, 0x85, 0x49, 0xaa, 0x15, 0xb7, 0x28, 0xb2, 0x7a, 0x88, 0x9e, 0x42, 0x4f, 0xf1, 0x32, 0x83,

View file

@ -7,7 +7,7 @@ import (
"math/rand"
"time"
uuid "github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/xor"
"github.com/hashicorp/vault/vault"
"github.com/mitchellh/go-testing-interface"
@ -23,16 +23,6 @@ func GenerateRoot(t testing.T, cluster *vault.TestCluster, drToken bool) string
}
func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, drToken bool) (string, error) {
buf := make([]byte, 16)
readLen, err := rand.Read(buf)
if err != nil {
return "", err
}
if readLen != 16 {
return "", fmt.Errorf("wrong readlen: %d", readLen)
}
otp := base64.StdEncoding.EncodeToString(buf)
// If recovery keys supported, use those to perform root token generation instead
var keys [][]byte
if cluster.Cores[0].SealAccess().RecoveryKeySupported() {
@ -46,7 +36,7 @@ func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, drToken bool
if drToken {
f = client.Sys().GenerateDROperationTokenInit
}
status, err := f(otp, "")
status, err := f("", "")
if err != nil {
return "", err
}
@ -55,6 +45,8 @@ func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, drToken bool
return "", fmt.Errorf("need more keys than have, need %d have %d", status.Required, len(keys))
}
otp := status.OTP
for i, key := range keys {
if i >= status.Required {
break
@ -71,15 +63,16 @@ func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, drToken bool
if !status.Complete {
return "", errors.New("generate root operation did not end successfully")
}
tokenBytes, err := xor.XORBase64(status.EncodedToken, otp)
tokenBytes, err := base64.RawStdEncoding.DecodeString(status.EncodedToken)
if err != nil {
return "", err
}
token, err := uuid.FormatUUID(tokenBytes)
tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp))
if err != nil {
return "", err
}
return token, nil
return string(tokenBytes), nil
}
// RandomWithPrefix is used to generate a unique name with a prefix, for
@ -87,3 +80,185 @@ func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, drToken bool
func RandomWithPrefix(name string) string {
return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
}
func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) {
t.Helper()
for _, core := range c.Cores {
if !core.Sealed() {
continue
}
client := core.Client
client.Sys().ResetUnsealProcess()
for j := 0; j < len(c.BarrierKeys); j++ {
statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j]))
if err != nil {
// Sometimes when we get here it's already unsealed on its own,
// and then this fails for DR secondaries, so check again
if core.Sealed() {
t.Fatal(err)
}
break
}
if statusResp == nil {
t.Fatal("nil status response during unseal")
}
if !statusResp.Sealed {
break
}
}
if core.Sealed() {
t.Fatal("core is still sealed")
}
}
}
func WaitForReplicationState(t testing.T, c *vault.Core, state consts.ReplicationState) {
timeout := time.Now().Add(10 * time.Second)
for {
if time.Now().After(timeout) {
t.Fatalf("timeout waiting for core to have state %d", uint32(state))
}
if c.ReplicationState().HasState(state) {
break
}
time.Sleep(1 * time.Second)
}
}
func SetupFourClusterReplication(t testing.T, perfPrimary, perfSecondary, perfDRSecondary, perfSecondaryDRSecondary *vault.TestCluster) {
// Enable dr primary
_, err := perfPrimary.Cores[0].Client.Logical().Write("sys/replication/dr/primary/enable", nil)
if err != nil {
t.Fatal(err)
}
WaitForReplicationState(t, perfPrimary.Cores[0].Core, consts.ReplicationDRPrimary)
// Enable performance primary
_, err = perfPrimary.Cores[0].Client.Logical().Write("sys/replication/primary/enable", nil)
if err != nil {
t.Fatal(err)
}
WaitForReplicationState(t, perfPrimary.Cores[0].Core, consts.ReplicationPerformancePrimary)
// get dr token
secret, err := perfPrimary.Cores[0].Client.Logical().Write("sys/replication/dr/primary/secondary-token", map[string]interface{}{
"id": "1",
})
if err != nil {
t.Fatal(err)
}
token := secret.WrapInfo.Token
// enable dr secondary
secret, err = perfDRSecondary.Cores[0].Client.Logical().Write("sys/replication/dr/secondary/enable", map[string]interface{}{
"token": token,
"ca_file": perfPrimary.CACertPEMFile,
})
if err != nil {
t.Fatal(err)
}
WaitForReplicationState(t, perfDRSecondary.Cores[0].Core, consts.ReplicationDRSecondary)
perfDRSecondary.BarrierKeys = perfPrimary.BarrierKeys
EnsureCoresUnsealed(t, perfDRSecondary)
// get performance token
secret, err = perfPrimary.Cores[0].Client.Logical().Write("sys/replication/primary/secondary-token", map[string]interface{}{
"id": "1",
})
if err != nil {
t.Fatal(err)
}
token = secret.WrapInfo.Token
// enable performance secondary
secret, err = perfSecondary.Cores[0].Client.Logical().Write("sys/replication/secondary/enable", map[string]interface{}{
"token": token,
"ca_file": perfPrimary.CACertPEMFile,
})
if err != nil {
t.Fatal(err)
}
WaitForReplicationState(t, perfSecondary.Cores[0].Core, consts.ReplicationPerformanceSecondary)
time.Sleep(time.Second * 3)
perfSecondary.BarrierKeys = perfPrimary.BarrierKeys
EnsureCoresUnsealed(t, perfSecondary)
rootToken := GenerateRoot(t, perfSecondary, false)
perfSecondary.Cores[0].Client.SetToken(rootToken)
// Enable dr primary on perf secondary
_, err = perfSecondary.Cores[0].Client.Logical().Write("sys/replication/dr/primary/enable", nil)
if err != nil {
t.Fatal(err)
}
WaitForReplicationState(t, perfSecondary.Cores[0].Core, consts.ReplicationDRPrimary)
// get dr token from perf secondary
secret, err = perfSecondary.Cores[0].Client.Logical().Write("sys/replication/dr/primary/secondary-token", map[string]interface{}{
"id": "1",
})
if err != nil {
t.Fatal(err)
}
token = secret.WrapInfo.Token
// enable dr secondary
secret, err = perfSecondaryDRSecondary.Cores[0].Client.Logical().Write("sys/replication/dr/secondary/enable", map[string]interface{}{
"token": token,
"ca_file": perfSecondary.CACertPEMFile,
})
if err != nil {
t.Fatal(err)
}
WaitForReplicationState(t, perfSecondaryDRSecondary.Cores[0].Core, consts.ReplicationDRSecondary)
perfSecondaryDRSecondary.BarrierKeys = perfPrimary.BarrierKeys
EnsureCoresUnsealed(t, perfSecondaryDRSecondary)
perfDRSecondary.Cores[0].Client.SetToken(perfPrimary.Cores[0].Client.Token())
perfSecondaryDRSecondary.Cores[0].Client.SetToken(rootToken)
}
func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestClusterCore {
for i := 0; i < 10; i++ {
for _, core := range cluster.Cores {
leaderResp, err := core.Client.Sys().Leader()
if err != nil {
t.Fatal(err)
}
if leaderResp.IsSelf {
return core
}
}
time.Sleep(1 * time.Second)
}
t.Fatal("could not derive the active core")
return nil
}
func WaitForNCoresSealed(t testing.T, cluster *vault.TestCluster, n int) {
for i := 0; i < 10; i++ {
sealed := 0
for _, core := range cluster.Cores {
if core.Core.Sealed() {
sealed++
}
}
if sealed >= n {
return
}
time.Sleep(time.Second)
}
t.Fatalf("%d cores were not sealed", n)
}

View file

@ -165,7 +165,7 @@ func TestAuthTokenRenew(t *testing.T) {
if err == nil {
t.Fatal("should not be allowed to renew root token")
}
if !strings.Contains(err.Error(), "lease is not renewable") {
if !strings.Contains(err.Error(), "invalid lease ID") {
t.Fatalf("wrong error; got %v", err)
}

View file

@ -11,6 +11,7 @@ import (
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/builtin/logical/transit"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/forwarding"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/logical"
@ -58,7 +59,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) {
if err != nil {
b.Fatal(err)
}
req.Header.Set(AuthHeaderName, cluster.RootToken)
req.Header.Set(consts.AuthHeaderName, cluster.RootToken)
_, err = client.Do(req)
if err != nil {
b.Fatal(err)
@ -71,7 +72,7 @@ func BenchmarkHTTP_Forwarding_Stress(b *testing.B) {
if err != nil {
b.Fatal(err)
}
req.Header.Set(AuthHeaderName, cluster.RootToken)
req.Header.Set(consts.AuthHeaderName, cluster.RootToken)
w := forwarding.NewRPCResponseWriter()
handler.ServeHTTP(w, req)
switch w.StatusCode() {

View file

@ -20,6 +20,7 @@ import (
"github.com/hashicorp/vault/api"
credCert "github.com/hashicorp/vault/builtin/credential/cert"
"github.com/hashicorp/vault/builtin/logical/transit"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/keysutil"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
@ -179,7 +180,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32)
if err != nil {
t.Fatal(err)
}
req.Header.Set(AuthHeaderName, cluster.RootToken)
req.Header.Set(consts.AuthHeaderName, cluster.RootToken)
_, err = client.Do(req)
if err != nil {
t.Fatal(err)
@ -232,7 +233,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32)
if err != nil {
return nil, err
}
req.Header.Set(AuthHeaderName, cluster.RootToken)
req.Header.Set(consts.AuthHeaderName, cluster.RootToken)
resp, err := client.Do(req)
if err != nil {
return nil, err
@ -472,7 +473,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
if err != nil {
t.Fatal(err)
}
req.Header.Set(AuthHeaderName, cluster.RootToken)
req.Header.Set(consts.AuthHeaderName, cluster.RootToken)
_, err = client.Do(req)
if err != nil {
t.Fatal(err)
@ -494,7 +495,7 @@ func TestHTTP_Forwarding_ClientTLS(t *testing.T) {
if err != nil {
t.Fatal(err)
}
req.Header.Set(AuthHeaderName, cluster.RootToken)
req.Header.Set(consts.AuthHeaderName, cluster.RootToken)
_, err = client.Do(req)
if err != nil {
t.Fatal(err)

View file

@ -21,15 +21,14 @@ import (
sockaddr "github.com/hashicorp/go-sockaddr"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/jsonutil"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/helper/pathmanager"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
)
const (
// AuthHeaderName is the name of the header containing the token.
AuthHeaderName = "X-Vault-Token"
// WrapTTLHeaderName is the name of the header containing a directive to
// wrap the response
WrapTTLHeaderName = "X-Vault-Wrap-TTL"
@ -62,11 +61,13 @@ const (
)
var (
ReplicationStaleReadTimeout = 2 * time.Second
// Set to false by stub_asset if the ui build tag isn't enabled
uiBuiltIn = true
// perfStandbyAlwaysForwardPaths is used to check a requested path against
// the always forward list
perfStandbyAlwaysForwardPaths = pathmanager.New()
injectDataIntoTopRoutes = []string{
"/v1/sys/audit",
"/v1/sys/audit/",
@ -114,14 +115,11 @@ func Handler(props *vault.HandlerProperties) http.Handler {
mux.Handle("/v1/sys/rekey-recovery-key/init", handleRequestForwarding(core, handleSysRekeyInit(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/update", handleRequestForwarding(core, handleSysRekeyUpdate(core, true)))
mux.Handle("/v1/sys/rekey-recovery-key/verify", handleRequestForwarding(core, handleSysRekeyVerify(core, true)))
mux.Handle("/v1/sys/wrapping/lookup", handleRequestForwarding(core, handleLogical(core, wrappingVerificationFunc)))
mux.Handle("/v1/sys/wrapping/rewrap", handleRequestForwarding(core, handleLogical(core, wrappingVerificationFunc)))
mux.Handle("/v1/sys/wrapping/unwrap", handleRequestForwarding(core, handleLogical(core, wrappingVerificationFunc)))
for _, path := range injectDataIntoTopRoutes {
mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core, nil)))
mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core)))
}
mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core, nil)))
mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core, nil)))
mux.Handle("/v1/sys/", handleRequestForwarding(core, handleLogical(core)))
mux.Handle("/v1/", handleRequestForwarding(core, handleLogical(core)))
if core.UIEnabled() == true {
if uiBuiltIn {
mux.Handle("/ui/", http.StripPrefix("/ui/", gziphandler.GzipHandler(handleUIHeaders(core, handleUI(http.FileServer(&UIAssetWrapper{FileSystem: assetFS()}))))))
@ -131,13 +129,13 @@ func Handler(props *vault.HandlerProperties) http.Handler {
mux.Handle("/", handleRootRedirect())
}
additionalRoutes(mux, core)
// Wrap the handler in another handler to trigger all help paths.
helpWrappedHandler := wrapHelpHandler(mux, core)
corsWrappedHandler := wrapCORSHandler(helpWrappedHandler, core)
// Wrap the help wrapped handler with another layer with a generic
// handler
genericWrappedHandler := wrapGenericHandler(corsWrappedHandler, props.MaxRequestSize, props.MaxRequestDuration)
genericWrappedHandler := genericWrapping(core, corsWrappedHandler, props)
// Wrap the handler with PrintablePathCheckHandler to check for non-printable
// characters in the request path.
@ -152,7 +150,7 @@ func Handler(props *vault.HandlerProperties) http.Handler {
// wrapGenericHandler wraps the handler with an extra layer of handler where
// tasks that should be commonly handled for all the requests and/or responses
// are performed.
func wrapGenericHandler(h http.Handler, maxRequestSize int64, maxRequestDuration time.Duration) http.Handler {
func wrapGenericHandler(core *vault.Core, h http.Handler, maxRequestSize int64, maxRequestDuration time.Duration) http.Handler {
if maxRequestDuration == 0 {
maxRequestDuration = vault.DefaultMaxRequestDuration
}
@ -170,7 +168,26 @@ func wrapGenericHandler(h http.Handler, maxRequestSize int64, maxRequestDuration
if maxRequestSize > 0 {
ctx = context.WithValue(ctx, "max_request_size", maxRequestSize)
}
ctx = context.WithValue(ctx, "original_request_path", r.URL.Path)
r = r.WithContext(ctx)
switch {
case strings.HasPrefix(r.URL.Path, "/v1/"):
newR, status := adjustRequest(core, r)
if status != 0 {
respondError(w, status, nil)
cancelFunc()
return
}
r = newR
case strings.HasPrefix(r.URL.Path, "/ui/"), r.URL.Path == "/":
default:
respondError(w, http.StatusNotFound, nil)
cancelFunc()
return
}
h.ServeHTTP(w, r)
cancelFunc()
return
@ -268,12 +285,12 @@ func WrapForwardedForHandler(h http.Handler, authorizedAddrs []*sockaddr.SockAdd
// A lookup on a token that is about to expire returns nil, which means by the
// time we can validate a wrapping token lookup will return nil since it will
// be revoked after the call. So we have to do the validation here.
func wrappingVerificationFunc(core *vault.Core, req *logical.Request) error {
func wrappingVerificationFunc(ctx context.Context, core *vault.Core, req *logical.Request) error {
if req == nil {
return fmt.Errorf("invalid request")
}
valid, err := core.ValidateWrappingToken(req)
valid, err := core.ValidateWrappingToken(ctx, req)
if err != nil {
return errwrap.Wrapf("error validating wrapping token: {{err}}", err)
}
@ -396,16 +413,18 @@ func parseRequest(r *http.Request, w http.ResponseWriter, out interface{}) error
// falling back on the older behavior of redirecting the client
func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get(vault.IntNoForwardingHeaderName) != "" {
handler.ServeHTTP(w, r)
return
}
if r.Header.Get(NoRequestForwardingHeaderName) != "" {
// Forwarding explicitly disabled, fall back to previous behavior
core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request")
handler.ServeHTTP(w, r)
return
// If we are a performance standby we can handle the request.
if core.PerfStandby() {
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
if !perfStandbyAlwaysForwardPaths.HasPath(path) {
handler.ServeHTTP(w, r)
return
}
}
// Note: in an HA setup, this call will also ensure that connections to
@ -432,42 +451,65 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle
return
}
// Attempt forwarding the request. If we cannot forward -- perhaps it's
// been disabled on the active node -- this will return with an
// ErrCannotForward and we simply fall back
statusCode, header, retBytes, err := core.ForwardRequest(r)
if err != nil {
if err == vault.ErrCannotForward {
core.Logger().Debug("handleRequestForwarding: cannot forward (possibly disabled on active node), falling back")
} else {
core.Logger().Error("handleRequestForwarding: error forwarding request", "error", err)
}
// Fall back to redirection
handler.ServeHTTP(w, r)
return
}
if header != nil {
for k, v := range header {
w.Header()[k] = v
}
}
w.WriteHeader(statusCode)
w.Write(retBytes)
forwardRequest(core, w, r)
return
})
}
func forwardRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) {
if r.Header.Get(vault.IntNoForwardingHeaderName) != "" {
respondStandby(core, w, r.URL)
return
}
if r.Header.Get(NoRequestForwardingHeaderName) != "" {
// Forwarding explicitly disabled, fall back to previous behavior
core.Logger().Debug("handleRequestForwarding: forwarding disabled by client request")
respondStandby(core, w, r.URL)
return
}
// Attempt forwarding the request. If we cannot forward -- perhaps it's
// been disabled on the active node -- this will return with an
// ErrCannotForward and we simply fall back
statusCode, header, retBytes, err := core.ForwardRequest(r)
if err != nil {
if err == vault.ErrCannotForward {
core.Logger().Debug("handleRequestForwarding: cannot forward (possibly disabled on active node), falling back")
} else {
core.Logger().Error("handleRequestForwarding: error forwarding request", "error", err)
}
// Fall back to redirection
respondStandby(core, w, r.URL)
return
}
if header != nil {
for k, v := range header {
w.Header()[k] = v
}
}
w.WriteHeader(statusCode)
w.Write(retBytes)
}
// request is a helper to perform a request and properly exit in the
// case of an error.
func request(core *vault.Core, w http.ResponseWriter, rawReq *http.Request, r *logical.Request) (*logical.Response, bool) {
resp, err := core.HandleRequest(rawReq.Context(), r)
if r.LastRemoteWAL() > 0 && !vault.WaitUntilWALShipped(rawReq.Context(), core, r.LastRemoteWAL()) {
if resp == nil {
resp = &logical.Response{}
}
resp.AddWarning("Timeout hit while waiting for local replicated cluster to apply primary's write; this client may encounter stale reads of values written during this operation.")
}
if errwrap.Contains(err, consts.ErrStandby.Error()) {
respondStandby(core, w, rawReq.URL)
return resp, false
}
if respondErrorCommon(w, r, resp, err) {
return resp, false
}
@ -526,16 +568,19 @@ func respondStandby(core *vault.Core, w http.ResponseWriter, reqURL *url.URL) {
}
// requestAuth adds the token to the logical.Request if it exists.
func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) *logical.Request {
func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) (*logical.Request, error) {
// Attach the header value if we have it
if v := r.Header.Get(AuthHeaderName); v != "" {
if v := r.Header.Get(consts.AuthHeaderName); v != "" {
req.ClientToken = v
// Also attach the accessor if we have it. This doesn't fail if it
// doesn't exist because the request may be to an unauthenticated
// endpoint/login endpoint where a bad current token doesn't matter, or
// a token from a Vault version pre-accessors.
te, err := core.LookupToken(v)
te, err := core.LookupToken(r.Context(), v)
if err != nil && strings.Count(v, ".") != 2 {
return req, err
}
if err == nil && te != nil {
req.ClientTokenAccessor = te.Accessor
req.ClientTokenRemainingUses = te.NumUses
@ -543,7 +588,22 @@ func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) *logic
}
}
return req
return req, nil
}
func requestPolicyOverride(r *http.Request, req *logical.Request) error {
raw := r.Header.Get(PolicyOverrideHeaderName)
if raw == "" {
return nil
}
override, err := parseutil.ParseBool(raw)
if err != nil {
return err
}
req.PolicyOverride = override
return nil
}
// requestWrapInfo adds the WrapInfo value to the logical.Request if wrap info exists
@ -576,6 +636,52 @@ func requestWrapInfo(r *http.Request, req *logical.Request) (*logical.Request, e
return req, nil
}
// parseMFAHeader parses the MFAHeaderName in the request headers and organizes
// them with MFA method name as the index.
func parseMFAHeader(req *logical.Request) error {
if req == nil {
return fmt.Errorf("request is nil")
}
if req.Headers == nil {
return nil
}
// Reset and initialize the credentials in the request
req.MFACreds = make(map[string][]string)
for _, mfaHeaderValue := range req.Headers[canonicalMFAHeaderName] {
// Skip the header with no value in it
if mfaHeaderValue == "" {
continue
}
// Handle the case where only method name is mentioned and no value
// is supplied
if !strings.Contains(mfaHeaderValue, ":") {
// Mark the presence of the method name, but set an empty set to it
// indicating that there were no values supplied for the method
if req.MFACreds[mfaHeaderValue] == nil {
req.MFACreds[mfaHeaderValue] = []string{}
}
continue
}
shardSplits := strings.SplitN(mfaHeaderValue, ":", 2)
if shardSplits[0] == "" {
return fmt.Errorf("invalid data in header %q; missing method name", MFAHeaderName)
}
if shardSplits[1] == "" {
return fmt.Errorf("invalid data in header %q; missing method value", MFAHeaderName)
}
req.MFACreds[shardSplits[0]] = append(req.MFACreds[shardSplits[0]], shardSplits[1])
}
return nil
}
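On the client side, each value of the MFA header parsed above is expected to look like method_name or method_name:credential, and repeated values for the same method are merged. A hedged sketch of a request carrying such headers; the URL and method names are placeholders, and the literal header name is assumed to match MFAHeaderName:

package main

import (
	"fmt"
	"net/http"
	"net/textproto"
)

func main() {
	req, err := http.NewRequest("GET", "https://vault.example.com/v1/secret/data/app", nil)
	if err != nil {
		panic(err)
	}
	// Two credentials for the same method are merged into one slice by parseMFAHeader.
	req.Header.Add("X-Vault-MFA", "my_totp:123456")
	req.Header.Add("X-Vault-MFA", "my_totp:111111")
	// A bare method name marks the method's presence with no credential value.
	req.Header.Add("X-Vault-MFA", "my_third_mfa")
	fmt.Println(req.Header[textproto.CanonicalMIMEHeaderKey("X-Vault-MFA")])
}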
func respondError(w http.ResponseWriter, status int, err error) {
logical.AdjustErrorStatusCode(&status, err)

View file

@ -6,6 +6,7 @@ import (
"errors"
"net/http"
"net/http/httptest"
"net/textproto"
"reflect"
"strings"
"testing"
@ -16,6 +17,93 @@ import (
"github.com/hashicorp/vault/vault"
)
func TestHandler_parseMFAHandler(t *testing.T) {
var err error
var expectedMFACreds logical.MFACreds
req := &logical.Request{
Headers: make(map[string][]string),
}
headerName := textproto.CanonicalMIMEHeaderKey(MFAHeaderName)
// Set TOTP passcode in the MFA header
req.Headers[headerName] = []string{
"my_totp:123456",
"my_totp:111111",
"my_second_mfa:hi=hello",
"my_third_mfa",
}
err = parseMFAHeader(req)
if err != nil {
t.Fatal(err)
}
// Verify that it is being parsed properly
expectedMFACreds = logical.MFACreds{
"my_totp": []string{
"123456",
"111111",
},
"my_second_mfa": []string{
"hi=hello",
},
"my_third_mfa": []string{},
}
if !reflect.DeepEqual(expectedMFACreds, req.MFACreds) {
t.Fatalf("bad: parsed MFACreds; expected: %#v\n actual: %#v\n", expectedMFACreds, req.MFACreds)
}
// Split the creds of a method type in different headers and check if they
// all get merged together
req.Headers[headerName] = []string{
"my_mfa:passcode=123456",
"my_mfa:month=july",
"my_mfa:day=tuesday",
}
err = parseMFAHeader(req)
if err != nil {
t.Fatal(err)
}
expectedMFACreds = logical.MFACreds{
"my_mfa": []string{
"passcode=123456",
"month=july",
"day=tuesday",
},
}
if !reflect.DeepEqual(expectedMFACreds, req.MFACreds) {
t.Fatalf("bad: parsed MFACreds; expected: %#v\n actual: %#v\n", expectedMFACreds, req.MFACreds)
}
// Header without method name should error out
req.Headers[headerName] = []string{
":passcode=123456",
}
err = parseMFAHeader(req)
if err == nil {
t.Fatalf("expected an error; actual: %#v\n", req.MFACreds)
}
// Header without method name and method value should error out
req.Headers[headerName] = []string{
":",
}
err = parseMFAHeader(req)
if err == nil {
t.Fatalf("expected an error; actual: %#v\n", req.MFACreds)
}
// Header without method name and method value should error out
req.Headers[headerName] = []string{
"my_totp:",
}
err = parseMFAHeader(req)
if err == nil {
t.Fatalf("expected an error; actual: %#v\n", req.MFACreds)
}
}
func TestHandler_cors(t *testing.T) {
core, _, _ := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
@ -106,7 +194,7 @@ func TestHandler_CacheControlNoStore(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
req.Header.Set(AuthHeaderName, token)
req.Header.Set(consts.AuthHeaderName, token)
req.Header.Set(WrapTTLHeaderName, "60s")
client := cleanhttp.DefaultClient()
@ -139,7 +227,7 @@ func TestHandler_Accepted(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
req.Header.Set(AuthHeaderName, token)
req.Header.Set(consts.AuthHeaderName, token)
client := cleanhttp.DefaultClient()
resp, err := client.Do(req)
@ -160,7 +248,7 @@ func TestSysMounts_headerAuth(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
req.Header.Set(AuthHeaderName, token)
req.Header.Set(consts.AuthHeaderName, token)
client := cleanhttp.DefaultClient()
resp, err := client.Do(req)
@ -310,7 +398,7 @@ func TestSysMounts_headerAuth_Wrapped(t *testing.T) {
if err != nil {
t.Fatalf("err: %s", err)
}
req.Header.Set(AuthHeaderName, token)
req.Header.Set(consts.AuthHeaderName, token)
req.Header.Set(WrapTTLHeaderName, "60s")
client := cleanhttp.DefaultClient()
@ -379,6 +467,30 @@ func TestHandler_sealed(t *testing.T) {
testResponseStatus(t, resp, 503)
}
func TestHandler_ui_default(t *testing.T) {
core := vault.TestCoreUI(t, false)
ln, addr := TestServer(t, core)
defer ln.Close()
resp, err := http.Get(addr + "/ui/")
if err != nil {
t.Fatalf("err: %s", err)
}
testResponseStatus(t, resp, 404)
}
func TestHandler_ui_enabled(t *testing.T) {
core := vault.TestCoreUI(t, true)
ln, addr := TestServer(t, core)
defer ln.Close()
resp, err := http.Get(addr + "/ui/")
if err != nil {
t.Fatalf("err: %s", err)
}
testResponseStatus(t, resp, 200)
}
func TestHandler_error(t *testing.T) {
w := httptest.NewRecorder()
@ -429,7 +541,7 @@ func testNonPrintable(t *testing.T, disable bool) {
if err != nil {
t.Fatalf("err: %s", err)
}
req.Header.Set(AuthHeaderName, token)
req.Header.Set(consts.AuthHeaderName, token)
client := cleanhttp.DefaultClient()
resp, err := client.Do(req)

View file

@ -3,6 +3,8 @@ package http
import (
"net/http"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
)
@ -24,22 +26,31 @@ func wrapHelpHandler(h http.Handler, core *vault.Core) http.Handler {
})
}
func handleHelp(core *vault.Core, w http.ResponseWriter, req *http.Request) {
path, ok := stripPrefix("/v1/", req.URL.Path)
if !ok {
respondError(w, http.StatusNotFound, nil)
func handleHelp(core *vault.Core, w http.ResponseWriter, r *http.Request) {
ns, err := namespace.FromContext(r.Context())
if err != nil {
respondError(w, http.StatusBadRequest, nil)
return
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
req, err := requestAuth(core, r, &logical.Request{
Operation: logical.HelpOperation,
Path: path,
Connection: getConnection(r),
})
if err != nil {
if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
respondError(w, http.StatusForbidden, nil)
return
}
respondError(w, http.StatusBadRequest, errwrap.Wrapf("error performing token check: {{err}}", err))
return
}
lreq := requestAuth(core, req, &logical.Request{
Operation: logical.HelpOperation,
Path: path,
Connection: getConnection(req),
})
resp, err := core.HandleRequest(req.Context(), lreq)
resp, err := core.HandleRequest(r.Context(), req)
if err != nil {
respondErrorCommon(w, lreq, resp, err)
respondErrorCommon(w, req, resp, err)
return
}
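
handleHelp now builds the logical path by stripping the "/v1/" prefix and then trimming the namespace's own path. A minimal sketch of that trimming, assuming ns.TrimmedPath removes the namespace path prefix; the Namespace type here is a local stand-in, not helper/namespace:

package main

import (
	"fmt"
	"strings"
)

// Namespace is a stand-in for the helper/namespace type, holding only the
// namespace path (e.g. "ns1/team-a/", or "" for the root namespace).
type Namespace struct {
	Path string
}

// TrimmedPath mimics the assumed behavior: drop the namespace's own path
// prefix so the remainder is relative to that namespace.
func (ns *Namespace) TrimmedPath(path string) string {
	return strings.TrimPrefix(path, ns.Path)
}

func main() {
	ns := &Namespace{Path: "ns1/team-a/"}
	url := "/v1/ns1/team-a/secret/foo"

	// Same shape as handleHelp: strip "/v1/", then trim the namespace prefix.
	path := ns.TrimmedPath(url[len("/v1/"):])
	fmt.Println(path) // secret/foo
}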

View file

@ -12,6 +12,7 @@ import (
"time"
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/jsonutil"
)
@ -68,7 +69,7 @@ func testHttpData(t *testing.T, method string, token string, addr string, body i
req.Header.Set("Content-Type", "application/json")
if len(token) != 0 {
req.Header.Set("X-Vault-Token", token)
req.Header.Set(consts.AuthHeaderName, token)
}
client := cleanhttp.DefaultClient()
@ -89,8 +90,8 @@ func testHttpData(t *testing.T, method string, token string, addr string, body i
return nil
}
// mutate the subsequent redirect requests with the first request's auth header
if token := via[0].Header.Get("X-Vault-Token"); len(token) != 0 {
req.Header.Set("X-Vault-Token", token)
if token := via[0].Header.Get(consts.AuthHeaderName); len(token) != 0 {
req.Header.Set(consts.AuthHeaderName, token)
}
return nil
}
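
The redirect hook above re-applies the first request's token header to each redirected request. A standalone sketch of the same pattern with a plain net/http client; the address and token below are placeholders:

package main

import (
	"fmt"
	"net/http"
)

const authHeader = "X-Vault-Token" // written out literally; the code above uses consts.AuthHeaderName

func main() {
	client := &http.Client{
		// Copy the token header from the original request onto every
		// redirected request, mirroring the test helper above.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if token := via[0].Header.Get(authHeader); token != "" {
				req.Header.Set(authHeader, token)
			}
			return nil
		},
	}

	req, err := http.NewRequest("GET", "http://127.0.0.1:8200/v1/sys/health", nil) // placeholder address
	if err != nil {
		panic(err)
	}
	req.Header.Set(authHeader, "placeholder-token")

	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}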

View file

@ -3,6 +3,7 @@ package http
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
@ -12,21 +13,17 @@ import (
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
)
type PrepareRequestFunc func(*vault.Core, *logical.Request) error
func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Request) (*logical.Request, int, error) {
// Determine the path...
if !strings.HasPrefix(r.URL.Path, "/v1/") {
return nil, http.StatusNotFound, nil
}
path := r.URL.Path[len("/v1/"):]
if path == "" {
return nil, http.StatusNotFound, nil
ns, err := namespace.FromContext(r.Context())
if err != nil {
return nil, http.StatusBadRequest, nil
}
path := ns.TrimmedPath(r.URL.Path[len("/v1/"):])
var data map[string]interface{}
@ -103,13 +100,12 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques
return nil, http.StatusMethodNotAllowed, nil
}
var err error
request_id, err := uuid.GenerateUUID()
if err != nil {
return nil, http.StatusBadRequest, errwrap.Wrapf("failed to generate identifier for the request: {{err}}", err)
}
req := requestAuth(core, r, &logical.Request{
req, err := requestAuth(core, r, &logical.Request{
ID: request_id,
Operation: op,
Path: path,
@ -117,24 +113,40 @@ func buildLogicalRequest(core *vault.Core, w http.ResponseWriter, r *http.Reques
Connection: getConnection(r),
Headers: r.Header,
})
if err != nil {
if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
return nil, http.StatusForbidden, nil
}
return nil, http.StatusBadRequest, errwrap.Wrapf("error performing token check: {{err}}", err)
}
req, err = requestWrapInfo(r, req)
if err != nil {
return nil, http.StatusBadRequest, errwrap.Wrapf("error parsing X-Vault-Wrap-TTL header: {{err}}", err)
}
err = parseMFAHeader(req)
if err != nil {
return nil, http.StatusBadRequest, errwrap.Wrapf("failed to parse X-Vault-MFA header: {{err}}", err)
}
err = requestPolicyOverride(r, req)
if err != nil {
return nil, http.StatusBadRequest, errwrap.Wrapf(fmt.Sprintf(`failed to parse %s header: {{err}}`, PolicyOverrideHeaderName), err)
}
return req, 0, nil
}
func handleLogical(core *vault.Core, prepareRequestCallback PrepareRequestFunc) http.Handler {
return handleLogicalInternal(core, false, prepareRequestCallback)
func handleLogical(core *vault.Core) http.Handler {
return handleLogicalInternal(core, false)
}
func handleLogicalWithInjector(core *vault.Core, prepareRequestCallback PrepareRequestFunc) http.Handler {
return handleLogicalInternal(core, true, prepareRequestCallback)
func handleLogicalWithInjector(core *vault.Core) http.Handler {
return handleLogicalInternal(core, true)
}
func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, prepareRequestCallback PrepareRequestFunc) http.Handler {
func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
req, statusCode, err := buildLogicalRequest(core, w, r)
if err != nil || statusCode != 0 {
@ -142,14 +154,53 @@ func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, prepar
return
}
// Certain endpoints may require changes to the request object. They
// will have a callback registered to do the needed operations, so
// invoke it before proceeding.
if prepareRequestCallback != nil {
if err := prepareRequestCallback(core, req); err != nil {
respondError(w, http.StatusBadRequest, err)
// Always forward requests that are using a limited use count token
if core.PerfStandby() && req.ClientTokenRemainingUses > 0 {
forwardRequest(core, w, r)
return
}
// req.Path will be relative by this point. The prefix check is first
// to fail faster if we're not in this situation since it's a hot path
switch {
case strings.HasPrefix(req.Path, "sys/wrapping/"), strings.HasPrefix(req.Path, "auth/token/"):
// Get the token ns info; if we match the paths below we want to
// swap in the token context (but keep the relative path)
if err != nil {
core.Logger().Warn("error looking up just-set context", "error", err)
respondError(w, http.StatusInternalServerError, err)
return
}
te := req.TokenEntry()
newCtx := r.Context()
if te != nil {
ns, err := vault.NamespaceByID(newCtx, te.NamespaceID, core)
if err != nil {
core.Logger().Warn("error looking up namespace from the token's namespace ID", "error", err)
respondError(w, http.StatusInternalServerError, err)
return
}
if ns != nil {
newCtx = namespace.ContextWithNamespace(newCtx, ns)
}
}
switch req.Path {
case "sys/wrapping/lookup", "sys/wrapping/rewrap", "sys/wrapping/unwrap":
r = r.WithContext(newCtx)
if err := wrappingVerificationFunc(r.Context(), core, req); err != nil {
if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
respondError(w, http.StatusForbidden, err)
} else {
respondError(w, http.StatusBadRequest, err)
}
return
}
// The -self paths have no meaning outside of the token NS, so
// requests for these paths always go to the token NS
case "auth/token/lookup-self", "auth/token/renew-self", "auth/token/revoke-self":
r = r.WithContext(newCtx)
}
}
// Make the internal request. We attach the connection info
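
The switch above routes a handful of endpoints through the token's namespace instead of the namespace taken from the URL. A small sketch of just that classification; usesTokenNamespace is an illustrative name, not a function in this handler:

package main

import (
	"fmt"
	"strings"
)

// usesTokenNamespace reports whether a relative request path is one of the
// endpoints the handler above re-routes into the token's namespace context.
func usesTokenNamespace(path string) bool {
	if strings.HasPrefix(path, "sys/wrapping/") {
		switch path {
		case "sys/wrapping/lookup", "sys/wrapping/rewrap", "sys/wrapping/unwrap":
			return true
		}
		return false
	}
	if strings.HasPrefix(path, "auth/token/") {
		switch path {
		case "auth/token/lookup-self", "auth/token/renew-self", "auth/token/revoke-self":
			return true
		}
	}
	return false
}

func main() {
	for _, p := range []string{"sys/wrapping/unwrap", "auth/token/lookup-self", "auth/token/lookup", "secret/foo"} {
		fmt.Printf("%-25s token namespace: %v\n", p, usesTokenNamespace(p))
	}
}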

View file

@ -15,7 +15,9 @@ import (
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/physical/inmem"
@ -267,8 +269,10 @@ func TestLogical_RequestSizeLimit(t *testing.T) {
}
func TestLogical_ListSuffix(t *testing.T) {
core, _, _ := vault.TestCoreUnsealed(t)
core, _, rootToken := vault.TestCoreUnsealed(t)
req, _ := http.NewRequest("GET", "http://127.0.0.1:8200/v1/secret/foo", nil)
req = req.WithContext(namespace.RootContext(nil))
req.Header.Add(consts.AuthHeaderName, rootToken)
lreq, status, err := buildLogicalRequest(core, nil, req)
if err != nil {
t.Fatal(err)
@ -281,6 +285,8 @@ func TestLogical_ListSuffix(t *testing.T) {
}
req, _ = http.NewRequest("GET", "http://127.0.0.1:8200/v1/secret/foo?list=true", nil)
req = req.WithContext(namespace.RootContext(nil))
req.Header.Add(consts.AuthHeaderName, rootToken)
lreq, status, err = buildLogicalRequest(core, nil, req)
if err != nil {
t.Fatal(err)
@ -293,6 +299,8 @@ func TestLogical_ListSuffix(t *testing.T) {
}
req, _ = http.NewRequest("LIST", "http://127.0.0.1:8200/v1/secret/foo", nil)
req = req.WithContext(namespace.RootContext(nil))
req.Header.Add(consts.AuthHeaderName, rootToken)
lreq, status, err = buildLogicalRequest(core, nil, req)
if err != nil {
t.Fatal(err)

View file

@ -15,18 +15,24 @@ import (
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/plugin"
"github.com/hashicorp/vault/logical/plugin/mock"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/physical/inmem"
"github.com/hashicorp/vault/vault"
)
func getPluginClusterAndCore(t testing.TB, logger log.Logger) (*vault.TestCluster, *vault.TestClusterCore) {
inm, err := inmem.NewTransactionalInmem(nil, logger)
if err != nil {
t.Fatal(err)
}
inmha, err := inmem.NewInmemHA(nil, logger)
if err != nil {
t.Fatal(err)
}
coreConfig := &vault.CoreConfig{
Physical: inmha,
Physical: inm,
HAPhysical: inmha.(physical.HABackend),
LogicalBackends: map[string]logical.Factory{
"plugin": bplugin.Factory,
},
@ -44,7 +50,7 @@ func getPluginClusterAndCore(t testing.TB, logger log.Logger) (*vault.TestCluste
os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
vault.TestWaitActive(t, core.Core)
vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestPlugin_PluginMain")
vault.TestAddTestPlugin(t, core.Core, "mock-plugin", "TestPlugin_PluginMain", []string{}, "")
// Mount the mock plugin
err = core.Client.Sys().Mount("mock", &api.MountInput{

View file

@ -7,6 +7,7 @@ import (
"fmt"
"net/http"
"github.com/hashicorp/vault/helper/base62"
"github.com/hashicorp/vault/vault"
)
@ -14,7 +15,7 @@ func handleSysGenerateRootAttempt(core *vault.Core, generateStrategy vault.Gener
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
handleSysGenerateRootAttemptGet(core, w, r)
handleSysGenerateRootAttemptGet(core, w, r, "")
case "POST", "PUT":
handleSysGenerateRootAttemptPut(core, w, r, generateStrategy)
case "DELETE":
@ -25,7 +26,7 @@ func handleSysGenerateRootAttempt(core *vault.Core, generateStrategy vault.Gener
})
}
func handleSysGenerateRootAttemptGet(core *vault.Core, w http.ResponseWriter, r *http.Request) {
func handleSysGenerateRootAttemptGet(core *vault.Core, w http.ResponseWriter, r *http.Request, otp string) {
ctx, cancel := core.GetContext()
defer cancel()
@ -65,10 +66,12 @@ func handleSysGenerateRootAttemptGet(core *vault.Core, w http.ResponseWriter, r
// Format the status
status := &GenerateRootStatusResponse{
Started: false,
Progress: progress,
Required: sealConfig.SecretThreshold,
Complete: false,
Started: false,
Progress: progress,
Required: sealConfig.SecretThreshold,
Complete: false,
OTPLength: vault.TokenLength,
OTP: otp,
}
if generationConfig != nil {
status.Nonce = generationConfig.Nonce
@ -87,19 +90,32 @@ func handleSysGenerateRootAttemptPut(core *vault.Core, w http.ResponseWriter, r
return
}
if len(req.OTP) > 0 && len(req.PGPKey) > 0 {
respondError(w, http.StatusBadRequest, fmt.Errorf("only one of \"otp\" and \"pgp_key\" must be specified"))
return
var err error
var genned bool
switch {
case len(req.PGPKey) > 0, len(req.OTP) > 0:
default:
genned = true
req.OTP, err = base62.Random(vault.TokenLength, true)
if err != nil {
respondError(w, http.StatusInternalServerError, err)
return
}
}
// Initialize the generation attempt
err := core.GenerateRootInit(req.OTP, req.PGPKey, generateStrategy)
if err != nil {
if err := core.GenerateRootInit(req.OTP, req.PGPKey, generateStrategy); err != nil {
respondError(w, http.StatusBadRequest, err)
return
}
handleSysGenerateRootAttemptGet(core, w, r)
if genned {
handleSysGenerateRootAttemptGet(core, w, r, req.OTP)
return
}
handleSysGenerateRootAttemptGet(core, w, r, "")
}
func handleSysGenerateRootAttemptDelete(core *vault.Core, w http.ResponseWriter, r *http.Request) {
@ -184,6 +200,8 @@ type GenerateRootStatusResponse struct {
EncodedToken string `json:"encoded_token"`
EncodedRootToken string `json:"encoded_root_token"`
PGPFingerprint string `json:"pgp_fingerprint"`
OTP string `json:"otp"`
OTPLength int `json:"otp_length"`
}
type GenerateRootUpdateRequest struct {

View file

@ -9,7 +9,6 @@ import (
"reflect"
"testing"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/pgpkeys"
"github.com/hashicorp/vault/helper/xor"
"github.com/hashicorp/vault/vault"
@ -36,6 +35,8 @@ func TestSysGenerateRootAttempt_Status(t *testing.T) {
"encoded_root_token": "",
"pgp_fingerprint": "",
"nonce": "",
"otp": "",
"otp_length": json.Number("24"),
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
@ -70,6 +71,8 @@ func TestSysGenerateRootAttempt_Setup_OTP(t *testing.T) {
"encoded_token": "",
"encoded_root_token": "",
"pgp_fingerprint": "",
"otp": "",
"otp_length": json.Number("24"),
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
@ -92,6 +95,8 @@ func TestSysGenerateRootAttempt_Setup_OTP(t *testing.T) {
"encoded_token": "",
"encoded_root_token": "",
"pgp_fingerprint": "",
"otp": "",
"otp_length": json.Number("24"),
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
@ -126,6 +131,8 @@ func TestSysGenerateRootAttempt_Setup_PGP(t *testing.T) {
"encoded_token": "",
"encoded_root_token": "",
"pgp_fingerprint": "816938b8a29146fbe245dd29e7cbaf8e011db793",
"otp": "",
"otp_length": json.Number("24"),
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
@ -163,6 +170,8 @@ func TestSysGenerateRootAttempt_Cancel(t *testing.T) {
"encoded_token": "",
"encoded_root_token": "",
"pgp_fingerprint": "",
"otp": "",
"otp_length": json.Number("24"),
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
@ -192,6 +201,8 @@ func TestSysGenerateRootAttempt_Cancel(t *testing.T) {
"encoded_root_token": "",
"pgp_fingerprint": "",
"nonce": "",
"otp": "",
"otp_length": json.Number("24"),
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
@ -251,18 +262,11 @@ func TestSysGenerateRoot_Update_OTP(t *testing.T) {
defer ln.Close()
TestServerAuth(t, addr, token)
otpBytes, err := vault.GenerateRandBytes(16)
if err != nil {
t.Fatal(err)
}
otp := base64.StdEncoding.EncodeToString(otpBytes)
resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{
"otp": otp,
})
resp := testHttpPut(t, token, addr+"/v1/sys/generate-root/attempt", map[string]interface{}{})
var rootGenerationStatus map[string]interface{}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &rootGenerationStatus)
otp := rootGenerationStatus["otp"].(string)
var actual map[string]interface{}
var expected map[string]interface{}
@ -280,6 +284,8 @@ func TestSysGenerateRoot_Update_OTP(t *testing.T) {
"required": json.Number(fmt.Sprintf("%d", len(keys))),
"started": true,
"pgp_fingerprint": "",
"otp": "",
"otp_length": json.Number("0"),
}
if i+1 == len(keys) {
expected["complete"] = true
@ -296,19 +302,22 @@ func TestSysGenerateRoot_Update_OTP(t *testing.T) {
}
expected["encoded_token"] = actual["encoded_token"]
expected["encoded_root_token"] = actual["encoded_root_token"]
expected["encoded_token"] = actual["encoded_token"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
}
decodedToken, err := xor.XORBase64(otp, actual["encoded_root_token"].(string))
tokenBytes, err := base64.RawStdEncoding.DecodeString(expected["encoded_token"].(string))
if err != nil {
t.Fatal(err)
}
newRootToken, err := uuid.FormatUUID(decodedToken)
tokenBytes, err = xor.XORBytes(tokenBytes, []byte(otp))
if err != nil {
t.Fatal(err)
}
newRootToken := string(tokenBytes)
actual = map[string]interface{}{}
expected = map[string]interface{}{
@ -374,6 +383,8 @@ func TestSysGenerateRoot_Update_PGP(t *testing.T) {
"required": json.Number(fmt.Sprintf("%d", len(keys))),
"started": true,
"pgp_fingerprint": "816938b8a29146fbe245dd29e7cbaf8e011db793",
"otp": "",
"otp_length": json.Number("0"),
}
if i+1 == len(keys) {
expected["complete"] = true
@ -390,12 +401,13 @@ func TestSysGenerateRoot_Update_PGP(t *testing.T) {
}
expected["encoded_token"] = actual["encoded_token"]
expected["encoded_root_token"] = actual["encoded_root_token"]
expected["encoded_token"] = actual["encoded_token"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
}
decodedTokenBuf, err := pgpkeys.DecryptBytes(actual["encoded_root_token"].(string), pgpkeys.TestPrivKey1)
decodedTokenBuf, err := pgpkeys.DecryptBytes(actual["encoded_token"].(string), pgpkeys.TestPrivKey1)
if err != nil {
t.Fatal(err)
}
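
In TestSysGenerateRoot_Update_OTP above, the client recovers the new root token by base64-decoding (RawStdEncoding) the encoded token and XOR-ing it with the OTP returned from the attempt endpoint. A self-contained sketch of that decode using only the standard library; the token and OTP values are made up:

package main

import (
	"encoding/base64"
	"fmt"
)

// decodeRootToken mirrors the client-side steps in the OTP test above:
// base64 (RawStdEncoding) decode the encoded token, then XOR it with the
// OTP bytes to recover the plaintext root token.
func decodeRootToken(encoded, otp string) (string, error) {
	tokenBytes, err := base64.RawStdEncoding.DecodeString(encoded)
	if err != nil {
		return "", err
	}
	if len(tokenBytes) != len(otp) {
		return "", fmt.Errorf("length mismatch: token is %d bytes, otp is %d bytes", len(tokenBytes), len(otp))
	}
	out := make([]byte, len(tokenBytes))
	for i := range tokenBytes {
		out[i] = tokenBytes[i] ^ otp[i]
	}
	return string(out), nil
}

func main() {
	// Made-up 24-character token and OTP, used only to round-trip the scheme.
	token := "root-token-0123456789abc"
	otp := "AbCdEfGhIjKlMnOpQrStUvWx"

	enc := make([]byte, len(token))
	for i := range enc {
		enc[i] = token[i] ^ otp[i]
	}
	encoded := base64.RawStdEncoding.EncodeToString(enc)

	decoded, err := decodeRootToken(encoded, otp)
	fmt.Println(decoded, err) // root-token-0123456789abc <nil>
}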

View file

@ -29,134 +29,140 @@ func TestSysRekey_Init_pgpKeysEntriesForRekey(t *testing.T) {
}
func TestSysRekey_Init_Status(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
t.Run("status-barrier-default", func(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
resp, err := http.Get(addr + "/v1/sys/rekey/init")
if err != nil {
t.Fatalf("err: %s", err)
}
resp, err := http.Get(addr + "/v1/sys/rekey/init")
if err != nil {
t.Fatalf("err: %s", err)
}
var actual map[string]interface{}
expected := map[string]interface{}{
"started": false,
"t": json.Number("0"),
"n": json.Number("0"),
"progress": json.Number("0"),
"required": json.Number("3"),
"pgp_fingerprints": interface{}(nil),
"backup": false,
"nonce": "",
"verification_required": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
}
var actual map[string]interface{}
expected := map[string]interface{}{
"started": false,
"t": json.Number("0"),
"n": json.Number("0"),
"progress": json.Number("0"),
"required": json.Number("3"),
"pgp_fingerprints": interface{}(nil),
"backup": false,
"nonce": "",
"verification_required": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
}
})
}
func TestSysRekey_Init_Setup(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
t.Run("init-barrier-barrier-key", func(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
// Start rekey
resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
"secret_shares": 5,
"secret_threshold": 3,
// Start rekey
resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
"secret_shares": 5,
"secret_threshold": 3,
})
testResponseStatus(t, resp, 200)
var actual map[string]interface{}
expected := map[string]interface{}{
"started": true,
"t": json.Number("3"),
"n": json.Number("5"),
"progress": json.Number("0"),
"required": json.Number("3"),
"pgp_fingerprints": interface{}(nil),
"backup": false,
"verification_required": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
if actual["nonce"].(string) == "" {
t.Fatalf("nonce was empty")
}
expected["nonce"] = actual["nonce"]
if diff := deep.Equal(actual, expected); diff != nil {
t.Fatal(diff)
}
// Get rekey status
resp = testHttpGet(t, token, addr+"/v1/sys/rekey/init")
actual = map[string]interface{}{}
expected = map[string]interface{}{
"started": true,
"t": json.Number("3"),
"n": json.Number("5"),
"progress": json.Number("0"),
"required": json.Number("3"),
"pgp_fingerprints": interface{}(nil),
"backup": false,
"verification_required": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
if actual["nonce"].(string) == "" {
t.Fatalf("nonce was empty")
}
if actual["nonce"].(string) == "" {
t.Fatalf("nonce was empty")
}
expected["nonce"] = actual["nonce"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
}
})
testResponseStatus(t, resp, 200)
var actual map[string]interface{}
expected := map[string]interface{}{
"started": true,
"t": json.Number("3"),
"n": json.Number("5"),
"progress": json.Number("0"),
"required": json.Number("3"),
"pgp_fingerprints": interface{}(nil),
"backup": false,
"verification_required": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
if actual["nonce"].(string) == "" {
t.Fatalf("nonce was empty")
}
expected["nonce"] = actual["nonce"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
}
// Get rekey status
resp = testHttpGet(t, token, addr+"/v1/sys/rekey/init")
actual = map[string]interface{}{}
expected = map[string]interface{}{
"started": true,
"t": json.Number("3"),
"n": json.Number("5"),
"progress": json.Number("0"),
"required": json.Number("3"),
"pgp_fingerprints": interface{}(nil),
"backup": false,
"verification_required": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
if actual["nonce"].(string) == "" {
t.Fatalf("nonce was empty")
}
if actual["nonce"].(string) == "" {
t.Fatalf("nonce was empty")
}
expected["nonce"] = actual["nonce"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
}
}
func TestSysRekey_Init_Cancel(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
t.Run("cancel-barrier-barrier-key", func(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
"secret_shares": 5,
"secret_threshold": 3,
resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
"secret_shares": 5,
"secret_threshold": 3,
})
testResponseStatus(t, resp, 200)
resp = testHttpDelete(t, token, addr+"/v1/sys/rekey/init")
testResponseStatus(t, resp, 204)
resp, err := http.Get(addr + "/v1/sys/rekey/init")
if err != nil {
t.Fatalf("err: %s", err)
}
var actual map[string]interface{}
expected := map[string]interface{}{
"started": false,
"t": json.Number("0"),
"n": json.Number("0"),
"progress": json.Number("0"),
"required": json.Number("3"),
"pgp_fingerprints": interface{}(nil),
"backup": false,
"nonce": "",
"verification_required": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
}
})
testResponseStatus(t, resp, 200)
resp = testHttpDelete(t, token, addr+"/v1/sys/rekey/init")
testResponseStatus(t, resp, 204)
resp, err := http.Get(addr + "/v1/sys/rekey/init")
if err != nil {
t.Fatalf("err: %s", err)
}
var actual map[string]interface{}
expected := map[string]interface{}{
"started": false,
"t": json.Number("0"),
"n": json.Number("0"),
"progress": json.Number("0"),
"required": json.Number("3"),
"pgp_fingerprints": interface{}(nil),
"backup": false,
"nonce": "",
"verification_required": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("\nexpected: %#v\nactual: %#v", expected, actual)
}
}
func TestSysRekey_badKey(t *testing.T) {
@ -172,71 +178,73 @@ func TestSysRekey_badKey(t *testing.T) {
}
func TestSysRekey_Update(t *testing.T) {
core, keys, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
t.Run("rekey-barrier-barrier-key", func(t *testing.T) {
core, keys, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
"secret_shares": 5,
"secret_threshold": 3,
})
var rekeyStatus map[string]interface{}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &rekeyStatus)
var actual map[string]interface{}
var expected map[string]interface{}
for i, key := range keys {
resp = testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{
"nonce": rekeyStatus["nonce"].(string),
"key": hex.EncodeToString(key),
resp := testHttpPut(t, token, addr+"/v1/sys/rekey/init", map[string]interface{}{
"secret_shares": 5,
"secret_threshold": 3,
})
actual = map[string]interface{}{}
expected = map[string]interface{}{
"started": true,
"nonce": rekeyStatus["nonce"].(string),
"backup": false,
"pgp_fingerprints": interface{}(nil),
"required": json.Number("3"),
"t": json.Number("3"),
"n": json.Number("5"),
"progress": json.Number(fmt.Sprintf("%d", i+1)),
"verification_required": false,
}
var rekeyStatus map[string]interface{}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
testResponseBody(t, resp, &rekeyStatus)
if i+1 == len(keys) {
delete(expected, "started")
delete(expected, "required")
delete(expected, "t")
delete(expected, "n")
delete(expected, "progress")
expected["complete"] = true
expected["keys"] = actual["keys"]
expected["keys_base64"] = actual["keys_base64"]
var actual map[string]interface{}
var expected map[string]interface{}
for i, key := range keys {
resp = testHttpPut(t, token, addr+"/v1/sys/rekey/update", map[string]interface{}{
"nonce": rekeyStatus["nonce"].(string),
"key": hex.EncodeToString(key),
})
actual = map[string]interface{}{}
expected = map[string]interface{}{
"started": true,
"nonce": rekeyStatus["nonce"].(string),
"backup": false,
"pgp_fingerprints": interface{}(nil),
"required": json.Number("3"),
"t": json.Number("3"),
"n": json.Number("5"),
"progress": json.Number(fmt.Sprintf("%d", i+1)),
"verification_required": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
if i+1 == len(keys) {
delete(expected, "started")
delete(expected, "required")
delete(expected, "t")
delete(expected, "n")
delete(expected, "progress")
expected["complete"] = true
expected["keys"] = actual["keys"]
expected["keys_base64"] = actual["keys_base64"]
}
if i+1 < len(keys) && (actual["nonce"] == nil || actual["nonce"].(string) == "") {
t.Fatalf("expected a nonce, i is %d, actual is %#v", i, actual)
}
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("\nexpected: \n%#v\nactual: \n%#v", expected, actual)
}
}
if i+1 < len(keys) && (actual["nonce"] == nil || actual["nonce"].(string) == "") {
t.Fatalf("expected a nonce, i is %d, actual is %#v", i, actual)
retKeys := actual["keys"].([]interface{})
if len(retKeys) != 5 {
t.Fatalf("bad: %#v", retKeys)
}
if diff := deep.Equal(actual, expected); diff != nil {
t.Fatal(diff)
keysB64 := actual["keys_base64"].([]interface{})
if len(keysB64) != 5 {
t.Fatalf("bad: %#v", keysB64)
}
}
retKeys := actual["keys"].([]interface{})
if len(retKeys) != 5 {
t.Fatalf("bad: %#v", retKeys)
}
keysB64 := actual["keys_base64"].([]interface{})
if len(keysB64) != 5 {
t.Fatalf("bad: %#v", keysB64)
}
})
}
func TestSysRekey_ReInitUpdate(t *testing.T) {

View file

@ -36,10 +36,9 @@ func handleSysSeal(core *vault.Core) http.Handler {
if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
respondError(w, http.StatusForbidden, err)
return
} else {
respondError(w, http.StatusInternalServerError, err)
return
}
respondError(w, http.StatusInternalServerError, err)
return
}
respondOk(w, nil)
@ -63,6 +62,10 @@ func handleSysStepDown(core *vault.Core) http.Handler {
// Seal with the token above
if err := core.StepDown(r.Context(), req); err != nil {
if errwrap.Contains(err, logical.ErrPermissionDenied.Error()) {
respondError(w, http.StatusForbidden, err)
return
}
respondError(w, http.StatusInternalServerError, err)
return
}
@ -197,28 +200,30 @@ func handleSysSealStatusRaw(core *vault.Core, w http.ResponseWriter, r *http.Req
progress, nonce := core.SecretProgress()
respondOk(w, &SealStatusResponse{
Type: sealConfig.Type,
Sealed: sealed,
T: sealConfig.SecretThreshold,
N: sealConfig.SecretShares,
Progress: progress,
Nonce: nonce,
Version: version.GetVersion().VersionNumber(),
ClusterName: clusterName,
ClusterID: clusterID,
Type: sealConfig.Type,
Sealed: sealed,
T: sealConfig.SecretThreshold,
N: sealConfig.SecretShares,
Progress: progress,
Nonce: nonce,
Version: version.GetVersion().VersionNumber(),
ClusterName: clusterName,
ClusterID: clusterID,
RecoverySeal: core.SealAccess().RecoveryKeySupported(),
})
}
type SealStatusResponse struct {
Type string `json:"type"`
Sealed bool `json:"sealed"`
T int `json:"t"`
N int `json:"n"`
Progress int `json:"progress"`
Nonce string `json:"nonce"`
Version string `json:"version"`
ClusterName string `json:"cluster_name,omitempty"`
ClusterID string `json:"cluster_id,omitempty"`
Type string `json:"type"`
Sealed bool `json:"sealed"`
T int `json:"t"`
N int `json:"n"`
Progress int `json:"progress"`
Nonce string `json:"nonce"`
Version string `json:"version"`
ClusterName string `json:"cluster_name,omitempty"`
ClusterID string `json:"cluster_id,omitempty"`
RecoverySeal bool `json:"recovery_seal"`
}
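
For reference, the JSON shape the expanded SealStatusResponse produces, marshaled from a local copy of the struct above; all field values here are made up:

package main

import (
	"encoding/json"
	"fmt"
)

// sealStatusResponse is a local copy of the response struct above, used only
// to show the JSON field names, including the new recovery_seal flag.
type sealStatusResponse struct {
	Type         string `json:"type"`
	Sealed       bool   `json:"sealed"`
	T            int    `json:"t"`
	N            int    `json:"n"`
	Progress     int    `json:"progress"`
	Nonce        string `json:"nonce"`
	Version      string `json:"version"`
	ClusterName  string `json:"cluster_name,omitempty"`
	ClusterID    string `json:"cluster_id,omitempty"`
	RecoverySeal bool   `json:"recovery_seal"`
}

func main() {
	out, _ := json.MarshalIndent(sealStatusResponse{
		Type:         "shamir",
		Sealed:       true,
		T:            3,
		N:            5,
		Progress:     1,
		Version:      "0.11.2",
		RecoverySeal: false,
	}, "", "  ")
	fmt.Println(string(out))
}
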
type UnsealRequest struct {

View file

@ -1,7 +1,6 @@
package http
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
@ -10,6 +9,7 @@ import (
"strconv"
"testing"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/vault"
)
@ -27,12 +27,13 @@ func TestSysSealStatus(t *testing.T) {
var actual map[string]interface{}
expected := map[string]interface{}{
"sealed": true,
"t": json.Number("3"),
"n": json.Number("3"),
"progress": json.Number("0"),
"nonce": "",
"type": "shamir",
"sealed": true,
"t": json.Number("3"),
"n": json.Number("3"),
"progress": json.Number("0"),
"nonce": "",
"type": "shamir",
"recovery_seal": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
@ -108,12 +109,13 @@ func TestSysUnseal(t *testing.T) {
var actual map[string]interface{}
expected := map[string]interface{}{
"sealed": true,
"t": json.Number("3"),
"n": json.Number("3"),
"progress": json.Number(fmt.Sprintf("%d", i+1)),
"nonce": "",
"type": "shamir",
"sealed": true,
"t": json.Number("3"),
"n": json.Number("3"),
"progress": json.Number(fmt.Sprintf("%d", i+1)),
"nonce": "",
"type": "shamir",
"recovery_seal": false,
}
if i == len(keys)-1 {
expected["sealed"] = false
@ -187,11 +189,12 @@ func TestSysUnseal_Reset(t *testing.T) {
var actual map[string]interface{}
expected := map[string]interface{}{
"sealed": true,
"t": json.Number("3"),
"n": json.Number("5"),
"progress": json.Number(strconv.Itoa(i + 1)),
"type": "shamir",
"sealed": true,
"t": json.Number("3"),
"n": json.Number("5"),
"progress": json.Number(strconv.Itoa(i + 1)),
"type": "shamir",
"recovery_seal": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
@ -224,11 +227,12 @@ func TestSysUnseal_Reset(t *testing.T) {
actual = map[string]interface{}{}
expected := map[string]interface{}{
"sealed": true,
"t": json.Number("3"),
"n": json.Number("5"),
"progress": json.Number("0"),
"type": "shamir",
"sealed": true,
"t": json.Number("3"),
"n": json.Number("5"),
"progress": json.Number("0"),
"type": "shamir",
"recovery_seal": false,
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
@ -274,7 +278,7 @@ func TestSysSeal_Permissions(t *testing.T) {
},
ClientToken: root,
}
resp, err := core.HandleRequest(context.Background(), req)
resp, err := core.HandleRequest(namespace.RootContext(nil), req)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -289,7 +293,7 @@ func TestSysSeal_Permissions(t *testing.T) {
"policies": []string{"test"},
}
resp, err = core.HandleRequest(context.Background(), req)
resp, err = core.HandleRequest(namespace.RootContext(nil), req)
if err != nil {
t.Fatalf("err: %v %v", err, resp)
}
@ -312,7 +316,7 @@ func TestSysSeal_Permissions(t *testing.T) {
},
ClientToken: root,
}
resp, err = core.HandleRequest(context.Background(), req)
resp, err = core.HandleRequest(namespace.RootContext(nil), req)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -333,7 +337,7 @@ func TestSysSeal_Permissions(t *testing.T) {
},
ClientToken: root,
}
resp, err = core.HandleRequest(context.Background(), req)
resp, err = core.HandleRequest(namespace.RootContext(nil), req)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -354,7 +358,7 @@ func TestSysSeal_Permissions(t *testing.T) {
},
ClientToken: root,
}
resp, err = core.HandleRequest(context.Background(), req)
resp, err = core.HandleRequest(namespace.RootContext(nil), req)
if err != nil {
t.Fatalf("err: %v", err)
}

http/util.go (new file)
View file

@ -0,0 +1,22 @@
package http
import (
"net/http"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/vault"
)
var (
adjustRequest = func(c *vault.Core, r *http.Request) (*http.Request, int) {
return r.WithContext(namespace.ContextWithNamespace(r.Context(), namespace.RootNamespace)), 0
}
genericWrapping = func(core *vault.Core, in http.Handler, props *vault.HandlerProperties) http.Handler {
// Wrap the help-wrapped handler with another layer providing the
// generic handler
return wrapGenericHandler(core, in, props.MaxRequestSize, props.MaxRequestDuration)
}
additionalRoutes = func(mux *http.ServeMux, core *vault.Core) {}
)

View file

@ -34,6 +34,10 @@ type Auth struct {
TokenPolicies []string `json:"token_policies" mapstructure:"token_policies" structs:"token_policies"`
IdentityPolicies []string `json:"identity_policies" mapstructure:"identity_policies" structs:"identity_policies"`
// ExternalNamespacePolicies represent the policies authorized from
// different namespaces indexed by respective namespace identifiers
ExternalNamespacePolicies map[string][]string `json:"external_namespace_policies" mapstructure:"external_namespace_policies" structs:"external_namespace_policies"`
// Metadata is used to attach arbitrary string-type metadata to
// an authenticated user. This metadata will be outputted into the
// audit log.
@ -58,7 +62,7 @@ type Auth struct {
// ExplicitMaxTTL is the max TTL that constrains periodic tokens. For normal
// tokens, this value is constrained by the configured max ttl.
ExplicitMaxTTL time.Duration `json:"-" mapstructure:"-" structs:"-"`
ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"`
// Number of allowed uses of the issued token
NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`

View file

@ -20,6 +20,10 @@ var (
// ErrMultiAuthzPending is returned if the request needs more
// authorizations
ErrMultiAuthzPending = errors.New("request needs further approval")
// ErrUpstreamRateLimited is returned when Vault receives a rate limited
// response from an upstream
ErrUpstreamRateLimited = errors.New("upstream rate limited")
)
type HTTPCodedError interface {
@ -72,3 +76,15 @@ type ReplicationCodedError struct {
func (r *ReplicationCodedError) Error() string {
return r.Msg
}
type KeyNotFoundError struct {
Err error
}
func (e *KeyNotFoundError) WrappedErrors() []error {
return []error{e.Err}
}
func (e *KeyNotFoundError) Error() string {
return e.Err.Error()
}
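
KeyNotFoundError exposes WrappedErrors, which is the errwrap Wrapper interface, so callers can still match the inner error. A small sketch with a local copy of the type, assuming the github.com/hashicorp/errwrap API already used elsewhere in this diff:

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/errwrap"
)

// keyNotFoundError is a local copy of the new logical.KeyNotFoundError: by
// exposing WrappedErrors it satisfies errwrap's Wrapper interface, so
// errwrap.Contains can see the underlying error.
type keyNotFoundError struct {
	Err error
}

func (e *keyNotFoundError) WrappedErrors() []error { return []error{e.Err} }
func (e *keyNotFoundError) Error() string          { return e.Err.Error() }

func main() {
	inner := errors.New("key not found at path secret/foo")
	err := &keyNotFoundError{Err: inner}

	// errwrap walks WrappedErrors, so the inner message is still matchable.
	fmt.Println(errwrap.Contains(err, "key not found at path secret/foo")) // true
}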

View file

@ -17,6 +17,7 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/helper/errutil"
"github.com/hashicorp/vault/helper/license"
"github.com/hashicorp/vault/helper/logging"
"github.com/hashicorp/vault/helper/parseutil"
"github.com/hashicorp/vault/logical"
@ -183,6 +184,14 @@ func (b *Backend) HandleRequest(ctx context.Context, req *logical.Request) (*log
return nil, logical.ErrUnsupportedPath
}
// Check if a feature is required and if the license has that feature
if path.FeatureRequired != license.FeatureNone {
hasFeature := b.system.HasFeature(path.FeatureRequired)
if !hasFeature {
return nil, logical.CodedError(401, "Feature Not Enabled")
}
}
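
The check above gates a path on a licensed feature: when FeatureRequired is non-zero and the system view's HasFeature reports it absent (as the OSS gRPC system view later in this diff always does), the framework returns a 401 coded error. A stand-in sketch of that gate; the features bitmask here is a local illustration, not helper/license:

package main

import "fmt"

// features is a local stand-in bitmask for license.Features; featureNone
// mirrors the "no feature required" zero value checked above.
type features uint

const (
	featureNone features = 0
	featureA    features = 1
)

// systemView stands in for the backend's system view: it reports which
// features the current license enables.
type systemView struct{ licensed features }

func (s systemView) HasFeature(f features) bool { return s.licensed&f != 0 }

// routeAllowed applies the same gate as the route check above: a required
// feature that is not licensed rejects the request.
func routeAllowed(required features, sys systemView) error {
	if required != featureNone && !sys.HasFeature(required) {
		return fmt.Errorf("feature not enabled") // the framework returns a 401 coded error here
	}
	return nil
}

func main() {
	oss := systemView{licensed: featureNone} // OSS view: no licensed features
	fmt.Println(routeAllowed(featureNone, oss)) // <nil>
	fmt.Println(routeAllowed(featureA, oss))    // feature not enabled
}
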
// Build up the data for the route, with the URL taking priority
// for the fields over the PUT data.
raw := make(map[string]interface{}, len(path.Fields))

View file

@ -7,6 +7,7 @@ import (
"strings"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/helper/license"
"github.com/hashicorp/vault/logical"
)
@ -69,6 +70,10 @@ type Path struct {
// must have UpdateCapability on the path.
ExistenceCheck ExistenceFunc
// FeatureRequired, if set, will validate that the given feature is
// enabled for the set of paths
FeatureRequired license.Features
// Help is text describing how to use this path. This will be used
// to auto-generate the help operation. The Path will automatically
// generate a parameter listing and URL structure based on the

View file

@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: logical/identity.proto
package logical // import "github.com/hashicorp/vault/logical"
package logical
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -20,13 +22,13 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Entity struct {
// ID is the unique identifier for the entity
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
ID string `sentinel:"" protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
// Name is the human-friendly unique identifier for the entity
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Name string `sentinel:"" protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// Aliases contains the alias mappings for the given entity
Aliases []*Alias `protobuf:"bytes,3,rep,name=aliases,proto3" json:"aliases,omitempty"`
Aliases []*Alias `sentinel:"" protobuf:"bytes,3,rep,name=aliases,proto3" json:"aliases,omitempty"`
// Metadata represents the custom data tied to this entity
Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -36,16 +38,17 @@ func (m *Entity) Reset() { *m = Entity{} }
func (m *Entity) String() string { return proto.CompactTextString(m) }
func (*Entity) ProtoMessage() {}
func (*Entity) Descriptor() ([]byte, []int) {
return fileDescriptor_identity_8ee6f9f1922f77d7, []int{0}
return fileDescriptor_04442ca37d5e30be, []int{0}
}
func (m *Entity) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Entity.Unmarshal(m, b)
}
func (m *Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Entity.Marshal(b, m, deterministic)
}
func (dst *Entity) XXX_Merge(src proto.Message) {
xxx_messageInfo_Entity.Merge(dst, src)
func (m *Entity) XXX_Merge(src proto.Message) {
xxx_messageInfo_Entity.Merge(m, src)
}
func (m *Entity) XXX_Size() int {
return xxx_messageInfo_Entity.Size(m)
@ -86,14 +89,14 @@ func (m *Entity) GetMetadata() map[string]string {
type Alias struct {
// MountType is the backend mount's type to which this identity belongs
MountType string `protobuf:"bytes,1,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"`
MountType string `sentinel:"" protobuf:"bytes,1,opt,name=mount_type,json=mountType,proto3" json:"mount_type,omitempty"`
// MountAccessor is the identifier of the mount entry to which this
// identity belongs
MountAccessor string `protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"`
MountAccessor string `sentinel:"" protobuf:"bytes,2,opt,name=mount_accessor,json=mountAccessor,proto3" json:"mount_accessor,omitempty"`
// Name is the identifier of this identity in its authentication source
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
Name string `sentinel:"" protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
// Metadata represents the custom data tied to this alias
Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Metadata map[string]string `sentinel:"" protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@ -103,16 +106,17 @@ func (m *Alias) Reset() { *m = Alias{} }
func (m *Alias) String() string { return proto.CompactTextString(m) }
func (*Alias) ProtoMessage() {}
func (*Alias) Descriptor() ([]byte, []int) {
return fileDescriptor_identity_8ee6f9f1922f77d7, []int{1}
return fileDescriptor_04442ca37d5e30be, []int{1}
}
func (m *Alias) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Alias.Unmarshal(m, b)
}
func (m *Alias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Alias.Marshal(b, m, deterministic)
}
func (dst *Alias) XXX_Merge(src proto.Message) {
xxx_messageInfo_Alias.Merge(dst, src)
func (m *Alias) XXX_Merge(src proto.Message) {
xxx_messageInfo_Alias.Merge(m, src)
}
func (m *Alias) XXX_Size() int {
return xxx_messageInfo_Alias.Size(m)
@ -158,9 +162,9 @@ func init() {
proto.RegisterMapType((map[string]string)(nil), "logical.Alias.MetadataEntry")
}
func init() { proto.RegisterFile("logical/identity.proto", fileDescriptor_identity_8ee6f9f1922f77d7) }
func init() { proto.RegisterFile("logical/identity.proto", fileDescriptor_04442ca37d5e30be) }
var fileDescriptor_identity_8ee6f9f1922f77d7 = []byte{
var fileDescriptor_04442ca37d5e30be = []byte{
// 287 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x91, 0x4f, 0x4b, 0xc3, 0x40,
0x10, 0xc5, 0x49, 0xd2, 0x3f, 0x76, 0xa4, 0x45, 0x06, 0x91, 0x20, 0x16, 0x4a, 0x50, 0xc8, 0x29,

View file

@ -1,11 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: logical/plugin.proto
package logical // import "github.com/hashicorp/vault/logical"
package logical
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@ -30,16 +32,17 @@ func (m *PluginEnvironment) Reset() { *m = PluginEnvironment{} }
func (m *PluginEnvironment) String() string { return proto.CompactTextString(m) }
func (*PluginEnvironment) ProtoMessage() {}
func (*PluginEnvironment) Descriptor() ([]byte, []int) {
return fileDescriptor_plugin_c3e74d5a6c13acf1, []int{0}
return fileDescriptor_0f04cd6a1a3a5255, []int{0}
}
func (m *PluginEnvironment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PluginEnvironment.Unmarshal(m, b)
}
func (m *PluginEnvironment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PluginEnvironment.Marshal(b, m, deterministic)
}
func (dst *PluginEnvironment) XXX_Merge(src proto.Message) {
xxx_messageInfo_PluginEnvironment.Merge(dst, src)
func (m *PluginEnvironment) XXX_Merge(src proto.Message) {
xxx_messageInfo_PluginEnvironment.Merge(m, src)
}
func (m *PluginEnvironment) XXX_Size() int {
return xxx_messageInfo_PluginEnvironment.Size(m)
@ -61,9 +64,9 @@ func init() {
proto.RegisterType((*PluginEnvironment)(nil), "logical.PluginEnvironment")
}
func init() { proto.RegisterFile("logical/plugin.proto", fileDescriptor_plugin_c3e74d5a6c13acf1) }
func init() { proto.RegisterFile("logical/plugin.proto", fileDescriptor_0f04cd6a1a3a5255) }
var fileDescriptor_plugin_c3e74d5a6c13acf1 = []byte{
var fileDescriptor_0f04cd6a1a3a5255 = []byte{
// 133 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc9, 0xc9, 0x4f, 0xcf,
0x4c, 0x4e, 0xcc, 0xd1, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9,

View file

@ -11,6 +11,7 @@ import (
"fmt"
"github.com/hashicorp/vault/helper/consts"
"github.com/hashicorp/vault/helper/license"
"github.com/hashicorp/vault/helper/pluginutil"
"github.com/hashicorp/vault/helper/wrapping"
"github.com/hashicorp/vault/logical"
@ -123,6 +124,11 @@ func (s *gRPCSystemViewClient) MlockEnabled() bool {
return reply.Enabled
}
func (s *gRPCSystemViewClient) HasFeature(feature license.Features) bool {
// Not implemented
return false
}
func (s *gRPCSystemViewClient) LocalMount() bool {
reply, err := s.client.LocalMount(context.Background(), &pb.Empty{})
if err != nil {

Some files were not shown because too many files have changed in this diff.