This commit is contained in:
Jeff Mitchell 2017-10-23 14:59:37 -04:00
parent d6a9a770c1
commit 47dae8ffc7
16 changed files with 666 additions and 321 deletions

View File

@ -41,6 +41,30 @@ const (
SudoCapabilityInt
)
// PolicyType describes the class of a stored policy. It is persisted in
// each PolicyEntry so the store knows how to parse the raw policy text
// when it is loaded back in.
type PolicyType uint32

const (
	// PolicyTypeACL is a standard ACL policy.
	PolicyTypeACL PolicyType = iota
	// PolicyTypeRGP is a role governing policy.
	PolicyTypeRGP
	// PolicyTypeEGP is an endpoint governing policy.
	PolicyTypeEGP

	// PolicyTypeToken is not a stored type; it triggers a lookup in the
	// policy type map to figure out whether the named policy is an ACL
	// or an RGP.
	PolicyTypeToken
)

// String returns the lowercase textual name of the policy type, or the
// empty string for values outside the known set. PolicyTypeToken is
// reported explicitly as "token" so that error messages formatting a
// type (e.g. "unknown policy type %q") never print a blank name.
func (p PolicyType) String() string {
	switch p {
	case PolicyTypeACL:
		return "acl"
	case PolicyTypeRGP:
		return "rgp"
	case PolicyTypeEGP:
		return "egp"
	case PolicyTypeToken:
		return "token"
	}
	return ""
}
var (
cap2Int = map[string]uint32{
DenyCapability: DenyCapabilityInt,
@ -57,27 +81,29 @@ var (
// an ACL configuration.
type Policy struct {
Name string `hcl:"name"`
Paths []*PathCapabilities `hcl:"-"`
Paths []*PathRules `hcl:"-"`
Raw string
Type PolicyType
}
// PathCapabilities represents a policy for a path in the namespace.
type PathCapabilities struct {
// PathRules represents a policy for a path in the namespace.
type PathRules struct {
Prefix string
Policy string
Permissions *Permissions
Permissions *ACLPermissions
Glob bool
Capabilities []string
// These keys are used at the top level to make the HCL nicer; we store in
// the Permissions object though
// the ACLPermissions object though
MinWrappingTTLHCL interface{} `hcl:"min_wrapping_ttl"`
MaxWrappingTTLHCL interface{} `hcl:"max_wrapping_ttl"`
AllowedParametersHCL map[string][]interface{} `hcl:"allowed_parameters"`
DeniedParametersHCL map[string][]interface{} `hcl:"denied_parameters"`
}
type Permissions struct {
type ACLPermissions struct {
CapabilitiesBitmap uint32
MinWrappingTTL time.Duration
MaxWrappingTTL time.Duration
@ -85,8 +111,8 @@ type Permissions struct {
DeniedParameters map[string][]interface{}
}
func (p *Permissions) Clone() (*Permissions, error) {
ret := &Permissions{
func (p *ACLPermissions) Clone() (*ACLPermissions, error) {
ret := &ACLPermissions{
CapabilitiesBitmap: p.CapabilitiesBitmap,
MinWrappingTTL: p.MinWrappingTTL,
MaxWrappingTTL: p.MaxWrappingTTL,
@ -122,7 +148,7 @@ func (p *Permissions) Clone() (*Permissions, error) {
// Parse is used to parse the specified ACL rules into an
// intermediary set of policies, before being compiled into
// the ACL
func Parse(rules string) (*Policy, error) {
func ParseACLPolicy(rules string) (*Policy, error) {
// Parse the rules
root, err := hcl.Parse(rules)
if err != nil {
@ -147,6 +173,7 @@ func Parse(rules string) (*Policy, error) {
// Create the initial policy and store the raw text of the rules
var p Policy
p.Raw = rules
p.Type = PolicyTypeACL
if err := hcl.DecodeObject(&p, list); err != nil {
return nil, fmt.Errorf("Failed to parse policy: %s", err)
}
@ -161,7 +188,7 @@ func Parse(rules string) (*Policy, error) {
}
func parsePaths(result *Policy, list *ast.ObjectList) error {
paths := make([]*PathCapabilities, 0, len(list.Items))
paths := make([]*PathRules, 0, len(list.Items))
for _, item := range list.Items {
key := "path"
if len(item.Keys) > 0 {
@ -179,10 +206,10 @@ func parsePaths(result *Policy, list *ast.ObjectList) error {
return multierror.Prefix(err, fmt.Sprintf("path %q:", key))
}
var pc PathCapabilities
var pc PathRules
// allocate memory so that DecodeObject can initialize the Permissions struct
pc.Permissions = new(Permissions)
// allocate memory so that DecodeObject can initialize the ACLPermissions struct
pc.Permissions = new(ACLPermissions)
pc.Prefix = key
if err := hcl.DecodeObject(&pc, item.Val); err != nil {

View File

@ -16,7 +16,7 @@ import (
const (
// policySubPath is the sub-path used for the policy store
// view. This is nested under the system view.
policySubPath = "policy/"
policyACLSubPath = "policy/"
// policyCacheSize is the number of policies that are kept cached
policyCacheSize = 1024
@ -125,39 +125,57 @@ var (
// PolicyStore is used to provide durable storage of policy, and to
// manage ACLs associated with them.
type PolicyStore struct {
view *BarrierView
aclView *BarrierView
tokenPoliciesLRU *lru.TwoQueueCache
lru *lru.TwoQueueCache
// This is used to ensure that writes to the store (acl/rgp) or to the egp
// path tree don't happen concurrently. We are okay reading stale data so
// long as there aren't concurrent writes.
modifyLock *sync.RWMutex
// Stores whether a token policy is ACL or RGP
policyTypeMap sync.Map
}
// PolicyEntry is used to store a policy by name
type PolicyEntry struct {
Version int
Raw string
Type PolicyType
}
// NewPolicyStore creates a new PolicyStore that is backed
// using a given view. It is used to durably store and manage named policies.
func NewPolicyStore(view *BarrierView, system logical.SystemView) *PolicyStore {
p := &PolicyStore{
view: view,
func NewPolicyStore(baseView *BarrierView, system logical.SystemView) *PolicyStore {
ps := &PolicyStore{
aclView: baseView.SubView(policyACLSubPath),
modifyLock: new(sync.RWMutex),
}
if !system.CachingDisabled() {
cache, _ := lru.New2Q(policyCacheSize)
ps.tokenPoliciesLRU = cache
cache, _ = lru.New2Q(policyCacheSize)
p.lru = cache
}
return p
keys, err := logical.CollectKeys(ps.aclView)
if err != nil {
vlogger.Error("error collecting acl policy keys", "error", err)
return nil
}
for _, key := range keys {
ps.policyTypeMap.Store(ps.sanitizeName(key), PolicyTypeACL)
}
// Special-case root; doesn't exist on disk but does need to be found
ps.policyTypeMap.Store("root", PolicyTypeACL)
return ps
}
// setupPolicyStore is used to initialize the policy store
// when the vault is being unsealed.
func (c *Core) setupPolicyStore() error {
// Create a sub-view
view := c.systemBarrierView.SubView(policySubPath)
// Create the policy store
sysView := &dynamicSystemView{core: c}
c.policyStore = NewPolicyStore(view, sysView)
c.policyStore = NewPolicyStore(c.systemBarrierView, sysView)
if c.replicationState.HasState(consts.ReplicationPerformanceSecondary) {
// Policies will sync from the primary
@ -165,7 +183,7 @@ func (c *Core) setupPolicyStore() error {
}
// Ensure that the default policy exists, and if not, create it
policy, err := c.policyStore.GetPolicy("default")
policy, err := c.policyStore.GetPolicy("default", PolicyTypeACL)
if err != nil {
return errwrap.Wrapf("error fetching default policy from store: {{err}}", err)
}
@ -177,7 +195,7 @@ func (c *Core) setupPolicyStore() error {
}
// Ensure that the cubbyhole response wrapping policy exists
policy, err = c.policyStore.GetPolicy(responseWrappingPolicyName)
policy, err = c.policyStore.GetPolicy(responseWrappingPolicyName, PolicyTypeACL)
if err != nil {
return errwrap.Wrapf("error fetching response-wrapping policy from store: {{err}}", err)
}
@ -198,24 +216,41 @@ func (c *Core) teardownPolicyStore() error {
return nil
}
func (ps *PolicyStore) invalidate(name string) {
if ps.lru == nil {
// Nothing to do if the cache is not used
func (ps *PolicyStore) invalidate(name string, policyType PolicyType) {
// This may come with a prefixed "/" due to joining the file path
saneName := strings.TrimPrefix(name, "/")
// We don't lock before removing from the LRU here because the worst that
// can happen is we load again if something since added it
switch policyType {
case PolicyTypeACL:
if ps.tokenPoliciesLRU != nil {
ps.tokenPoliciesLRU.Remove(saneName)
}
default:
// Can't do anything
return
}
// This may come with a prefixed "/" due to joining the file path
ps.lru.Remove(strings.TrimPrefix(name, "/"))
// Force a reload
p, err := ps.GetPolicy(name, policyType)
if err != nil {
vlogger.Error("policy: error fetching policy after invalidation", "name", saneName)
}
}
// SetPolicy is used to create or update the given policy
func (ps *PolicyStore) SetPolicy(p *Policy) error {
defer metrics.MeasureSince([]string{"policy", "set_policy"}, time.Now())
if p == nil {
return fmt.Errorf("nil policy passed in for storage")
}
if p.Name == "" {
return fmt.Errorf("policy name missing")
}
// Policies are normalized to lower-case
p.Name = strings.ToLower(strings.TrimSpace(p.Name))
p.Name = ps.sanitizeName(p.Name)
if strutil.StrListContains(immutablePolicies, p.Name) {
return fmt.Errorf("cannot update %s policy", p.Name)
}
@ -224,70 +259,131 @@ func (ps *PolicyStore) SetPolicy(p *Policy) error {
}
func (ps *PolicyStore) setPolicyInternal(p *Policy) error {
ps.modifyLock.Lock()
defer ps.modifyLock.Unlock()
// Create the entry
entry, err := logical.StorageEntryJSON(p.Name, &PolicyEntry{
Version: 2,
Raw: p.Raw,
Type: p.Type,
})
if err != nil {
return fmt.Errorf("failed to create entry: %v", err)
}
if err := ps.view.Put(entry); err != nil {
return fmt.Errorf("failed to persist policy: %v", err)
switch p.Type {
case PolicyTypeACL:
rgp, err := ps.rgpView.Get(entry.Key)
if err != nil {
return errwrap.Wrapf("failed looking up conflicting policy: {{err}}", err)
}
if rgp != nil {
return fmt.Errorf("cannot reuse policy names between ACLs and RGPs")
}
if err := ps.aclView.Put(entry); err != nil {
return errwrap.Wrapf("failed to persist policy: {{err}}", err)
}
ps.policyTypeMap.Store(p.Name, PolicyTypeACL)
if ps.tokenPoliciesLRU != nil {
// Update the LRU cache
ps.tokenPoliciesLRU.Add(p.Name, p)
}
if ps.lru != nil {
// Update the LRU cache
ps.lru.Add(p.Name, p)
default:
return fmt.Errorf("unknown policy type, cannot set")
}
return nil
}
// GetPolicy is used to fetch the named policy
func (ps *PolicyStore) GetPolicy(name string) (*Policy, error) {
func (ps *PolicyStore) GetPolicy(name string, policyType PolicyType) (*Policy, error) {
defer metrics.MeasureSince([]string{"policy", "get_policy"}, time.Now())
if ps.lru != nil {
// Policies are normalized to lower-case
name = ps.sanitizeName(name)
var cache *lru.TwoQueueCache
var view *BarrierView
switch policyType {
case PolicyTypeACL:
cache = ps.tokenPoliciesLRU
view = ps.aclView
case PolicyTypeToken:
cache = ps.tokenPoliciesLRU
val, ok := ps.policyTypeMap.Load(name)
if !ok {
// Doesn't exist
return nil, nil
}
policyType = val.(PolicyType)
switch policyType {
case PolicyTypeACL:
view = ps.aclView
default:
return nil, fmt.Errorf("invalid type of policy in type map: %s", policyType)
}
}
if cache != nil {
// Check for cached policy
if raw, ok := ps.lru.Get(name); ok {
if raw, ok := cache.Get(name); ok {
return raw.(*Policy), nil
}
}
// Policies are normalized to lower-case
name = strings.ToLower(strings.TrimSpace(name))
// Special case the root policy
if name == "root" {
if policyType == PolicyTypeACL && name == "root" {
p := &Policy{Name: "root"}
if ps.lru != nil {
ps.lru.Add(p.Name, p)
if cache != nil {
cache.Add(p.Name, p)
}
return p, nil
}
// Load the policy in
out, err := ps.view.Get(name)
if err != nil {
return nil, fmt.Errorf("failed to read policy: %v", err)
ps.modifyLock.Lock()
defer ps.modifyLock.Unlock()
// See if anything has added it since we got the lock
if cache != nil {
if raw, ok := cache.Get(name); ok {
return raw.(*Policy), nil
}
}
out, err := view.Get(name)
if err != nil {
return nil, errwrap.Wrapf("failed to read policy: {{err}}", err)
}
if out == nil {
return nil, nil
}
// In Vault 0.1.X we stored the raw policy, but in
// Vault 0.2 we switch to the PolicyEntry
policyEntry := new(PolicyEntry)
var policy *Policy
if err := out.DecodeJSON(policyEntry); err == nil {
// Parse normally
p, err := Parse(policyEntry.Raw)
policy := new(Policy)
err = out.DecodeJSON(policyEntry)
if err != nil {
return nil, fmt.Errorf("failed to parse policy: %v", err)
return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err)
}
p.Name = name
policy = p
// Set these up here so that they're available for loading into
// Sentinel
policy.Name = name
policy.Raw = policyEntry.Raw
policy.Type = policyEntry.Type
switch policyEntry.Type {
case PolicyTypeACL:
// Parse normally
p, err := ParseACLPolicy(policyEntry.Raw)
if err != nil {
return nil, errwrap.Wrapf("failed to parse policy: {{err}}", err)
}
policy.Paths = p.Paths
// Reset this in case they set the name in the policy itself
policy.Name = name
ps.policyTypeMap.Store(name, PolicyTypeACL)
} else {
// On error, attempt to use V1 parsing
p, err := Parse(string(out.Value))
@ -300,24 +396,34 @@ func (ps *PolicyStore) GetPolicy(name string) (*Policy, error) {
for _, pp := range p.Paths {
pp.Glob = true
}
policy = p
default:
return nil, fmt.Errorf("unknown policy type %q", policyEntry.Type.String())
}
if ps.lru != nil {
if cache != nil {
// Update the LRU cache
ps.lru.Add(name, policy)
cache.Add(name, policy)
}
return policy, nil
}
// ListPolicies is used to list the available policies
func (ps *PolicyStore) ListPolicies() ([]string, error) {
func (ps *PolicyStore) ListPolicies(policyType PolicyType) ([]string, error) {
defer metrics.MeasureSince([]string{"policy", "list_policies"}, time.Now())
// Scan the view, since the policy names are the same as the
// key names.
keys, err := logical.CollectKeys(ps.view)
var keys []string
var err error
switch policyType {
case PolicyTypeACL:
keys, err = logical.CollectKeys(ps.aclView)
default:
return nil, fmt.Errorf("unknown policy type %s", policyType)
}
// We only have non-assignable ACL policies at the moment
for _, nonAssignable := range nonAssignablePolicies {
deleteIndex := -1
// Find indices of non-assignable policies in keys
@ -338,24 +444,36 @@ func (ps *PolicyStore) ListPolicies() ([]string, error) {
}
// DeletePolicy is used to delete the named policy
func (ps *PolicyStore) DeletePolicy(name string) error {
func (ps *PolicyStore) DeletePolicy(name string, policyType PolicyType) error {
defer metrics.MeasureSince([]string{"policy", "delete_policy"}, time.Now())
ps.modifyLock.Lock()
defer ps.modifyLock.Unlock()
// Policies are normalized to lower-case
name = strings.ToLower(strings.TrimSpace(name))
name = ps.sanitizeName(name)
switch policyType {
case PolicyTypeACL:
if strutil.StrListContains(immutablePolicies, name) {
return fmt.Errorf("cannot delete %s policy", name)
}
if name == "default" {
return fmt.Errorf("cannot delete default policy")
}
if err := ps.view.Delete(name); err != nil {
return fmt.Errorf("failed to delete policy: %v", err)
err := ps.aclView.Delete(name)
if err != nil {
return errwrap.Wrapf("failed to delete policy: {{err}}", err)
}
if ps.lru != nil {
if ps.tokenPoliciesLRU != nil {
// Clear the cache
ps.lru.Remove(name)
ps.tokenPoliciesLRU.Remove(name)
}
ps.policyTypeMap.Delete(name)
}
return nil
}
@ -364,25 +482,25 @@ func (ps *PolicyStore) DeletePolicy(name string) error {
// named policies.
func (ps *PolicyStore) ACL(names ...string) (*ACL, error) {
// Fetch the policies
var policy []*Policy
var policies []*Policy
for _, name := range names {
p, err := ps.GetPolicy(name)
p, err := ps.GetPolicy(name, PolicyTypeToken)
if err != nil {
return nil, fmt.Errorf("failed to get policy '%s': %v", name, err)
return nil, errwrap.Wrapf("failed to get policy: {{err}}", err)
}
policy = append(policy, p)
policies = append(policies, p)
}
// Construct the ACL
acl, err := NewACL(policy)
acl, err := NewACL(policies)
if err != nil {
return nil, fmt.Errorf("failed to construct ACL: %v", err)
return nil, errwrap.Wrapf("failed to construct ACL: {{err}}", err)
}
return acl, nil
}
func (ps *PolicyStore) createDefaultPolicy() error {
policy, err := Parse(defaultPolicy)
policy, err := ParseACLPolicy(defaultPolicy)
if err != nil {
return errwrap.Wrapf("error parsing default policy: {{err}}", err)
}
@ -392,11 +510,12 @@ func (ps *PolicyStore) createDefaultPolicy() error {
}
policy.Name = "default"
policy.Type = PolicyTypeACL
return ps.setPolicyInternal(policy)
}
func (ps *PolicyStore) createResponseWrappingPolicy() error {
policy, err := Parse(responseWrappingPolicy)
policy, err := ParseACLPolicy(responseWrappingPolicy)
if err != nil {
return errwrap.Wrapf(fmt.Sprintf("error parsing %s policy: {{err}}", responseWrappingPolicyName), err)
}
@ -406,5 +525,10 @@ func (ps *PolicyStore) createResponseWrappingPolicy() error {
}
policy.Name = responseWrappingPolicyName
policy.Type = PolicyTypeACL
return ps.setPolicyInternal(policy)
}
// sanitizeName normalizes a policy name for storage and lookup:
// surrounding whitespace is stripped and the result is lower-cased,
// since policy names are case-insensitive throughout the store.
func (ps *PolicyStore) sanitizeName(name string) string {
	trimmed := strings.TrimSpace(name)
	return strings.ToLower(trimmed)
}

View File

@ -27,7 +27,7 @@ func TestPolicyStore_Root(t *testing.T) {
ps := mockPolicyStore(t)
// Get should return a special policy
p, err := ps.GetPolicy("root")
p, err := ps.GetPolicy("root", PolicyTypeToken)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -45,7 +45,7 @@ func TestPolicyStore_Root(t *testing.T) {
}
// Delete should fail
err = ps.DeletePolicy("root")
err = ps.DeletePolicy("root", PolicyTypeACL)
if err.Error() != "cannot delete root policy" {
t.Fatalf("err: %v", err)
}
@ -61,7 +61,7 @@ func TestPolicyStore_CRUD(t *testing.T) {
func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
// Get should return nothing
p, err := ps.GetPolicy("Dev")
p, err := ps.GetPolicy("Dev", PolicyTypeToken)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -70,13 +70,13 @@ func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
}
// Delete should be no-op
err = ps.DeletePolicy("deV")
err = ps.DeletePolicy("deV", PolicyTypeACL)
if err != nil {
t.Fatalf("err: %v", err)
}
// List should be blank
out, err := ps.ListPolicies()
out, err := ps.ListPolicies(PolicyTypeACL)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -85,14 +85,14 @@ func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
}
// Set should work
policy, _ := Parse(aclPolicy)
policy, _ := ParseACLPolicy(aclPolicy)
err = ps.SetPolicy(policy)
if err != nil {
t.Fatalf("err: %v", err)
}
// Get should work
p, err = ps.GetPolicy("dEv")
p, err = ps.GetPolicy("dEv", PolicyTypeToken)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -101,7 +101,7 @@ func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
}
// List should be one element
out, err = ps.ListPolicies()
out, err = ps.ListPolicies(PolicyTypeACL)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -110,13 +110,13 @@ func testPolicyStore_CRUD(t *testing.T, ps *PolicyStore) {
}
// Delete should be clear the entry
err = ps.DeletePolicy("Dev")
err = ps.DeletePolicy("Dev", PolicyTypeACL)
if err != nil {
t.Fatalf("err: %v", err)
}
// Get should fail
p, err = ps.GetPolicy("deV")
p, err = ps.GetPolicy("deV", PolicyTypeToken)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -134,7 +134,7 @@ func TestPolicyStore_Predefined(t *testing.T) {
t.Fatalf("err: %v", err)
}
// List should be two elements
out, err := core.policyStore.ListPolicies()
out, err := core.policyStore.ListPolicies(PolicyTypeACL)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -143,17 +143,23 @@ func TestPolicyStore_Predefined(t *testing.T) {
t.Fatalf("bad: %v", out)
}
pCubby, err := core.policyStore.GetPolicy("response-wrapping")
pCubby, err := core.policyStore.GetPolicy("response-wrapping", PolicyTypeToken)
if err != nil {
t.Fatalf("err: %v", err)
}
if pCubby == nil {
t.Fatal("nil cubby policy")
}
if pCubby.Raw != responseWrappingPolicy {
t.Fatalf("bad: expected\n%s\ngot\n%s\n", responseWrappingPolicy, pCubby.Raw)
}
pRoot, err := core.policyStore.GetPolicy("root")
pRoot, err := core.policyStore.GetPolicy("root", PolicyTypeToken)
if err != nil {
t.Fatalf("err: %v", err)
}
if pRoot == nil {
t.Fatal("nil root policy")
}
err = core.policyStore.SetPolicy(pCubby)
if err == nil {
@ -163,11 +169,11 @@ func TestPolicyStore_Predefined(t *testing.T) {
if err == nil {
t.Fatalf("expected err setting %s", pRoot.Name)
}
err = core.policyStore.DeletePolicy(pCubby.Name)
err = core.policyStore.DeletePolicy(pCubby.Name, PolicyTypeACL)
if err == nil {
t.Fatalf("expected err deleting %s", pCubby.Name)
}
err = core.policyStore.DeletePolicy(pRoot.Name)
err = core.policyStore.DeletePolicy(pRoot.Name, PolicyTypeACL)
if err == nil {
t.Fatalf("expected err deleting %s", pRoot.Name)
}
@ -176,12 +182,12 @@ func TestPolicyStore_Predefined(t *testing.T) {
func TestPolicyStore_ACL(t *testing.T) {
ps := mockPolicyStore(t)
policy, _ := Parse(aclPolicy)
policy, _ := ParseACLPolicy(aclPolicy)
err := ps.SetPolicy(policy)
if err != nil {
t.Fatalf("err: %v", err)
}
policy, _ = Parse(aclPolicy2)
policy, _ = ParseACLPolicy(aclPolicy2)
err = ps.SetPolicy(policy)
if err != nil {
t.Fatalf("err: %v", err)
@ -193,26 +199,3 @@ func TestPolicyStore_ACL(t *testing.T) {
}
testLayeredACL(t, acl)
}
func TestPolicyStore_v1Upgrade(t *testing.T) {
ps := mockPolicyStore(t)
// Put a V1 record
raw := `path "foo" { policy = "read" }`
ps.view.Put(&logical.StorageEntry{Key: "old", Value: []byte(raw)})
// Do a read
p, err := ps.GetPolicy("old")
if err != nil {
t.Fatalf("err: %v", err)
}
if p == nil || len(p.Paths) != 1 {
t.Fatalf("bad policy: %#v", p)
}
// Check that glob is enabled
if !p.Paths[0].Glob {
t.Fatalf("should enable glob")
}
}

View File

@ -88,7 +88,7 @@ path "test/types" {
`)
func TestPolicy_Parse(t *testing.T) {
p, err := Parse(rawPolicy)
p, err := ParseACLPolicy(rawPolicy)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -97,17 +97,17 @@ func TestPolicy_Parse(t *testing.T) {
t.Fatalf("bad name: %q", p.Name)
}
expect := []*PathCapabilities{
&PathCapabilities{
expect := []*PathRules{
&PathRules{
Prefix: "",
Policy: "deny",
Capabilities: []string{
"deny",
},
Permissions: &Permissions{CapabilitiesBitmap: DenyCapabilityInt},
Permissions: &ACLPermissions{CapabilitiesBitmap: DenyCapabilityInt},
Glob: true,
},
&PathCapabilities{
&PathRules{
Prefix: "stage/",
Policy: "sudo",
Capabilities: []string{
@ -118,22 +118,22 @@ func TestPolicy_Parse(t *testing.T) {
"list",
"sudo",
},
Permissions: &Permissions{
Permissions: &ACLPermissions{
CapabilitiesBitmap: (CreateCapabilityInt | ReadCapabilityInt | UpdateCapabilityInt | DeleteCapabilityInt | ListCapabilityInt | SudoCapabilityInt),
},
Glob: true,
},
&PathCapabilities{
&PathRules{
Prefix: "prod/version",
Policy: "read",
Capabilities: []string{
"read",
"list",
},
Permissions: &Permissions{CapabilitiesBitmap: (ReadCapabilityInt | ListCapabilityInt)},
Permissions: &ACLPermissions{CapabilitiesBitmap: (ReadCapabilityInt | ListCapabilityInt)},
Glob: false,
},
&PathCapabilities{
&PathRules{
Prefix: "foo/bar",
Policy: "read",
Capabilities: []string{
@ -142,14 +142,14 @@ func TestPolicy_Parse(t *testing.T) {
},
MinWrappingTTLHCL: 300,
MaxWrappingTTLHCL: "1h",
Permissions: &Permissions{
Permissions: &ACLPermissions{
CapabilitiesBitmap: (ReadCapabilityInt | ListCapabilityInt),
MinWrappingTTL: 300 * time.Second,
MaxWrappingTTL: 3600 * time.Second,
},
Glob: false,
},
&PathCapabilities{
&PathRules{
Prefix: "foo/bar",
Policy: "",
Capabilities: []string{
@ -158,14 +158,14 @@ func TestPolicy_Parse(t *testing.T) {
},
MinWrappingTTLHCL: "300s",
MaxWrappingTTLHCL: 3600,
Permissions: &Permissions{
Permissions: &ACLPermissions{
CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
MinWrappingTTL: 300 * time.Second,
MaxWrappingTTL: 3600 * time.Second,
},
Glob: false,
},
&PathCapabilities{
&PathRules{
Prefix: "foo/bar",
Policy: "",
Capabilities: []string{
@ -173,13 +173,13 @@ func TestPolicy_Parse(t *testing.T) {
"sudo",
},
AllowedParametersHCL: map[string][]interface{}{"zip": {}, "zap": {}},
Permissions: &Permissions{
Permissions: &ACLPermissions{
CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
AllowedParameters: map[string][]interface{}{"zip": {}, "zap": {}},
},
Glob: false,
},
&PathCapabilities{
&PathRules{
Prefix: "baz/bar",
Policy: "",
Capabilities: []string{
@ -187,13 +187,13 @@ func TestPolicy_Parse(t *testing.T) {
"sudo",
},
DeniedParametersHCL: map[string][]interface{}{"zip": []interface{}{}, "zap": []interface{}{}},
Permissions: &Permissions{
Permissions: &ACLPermissions{
CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
DeniedParameters: map[string][]interface{}{"zip": []interface{}{}, "zap": []interface{}{}},
},
Glob: false,
},
&PathCapabilities{
&PathRules{
Prefix: "biz/bar",
Policy: "",
Capabilities: []string{
@ -202,14 +202,14 @@ func TestPolicy_Parse(t *testing.T) {
},
AllowedParametersHCL: map[string][]interface{}{"zim": {}, "zam": {}},
DeniedParametersHCL: map[string][]interface{}{"zip": {}, "zap": {}},
Permissions: &Permissions{
Permissions: &ACLPermissions{
CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
AllowedParameters: map[string][]interface{}{"zim": {}, "zam": {}},
DeniedParameters: map[string][]interface{}{"zip": {}, "zap": {}},
},
Glob: false,
},
&PathCapabilities{
&PathRules{
Prefix: "test/types",
Policy: "",
Capabilities: []string{
@ -218,7 +218,7 @@ func TestPolicy_Parse(t *testing.T) {
},
AllowedParametersHCL: map[string][]interface{}{"map": []interface{}{map[string]interface{}{"good": "one"}}, "int": []interface{}{1, 2}},
DeniedParametersHCL: map[string][]interface{}{"string": []interface{}{"test"}, "bool": []interface{}{false}},
Permissions: &Permissions{
Permissions: &ACLPermissions{
CapabilitiesBitmap: (CreateCapabilityInt | SudoCapabilityInt),
AllowedParameters: map[string][]interface{}{"map": []interface{}{map[string]interface{}{"good": "one"}}, "int": []interface{}{1, 2}},
DeniedParameters: map[string][]interface{}{"string": []interface{}{"test"}, "bool": []interface{}{false}},
@ -232,7 +232,7 @@ func TestPolicy_Parse(t *testing.T) {
}
func TestPolicy_ParseBadRoot(t *testing.T) {
_, err := Parse(strings.TrimSpace(`
_, err := ParseACLPolicy(strings.TrimSpace(`
name = "test"
bad = "foo"
nope = "yes"
@ -251,7 +251,7 @@ nope = "yes"
}
func TestPolicy_ParseBadPath(t *testing.T) {
_, err := Parse(strings.TrimSpace(`
_, err := ParseACLPolicy(strings.TrimSpace(`
path "/" {
capabilities = ["read"]
capabilites = ["read"]
@ -267,7 +267,7 @@ path "/" {
}
func TestPolicy_ParseBadPolicy(t *testing.T) {
_, err := Parse(strings.TrimSpace(`
_, err := ParseACLPolicy(strings.TrimSpace(`
path "/" {
policy = "banana"
}
@ -282,7 +282,7 @@ path "/" {
}
func TestPolicy_ParseBadWrapping(t *testing.T) {
_, err := Parse(strings.TrimSpace(`
_, err := ParseACLPolicy(strings.TrimSpace(`
path "/" {
policy = "read"
min_wrapping_ttl = 400
@ -299,7 +299,7 @@ path "/" {
}
func TestPolicy_ParseBadCapabilities(t *testing.T) {
_, err := Parse(strings.TrimSpace(`
_, err := ParseACLPolicy(strings.TrimSpace(`
path "/" {
capabilities = ["read", "banana"]
}

View File

@ -41,6 +41,11 @@ type RekeyBackup struct {
Keys map[string][]string
}
// RekeyThreshold returns the secret threshold for the current seal
// config. This threshold can either be the barrier key threshold or
// the recovery key threshold, depending on whether rekey is being
// performed on the recovery key, or whether the seal supports
// recovery keys.
func (c *Core) RekeyThreshold(recovery bool) (int, error) {
c.stateLock.RLock()
defer c.stateLock.RUnlock()
@ -56,7 +61,10 @@ func (c *Core) RekeyThreshold(recovery bool) (int, error) {
var config *SealConfig
var err error
if recovery {
// If we are rekeying the recovery key, or if the seal supports
// recovery keys and we are rekeying the barrier key, we use the
// recovery config as the threshold instead.
if recovery || c.seal.RecoveryKeySupported() {
config, err = c.seal.RecoveryConfig()
} else {
config, err = c.seal.BarrierConfig()
@ -68,7 +76,7 @@ func (c *Core) RekeyThreshold(recovery bool) (int, error) {
return config.SecretThreshold, nil
}
// RekeyProgress is used to return the rekey progress (num shares)
// RekeyProgress is used to return the rekey progress (num shares).
func (c *Core) RekeyProgress(recovery bool) (int, error) {
c.stateLock.RLock()
defer c.stateLock.RUnlock()
@ -117,6 +125,8 @@ func (c *Core) RekeyConfig(recovery bool) (*SealConfig, error) {
return conf, nil
}
// RekeyInit will either initialize the rekey of barrier or recovery key.
// recovery determines whether this is a rekey on the barrier or recovery key.
func (c *Core) RekeyInit(config *SealConfig, recovery bool) error {
if recovery {
return c.RecoveryRekeyInit(config)
@ -138,6 +148,10 @@ func (c *Core) BarrierRekeyInit(config *SealConfig) error {
}
}
if c.seal.RecoveryKeySupported() && c.seal.RecoveryType() == config.Type {
c.logger.Debug("core: using recovery seal configuration to rekey barrier key")
}
// Check if the seal configuration is valid
if err := config.Validate(); err != nil {
c.logger.Error("core: invalid rekey seal configuration", "error", err)
@ -228,6 +242,7 @@ func (c *Core) RecoveryRekeyInit(config *SealConfig) error {
return nil
}
// RekeyUpdate is used to provide a new key part for the barrier or recovery key.
func (c *Core) RekeyUpdate(key []byte, nonce string, recovery bool) (*RekeyResult, error) {
if recovery {
return c.RecoveryRekeyUpdate(key, nonce)
@ -235,7 +250,11 @@ func (c *Core) RekeyUpdate(key []byte, nonce string, recovery bool) (*RekeyResul
return c.BarrierRekeyUpdate(key, nonce)
}
// BarrierRekeyUpdate is used to provide a new key part
// BarrierRekeyUpdate is used to provide a new key part. Barrier rekey can be done
// with unseal keys, or recovery keys if that's supported and we are storing the barrier
// key.
//
// N.B.: If recovery keys are used to rekey, the new barrier key shares are not returned.
func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error) {
// Ensure we are already unsealed
c.stateLock.RLock()
@ -261,7 +280,15 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error
defer c.rekeyLock.Unlock()
// Get the seal configuration
existingConfig, err := c.seal.BarrierConfig()
var existingConfig *SealConfig
var err error
var useRecovery bool // Determines whether recovery key is being used to rekey the master key
if c.seal.StoredKeysSupported() && c.seal.RecoveryKeySupported() {
existingConfig, err = c.seal.RecoveryConfig()
useRecovery = true
} else {
existingConfig, err = c.seal.BarrierConfig()
}
if err != nil {
return nil, err
}
@ -298,23 +325,30 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error
return nil, nil
}
// Recover the master key
var masterKey []byte
// Recover the master key or recovery key
var recoveredKey []byte
if existingConfig.SecretThreshold == 1 {
masterKey = c.barrierRekeyProgress[0]
recoveredKey = c.barrierRekeyProgress[0]
c.barrierRekeyProgress = nil
} else {
masterKey, err = shamir.Combine(c.barrierRekeyProgress)
recoveredKey, err = shamir.Combine(c.barrierRekeyProgress)
c.barrierRekeyProgress = nil
if err != nil {
return nil, fmt.Errorf("failed to compute master key: %v", err)
}
}
if err := c.barrier.VerifyMaster(masterKey); err != nil {
if useRecovery {
if err := c.seal.VerifyRecoveryKey(recoveredKey); err != nil {
c.logger.Error("core: rekey aborted, recovery key verification failed", "error", err)
return nil, err
}
} else {
if err := c.barrier.VerifyMaster(recoveredKey); err != nil {
c.logger.Error("core: rekey aborted, master key verification failed", "error", err)
return nil, err
}
}
// Generate a new master key
newMasterKey, err := c.barrier.GenerateKey()
@ -323,11 +357,11 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error
return nil, fmt.Errorf("master key generation failed: %v", err)
}
// Return the master key if only a single key part is used
results := &RekeyResult{
Backup: c.barrierRekeyConfig.Backup,
}
// Set result.SecretShares to the master key if only a single key
// part is used -- no Shamir split required.
if c.barrierRekeyConfig.SecretShares == 1 {
results.SecretShares = append(results.SecretShares, newMasterKey)
} else {
@ -343,13 +377,14 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error
// If we are storing any shares, add them to the shares to store and remove
// from the returned keys
var keysToStore [][]byte
if c.barrierRekeyConfig.StoredShares > 0 {
if c.seal.StoredKeysSupported() && c.barrierRekeyConfig.StoredShares > 0 {
for i := 0; i < c.barrierRekeyConfig.StoredShares; i++ {
keysToStore = append(keysToStore, results.SecretShares[0])
results.SecretShares = results.SecretShares[1:]
}
}
// If PGP keys are passed in, encrypt shares with corresponding PGP keys.
if len(c.barrierRekeyConfig.PGPKeys) > 0 {
hexEncodedShares := make([][]byte, len(results.SecretShares))
for i, _ := range results.SecretShares {
@ -360,6 +395,7 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error
return nil, err
}
// If backup is enabled, store backup info in vault.coreBarrierUnsealKeysBackupPath
if c.barrierRekeyConfig.Backup {
backupInfo := map[string][]string{}
for i := 0; i < len(results.PGPFingerprints); i++ {
@ -453,21 +489,16 @@ func (c *Core) RecoveryRekeyUpdate(key []byte, nonce string) (*RekeyResult, erro
defer c.rekeyLock.Unlock()
// Get the seal configuration
barrierConfig, err := c.seal.BarrierConfig()
if err != nil {
return nil, err
}
// Ensure the barrier is initialized
if barrierConfig == nil {
return nil, ErrNotInit
}
existingConfig, err := c.seal.RecoveryConfig()
if err != nil {
return nil, err
}
// Ensure the seal is initialized
if existingConfig == nil {
return nil, ErrNotInit
}
// Ensure a rekey is in progress
if c.recoveryRekeyConfig == nil {
return nil, fmt.Errorf("no rekey in progress")
@ -496,12 +527,12 @@ func (c *Core) RecoveryRekeyUpdate(key []byte, nonce string) (*RekeyResult, erro
}
// Recover the master key
var masterKey []byte
var recoveryKey []byte
if existingConfig.SecretThreshold == 1 {
masterKey = c.recoveryRekeyProgress[0]
recoveryKey = c.recoveryRekeyProgress[0]
c.recoveryRekeyProgress = nil
} else {
masterKey, err = shamir.Combine(c.recoveryRekeyProgress)
recoveryKey, err = shamir.Combine(c.recoveryRekeyProgress)
c.recoveryRekeyProgress = nil
if err != nil {
return nil, fmt.Errorf("failed to compute recovery key: %v", err)
@ -509,7 +540,7 @@ func (c *Core) RecoveryRekeyUpdate(key []byte, nonce string) (*RekeyResult, erro
}
// Verify the recovery key
if err := c.seal.VerifyRecoveryKey(masterKey); err != nil {
if err := c.seal.VerifyRecoveryKey(recoveryKey); err != nil {
c.logger.Error("core: rekey aborted, recovery key verification failed", "error", err)
return nil, err
}

View File

@ -168,7 +168,7 @@ func testCore_Rekey_Update_Common(t *testing.T, c *Core, keys [][]byte, root str
t.Fatalf("bad: no rekey config received")
}
// Provide the master
// Provide the master/recovery keys
var result *RekeyResult
for _, key := range keys {
result, err = c.RekeyUpdate(key, rkconf.Nonce, recovery)
@ -180,7 +180,7 @@ func testCore_Rekey_Update_Common(t *testing.T, c *Core, keys [][]byte, root str
}
}
if result == nil || len(result.SecretShares) != newConf.SecretShares {
t.Fatalf("Bad: %#v", result)
t.Fatalf("rekey update error: %#v", result)
}
// Should be no progress
@ -189,16 +189,16 @@ func testCore_Rekey_Update_Common(t *testing.T, c *Core, keys [][]byte, root str
t.Fatalf("err: %v", err)
}
if num != 0 {
t.Fatalf("bad: %d", num)
t.Fatalf("rekey progress error: %d", num)
}
// Should be no config
conf, err := c.RekeyConfig(recovery)
if err != nil {
t.Fatalf("err: %v", err)
t.Fatalf("rekey config error: %v", err)
}
if conf != nil {
t.Fatalf("bad: %v", conf)
t.Fatalf("rekey config should be nil, got: %v", conf)
}
// SealConfig should update
@ -209,7 +209,7 @@ func testCore_Rekey_Update_Common(t *testing.T, c *Core, keys [][]byte, root str
sealConf, err = c.seal.BarrierConfig()
}
if err != nil {
t.Fatalf("err: %v", err)
t.Fatalf("seal config retrieval error: %v", err)
}
if sealConf == nil {
t.Fatal("seal configuration is nil")
@ -226,7 +226,7 @@ func testCore_Rekey_Update_Common(t *testing.T, c *Core, keys [][]byte, root str
if err != nil {
t.Fatalf("err: %v", err)
}
for i := 0; i < 3; i++ {
for i := 0; i < newConf.SecretThreshold; i++ {
_, err = TestCoreUnseal(c, TestKeyCopy(result.SecretShares[i]))
if err != nil {
t.Fatalf("err: %v", err)
@ -238,6 +238,13 @@ func testCore_Rekey_Update_Common(t *testing.T, c *Core, keys [][]byte, root str
}
// Start another rekey, this time we require a quorum!
// Skip this step if we are rekeying the barrier key with
// recovery keys, since a new rekey should still be using
// the same set of recovery keys.
if !recovery && c.seal.RecoveryKeySupported() {
return
}
newConf = &SealConfig{
Type: expType,
SecretThreshold: 1,

View File

@ -152,9 +152,11 @@ func (c *Core) startForwarding() error {
}
c.logger.Trace("core: got request forwarding connection")
c.clusterParamsLock.RLock()
go fws.ServeConn(conn, &http2.ServeConnOpts{
Handler: c.rpcServer,
})
c.clusterParamsLock.RUnlock()
default:
c.logger.Debug("core: unknown negotiated protocol on cluster port")

View File

@ -1,16 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: request_forwarding_service.proto
/*
Package vault is a generated protocol buffer package.
It is generated from these files:
request_forwarding_service.proto
It has these top-level messages:
EchoRequest
EchoReply
*/
package vault
import proto "github.com/golang/protobuf/proto"
@ -28,21 +18,20 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type EchoRequest struct {
Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
// ClusterAddr is used to send up a standby node's address to the active
// node upon heartbeat
ClusterAddr string `protobuf:"bytes,2,opt,name=cluster_addr,json=clusterAddr" json:"cluster_addr,omitempty"`
// ClusterAddrs is used to send up a list of cluster addresses to a dr
// primary from a dr secondary
ClusterAddrs []string `protobuf:"bytes,3,rep,name=cluster_addrs,json=clusterAddrs" json:"cluster_addrs,omitempty"`
}
func (m *EchoRequest) Reset() { *m = EchoRequest{} }
func (m *EchoRequest) String() string { return proto.CompactTextString(m) }
func (*EchoRequest) ProtoMessage() {}
func (*EchoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*EchoRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} }
func (m *EchoRequest) GetMessage() string {
if m != nil {
@ -58,6 +47,13 @@ func (m *EchoRequest) GetClusterAddr() string {
return ""
}
func (m *EchoRequest) GetClusterAddrs() []string {
if m != nil {
return m.ClusterAddrs
}
return nil
}
type EchoReply struct {
Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
ClusterAddrs []string `protobuf:"bytes,2,rep,name=cluster_addrs,json=clusterAddrs" json:"cluster_addrs,omitempty"`
@ -66,7 +62,7 @@ type EchoReply struct {
func (m *EchoReply) Reset() { *m = EchoReply{} }
func (m *EchoReply) String() string { return proto.CompactTextString(m) }
func (*EchoReply) ProtoMessage() {}
func (*EchoReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (*EchoReply) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} }
func (m *EchoReply) GetMessage() string {
if m != nil {
@ -192,24 +188,25 @@ var _RequestForwarding_serviceDesc = grpc.ServiceDesc{
Metadata: "request_forwarding_service.proto",
}
func init() { proto.RegisterFile("request_forwarding_service.proto", fileDescriptor0) }
func init() { proto.RegisterFile("request_forwarding_service.proto", fileDescriptor1) }
var fileDescriptor0 = []byte{
// 254 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0x3d, 0x4f, 0xc3, 0x30,
0x10, 0x86, 0xdb, 0xf2, 0xa5, 0xb8, 0x05, 0x81, 0x61, 0x88, 0x32, 0x85, 0xb0, 0x74, 0x72, 0x24,
0x58, 0x58, 0x18, 0x18, 0x60, 0xe8, 0x98, 0x3f, 0x10, 0xb9, 0xf6, 0x11, 0x47, 0x72, 0x6b, 0x73,
0xe7, 0x14, 0x65, 0xe5, 0x97, 0x23, 0x92, 0x94, 0xa6, 0x0b, 0xe3, 0xbd, 0x27, 0x3d, 0xf7, 0xdc,
0xcb, 0x52, 0x84, 0xcf, 0x06, 0x28, 0x94, 0x1f, 0x0e, 0xbf, 0x24, 0xea, 0x7a, 0x5b, 0x95, 0x04,
0xb8, 0xab, 0x15, 0x08, 0x8f, 0x2e, 0x38, 0x7e, 0xb6, 0x93, 0x8d, 0x0d, 0xc9, 0x73, 0x55, 0x07,
0xd3, 0xac, 0x85, 0x72, 0x9b, 0xdc, 0x48, 0x32, 0xb5, 0x72, 0xe8, 0xf3, 0x6e, 0x97, 0x1b, 0xb0,
0x1e, 0x30, 0x3f, 0x20, 0xf2, 0xd0, 0x7a, 0xa0, 0x1e, 0x90, 0xad, 0xd8, 0xfc, 0x4d, 0x19, 0x57,
0xf4, 0x87, 0x78, 0xcc, 0x2e, 0x36, 0x40, 0x24, 0x2b, 0x88, 0xa7, 0xe9, 0x74, 0x19, 0x15, 0xfb,
0x91, 0xdf, 0xb3, 0x85, 0xb2, 0x0d, 0x05, 0xc0, 0x52, 0x6a, 0x8d, 0xf1, 0xac, 0x5b, 0xcf, 0x87,
0xec, 0x55, 0x6b, 0xcc, 0x56, 0x2c, 0xea, 0x59, 0xde, 0xb6, 0xff, 0x90, 0x1e, 0xd8, 0xe5, 0x98,
0x44, 0xf1, 0x2c, 0x3d, 0x59, 0x46, 0xc5, 0x62, 0x84, 0xa2, 0xc7, 0xef, 0x29, 0xbb, 0x19, 0xa4,
0xde, 0xff, 0xcc, 0xf9, 0x0b, 0xbb, 0x1a, 0xa6, 0xbd, 0xf0, 0xad, 0x38, 0x3c, 0x26, 0x86, 0x30,
0xb9, 0x3b, 0x0e, 0xc9, 0xbb, 0x2d, 0x41, 0x36, 0xe1, 0x82, 0x9d, 0xfe, 0x0a, 0x72, 0x2e, 0xba,
0x6a, 0xc4, 0xe8, 0xf3, 0xe4, 0xfa, 0x28, 0xf3, 0xb6, 0xcd, 0x26, 0xeb, 0xf3, 0xae, 0xa3, 0xa7,
0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6a, 0x13, 0x7f, 0xc2, 0x88, 0x01, 0x00, 0x00,
var fileDescriptor1 = []byte{
// 261 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0x3f, 0x4f, 0xc3, 0x30,
0x10, 0xc5, 0x9b, 0x96, 0x3f, 0x8a, 0x5b, 0x10, 0x18, 0x86, 0x28, 0x53, 0x08, 0x4b, 0x27, 0x47,
0x82, 0x85, 0x85, 0x81, 0x01, 0x06, 0xc6, 0x7c, 0x81, 0x28, 0xb5, 0x8f, 0x38, 0x92, 0x5b, 0x9b,
0x3b, 0xa7, 0x28, 0x2b, 0x9f, 0x1c, 0x91, 0xa4, 0x34, 0x55, 0x25, 0xc6, 0x7b, 0x77, 0xfa, 0xbd,
0x7b, 0x8f, 0x25, 0x08, 0x9f, 0x0d, 0x90, 0x2f, 0x3e, 0x2c, 0x7e, 0x95, 0xa8, 0xea, 0x4d, 0x55,
0x10, 0xe0, 0xb6, 0x96, 0x20, 0x1c, 0x5a, 0x6f, 0xf9, 0xe9, 0xb6, 0x6c, 0x8c, 0x8f, 0x9f, 0xaa,
0xda, 0xeb, 0x66, 0x25, 0xa4, 0x5d, 0x67, 0xba, 0x24, 0x5d, 0x4b, 0x8b, 0x2e, 0xeb, 0x76, 0x99,
0x06, 0xe3, 0x00, 0xb3, 0x3d, 0x22, 0xf3, 0xad, 0x03, 0xea, 0x01, 0xa9, 0x65, 0xf3, 0x57, 0xa9,
0x6d, 0xde, 0x1b, 0xf1, 0x88, 0x9d, 0xaf, 0x81, 0xa8, 0xac, 0x20, 0x0a, 0x92, 0x60, 0x19, 0xe6,
0xbb, 0x91, 0xdf, 0xb1, 0x85, 0x34, 0x0d, 0x79, 0xc0, 0xa2, 0x54, 0x0a, 0xa3, 0x69, 0xb7, 0x9e,
0x0f, 0xda, 0x8b, 0x52, 0xc8, 0xef, 0xd9, 0xc5, 0xf8, 0x84, 0xa2, 0x59, 0x32, 0x5b, 0x86, 0xf9,
0x62, 0x74, 0x43, 0xe9, 0x3b, 0x0b, 0x7b, 0x43, 0x67, 0xda, 0x7f, 0xec, 0x8e, 0x58, 0xd3, 0x63,
0xd6, 0xc3, 0x77, 0xc0, 0xae, 0x87, 0xcf, 0xdf, 0xfe, 0xe2, 0xf1, 0x67, 0x76, 0x39, 0x4c, 0xbb,
0x54, 0x37, 0x62, 0x9f, 0x5e, 0x0c, 0x62, 0x7c, 0x7b, 0x28, 0x92, 0xb3, 0x1b, 0x82, 0x74, 0xc2,
0x05, 0x3b, 0xf9, 0x7d, 0x90, 0x73, 0xd1, 0xf5, 0x27, 0x46, 0xf5, 0xc4, 0x57, 0x07, 0x9a, 0x33,
0x6d, 0x3a, 0x59, 0x9d, 0x75, 0x45, 0x3e, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0xc0, 0xa1, 0xca,
0xfe, 0xad, 0x01, 0x00, 0x00,
}

View File

@ -6,7 +6,12 @@ package vault;
message EchoRequest {
string message = 1;
// ClusterAddr is used to send up a standby node's address to the active
// node upon heartbeat
string cluster_addr = 2;
// ClusterAddrs is used to send up a list of cluster addresses to a dr
// primary from a dr secondary
repeated string cluster_addrs = 3;
}
message EchoReply {

View File

@ -16,6 +16,10 @@ import (
"github.com/hashicorp/vault/logical"
)
const (
replTimeout = 10 * time.Second
)
// HandleRequest is used to handle a new incoming request
func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err error) {
c.stateLock.RLock()
@ -117,7 +121,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
defer metrics.MeasureSince([]string{"core", "handle_request"}, time.Now())
// Validate the token
auth, te, ctErr := c.checkToken(req)
auth, te, ctErr := c.checkToken(req, false)
// We run this logic first because we want to decrement the use count even in the case of an error
if te != nil {
// Attempt to use the token (decrement NumUses)
@ -323,11 +327,16 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
// handleLoginRequest is used to handle a login request, which is an
// unauthenticated request to the backend.
func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *logical.Auth, error) {
func (c *Core) handleLoginRequest(req *logical.Request) (retResp *logical.Response, retAuth *logical.Auth, retErr error) {
defer metrics.MeasureSince([]string{"core", "handle_login_request"}, time.Now())
req.Unauthenticated = true
var auth *logical.Auth
// Create an audit trail of the request, auth is not available on login requests
if err := c.auditBroker.LogRequest(nil, req, c.auditedHeaders, nil); err != nil {
// Create an audit trail of the request. Attach auth if it was returned,
// e.g. if a token was provided.
if err := c.auditBroker.LogRequest(auth, req, c.auditedHeaders, nil); err != nil {
c.logger.Error("core: failed to audit request", "path", req.Path, "error", err)
return nil, nil, ErrInternalError
}
@ -386,7 +395,6 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log
}
// If the response generated an authentication, then generate the token
var auth *logical.Auth
if resp != nil && resp.Auth != nil {
var entity *identity.Entity
auth = resp.Auth

View File

@ -42,7 +42,8 @@ type routeEntry struct {
tainted bool
backend logical.Backend
mountEntry *MountEntry
storageView *BarrierView
storageView logical.Storage
storagePrefix string
rootPaths *radix.Tree
loginPaths *radix.Tree
}
@ -89,6 +90,7 @@ func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *Mount
}
// Build the paths
var localView logical.Storage = storageView
paths := new(logical.Paths)
if backend != nil {
specialPaths := backend.SpecialPaths()
@ -102,7 +104,8 @@ func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *Mount
tainted: false,
backend: backend,
mountEntry: mountEntry,
storageView: storageView,
storagePrefix: storageView.prefix,
storageView: localView,
rootPaths: pathsToRadix(paths.Root),
loginPaths: pathsToRadix(paths.Unauthenticated),
}
@ -110,7 +113,7 @@ func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *Mount
switch {
case prefix == "":
return fmt.Errorf("missing prefix to be used for router entry; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
case storageView.prefix == "":
case re.storagePrefix == "":
return fmt.Errorf("missing storage view prefix; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
case re.mountEntry.UUID == "":
return fmt.Errorf("missing mount identifier; mount_path: %q, mount_type: %q", re.mountEntry.Path, re.mountEntry.Type)
@ -119,7 +122,7 @@ func (r *Router) Mount(backend logical.Backend, prefix string, mountEntry *Mount
}
r.root.Insert(prefix, re)
r.storagePrefix.Insert(storageView.prefix, re)
r.storagePrefix.Insert(re.storagePrefix, re)
r.mountUUIDCache.Insert(re.mountEntry.UUID, re.mountEntry)
r.mountAccessorCache.Insert(re.mountEntry.Accessor, re.mountEntry)
@ -139,11 +142,13 @@ func (r *Router) Unmount(prefix string) error {
// Call backend's Cleanup routine
re := raw.(*routeEntry)
if re.backend != nil {
re.backend.Cleanup()
}
// Purge from the radix trees
r.root.Delete(prefix)
r.storagePrefix.Delete(re.storageView.prefix)
r.storagePrefix.Delete(re.storagePrefix)
r.mountUUIDCache.Delete(re.mountEntry.UUID)
r.mountAccessorCache.Delete(re.mountEntry.Accessor)
@ -234,10 +239,23 @@ func (r *Router) MatchingMount(path string) string {
return mount
}
// MatchingStorageView returns the storageView used for a path
func (r *Router) MatchingStorageView(path string) *BarrierView {
// MatchingStorageByAPIPath/StoragePath returns the storage used for
// API/Storage paths respectively
func (r *Router) MatchingStorageByAPIPath(path string) logical.Storage {
return r.matchingStorage(path, true)
}
func (r *Router) MatchingStorageByStoragePath(path string) logical.Storage {
return r.matchingStorage(path, false)
}
func (r *Router) matchingStorage(path string, apiPath bool) logical.Storage {
var raw interface{}
var ok bool
r.l.RLock()
_, raw, ok := r.root.LongestPrefix(path)
if apiPath {
_, raw, ok = r.root.LongestPrefix(path)
} else {
_, raw, ok = r.storagePrefix.LongestPrefix(path)
}
r.l.RUnlock()
if !ok {
return nil
@ -278,11 +296,23 @@ func (r *Router) MatchingSystemView(path string) logical.SystemView {
return raw.(*routeEntry).backend.System()
}
// MatchingStoragePrefix returns the mount path matching and storage prefix
// matching the given path
func (r *Router) MatchingStoragePrefix(path string) (string, string, bool) {
// MatchingStoragePrefixByAPIPath/StoragePath returns the mount path matching
// and storage prefix matching the given API/Storage path respectively
func (r *Router) MatchingStoragePrefixByAPIPath(path string) (string, string, bool) {
return r.matchingStoragePrefix(path, true)
}
func (r *Router) MatchingStoragePrefixByStoragePath(path string) (string, string, bool) {
return r.matchingStoragePrefix(path, false)
}
func (r *Router) matchingStoragePrefix(path string, apiPath bool) (string, string, bool) {
var raw interface{}
var ok bool
r.l.RLock()
_, raw, ok := r.storagePrefix.LongestPrefix(path)
if apiPath {
_, raw, ok = r.root.LongestPrefix(path)
} else {
_, raw, ok = r.storagePrefix.LongestPrefix(path)
}
r.l.RUnlock()
if !ok {
return "", "", false
@ -291,10 +321,10 @@ func (r *Router) MatchingStoragePrefix(path string) (string, string, bool) {
// Extract the mount path and storage prefix
re := raw.(*routeEntry)
mountPath := re.mountEntry.Path
prefix := re.storageView.prefix
prefix := re.storagePrefix
// Add back the prefix for credential backends
if strings.HasPrefix(path, credentialBarrierPrefix) {
if !apiPath && strings.HasPrefix(path, credentialBarrierPrefix) {
mountPath = credentialRoutePrefix + mountPath
}
@ -333,6 +363,11 @@ func (r *Router) routeCommon(req *logical.Request, existenceCheck bool) (*logica
strings.Replace(mount, "/", "-", -1)}, time.Now())
re := raw.(*routeEntry)
// Filtered mounts will have a nil backend
if re.backend == nil {
return logical.ErrorResponse(fmt.Sprintf("no handler for route '%s'", req.Path)), false, false, logical.ErrUnsupportedPath
}
// If the path is tainted, we reject any operation except for
// Rollback and Revoke
if re.tainted {
@ -366,7 +401,8 @@ func (r *Router) routeCommon(req *logical.Request, existenceCheck bool) (*logica
req.EntityID = ""
}
// Hash the request token unless this is the token backend
// Hash the request token unless the request is being routed to the token
// or system backend.
clientToken := req.ClientToken
switch {
case strings.HasPrefix(originalPath, "auth/token/"):

View File

@ -122,7 +122,7 @@ func TestRouter_Mount(t *testing.T) {
t.Fatalf("bad: %s", path)
}
if v := r.MatchingStorageView("prod/aws/foo"); v != view {
if v := r.MatchingStorageByAPIPath("prod/aws/foo"); v.(*BarrierView) != view {
t.Fatalf("bad: %v", v)
}
@ -130,7 +130,7 @@ func TestRouter_Mount(t *testing.T) {
t.Fatalf("bad: %s", path)
}
if v := r.MatchingStorageView("stage/aws/foo"); v != nil {
if v := r.MatchingStorageByAPIPath("stage/aws/foo"); v != nil {
t.Fatalf("bad: %v", v)
}
@ -139,7 +139,7 @@ func TestRouter_Mount(t *testing.T) {
t.Fatalf("failed to fetch mount entry using its ID; expected: %#v\n actual: %#v\n", mountEntry, mountEntryFetched)
}
mount, prefix, ok := r.MatchingStoragePrefix("logical/foo")
mount, prefix, ok := r.MatchingStoragePrefixByStoragePath("logical/foo")
if !ok {
t.Fatalf("missing storage prefix")
}
@ -200,7 +200,7 @@ func TestRouter_MountCredential(t *testing.T) {
t.Fatalf("bad: %s", path)
}
if v := r.MatchingStorageView("auth/aws/foo"); v != view {
if v := r.MatchingStorageByAPIPath("auth/aws/foo"); v.(*BarrierView) != view {
t.Fatalf("bad: %v", v)
}
@ -208,7 +208,7 @@ func TestRouter_MountCredential(t *testing.T) {
t.Fatalf("bad: %s", path)
}
if v := r.MatchingStorageView("auth/stage/aws/foo"); v != nil {
if v := r.MatchingStorageByAPIPath("auth/stage/aws/foo"); v != nil {
t.Fatalf("bad: %v", v)
}
@ -217,7 +217,7 @@ func TestRouter_MountCredential(t *testing.T) {
t.Fatalf("failed to fetch mount entry using its ID; expected: %#v\n actual: %#v\n", mountEntry, mountEntryFetched)
}
mount, prefix, ok := r.MatchingStoragePrefix("auth/foo")
mount, prefix, ok := r.MatchingStoragePrefixByStoragePath("auth/foo")
if !ok {
t.Fatalf("missing storage prefix")
}
@ -270,7 +270,7 @@ func TestRouter_Unmount(t *testing.T) {
t.Fatalf("err: %v", err)
}
if _, _, ok := r.MatchingStoragePrefix("logical/foo"); ok {
if _, _, ok := r.MatchingStoragePrefixByStoragePath("logical/foo"); ok {
t.Fatalf("should not have matching storage prefix")
}
}
@ -324,7 +324,7 @@ func TestRouter_Remount(t *testing.T) {
}
// Check the resolve from storage still works
mount, prefix, _ := r.MatchingStoragePrefix("logical/foobar")
mount, prefix, _ := r.MatchingStoragePrefixByStoragePath("logical/foobar")
if mount != "stage/aws/" {
t.Fatalf("bad mount: %s", mount)
}

View File

@ -21,11 +21,33 @@ const (
barrierSealConfigPath = "core/seal-config"
// recoverySealConfigPath is the path to the recovery key seal
// configuration. It is inside the barrier.
// configuration. It lives inside the barrier.
// DEPRECATED: Use recoverySealConfigPlaintextPath instead.
recoverySealConfigPath = "core/recovery-seal-config"
// recoverySealConfigPlaintextPath is the path to the recovery key seal
// configuration. This is stored in plaintext so that we can perform
// auto-unseal.
recoverySealConfigPlaintextPath = "core/recovery-config"
// recoveryKeyPath is the path to the recovery key
recoveryKeyPath = "core/recovery-key"
// hsmStoredKeysPath is the path used for storing HSM-encrypted unseal keys
hsmStoredKeysPath = "core/hsm/barrier-unseal-keys"
// hsmStoredIVPath is the path to the initialization vector for stored keys
hsmStoredIVPath = "core/hsm/iv"
)
const (
SealTypeShamir = "shamir"
SealTypePKCS11 = "hsm-pkcs11-auto"
SealTypeAWSKMS = "awskms-auto"
SealTypeTest = "test-auto"
RecoveryTypeUnsupported = "unsupported"
RecoveryTypeShamir = "shamir"
)
type KeyNotFoundError struct {
@ -86,7 +108,7 @@ func (d *DefaultSeal) Finalize() error {
}
func (d *DefaultSeal) BarrierType() string {
return "shamir"
return SealTypeShamir
}
func (d *DefaultSeal) StoredKeysSupported() bool {
@ -192,7 +214,7 @@ func (d *DefaultSeal) SetBarrierConfig(config *SealConfig) error {
}
func (d *DefaultSeal) RecoveryType() string {
return "unsupported"
return RecoveryTypeUnsupported
}
func (d *DefaultSeal) RecoveryConfig() (*SealConfig, error) {

View File

@ -129,6 +129,7 @@ func TestCoreWithSeal(t testing.T, testSeal Seal, enableRaw bool) *Core {
}
func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Logger) *CoreConfig {
t.Helper()
noopAudits := map[string]audit.Factory{
"noop": func(config *audit.BackendConfig) (audit.Backend, error) {
view := &logical.InmemStorage{}
@ -146,6 +147,7 @@ func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Lo
}, nil
},
}
noopBackends := make(map[string]logical.Factory)
noopBackends["noop"] = func(config *logical.BackendConfig) (logical.Backend, error) {
b := new(framework.Backend)
@ -168,6 +170,7 @@ func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Lo
for backendName, backendFactory := range noopBackends {
logicalBackends[backendName] = backendFactory
}
logicalBackends["kv"] = LeasedPassthroughBackendFactory
for backendName, backendFactory := range testLogicalBackends {
logicalBackends[backendName] = backendFactory
@ -188,26 +191,39 @@ func testCoreConfig(t testing.T, physicalBackend physical.Backend, logger log.Lo
// TestCoreInit initializes the core with a single key, and returns
// the key that must be used to unseal the core and a root token.
func TestCoreInit(t testing.T, core *Core) ([][]byte, string) {
return TestCoreInitClusterWrapperSetup(t, core, nil, nil)
t.Helper()
secretShares, _, root := TestCoreInitClusterWrapperSetup(t, core, nil, nil)
return secretShares, root
}
func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, clusterAddrs []*net.TCPAddr, handler http.Handler) ([][]byte, string) {
func TestCoreInitClusterWrapperSetup(t testing.T, core *Core, clusterAddrs []*net.TCPAddr, handler http.Handler) ([][]byte, [][]byte, string) {
t.Helper()
core.SetClusterListenerAddrs(clusterAddrs)
core.SetClusterHandler(handler)
barrierConfig := &SealConfig{
SecretShares: 3,
SecretThreshold: 3,
}
// If we support storing barrier keys, then set that to equal the min threshold to unseal
if core.seal.StoredKeysSupported() {
barrierConfig.StoredShares = barrierConfig.SecretThreshold
}
recoveryConfig := &SealConfig{
SecretShares: 3,
SecretThreshold: 3,
}
result, err := core.Initialize(&InitParams{
BarrierConfig: &SealConfig{
SecretShares: 3,
SecretThreshold: 3,
},
RecoveryConfig: &SealConfig{
SecretShares: 3,
SecretThreshold: 3,
},
BarrierConfig: barrierConfig,
RecoveryConfig: recoveryConfig,
})
if err != nil {
t.Fatalf("err: %s", err)
}
return result.SecretShares, result.RootToken
return result.SecretShares, result.RecoveryShares, result.RootToken
}
func TestCoreUnseal(core *Core, key []byte) (bool, error) {
@ -217,6 +233,7 @@ func TestCoreUnseal(core *Core, key []byte) (bool, error) {
// TestCoreUnsealed returns a pure in-memory core that is already
// initialized and unsealed.
func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) {
t.Helper()
core := TestCore(t)
return testCoreUnsealed(t, core)
}
@ -224,11 +241,13 @@ func TestCoreUnsealed(t testing.T) (*Core, [][]byte, string) {
// TestCoreUnsealedRaw returns a pure in-memory core that is already
// initialized, unsealed, and with raw endpoints enabled.
func TestCoreUnsealedRaw(t testing.T) (*Core, [][]byte, string) {
t.Helper()
core := TestCoreRaw(t)
return testCoreUnsealed(t, core)
}
func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) {
t.Helper()
keys, token := TestCoreInit(t, core)
for _, key := range keys {
if _, err := TestCoreUnseal(core, TestKeyCopy(key)); err != nil {
@ -248,6 +267,7 @@ func testCoreUnsealed(t testing.T, core *Core) (*Core, [][]byte, string) {
}
func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]byte, string) {
t.Helper()
logger := logformat.NewVaultLogger(log.LevelTrace)
conf := testCoreConfig(t, backend, logger)
conf.Seal = NewTestSeal(t, nil)
@ -264,6 +284,10 @@ func TestCoreUnsealedBackend(t testing.T, backend physical.Backend) (*Core, [][]
}
}
if err := core.UnsealWithStoredKeys(); err != nil {
t.Fatal(err)
}
sealed, err := core.Sealed()
if err != nil {
t.Fatalf("err checking seal status: %s", err)
@ -655,6 +679,7 @@ func TestWaitActive(t testing.T, core *Core) {
type TestCluster struct {
BarrierKeys [][]byte
RecoveryKeys [][]byte
CACert *x509.Certificate
CACertBytes []byte
CACertPEM []byte
@ -749,6 +774,7 @@ type TestClusterCore struct {
ServerKey *ecdsa.PrivateKey
ServerKeyPEM []byte
TLSConfig *tls.Config
UnderlyingStorage physical.Backend
}
type TestClusterOptions struct {
@ -757,6 +783,7 @@ type TestClusterOptions struct {
HandlerFunc func(*Core) http.Handler
BaseListenAddress string
NumCores int
SealFunc func() Seal
}
var DefaultNumCores = 3
@ -771,6 +798,12 @@ type certInfo struct {
// NewTestCluster creates a new test cluster based on the provided core config
// and test cluster options.
//
// N.B. Even though a single base CoreConfig is provided, NewTestCluster will instantiate a
// core config for each core it creates. If separate seal per core is desired, opts.SealFunc
// can be provided to generate a seal for each one. Otherwise, the provided base.Seal will be
// shared among cores. NewCore's default behavior is to generate a new DefaultSeal if the
// provided Seal in coreConfig (i.e. base.Seal) is nil.
func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *TestCluster {
var numCores int
if opts == nil || opts.NumCores == 0 {
@ -1077,6 +1110,12 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
if coreConfig.ClusterAddr != "" {
coreConfig.ClusterAddr = fmt.Sprintf("https://127.0.0.1:%d", listeners[i][0].Address.Port+105)
}
// if opts.SealFunc is provided, use that to generate a seal for the config instead
if opts != nil && opts.SealFunc != nil {
coreConfig.Seal = opts.SealFunc()
}
c, err := NewCore(coreConfig)
if err != nil {
t.Fatalf("err: %v", err)
@ -1110,9 +1149,11 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
}
if opts == nil || !opts.SkipInit {
keys, root := TestCoreInitClusterWrapperSetup(t, cores[0], clusterAddrGen(listeners[0]), handlers[0])
barrierKeys, _ := copystructure.Copy(keys)
bKeys, rKeys, root := TestCoreInitClusterWrapperSetup(t, cores[0], clusterAddrGen(listeners[0]), handlers[0])
barrierKeys, _ := copystructure.Copy(bKeys)
testCluster.BarrierKeys = barrierKeys.([][]byte)
recoveryKeys, _ := copystructure.Copy(rKeys)
testCluster.RecoveryKeys = recoveryKeys.([][]byte)
testCluster.RootToken = root
// Write root token and barrier keys
@ -1131,14 +1172,30 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
if err != nil {
t.Fatal(err)
}
for i, key := range testCluster.RecoveryKeys {
buf.Write([]byte(base64.StdEncoding.EncodeToString(key)))
if i < len(testCluster.RecoveryKeys)-1 {
buf.WriteRune('\n')
}
}
err = ioutil.WriteFile(filepath.Join(testCluster.TempDir, "recovery_keys"), buf.Bytes(), 0755)
if err != nil {
t.Fatal(err)
}
// Unseal first core
for _, key := range keys {
for _, key := range bKeys {
if _, err := cores[0].Unseal(TestKeyCopy(key)); err != nil {
t.Fatalf("unseal err: %s", err)
}
}
// If stored keys is supported, the above will no no-op, so trigger auto-unseal
// using stored keys to try to unseal
if err := cores[0].UnsealWithStoredKeys(); err != nil {
t.Fatal(err)
}
// Verify unsealed
sealed, err := cores[0].Sealed()
if err != nil {
@ -1153,11 +1210,17 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
// Unseal other cores unless otherwise specified
if (opts == nil || !opts.KeepStandbysSealed) && numCores > 1 {
for i := 1; i < numCores; i++ {
for _, key := range keys {
for _, key := range bKeys {
if _, err := cores[i].Unseal(TestKeyCopy(key)); err != nil {
t.Fatalf("unseal err: %s", err)
}
}
// If stored keys is supported, the above will no no-op, so trigger auto-unseal
// using stored keys
if err := cores[i].UnsealWithStoredKeys(); err != nil {
t.Fatal(err)
}
}
// Let them come fully up to standby

View File

@ -121,7 +121,9 @@ func NewTokenStore(c *Core, config *logical.BackendConfig) (*TokenStore, error)
}
if c.policyStore != nil {
t.policyLookupFunc = c.policyStore.GetPolicy
t.policyLookupFunc = func(name string) (*Policy, error) {
return c.policyStore.GetPolicy(name, PolicyTypeToken)
}
}
// Setup the framework endpoints
@ -497,7 +499,7 @@ func (ts *TokenStore) Initialize() error {
}
func (ts *TokenStore) Invalidate(key string) {
ts.logger.Trace("token: invalidating key", "key", key)
//ts.logger.Trace("token: invalidating key", "key", key)
switch key {
case tokenSubPath + salt.DefaultLocation:
@ -530,13 +532,13 @@ func (ts *TokenStore) Salt() (*salt.Salt, error) {
// TokenEntry is used to represent a given token
type TokenEntry struct {
// ID of this entry, generally a random UUID
ID string `json:"id" mapstructure:"id" structs:"id"`
ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""`
// Accessor for this token, a random UUID
Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor"`
Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""`
// Parent token, used for revocation trees
Parent string `json:"parent" mapstructure:"parent" structs:"parent"`
Parent string `json:"parent" mapstructure:"parent" structs:"parent" sentinel:""`
// Which named policies should be used
Policies []string `json:"policies" mapstructure:"policies" structs:"policies"`
@ -545,7 +547,7 @@ type TokenEntry struct {
Path string `json:"path" mapstructure:"path" structs:"path"`
// Used for auditing. This could include things like "source", "user", "ip"
Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta"`
Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta" sentinel:"meta"`
// Used for operators to be able to associate with the source
DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"`
@ -560,13 +562,13 @@ type TokenEntry struct {
NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"`
// Time of token creation
CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time"`
CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time" sentinel:""`
// Duration set when token was created
TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl"`
TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl" sentinel:""`
// Explicit maximum TTL on the token
ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl"`
ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl" sentinel:""`
// If set, the role that was used for parameters at creation time
Role string `json:"role" mapstructure:"role" structs:"role"`
@ -574,17 +576,53 @@ type TokenEntry struct {
// If set, the period of the token. This is only used when created directly
// through the create endpoint; periods managed by roles or other auth
// backends are subject to those renewal rules.
Period time.Duration `json:"period" mapstructure:"period" structs:"period"`
Period time.Duration `json:"period" mapstructure:"period" structs:"period" sentinel:""`
// These are the deprecated fields
DisplayNameDeprecated string `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName"`
NumUsesDeprecated int `json:"NumUses" mapstructure:"NumUses" structs:"NumUses"`
CreationTimeDeprecated int64 `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime"`
ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL"`
DisplayNameDeprecated string `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName" sentinel:""`
NumUsesDeprecated int `json:"NumUses" mapstructure:"NumUses" structs:"NumUses" sentinel:""`
CreationTimeDeprecated int64 `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime" sentinel:""`
ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL" sentinel:""`
EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"`
}
func (te *TokenEntry) SentinelGet(key string) (interface{}, error) {
if te == nil {
return nil, nil
}
switch key {
case "period":
return te.Period, nil
case "period_seconds":
return int64(te.Period.Seconds()), nil
case "explicit_max_ttl":
return te.ExplicitMaxTTL, nil
case "explicit_max_ttl_seconds":
return int64(te.ExplicitMaxTTL.Seconds()), nil
case "creation_ttl":
return te.TTL, nil
case "creation_ttl_seconds":
return int64(te.TTL.Seconds()), nil
case "creation_time":
return time.Unix(te.CreationTime, 0).Format(time.RFC3339Nano), nil
case "creation_time_unix":
return time.Unix(te.CreationTime, 0), nil
case "meta", "metadata":
return te.Meta, nil
}
return nil, nil
}
// tsRoleEntry contains token store role information
type tsRoleEntry struct {
// The name of the role. Embedded so it can be used for pathing
@ -698,8 +736,8 @@ func (ts *TokenStore) createAccessor(entry *TokenEntry) error {
if err != nil {
return err
}
path := accessorPrefix + saltID
path := accessorPrefix + saltID
aEntry := &accessorEntry{
TokenID: entry.ID,
AccessorID: entry.Accessor,
@ -2021,7 +2059,6 @@ func (ts *TokenStore) handleLookup(
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
}
out, err := ts.lookupSalted(saltedId, true)
if err != nil {
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
}

View File

@ -222,8 +222,11 @@ func testCoreMakeToken(t *testing.T, c *Core, root, client, ttl string, policy [
if err != nil {
t.Fatalf("err: %v %v", err, resp)
}
if resp.IsError() {
t.Fatalf("err: %v %v", err, *resp)
}
if resp.Auth.ClientToken != client {
t.Fatalf("bad: %#v", resp)
t.Fatalf("bad: %#v", *resp)
}
}
@ -1109,7 +1112,7 @@ func TestTokenStore_HandleRequest_CreateToken_NonRoot_RootChild(t *testing.T) {
core, ts, _, root := TestCoreWithTokenStore(t)
ps := core.policyStore
policy, _ := Parse(tokenCreationPolicy)
policy, _ := ParseACLPolicy(tokenCreationPolicy)
policy.Name = "test1"
if err := ps.SetPolicy(policy); err != nil {
t.Fatal(err)
@ -1965,19 +1968,19 @@ func TestTokenStore_RoleDisallowedPolicies(t *testing.T) {
ps := core.policyStore
// Create 3 different policies
policy, _ := Parse(tokenCreationPolicy)
policy, _ := ParseACLPolicy(tokenCreationPolicy)
policy.Name = "test1"
if err := ps.SetPolicy(policy); err != nil {
t.Fatal(err)
}
policy, _ = Parse(tokenCreationPolicy)
policy, _ = ParseACLPolicy(tokenCreationPolicy)
policy.Name = "test2"
if err := ps.SetPolicy(policy); err != nil {
t.Fatal(err)
}
policy, _ = Parse(tokenCreationPolicy)
policy, _ = ParseACLPolicy(tokenCreationPolicy)
policy.Name = "test3"
if err := ps.SetPolicy(policy); err != nil {
t.Fatal(err)
@ -2894,7 +2897,7 @@ func TestTokenStore_NoDefaultPolicy(t *testing.T) {
core, ts, _, root := TestCoreWithTokenStore(t)
ps := core.policyStore
policy, _ := Parse(tokenCreationPolicy)
policy, _ := ParseACLPolicy(tokenCreationPolicy)
policy.Name = "policy1"
if err := ps.SetPolicy(policy); err != nil {
t.Fatal(err)