Merge branch 'master' into fix-ou-ordering
Commit 56c46b852c
@@ -13,10 +13,14 @@ IMPROVEMENTS:

BUG FIXES:

* auth/okta: Fix handling of group names containing slashes [GH-6665]
* core: Correctly honor non-HMAC request keys when auditing requests [GH-6653]
* core: Fix the `x-vault-unauthenticated` value in OpenAPI for a number of endpoints [GH-6654]
* pki: fix a panic when a client submits a null value [GH-5679]
* replication: Fix an issue causing startup problems if a namespace policy
  wasn't replicated properly
* storage/dynamodb: Fix an issue where a deleted lock key in DynamoDB (HA) could cause
  constant switching of the active node [GH-6637]
* storage/dynamodb: Eliminate a high-CPU condition that could occur if an error was
  received from the DynamoDB API [GH-6640]
* replication: Properly update mount entry cache on a secondary to apply all
@@ -26,12 +26,12 @@ func pathGroups(b *backend) *framework.Path {
    return &framework.Path{
        Pattern: `groups/(?P<name>.+)`,
        Fields: map[string]*framework.FieldSchema{
            "name": &framework.FieldSchema{
            "name": {
                Type: framework.TypeString,
                Description: "Name of the LDAP group.",
            },

            "policies": &framework.FieldSchema{
            "policies": {
                Type: framework.TypeCommaStringSlice,
                Description: "Comma-separated list of policies associated to the group.",
            },
@@ -132,17 +132,14 @@ func (b *backend) pathGroupWrite(ctx context.Context, req *logical.Request, d *f
}

func (b *backend) pathGroupList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
    keys, err := logical.CollectKeys(ctx, req.Storage)
    keys, err := logical.CollectKeysWithPrefix(ctx, req.Storage, "group/")
    if err != nil {
        return nil, err
    }
    retKeys := make([]string, 0)
    for _, key := range keys {
        if strings.HasPrefix(key, "group/") && !strings.HasPrefix(key, "/") {
            retKeys = append(retKeys, strings.TrimPrefix(key, "group/"))
        }
    for i := range keys {
        keys[i] = strings.TrimPrefix(keys[i], "group/")
    }
    return logical.ListResponse(retKeys), nil
    return logical.ListResponse(keys), nil
}

type GroupEntry struct {
@@ -148,18 +148,14 @@ func (b *backend) pathUserWrite(ctx context.Context, req *logical.Request, d *fr
}

func (b *backend) pathUserList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
    keys, err := logical.CollectKeys(ctx, req.Storage)
    keys, err := logical.CollectKeysWithPrefix(ctx, req.Storage, "user/")
    if err != nil {
        return nil, err
    }
    retKeys := make([]string, 0)
    for _, key := range keys {
        if strings.HasPrefix(key, "user/") && !strings.HasPrefix(key, "/") {
            retKeys = append(retKeys, strings.TrimPrefix(key, "user/"))
        }
    for i := range keys {
        keys[i] = strings.TrimPrefix(keys[i], "user/")
    }
    return logical.ListResponse(retKeys), nil

    return logical.ListResponse(keys), nil
}

type UserEntry struct {
@@ -26,12 +26,12 @@ func pathGroups(b *backend) *framework.Path {
    return &framework.Path{
        Pattern: `groups/(?P<name>.+)`,
        Fields: map[string]*framework.FieldSchema{
            "name": &framework.FieldSchema{
            "name": {
                Type: framework.TypeString,
                Description: "Name of the Okta group.",
            },

            "policies": &framework.FieldSchema{
            "policies": {
                Type: framework.TypeCommaStringSlice,
                Description: "Comma-separated list of policies associated to the group.",
            },
@@ -57,10 +57,12 @@ func (b *backend) Group(ctx context.Context, s logical.Storage, n string) (*Grou
        return nil, "", err
    }
    if entry == nil {
        entries, err := s.List(ctx, "group/")
        entries, err := groupList(ctx, s)
        if err != nil {
            return nil, "", err

        }

        for _, groupName := range entries {
            if strings.EqualFold(groupName, n) {
                entry, err = s.Get(ctx, "group/"+groupName)
@@ -157,13 +159,27 @@ func (b *backend) pathGroupWrite(ctx context.Context, req *logical.Request, d *f
}

func (b *backend) pathGroupList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
    groups, err := req.Storage.List(ctx, "group/")
    groups, err := groupList(ctx, req.Storage)
    if err != nil {
        return nil, err
    }

    return logical.ListResponse(groups), nil
}

func groupList(ctx context.Context, s logical.Storage) ([]string, error) {
    keys, err := logical.CollectKeysWithPrefix(ctx, s, "group/")
    if err != nil {
        return nil, err
    }

    for i := range keys {
        keys[i] = strings.TrimPrefix(keys[i], "group/")
    }

    return keys, nil
}

type GroupEntry struct {
    Policies []string
}
builtin/credential/okta/path_groups_test.go (new file, 108 lines)

@@ -0,0 +1,108 @@
package okta

import (
    "context"
    "strings"
    "testing"
    "time"

    "github.com/go-test/deep"

    log "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/vault/sdk/helper/logging"
    "github.com/hashicorp/vault/sdk/logical"
)

func TestGroupsList(t *testing.T) {
    b, storage := getBackend(t)

    groups := []string{
        "%20\\",
        "foo",
        "zfoo",
        "🙂",
        "foo/nested",
        "foo/even/more/nested",
    }

    for _, group := range groups {
        req := &logical.Request{
            Operation: logical.UpdateOperation,
            Path:      "groups/" + group,
            Storage:   storage,
            Data: map[string]interface{}{
                "policies": []string{group + "_a", group + "_b"},
            },
        }

        resp, err := b.HandleRequest(context.Background(), req)
        if err != nil || (resp != nil && resp.IsError()) {
            t.Fatalf("err:%s resp:%#v\n", err, resp)
        }

    }

    for _, group := range groups {
        for _, upper := range []bool{false, true} {
            groupPath := group
            if upper {
                groupPath = strings.ToUpper(group)
            }
            req := &logical.Request{
                Operation: logical.ReadOperation,
                Path:      "groups/" + groupPath,
                Storage:   storage,
            }

            resp, err := b.HandleRequest(context.Background(), req)
            if err != nil || (resp != nil && resp.IsError()) {
                t.Fatalf("err:%s resp:%#v\n", err, resp)
            }
            if resp == nil {
                t.Fatal("unexpected nil response")
            }

            expected := []string{group + "_a", group + "_b"}

            if diff := deep.Equal(resp.Data["policies"].([]string), expected); diff != nil {
                t.Fatal(diff)
            }
        }
    }

    req := &logical.Request{
        Operation: logical.ListOperation,
        Path:      "groups",
        Storage:   storage,
    }

    resp, err := b.HandleRequest(context.Background(), req)
    if err != nil || (resp != nil && resp.IsError()) {
        t.Fatalf("err:%s resp:%#v\n", err, resp)
    }

    if diff := deep.Equal(resp.Data["keys"].([]string), groups); diff != nil {
        t.Fatal(diff)
    }
}

func getBackend(t *testing.T) (logical.Backend, logical.Storage) {
    defaultLeaseTTLVal := time.Hour * 12
    maxLeaseTTLVal := time.Hour * 24

    config := &logical.BackendConfig{
        Logger: logging.NewVaultLogger(log.Trace),

        System: &logical.StaticSystemView{
            DefaultLeaseTTLVal: defaultLeaseTTLVal,
            MaxLeaseTTLVal:     maxLeaseTTLVal,
        },
        StorageView: &logical.InmemStorage{},
    }
    b, err := Factory(context.Background(), config)
    if err != nil {
        t.Fatalf("unable to create backend: %v", err)
    }

    return b, config.StorageView
}
@@ -142,7 +142,7 @@ func (s *StoragePacker) BucketKey(itemID string) string {

// DeleteItem removes the storage entry which the given key refers to from its
// corresponding bucket.
func (s *StoragePacker) DeleteItem(itemID string) error {
func (s *StoragePacker) DeleteItem(_ context.Context, itemID string) error {

    if itemID == "" {
        return fmt.Errorf("empty item ID")

@@ -269,7 +269,7 @@ func (s *StoragePacker) GetItem(itemID string) (*Item, error) {
}

// PutItem stores a storage entry in its corresponding bucket
func (s *StoragePacker) PutItem(item *Item) error {
func (s *StoragePacker) PutItem(_ context.Context, item *Item) error {
    if item == nil {
        return fmt.Errorf("nil item")
    }
@@ -1,6 +1,7 @@
package storagepacker

import (
    "context"
    "testing"

    "github.com/golang/protobuf/proto"

@@ -17,6 +18,8 @@ func BenchmarkStoragePacker(b *testing.B) {
        b.Fatal(err)
    }

    ctx := context.Background()

    for i := 0; i < b.N; i++ {
        itemID, err := uuid.GenerateUUID()
        if err != nil {

@@ -27,7 +30,7 @@ func BenchmarkStoragePacker(b *testing.B) {
            ID: itemID,
        }

        err = storagePacker.PutItem(item)
        err = storagePacker.PutItem(ctx, item)
        if err != nil {
            b.Fatal(err)
        }

@@ -45,7 +48,7 @@ func BenchmarkStoragePacker(b *testing.B) {
            b.Fatalf("bad: item ID; expected: %q\n actual: %q", item.ID, fetchedItem.ID)
        }

        err = storagePacker.DeleteItem(item.ID)
        err = storagePacker.DeleteItem(ctx, item.ID)
        if err != nil {
            b.Fatal(err)
        }

@@ -66,12 +69,14 @@ func TestStoragePacker(t *testing.T) {
        t.Fatal(err)
    }

    ctx := context.Background()

    // Persist a storage entry
    item1 := &Item{
        ID: "item1",
    }

    err = storagePacker.PutItem(item1)
    err = storagePacker.PutItem(ctx, item1)
    if err != nil {
        t.Fatal(err)
    }

@@ -90,7 +95,7 @@ func TestStoragePacker(t *testing.T) {
    }

    // Delete item1
    err = storagePacker.DeleteItem(item1.ID)
    err = storagePacker.DeleteItem(ctx, item1.ID)
    if err != nil {
        t.Fatal(err)
    }

@@ -112,6 +117,8 @@ func TestStoragePacker_SerializeDeserializeComplexItem(t *testing.T) {
        t.Fatal(err)
    }

    ctx := context.Background()

    timeNow := ptypes.TimestampNow()

    alias1 := &identity.Alias{

@@ -147,7 +154,7 @@ func TestStoragePacker_SerializeDeserializeComplexItem(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
    err = storagePacker.PutItem(&Item{
    err = storagePacker.PutItem(ctx, &Item{
        ID:      entity.ID,
        Message: marshaledEntity,
    })
@@ -21,8 +21,15 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package

// Item represents an entry that gets inserted into the storage packer
type Item struct {
    ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
    // ID must be provided by the caller; the same value, if used with GetItem,
    // can be used to fetch the item. However, when iterating through a bucket,
    // this ID will be an internal ID. In other words, outside of the use-case
    // described above, the caller *must not* rely on this value to be
    // consistent with what they passed in.
    ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
    // message is the contents of the item
    Message *any.Any `sentinel:"" protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`

@@ -68,12 +75,22 @@ func (m *Item) GetMessage() *any.Any {
    return nil
}

// Bucket is a construct to hold multiple items within itself. This
// abstraction contains multiple buckets of the same kind within itself and
// shares amont them the items that get inserted. When the bucket as a whole
// gets too big to hold more items, the contained buckets gets pushed out only
// to become independent buckets. Hence, this can grow infinitely in terms of
// storage space for items that get inserted.
type Bucket struct {
    Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
    Items []*Item `sentinel:"" protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
    // Key is the storage path where the bucket gets stored
    Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
    // Items holds the items contained within this bucket. Used by v1.
    Items []*Item `sentinel:"" protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"`
    // ItemMap stores a mapping of item ID to message. Used by v2.
    ItemMap map[string]*any.Any `sentinel:"" protobuf:"bytes,3,rep,name=item_map,json=itemMap,proto3" json:"item_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *Bucket) Reset() { *m = Bucket{} }

@@ -115,27 +132,39 @@ func (m *Bucket) GetItems() []*Item {
    return nil
}

func (m *Bucket) GetItemMap() map[string]*any.Any {
    if m != nil {
        return m.ItemMap
    }
    return nil
}

func init() {
    proto.RegisterType((*Item)(nil), "storagepacker.Item")
    proto.RegisterType((*Bucket)(nil), "storagepacker.Bucket")
    proto.RegisterMapType((map[string]*any.Any)(nil), "storagepacker.Bucket.ItemMapEntry")
}

func init() { proto.RegisterFile("helper/storagepacker/types.proto", fileDescriptor_c0e98c66c4f51b7f) }

var fileDescriptor_c0e98c66c4f51b7f = []byte{
    // 219 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0x41, 0x4b, 0xc3, 0x40,
    0x10, 0x85, 0x49, 0xaa, 0x15, 0xb7, 0x28, 0xb2, 0x7a, 0x88, 0x9e, 0x42, 0x4f, 0xf1, 0x32, 0x83,
    0xf5, 0x17, 0x58, 0x50, 0xf0, 0x9a, 0xa3, 0xb7, 0x4d, 0x3a, 0x6e, 0x96, 0x64, 0xbb, 0xcb, 0xee,
    0xac, 0xb0, 0xff, 0x5e, 0xda, 0xd8, 0x43, 0xc1, 0xdb, 0xc0, 0xfb, 0xf8, 0xe6, 0x3d, 0x51, 0x0f,
    0x34, 0x79, 0x0a, 0x18, 0xd9, 0x05, 0xa5, 0xc9, 0xab, 0x7e, 0xa4, 0x80, 0x9c, 0x3d, 0x45, 0xf0,
    0xc1, 0xb1, 0x93, 0x37, 0x67, 0xd1, 0xd3, 0xa3, 0x76, 0x4e, 0x4f, 0x84, 0xc7, 0xb0, 0x4b, 0xdf,
    0xa8, 0xf6, 0x79, 0x26, 0xd7, 0x1f, 0xe2, 0xe2, 0x93, 0xc9, 0xca, 0x5b, 0x51, 0x9a, 0x5d, 0x55,
    0xd4, 0x45, 0x73, 0xdd, 0x96, 0x66, 0x27, 0x41, 0x5c, 0x59, 0x8a, 0x51, 0x69, 0xaa, 0xca, 0xba,
    0x68, 0x56, 0x9b, 0x07, 0x98, 0x25, 0x70, 0x92, 0xc0, 0xdb, 0x3e, 0xb7, 0x27, 0x68, 0xfd, 0x2e,
    0x96, 0xdb, 0xd4, 0x8f, 0xc4, 0xf2, 0x4e, 0x2c, 0x46, 0xca, 0x7f, 0xaa, 0xc3, 0x29, 0x9f, 0xc5,
    0xa5, 0x61, 0xb2, 0xb1, 0x2a, 0xeb, 0x45, 0xb3, 0xda, 0xdc, 0xc3, 0x59, 0x3b, 0x38, 0xfc, 0x6f,
    0x67, 0x62, 0xfb, 0xf2, 0x85, 0xda, 0xf0, 0x90, 0x3a, 0xe8, 0x9d, 0xc5, 0x41, 0xc5, 0xc1, 0xf4,
    0x2e, 0x78, 0xfc, 0x51, 0x69, 0x62, 0xfc, 0x6f, 0x77, 0xb7, 0x3c, 0x16, 0x7a, 0xfd, 0x0d, 0x00,
    0x00, 0xff, 0xff, 0x1c, 0x8e, 0xb4, 0xa9, 0x16, 0x01, 0x00, 0x00,
    // 276 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xcf, 0x4b, 0xc3, 0x30,
    0x14, 0xc7, 0x69, 0xeb, 0x36, 0x7d, 0x53, 0x91, 0xe8, 0xa1, 0xee, 0x54, 0x7a, 0xaa, 0x1e, 0x12,
    0x9c, 0x17, 0x11, 0x3c, 0x38, 0x50, 0xf0, 0x20, 0x48, 0x8f, 0x5e, 0x24, 0xed, 0x9e, 0x6d, 0xe8,
    0x8f, 0x84, 0x24, 0x1d, 0xf4, 0x1f, 0xf5, 0xef, 0x91, 0x36, 0x0e, 0x9c, 0x0c, 0x6f, 0x2f, 0x7c,
    0x3f, 0xf9, 0xe4, 0x1b, 0x1e, 0x44, 0x25, 0xd6, 0x0a, 0x35, 0x33, 0x56, 0x6a, 0x5e, 0xa0, 0xe2,
    0x79, 0x85, 0x9a, 0xd9, 0x5e, 0xa1, 0xa1, 0x4a, 0x4b, 0x2b, 0xc9, 0xc9, 0x4e, 0xb4, 0xb8, 0x2c,
    0xa4, 0x2c, 0x6a, 0x64, 0x63, 0x98, 0x75, 0x9f, 0x8c, 0xb7, 0xbd, 0x23, 0xe3, 0x67, 0x38, 0x78,
    0xb1, 0xd8, 0x90, 0x53, 0xf0, 0xc5, 0x3a, 0xf4, 0x22, 0x2f, 0x39, 0x4a, 0x7d, 0xb1, 0x26, 0x14,
    0x66, 0x0d, 0x1a, 0xc3, 0x0b, 0x0c, 0xfd, 0xc8, 0x4b, 0xe6, 0xcb, 0x0b, 0xea, 0x24, 0x74, 0x2b,
    0xa1, 0x8f, 0x6d, 0x9f, 0x6e, 0xa1, 0xf8, 0xcb, 0x83, 0xe9, 0xaa, 0xcb, 0x2b, 0xb4, 0xe4, 0x0c,
    0x82, 0x0a, 0xfb, 0x1f, 0xd7, 0x30, 0x92, 0x2b, 0x98, 0x08, 0x8b, 0x8d, 0x09, 0xfd, 0x28, 0x48,
    0xe6, 0xcb, 0x73, 0xba, 0x53, 0x8f, 0x0e, 0x05, 0x52, 0x47, 0x90, 0x07, 0x38, 0x1c, 0x86, 0x8f,
    0x86, 0xab, 0x30, 0x18, 0xe9, 0xf8, 0x0f, 0xed, 0x5e, 0x19, 0x2f, 0xbd, 0x72, 0xf5, 0xd4, 0x5a,
    0xdd, 0xa7, 0x33, 0xe1, 0x4e, 0x8b, 0x37, 0x38, 0xfe, 0x1d, 0xec, 0xe9, 0x72, 0x0d, 0x93, 0x0d,
    0xaf, 0xbb, 0xff, 0xbf, 0xe5, 0x90, 0x7b, 0xff, 0xce, 0x5b, 0xdd, 0xbc, 0xb3, 0x42, 0xd8, 0xb2,
    0xcb, 0x68, 0x2e, 0x1b, 0x56, 0x72, 0x53, 0x8a, 0x5c, 0x6a, 0xc5, 0x36, 0xbc, 0xab, 0x2d, 0xdb,
    0xb7, 0x89, 0x6c, 0x3a, 0xba, 0x6e, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x46, 0x9d, 0x8a, 0xcb,
    0xa8, 0x01, 0x00, 0x00,
}
@@ -6,12 +6,29 @@ package storagepacker;

import "google/protobuf/any.proto";

// Item represents an entry that gets inserted into the storage packer
message Item {
    string id = 1;
    google.protobuf.Any message = 2;
    // ID must be provided by the caller; the same value, if used with GetItem,
    // can be used to fetch the item. However, when iterating through a bucket,
    // this ID will be an internal ID. In other words, outside of the use-case
    // described above, the caller *must not* rely on this value to be
    // consistent with what they passed in.
    string id = 1;
    // message is the contents of the item
    google.protobuf.Any message = 2;
}

// Bucket is a construct to hold multiple items within itself. This
// abstraction contains multiple buckets of the same kind within itself and
// shares amont them the items that get inserted. When the bucket as a whole
// gets too big to hold more items, the contained buckets gets pushed out only
// to become independent buckets. Hence, this can grow infinitely in terms of
// storage space for items that get inserted.
message Bucket {
    string key = 1;
    repeated Item items = 2;
    // Key is the storage path where the bucket gets stored
    string key = 1;
    // Items holds the items contained within this bucket. Used by v1.
    repeated Item items = 2;
    // ItemMap stores a mapping of item ID to message. Used by v2.
    map <string, google.protobuf.Any> item_map = 3;
}
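For illustration only, and not part of this diff: a minimal Go sketch of how a consumer of the generated types above could read a bucket written in either layout. The helper name is hypothetical; it assumes the storagepacker package and the any.Any import already used by the generated code.

// bucketContents is a hypothetical helper (not in this change) that flattens a
// Bucket into an item-ID -> message map, reading both the v1 Items slice and
// the v2 ItemMap field described above.
func bucketContents(b *Bucket) map[string]*any.Any {
    out := make(map[string]*any.Any, len(b.Items)+len(b.ItemMap))
    // v1 layout: repeated Item entries.
    for _, item := range b.Items {
        out[item.ID] = item.Message
    }
    // v2 layout: item_map entries; these win if an ID appears in both.
    for id, msg := range b.ItemMap {
        out[id] = msg
    }
    return out
}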
@@ -401,6 +401,9 @@ func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEnt

    ok, resp, _, err := c.kv.Txn(ops, queryOpts)
    if err != nil {
        if strings.Contains(err.Error(), "is too large") {
            return errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", physical.ErrValueTooLarge), err)
        }
        return err
    }
    if ok && len(resp.Errors) == 0 {
@@ -431,7 +434,13 @@ func (c *ConsulBackend) Put(ctx context.Context, entry *physical.Entry) error {
    writeOpts = writeOpts.WithContext(ctx)

    _, err := c.kv.Put(pair, writeOpts)
    return err
    if err != nil {
        if strings.Contains(err.Error(), "Value exceeds") {
            return errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", physical.ErrValueTooLarge), err)
        }
        return err
    }
    return nil
}

// Get is used to fetch an entry
@@ -1,10 +1,12 @@
package consul

import (
    "context"
    "fmt"
    "math/rand"
    "os"
    "reflect"
    "strings"
    "sync"
    "testing"
    "time"

@@ -20,8 +22,7 @@ import (
type consulConf map[string]string

var (
    addrCount int = 0
    testImagePull sync.Once
    addrCount int = 0
)

func testConsulBackend(t *testing.T) *ConsulBackend {
@@ -488,7 +489,7 @@ func TestConsulBackend(t *testing.T) {
    consulToken := os.Getenv("CONSUL_HTTP_TOKEN")
    addr := os.Getenv("CONSUL_HTTP_ADDR")
    if addr == "" {
        cleanup, connURL, token := consul.PrepareTestContainer(t, "1.4.0-rc1")
        cleanup, connURL, token := consul.PrepareTestContainer(t, "1.4.4")
        defer cleanup()
        addr, consulToken = connURL, token
    }
@@ -522,11 +523,82 @@ func TestConsulBackend(t *testing.T) {
    physical.ExerciseBackend_ListPrefix(t, b)
}

func TestConsul_TooLarge(t *testing.T) {
    consulToken := os.Getenv("CONSUL_HTTP_TOKEN")
    addr := os.Getenv("CONSUL_HTTP_ADDR")
    if addr == "" {
        cleanup, connURL, token := consul.PrepareTestContainer(t, "1.4.4")
        defer cleanup()
        addr, consulToken = connURL, token
    }

    conf := api.DefaultConfig()
    conf.Address = addr
    conf.Token = consulToken
    client, err := api.NewClient(conf)
    if err != nil {
        t.Fatalf("err: %v", err)
    }

    randPath := fmt.Sprintf("vault-%d/", time.Now().Unix())
    defer func() {
        client.KV().DeleteTree(randPath, nil)
    }()

    logger := logging.NewVaultLogger(log.Debug)

    b, err := NewConsulBackend(map[string]string{
        "address":      conf.Address,
        "path":         randPath,
        "max_parallel": "256",
        "token":        conf.Token,
    }, logger)
    if err != nil {
        t.Fatalf("err: %s", err)
    }

    zeros := make([]byte, 600000, 600000)
    n, err := rand.Read(zeros)
    if n != 600000 {
        t.Fatalf("expected 500k zeros, read %d", n)
    }
    if err != nil {
        t.Fatal(err)
    }

    err = b.Put(context.Background(), &physical.Entry{
        Key:   "foo",
        Value: zeros,
    })
    if err == nil {
        t.Fatal("expected error")
    }
    if !strings.Contains(err.Error(), physical.ErrValueTooLarge) {
        t.Fatalf("expected value too large error, got %v", err)
    }

    err = b.(physical.Transactional).Transaction(context.Background(), []*physical.TxnEntry{
        {
            Operation: physical.PutOperation,
            Entry: &physical.Entry{
                Key:   "foo",
                Value: zeros,
            },
        },
    })
    if err == nil {
        t.Fatal("expected error")
    }
    if !strings.Contains(err.Error(), physical.ErrValueTooLarge) {
        t.Fatalf("expected value too large error, got %v", err)
    }
}

func TestConsulHABackend(t *testing.T) {
    consulToken := os.Getenv("CONSUL_HTTP_TOKEN")
    addr := os.Getenv("CONSUL_HTTP_ADDR")
    if addr == "" {
        cleanup, connURL, token := consul.PrepareTestContainer(t, "1.4.0-rc1")
        cleanup, connURL, token := consul.PrepareTestContainer(t, "1.4.4")
        defer cleanup()
        addr, consulToken = connURL, token
    }
@@ -86,17 +86,24 @@ func ScanView(ctx context.Context, view ClearableView, cb func(path string)) err

// CollectKeys is used to collect all the keys in a view
func CollectKeys(ctx context.Context, view ClearableView) ([]string, error) {
    // Accumulate the keys
    var existing []string
    return CollectKeysWithPrefix(ctx, view, "")
}

// CollectKeysWithPrefix is used to collect all the keys in a view with a given prefix string
func CollectKeysWithPrefix(ctx context.Context, view ClearableView, prefix string) ([]string, error) {
    var keys []string

    cb := func(path string) {
        existing = append(existing, path)
        if strings.HasPrefix(path, prefix) {
            keys = append(keys, path)
        }
    }

    // Scan for all the keys
    if err := ScanView(ctx, view, cb); err != nil {
        return nil, err
    }
    return existing, nil
    return keys, nil
}

// ClearView is used to delete all the keys in a view
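For illustration only, and not part of this diff: a minimal sketch of how a credential backend's list handler can use the new prefix-aware helper, in the same way the okta and ldap handlers earlier in this commit do. The function name and the "role/" prefix are hypothetical, and the usual sdk imports (context, strings, framework, logical) are assumed.

// Hypothetical list handler: collects only keys under "role/" and returns the
// bare names, instead of collecting every key and filtering by hand.
func pathRoleList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
    keys, err := logical.CollectKeysWithPrefix(ctx, req.Storage, "role/")
    if err != nil {
        return nil, err
    }
    for i := range keys {
        keys[i] = strings.TrimPrefix(keys[i], "role/")
    }
    return logical.ListResponse(keys), nil
}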
sdk/logical/storage_test.go (new file, 86 lines)

@@ -0,0 +1,86 @@
package logical

import (
    "context"
    "testing"

    "github.com/go-test/deep"
)

var keyList = []string{
    "a",
    "b",
    "d",
    "foo",
    "foo42",
    "foo/a/b/c",
    "c/d/e/f/g",
}

func TestScanView(t *testing.T) {
    s := prepKeyStorage(t)

    keys := make([]string, 0)
    err := ScanView(context.Background(), s, func(path string) {
        keys = append(keys, path)
    })

    if err != nil {
        t.Fatal(err)
    }

    if diff := deep.Equal(keys, keyList); diff != nil {
        t.Fatal(diff)
    }
}

func TestCollectKeys(t *testing.T) {
    s := prepKeyStorage(t)

    keys, err := CollectKeys(context.Background(), s)

    if err != nil {
        t.Fatal(err)
    }

    if diff := deep.Equal(keys, keyList); diff != nil {
        t.Fatal(diff)
    }
}

func TestCollectKeysPrefix(t *testing.T) {
    s := prepKeyStorage(t)

    keys, err := CollectKeysWithPrefix(context.Background(), s, "foo")

    if err != nil {
        t.Fatal(err)
    }

    exp := []string{
        "foo",
        "foo42",
        "foo/a/b/c",
    }

    if diff := deep.Equal(keys, exp); diff != nil {
        t.Fatal(diff)
    }
}

func prepKeyStorage(t *testing.T) Storage {
    t.Helper()
    s := &InmemStorage{}

    for _, key := range keyList {
        if err := s.Put(context.Background(), &StorageEntry{
            Key:      key,
            Value:    nil,
            SealWrap: false,
        }); err != nil {
            t.Fatal(err)
        }
    }

    return s
}
@@ -3,7 +3,9 @@ package inmem
import (
    "context"
    "errors"
    "fmt"
    "os"
    "strconv"
    "strings"
    "sync"
    "sync/atomic"

@@ -34,14 +36,15 @@ var (
// expected to be durable.
type InmemBackend struct {
    sync.RWMutex
    root *radix.Tree
    permitPool *physical.PermitPool
    logger log.Logger
    failGet *uint32
    failPut *uint32
    failDelete *uint32
    failList *uint32
    logOps bool
    root *radix.Tree
    permitPool *physical.PermitPool
    logger log.Logger
    failGet *uint32
    failPut *uint32
    failDelete *uint32
    failList *uint32
    logOps bool
    maxValueSize int
}

type TransactionalInmemBackend struct {

@@ -49,36 +52,56 @@ type TransactionalInmemBackend struct {
}

// NewInmem constructs a new in-memory backend
func NewInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) {
    in := &InmemBackend{
        root: radix.New(),
        permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
        logger: logger,
        failGet: new(uint32),
        failPut: new(uint32),
        failDelete: new(uint32),
        failList: new(uint32),
        logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
func NewInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) {
    maxValueSize := 0
    maxValueSizeStr, ok := conf["max_value_size"]
    if ok {
        var err error
        maxValueSize, err = strconv.Atoi(maxValueSizeStr)
        if err != nil {
            return nil, err
        }
    }
    return in, nil

    return &InmemBackend{
        root: radix.New(),
        permitPool: physical.NewPermitPool(physical.DefaultParallelOperations),
        logger: logger,
        failGet: new(uint32),
        failPut: new(uint32),
        failDelete: new(uint32),
        failList: new(uint32),
        logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
        maxValueSize: maxValueSize,
    }, nil
}

// Basically for now just creates a permit pool of size 1 so only one operation
// can run at a time
func NewTransactionalInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) {
    in := &TransactionalInmemBackend{
        InmemBackend: InmemBackend{
            root: radix.New(),
            permitPool: physical.NewPermitPool(1),
            logger: logger,
            failGet: new(uint32),
            failPut: new(uint32),
            failDelete: new(uint32),
            failList: new(uint32),
            logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
        },
func NewTransactionalInmem(conf map[string]string, logger log.Logger) (physical.Backend, error) {
    maxValueSize := 0
    maxValueSizeStr, ok := conf["max_value_size"]
    if ok {
        var err error
        maxValueSize, err = strconv.Atoi(maxValueSizeStr)
        if err != nil {
            return nil, err
        }
    }
    return in, nil

    return &TransactionalInmemBackend{
        InmemBackend: InmemBackend{
            root: radix.New(),
            permitPool: physical.NewPermitPool(1),
            logger: logger,
            failGet: new(uint32),
            failPut: new(uint32),
            failDelete: new(uint32),
            failList: new(uint32),
            logOps: os.Getenv("VAULT_INMEM_LOG_ALL_OPS") != "",
            maxValueSize: maxValueSize,
        },
    }, nil
}

// Put is used to insert or update an entry

@@ -106,6 +129,10 @@ func (i *InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) e
    default:
    }

    if i.maxValueSize > 0 && len(entry.Value) > i.maxValueSize {
        return fmt.Errorf("%s", physical.ErrValueTooLarge)
    }

    i.root.Insert(entry.Key, entry.Value)
    return nil
}
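For illustration only, and not part of this diff: a minimal, self-contained sketch of the new `max_value_size` option being exercised against the in-memory backend. The 1 KiB limit, the key name, and the use of hclog's default logger are arbitrary choices for the example.

package main

import (
    "context"
    "fmt"

    log "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/vault/sdk/physical"
    "github.com/hashicorp/vault/sdk/physical/inmem"
)

func main() {
    // Cap stored values at 1 KiB; anything larger should fail with ErrValueTooLarge.
    b, err := inmem.NewInmem(map[string]string{"max_value_size": "1024"}, log.Default())
    if err != nil {
        panic(err)
    }
    err = b.Put(context.Background(), &physical.Entry{
        Key:   "foo",
        Value: make([]byte, 2048),
    })
    fmt.Println(err) // expected: put failed due to value being too large
}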
@@ -20,6 +20,10 @@ const (
    PutOperation = "put"
)

const (
    ErrValueTooLarge = "put failed due to value being too large"
)

// ShutdownSignal
type ShutdownChannel chan struct{}
@@ -1001,7 +1001,7 @@ func (m *ExpirationManager) RenewToken(ctx context.Context, req *logical.Request
    if resp.Auth.EntityID != "" &&
        resp.Auth.GroupAliases != nil &&
        m.core.identityStore != nil {
        validAliases, err := m.core.identityStore.refreshExternalGroupMembershipsByEntityID(resp.Auth.EntityID, resp.Auth.GroupAliases)
        validAliases, err := m.core.identityStore.refreshExternalGroupMembershipsByEntityID(ctx, resp.Auth.EntityID, resp.Auth.GroupAliases)
        if err != nil {
            return nil, err
        }
@@ -6,6 +6,7 @@ import (
    log "github.com/hashicorp/go-hclog"
    "github.com/hashicorp/vault/api"
    "github.com/hashicorp/vault/builtin/credential/ldap"
    "github.com/hashicorp/vault/helper/namespace"
    vaulthttp "github.com/hashicorp/vault/http"
    "github.com/hashicorp/vault/sdk/logical"
    "github.com/hashicorp/vault/vault"

@@ -235,7 +236,9 @@ func TestIdentityStore_Integ_GroupAliases(t *testing.T) {
    // Remove its member entities
    group.MemberEntityIDs = nil

    err = identityStore.UpsertGroup(group, true)
    ctx := namespace.RootContext(nil)

    err = identityStore.UpsertGroup(ctx, group, true)
    if err != nil {
        t.Fatal(err)
    }

@@ -256,7 +259,7 @@ func TestIdentityStore_Integ_GroupAliases(t *testing.T) {
    // Remove its member entities
    group.MemberEntityIDs = nil

    err = identityStore.UpsertGroup(group, true)
    err = identityStore.UpsertGroup(ctx, group, true)
    if err != nil {
        t.Fatal(err)
    }

@@ -277,7 +280,7 @@ func TestIdentityStore_Integ_GroupAliases(t *testing.T) {
    // Remove its member entities
    group.MemberEntityIDs = nil

    err = identityStore.UpsertGroup(group, true)
    err = identityStore.UpsertGroup(ctx, group, true)
    if err != nil {
        t.Fatal(err)
    }
@@ -251,7 +251,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) {
    }

    // Only update MemDB and don't touch the storage
    err = i.UpsertGroupInTxn(txn, group, false)
    err = i.UpsertGroupInTxn(ctx, txn, group, false)
    if err != nil {
        i.logger.Error("failed to update group in MemDB", "error", err)
        return

@@ -336,7 +336,7 @@ func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *sto
    }

    // Store the entity with new format
    err = i.entityPacker.PutItem(item)
    err = i.entityPacker.PutItem(ctx, item)
    if err != nil {
        return nil, err
    }
@@ -406,7 +406,7 @@ func (i *IdentityStore) pathAliasIDDelete() framework.OperationFunc {
        Message: entityAsAny,
    }

    err = i.entityPacker.PutItem(item)
    err = i.entityPacker.PutItem(ctx, item)
    if err != nil {
        return nil, err
    }
@@ -262,7 +262,8 @@ func (i *IdentityStore) handleEntityUpdateCommon() framework.OperationFunc {

    // Prepare the response
    respData := map[string]interface{}{
        "id": entity.ID,
        "id":   entity.ID,
        "name": entity.Name,
    }

    var aliasIDs []string

@@ -490,7 +491,7 @@ func (i *IdentityStore) handleEntityDeleteCommon(ctx context.Context, txn *memdb

    for _, group := range groups {
        group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, entity.ID)
        err = i.UpsertGroupInTxn(txn, group, true)
        err = i.UpsertGroupInTxn(ctx, txn, group, true)
        if err != nil {
            return err
        }

@@ -509,7 +510,7 @@ func (i *IdentityStore) handleEntityDeleteCommon(ctx context.Context, txn *memdb
    }

    // Delete the entity from storage
    err = i.entityPacker.DeleteItem(entity.ID)
    err = i.entityPacker.DeleteItem(ctx, entity.ID)
    if err != nil {
        return err
    }

@@ -708,7 +709,7 @@ func (i *IdentityStore) mergeEntity(ctx context.Context, txn *memdb.Txn, toEntit

    if persist && !isPerfSecondaryOrStandby {
        // Delete the entity which we are merging from in storage
        err = i.entityPacker.DeleteItem(fromEntity.ID)
        err = i.entityPacker.DeleteItem(ctx, fromEntity.ID)
        if err != nil {
            return nil, err
        }

@@ -732,7 +733,7 @@ func (i *IdentityStore) mergeEntity(ctx context.Context, txn *memdb.Txn, toEntit
        Message: toEntityAsAny,
    }

    err = i.entityPacker.PutItem(item)
    err = i.entityPacker.PutItem(ctx, item)
    if err != nil {
        return nil, err
    }
@@ -294,7 +294,7 @@ func (i *IdentityStore) pathGroupAliasIDDelete() framework.OperationFunc {
    // Delete the alias
    group.Alias = nil

    err = i.UpsertGroupInTxn(txn, group, true)
    err = i.UpsertGroupInTxn(ctx, txn, group, true)
    if err != nil {
        return nil, err
    }

@@ -443,7 +443,7 @@ func (i *IdentityStore) handleGroupDeleteCommon(ctx context.Context, key string,
    }

    // Delete the group from storage
    err = i.groupPacker.DeleteItem(group.ID)
    err = i.groupPacker.DeleteItem(ctx, group.ID)
    if err != nil {
        return nil, err
    }
@@ -100,8 +100,10 @@ func TestIdentityStore_GroupEntityMembershipUpgrade(t *testing.T) {
    // Manually add an invalid entity as the group's member
    group.MemberEntityIDs = []string{"invalidentityid"}

    ctx := namespace.RootContext(nil)

    // Persist the group
    err = c.identityStore.UpsertGroupInTxn(txn, group, true)
    err = c.identityStore.UpsertGroupInTxn(ctx, txn, group, true)
    if err != nil {
        t.Fatal(err)
    }
@@ -86,7 +86,9 @@ func TestIdentityStore_UnsealingWhenConflictingAliasNames(t *testing.T) {
        ID:      entity2.ID,
        Message: entity2Any,
    }
    if err = c.identityStore.entityPacker.PutItem(item); err != nil {

    ctx := namespace.RootContext(nil)
    if err = c.identityStore.entityPacker.PutItem(ctx, item); err != nil {
        t.Fatal(err)
    }
@@ -115,7 +115,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error {
    // Group's namespace doesn't exist anymore but the group
    // from the namespace still exists.
    i.logger.Warn("deleting group and its any existing aliases", "name", group.Name, "namespace_id", group.NamespaceID)
    err = i.groupPacker.DeleteItem(group.ID)
    err = i.groupPacker.DeleteItem(ctx, group.ID)
    if err != nil {
        return err
    }

@@ -158,7 +158,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error {
        }
    }

    err = i.UpsertGroupInTxn(txn, group, persist)
    err = i.UpsertGroupInTxn(ctx, txn, group, persist)
    if err != nil {
        txn.Abort()
        return errwrap.Wrapf("failed to update group in memdb: {{err}}", err)

@@ -282,7 +282,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error {
    // Entity's namespace doesn't exist anymore but the
    // entity from the namespace still exists.
    i.logger.Warn("deleting entity and its any existing aliases", "name", entity.Name, "namespace_id", entity.NamespaceID)
    err = i.entityPacker.DeleteItem(entity.ID)
    err = i.entityPacker.DeleteItem(ctx, entity.ID)
    if err != nil {
        return err
    }

@@ -419,7 +419,7 @@ func (i *IdentityStore) upsertEntityInTxn(ctx context.Context, txn *memdb.Txn, e
    if err != nil {
        return err
    }
    err = i.entityPacker.PutItem(&storagepacker.Item{
    err = i.entityPacker.PutItem(ctx, &storagepacker.Item{
        ID:      previousEntity.ID,
        Message: marshaledPreviousEntity,
    })

@@ -446,7 +446,7 @@ func (i *IdentityStore) upsertEntityInTxn(ctx context.Context, txn *memdb.Txn, e
    }

    // Persist the entity object
    err = i.entityPacker.PutItem(item)
    err = i.entityPacker.PutItem(ctx, item)
    if err != nil {
        return err
    }

@@ -1128,7 +1128,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *ident
    // Remove group ID from the parent group IDs
    currentMemberGroup.ParentGroupIDs = strutil.StrListDelete(currentMemberGroup.ParentGroupIDs, group.ID)

    err = i.UpsertGroupInTxn(txn, currentMemberGroup, true)
    err = i.UpsertGroupInTxn(ctx, txn, currentMemberGroup, true)
    if err != nil {
        return err
    }

@@ -1184,7 +1184,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *ident

    // This technically is not upsert. It is only update, only the method
    // name is upsert here.
    err = i.UpsertGroupInTxn(txn, memberGroup, true)
    err = i.UpsertGroupInTxn(ctx, txn, memberGroup, true)
    if err != nil {
        // Ideally we would want to revert the whole operation in case of
        // errors while persisting in member groups. But there is no

@@ -1204,7 +1204,7 @@ ALIAS:
        }
    }

    err = i.UpsertGroupInTxn(txn, group, true)
    err = i.UpsertGroupInTxn(ctx, txn, group, true)
    if err != nil {
        return err
    }

@@ -1331,11 +1331,11 @@ func (i *IdentityStore) MemDBGroupByName(ctx context.Context, groupName string,
    return i.MemDBGroupByNameInTxn(ctx, txn, groupName, clone)
}

func (i *IdentityStore) UpsertGroup(group *identity.Group, persist bool) error {
func (i *IdentityStore) UpsertGroup(ctx context.Context, group *identity.Group, persist bool) error {
    txn := i.db.Txn(true)
    defer txn.Abort()

    err := i.UpsertGroupInTxn(txn, group, true)
    err := i.UpsertGroupInTxn(ctx, txn, group, true)
    if err != nil {
        return err
    }

@@ -1345,7 +1345,7 @@ func (i *IdentityStore) UpsertGroup(group *identity.Group, persist bool) error {
    return nil
}

func (i *IdentityStore) UpsertGroupInTxn(txn *memdb.Txn, group *identity.Group, persist bool) error {
func (i *IdentityStore) UpsertGroupInTxn(ctx context.Context, txn *memdb.Txn, group *identity.Group, persist bool) error {
    var err error

    if txn == nil {

@@ -1401,7 +1401,7 @@ func (i *IdentityStore) UpsertGroupInTxn(txn *memdb.Txn, group *identity.Group,
        return err
    }
    if !sent {
        if err := i.groupPacker.PutItem(item); err != nil {
        if err := i.groupPacker.PutItem(ctx, item); err != nil {
            return err
        }
    }

@@ -1845,7 +1845,7 @@ func (i *IdentityStore) MemDBGroupByAliasID(aliasID string, clone bool) (*identi
    return i.MemDBGroupByAliasIDInTxn(txn, aliasID, clone)
}

func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(entityID string, groupAliases []*logical.Alias) ([]*logical.Alias, error) {
func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(ctx context.Context, entityID string, groupAliases []*logical.Alias) ([]*logical.Alias, error) {
    i.logger.Debug("refreshing external group memberships", "entity_id", entityID, "group_aliases", groupAliases)
    if entityID == "" {
        return nil, fmt.Errorf("empty entity ID")

@@ -1901,7 +1901,7 @@ func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(entityID strin

    group.MemberEntityIDs = append(group.MemberEntityIDs, entityID)

    err = i.UpsertGroupInTxn(txn, group, true)
    err = i.UpsertGroupInTxn(ctx, txn, group, true)
    if err != nil {
        return nil, err
    }

@@ -1923,7 +1923,7 @@ func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(entityID strin

    group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, entityID)

    err = i.UpsertGroupInTxn(txn, group, true)
    err = i.UpsertGroupInTxn(ctx, txn, group, true)
    if err != nil {
        return nil, err
    }
@@ -1071,7 +1071,7 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re

    auth.EntityID = entity.ID
    if auth.GroupAliases != nil {
        validAliases, err := c.identityStore.refreshExternalGroupMembershipsByEntityID(auth.EntityID, auth.GroupAliases)
        validAliases, err := c.identityStore.refreshExternalGroupMembershipsByEntityID(ctx, auth.EntityID, auth.GroupAliases)
        if err != nil {
            return nil, nil, err
        }
vendor/github.com/hashicorp/vault/sdk/logical/storage.go (generated, vendored, 15 lines changed): identical to the sdk/logical/storage.go change above.
vendor/github.com/hashicorp/vault/sdk/physical/inmem/inmem.go (generated, vendored, 91 lines changed): identical to the in-memory backend change above.
vendor/github.com/hashicorp/vault/sdk/physical/physical.go (generated, vendored, 4 lines changed): identical to the ErrValueTooLarge constant change above.
@@ -169,7 +169,7 @@ The following parameters are only used with Vault Enterprise

- `disable_performance_standby` `(bool: false)` – Specifies whether performance
  standbys should be disabled on this node. Setting this to true on one Vault
  node will disable this feature when this node is Active or Standby. It's
  recomended to sync this setting across all nodes in the cluster.
  recommended to sync this setting across all nodes in the cluster.

[storage-backend]: /docs/configuration/storage/index.html
[listener]: /docs/configuration/listener/index.html
@@ -45,14 +45,14 @@ These parameters apply to the `seal` stanza in the Vault configuration file:

- `client_id` `(string: <required or MSI>)`: The client id for credentials to query the Azure APIs.
  May also be specified by the `AZURE_CLIENT_ID` environment variable.

- `client_secret` `(string: <required or MSI>)`: The client id for credentials to query the Azure APIs.
  May also be specified by the `AZURE_CLIENT_ID` environment variable.
- `client_secret` `(string: <required or MSI>)`: The client secret for credentials to query the Azure APIs.
  May also be specified by the `AZURE_CLIENT_SECRET` environment variable.

- `environment` `(string: "AZUREPUBLICCLOUD")`: The Azure Cloud environment API endpoints to use. May also
  be specified by the `VAULT_AZUREKEYVAULT_VAULT_NAME` environment variable.
  be specified by the `AZURE_ENVIRONMENT` environment variable.

- `vault_name` `(string: <required>)`: The Key Vault vault to use the encryption keys for encryption and
  decryption. May also be specified by the `VAULT_AZUREKEYVAULT_KEY_NAME` environment variable.
  decryption. May also be specified by the `VAULT_AZUREKEYVAULT_VAULT_NAME` environment variable.

- `key_name` `(string: <required>)`: The Key Vault key to use for encryption and decryption. May also be specified by the
  `VAULT_AZUREKEYVAULT_KEY_NAME` environment variable.
@@ -32,7 +32,7 @@ Docs](/docs/secrets/kv/kv-v1.html)

When running v2 of the `kv` backend a key can retain a configurable number of
versions. This defaults to 10 versions. The older versions' metadata and data
can be retrieved. Additionally, Check-and-Set operations can be used to avoid
overwritting data unintentionally.
overwriting data unintentionally.

When a version is deleted the underlying data is not removed, rather it is
marked as deleted. Deleted versions can be undeleted. To permanently remove a