Port changes from enterprise lease fix (#10020)

parent 3cc15ba146
commit 3f30fc5f4e
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.23.0
-// protoc v3.12.2
+// protoc v3.13.0
 // source: helper/forwarding/types.proto

 package forwarding

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.23.0
-// protoc v3.12.2
+// protoc v3.13.0
 // source: helper/identity/mfa/types.proto

 package mfa

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.23.0
-// protoc v3.12.2
+// protoc v3.13.0
 // source: helper/identity/types.proto

 package identity

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.23.0
-// protoc v3.12.2
+// protoc v3.13.0
 // source: helper/storagepacker/types.proto

 package storagepacker
@@ -81,7 +81,7 @@ func TestRaft_Chunking_Lifecycle(t *testing.T) {

 	t.Log("tearing down cluster")
 	require.NoError(b.TeardownCluster(nil))
-	require.NoError(b.fsm.db.Close())
+	require.NoError(b.fsm.getDB().Close())
 	require.NoError(b.stableStore.(*raftboltdb.BoltStore).Close())

 	t.Log("starting new backend")
@@ -195,6 +195,15 @@ func TestRaft_Chunking_AppliedIndex(t *testing.T) {
 		t.Fatal(err)
 	}

+	// Write a value to fastforward the index
+	err = raft.Put(context.Background(), &physical.Entry{
+		Key:   "key",
+		Value: []byte("test"),
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
 	currentIndex := raft.AppliedIndex()
 	// Write some data
 	for i := 0; i < 10; i++ {
@@ -77,6 +77,9 @@ type FSM struct {
 	logger      log.Logger
 	noopRestore bool

+	// applyDelay is used to simulate a slow apply in tests
+	applyDelay time.Duration
+
 	db *bolt.DB

 	// retoreCb is called after we've restored a snapshot
@@ -118,6 +121,21 @@ func NewFSM(path string, logger log.Logger) (*FSM, error) {
 	return f, nil
 }

+func (f *FSM) getDB() *bolt.DB {
+	f.l.RLock()
+	defer f.l.RUnlock()
+
+	return f.db
+}
+
+// SetFSMDelay adds a delay to the FSM apply. This is used in tests to simulate
+// a slow apply.
+func (r *RaftBackend) SetFSMDelay(delay time.Duration) {
+	r.fsm.l.Lock()
+	r.fsm.applyDelay = delay
+	r.fsm.l.Unlock()
+}
+
 func (f *FSM) openDBFile(dbPath string) error {
 	if len(dbPath) == 0 {
 		return errors.New("can not open empty filename")
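The two helpers added above follow a common Go pattern: every access to f.db and to the test-only delay goes through the FSM's RWMutex, so a restore that swaps the handle under the write lock cannot race with readers. A minimal self-contained sketch of that pattern; the package name and the boltDB stand-in are illustrative, not Vault code:

package fsmsketch

import (
	"sync"
	"time"
)

// boltDB stands in for *bolt.DB so the sketch compiles on its own.
type boltDB struct{}

// fsm mirrors the locking shape of the FSM above: one RWMutex guards both
// the database handle and the test-only apply delay.
type fsm struct {
	l          sync.RWMutex
	db         *boltDB
	applyDelay time.Duration
}

// getDB hands out the handle under the read lock, so a concurrent restore
// (which replaces db under the write lock) is never observed mid-swap.
func (f *fsm) getDB() *boltDB {
	f.l.RLock()
	defer f.l.RUnlock()
	return f.db
}

// setApplyDelay is the test hook: the apply path sleeps for this duration
// before writing, which widens timing windows deterministically in tests.
func (f *fsm) setApplyDelay(d time.Duration) {
	f.l.Lock()
	f.applyDelay = d
	f.l.Unlock()
}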
@@ -222,6 +240,9 @@ func writeSnapshotMetaToDB(metadata *raft.SnapshotMeta, db *bolt.DB) error {
 }

 func (f *FSM) witnessSnapshot(metadata *raft.SnapshotMeta) error {
+	f.l.RLock()
+	defer f.l.RUnlock()
+
 	err := writeSnapshotMetaToDB(metadata, f.db)
 	if err != nil {
 		return err
@@ -448,6 +469,10 @@ func (f *FSM) ApplyBatch(logs []*raft.Log) []interface{} {
 	f.l.RLock()
 	defer f.l.RUnlock()

+	if f.applyDelay > 0 {
+		time.Sleep(f.applyDelay)
+	}
+
 	err = f.db.Update(func(tx *bolt.Tx) error {
 		b := tx.Bucket(dataBucketName)
 		for _, commandRaw := range commands {
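Sleeping inside ApplyBatch while the read lock is held only serializes the FSM against writers; other read-locked helpers such as getDB stay unblocked. A small stdlib-only demonstration of that property of sync.RWMutex:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var l sync.RWMutex

	l.RLock() // the "slow apply" holds a read lock and sleeps
	go func() {
		defer l.RUnlock()
		time.Sleep(200 * time.Millisecond)
	}()

	start := time.Now()
	l.RLock() // a concurrent read-locked caller is not blocked
	l.RUnlock()
	fmt.Println("reader waited:", time.Since(start).Round(time.Millisecond))

	l.Lock() // a writer must wait for the slow apply to release its read lock
	l.Unlock()
	fmt.Println("writer waited:", time.Since(start).Round(time.Millisecond))
}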
@@ -768,11 +768,14 @@ func (b *RaftBackend) AppliedIndex() uint64 {
 	b.l.RLock()
 	defer b.l.RUnlock()

-	if b.raft == nil {
+	if b.fsm == nil {
 		return 0
 	}

-	return b.raft.AppliedIndex()
+	// We use the latest index that the FSM has seen here, which may be behind
+	// raft.AppliedIndex() due to the async nature of the raft library.
+	indexState, _ := b.fsm.LatestState()
+	return indexState.Index
 }

 // RemovePeer removes the given peer ID from the raft cluster. If the node is
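The comment in the hunk notes that the FSM index may trail raft's own applied index. A caller that needs to know the FSM has caught up to a given log index can poll AppliedIndex; a hedged sketch of such a wait loop against an assumed minimal interface, not the real RaftBackend API beyond the one method shown above:

package raftsketch

import (
	"context"
	"time"
)

// indexReporter is the one method the sketch needs; on the backend above it
// now reflects what the FSM has applied, not what the raft library reports.
type indexReporter interface {
	AppliedIndex() uint64
}

// waitForIndex polls until the FSM-visible index reaches target or the
// context expires.
func waitForIndex(ctx context.Context, b indexReporter, target uint64) error {
	ticker := time.NewTicker(10 * time.Millisecond)
	defer ticker.Stop()
	for {
		if b.AppliedIndex() >= target {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}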
@@ -80,7 +80,7 @@ func getRaftWithDir(t testing.TB, bootstrap bool, noStoreState bool, raftDir str
 		}

 		for {
-			if backend.AppliedIndex() >= 2 {
+			if backend.raft.AppliedIndex() >= 2 {
 				break
 			}
 		}

@@ -156,7 +156,7 @@ func compareFSMsWithErr(t *testing.T, fsm1, fsm2 *FSM) error {
 		return fmt.Errorf("configs did not match: %+v != %+v", config1, config2)
 	}

-	return compareDBs(t, fsm1.db, fsm2.db, false)
+	return compareDBs(t, fsm1.getDB(), fsm2.getDB(), false)
 }

 func compareDBs(t *testing.T, boltDB1, boltDB2 *bolt.DB, dataOnly bool) error {
@@ -259,7 +259,7 @@ func TestRaft_Snapshot_Peers(t *testing.T) {
 	ensureCommitApplied(t, commitIdx, raft2)

 	// Make sure the snapshot was applied correctly on the follower
-	if err := compareDBs(t, raft1.fsm.db, raft2.fsm.db, false); err != nil {
+	if err := compareDBs(t, raft1.fsm.getDB(), raft2.fsm.getDB(), false); err != nil {
 		t.Fatal(err)
 	}

@@ -758,13 +758,13 @@ func TestBoltSnapshotStore_CreateInstallSnapshot(t *testing.T) {
 		t.Fatal(err)
 	}

-	err = compareDBs(t, fsm.db, newFSM.db, true)
+	err = compareDBs(t, fsm.getDB(), newFSM.getDB(), true)
 	if err != nil {
 		t.Fatal(err)
 	}

 	// Make sure config data is different
-	err = compareDBs(t, fsm.db, newFSM.db, false)
+	err = compareDBs(t, fsm.getDB(), newFSM.getDB(), false)
 	if err == nil {
 		t.Fatal("expected error")
 	}

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.23.0
-// protoc v3.12.2
+// protoc v3.13.0
 // source: physical/raft/types.proto

 package raft

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.23.0
-// protoc v3.12.2
+// protoc v3.13.0
 // source: sdk/database/dbplugin/database.proto

 package dbplugin

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.23.0
-// protoc v3.12.2
+// protoc v3.13.0
 // source: sdk/logical/identity.proto

 package logical

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.23.0
-// protoc v3.12.2
+// protoc v3.13.0
 // source: sdk/logical/plugin.proto

 package logical

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.23.0
-// protoc v3.12.2
+// protoc v3.13.0
 // source: sdk/plugin/pb/backend.proto

 package pb
@@ -1,14 +1,13 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.22.0
-// protoc v3.6.1
+// protoc-gen-go v1.23.0
+// protoc v3.13.0
 // source: vault/activity/activity_log.proto

 package activity

 import (
 	proto "github.com/golang/protobuf/proto"
-	_ "github.com/golang/protobuf/ptypes/timestamp"
 	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
 	reflect "reflect"

@@ -166,36 +165,34 @@ var File_vault_activity_activity_log_proto protoreflect.FileDescriptor
 var file_vault_activity_activity_log_proto_rawDesc = []byte{
 	0x0a, 0x21, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79,
 	0x2f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x12, 0x08, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x1a, 0x1f, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
-	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6c,
-	0x0a, 0x0c, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x1b,
-	0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e,
-	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1c,
-	0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x88, 0x02, 0x0a,
-	0x0b, 0x4c, 0x6f, 0x67, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x10,
-	0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x6e, 0x6f, 0x64, 0x65,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74,
-	0x69, 0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74,
-	0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x61, 0x63, 0x74, 0x69,
-	0x76, 0x69, 0x74, 0x79, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72,
-	0x64, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x56, 0x0a, 0x11, 0x6e,
-	0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73,
-	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74,
-	0x79, 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x6f,
-	0x6e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x45, 0x6e, 0x74,
-	0x72, 0x79, 0x52, 0x0f, 0x6e, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b,
-	0x65, 0x6e, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4e, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79,
-	0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
-	0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
-	0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75,
-	0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f,
-	0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x61, 0x63, 0x74, 0x69,
-	0x76, 0x69, 0x74, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x6f, 0x74, 0x6f, 0x12, 0x08, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x22, 0x6c, 0x0a,
+	0x0c, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x1b, 0x0a,
+	0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6e, 0x61,
+	0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a,
+	0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03,
+	0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x88, 0x02, 0x0a, 0x0b,
+	0x4c, 0x6f, 0x67, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x6f,
+	0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69,
+	0x6e, 0x67, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69,
+	0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x61, 0x63, 0x74, 0x69, 0x76,
+	0x69, 0x74, 0x79, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64,
+	0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x56, 0x0a, 0x11, 0x6e, 0x6f,
+	0x6e, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18,
+	0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79,
+	0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x72, 0x61, 0x67, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4e, 0x6f, 0x6e,
+	0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x0f, 0x6e, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54, 0x6f, 0x6b, 0x65,
+	0x6e, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x4e, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x54,
+	0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+	0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76,
+	0x61, 0x75, 0x6c, 0x74, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x76,
+	0x69, 0x74, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }

 var (
@@ -30,6 +30,7 @@ type InmemLayer struct {
 	stopCh chan struct{}

 	connectionCh chan *ConnectionInfo
+	readerDelay  time.Duration
 }

 // NewInmemLayer returns a new in-memory layer configured to listen on the
@@ -52,6 +53,26 @@ func (l *InmemLayer) SetConnectionCh(ch chan *ConnectionInfo) {
 	l.l.Unlock()
 }

+func (l *InmemLayer) SetReaderDelay(delay time.Duration) {
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	l.readerDelay = delay
+
+	// Update the existing server and client connections
+	for _, servConns := range l.servConns {
+		for _, c := range servConns {
+			c.(*delayedConn).SetDelay(delay)
+		}
+	}
+
+	for _, clientConns := range l.clientConns {
+		for _, c := range clientConns {
+			c.(*delayedConn).SetDelay(delay)
+		}
+	}
+}
+
 // Addrs implements NetworkLayer.
 func (l *InmemLayer) Addrs() []net.Addr {
 	l.l.Lock()
@@ -127,7 +148,7 @@ func (l *InmemLayer) Dial(addr string, timeout time.Duration, tlsConfig *tls.Con

 	tlsConn := tls.Client(conn, tlsConfig)

-	l.clientConns[addr] = append(l.clientConns[addr], tlsConn)
+	l.clientConns[addr] = append(l.clientConns[addr], conn)

 	return tlsConn, nil
 }
@@ -149,6 +170,9 @@ func (l *InmemLayer) clientConn(addr string) (net.Conn, error) {

 	retConn, servConn := net.Pipe()

+	retConn = newDelayedConn(retConn, l.readerDelay)
+	servConn = newDelayedConn(servConn, l.readerDelay)
+
 	l.servConns[addr] = append(l.servConns[addr], servConn)

 	if l.logger.IsDebug() {
@@ -372,6 +396,12 @@ func (ic *InmemLayerCluster) SetConnectionCh(ch chan *ConnectionInfo) {
 	}
 }

+func (ic *InmemLayerCluster) SetReaderDelay(delay time.Duration) {
+	for _, node := range ic.layers {
+		node.SetReaderDelay(delay)
+	}
+}
+
 type ConnectionInfo struct {
 	Node   string
 	Remote string
@@ -0,0 +1,44 @@
+package cluster
+
+import (
+	"io"
+	"net"
+	"time"
+)
+
+type delayedConn struct {
+	net.Conn
+	dr *delayedReader
+}
+
+func newDelayedConn(conn net.Conn, delay time.Duration) net.Conn {
+	return &delayedConn{
+		Conn: conn,
+		dr: &delayedReader{
+			r:     conn,
+			delay: delay,
+		},
+	}
+}
+
+func (conn *delayedConn) Read(data []byte) (int, error) {
+	return conn.dr.Read(data)
+}
+
+func (conn *delayedConn) SetDelay(delay time.Duration) {
+	conn.dr.delay = delay
+}
+
+type delayedReader struct {
+	r     io.Reader
+	delay time.Duration
+}
+
+func (dr *delayedReader) Read(data []byte) (int, error) {
+	// Sleep for the delay period prior to reading
+	if dr.delay > 0 {
+		time.Sleep(dr.delay)
+	}
+
+	return dr.r.Read(data)
+}
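The delayedConn and delayedReader added in this new file simply put a sleep in front of every Read; SetReaderDelay above relies on that to make the in-memory cluster links behave like slow networks in tests. A standalone stdlib-only sketch of the same idea, independent of the vault/cluster types:

package main

import (
	"fmt"
	"io"
	"net"
	"time"
)

// slowReader mirrors delayedReader: it sleeps before every Read, which is
// enough to make an in-memory connection behave like a slow link.
type slowReader struct {
	r     io.Reader
	delay time.Duration
}

func (s *slowReader) Read(p []byte) (int, error) {
	if s.delay > 0 {
		time.Sleep(s.delay)
	}
	return s.r.Read(p)
}

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	// Write from the "server" side; net.Pipe blocks until the reader arrives.
	go func() {
		_, _ = server.Write([]byte("hello"))
	}()

	slow := &slowReader{r: client, delay: 100 * time.Millisecond}
	start := time.Now()
	buf := make([]byte, 5)
	if _, err := io.ReadFull(slow, buf); err != nil {
		panic(err)
	}
	fmt.Printf("read %q after %s\n", buf, time.Since(start).Round(10*time.Millisecond))
}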
@@ -981,6 +981,11 @@ func (m *ExpirationManager) Renew(ctx context.Context, leaseID string, increment
 		if err != nil {
 			return nil, err
 		}

+		if tokenLeaseTimes == nil {
+			return nil, errors.New("failed to load batch token expiration time")
+		}
+
 		if le.ExpireTime.After(tokenLeaseTimes.ExpireTime) {
 			resp.Secret.TTL = tokenLeaseTimes.ExpireTime.Sub(le.LastRenewalTime)
 			le.ExpireTime = tokenLeaseTimes.ExpireTime
@@ -1200,6 +1205,9 @@ func (m *ExpirationManager) Register(ctx context.Context, req *logical.Request,
 		if err != nil {
 			return "", err
 		}
+		if tokenLeaseTimes == nil {
+			return "", errors.New("failed to load batch token expiration time")
+		}
 		if le.ExpireTime.After(tokenLeaseTimes.ExpireTime) {
 			le.ExpireTime = tokenLeaseTimes.ExpireTime
 		}
@@ -1225,7 +1233,13 @@ func (m *ExpirationManager) Register(ctx context.Context, req *logical.Request,
 	}

 	// Setup revocation timer if there is a lease
-	m.updatePending(le, resp.Secret.LeaseTotal())
+	m.updatePending(le, le.ExpireTime.Sub(time.Now()))
+
+	// We round here because the clock will have already started
+	// ticking, so we'll end up always returning 299 instead of 300 or
+	// 26399 instead of 26400, say, even if it's just a few
+	// microseconds. This provides a nicer UX.
+	resp.Secret.TTL = le.ExpireTime.Sub(time.Now()).Round(time.Second)

 	// Done
 	return le.LeaseID, nil
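The rounding comment that moves into Register here is easy to check in isolation: without Round(time.Second), the microseconds that elapse between computing the expire time and reporting the TTL shave a nominal 300 seconds down to 299. A tiny stdlib-only illustration; the 300-second TTL is an assumed figure:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Pretend the lease was registered with a 300s TTL a moment ago, so the
	// clock has already eaten a few hundred microseconds of it.
	expireTime := time.Now().Add(300*time.Second - 250*time.Microsecond)

	raw := expireTime.Sub(time.Now())
	rounded := expireTime.Sub(time.Now()).Round(time.Second)

	fmt.Println(int(raw.Seconds()))     // 299: the truncated, surprising value
	fmt.Println(int(rounded.Seconds())) // 300: what gets reported after rounding
}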
@@ -15,20 +15,26 @@ import (
 	"github.com/hashicorp/go-cleanhttp"
 	uuid "github.com/hashicorp/go-uuid"
 	"github.com/hashicorp/vault/api"
+	credUserpass "github.com/hashicorp/vault/builtin/credential/userpass"
 	"github.com/hashicorp/vault/helper/namespace"
 	"github.com/hashicorp/vault/helper/testhelpers"
 	"github.com/hashicorp/vault/helper/testhelpers/teststorage"
 	vaulthttp "github.com/hashicorp/vault/http"
 	"github.com/hashicorp/vault/physical/raft"
+	"github.com/hashicorp/vault/sdk/logical"
 	"github.com/hashicorp/vault/vault"
 	"golang.org/x/net/http2"
 )

 func raftCluster(t testing.TB) *vault.TestCluster {
-	var conf vault.CoreConfig
+	conf := &vault.CoreConfig{
+		CredentialBackends: map[string]logical.Factory{
+			"userpass": credUserpass.Factory,
+		},
+	}
 	var opts = vault.TestClusterOptions{HandlerFunc: vaulthttp.Handler}
-	teststorage.RaftBackendSetup(&conf, &opts)
-	cluster := vault.NewTestCluster(t, &conf, &opts)
+	teststorage.RaftBackendSetup(conf, &opts)
+	cluster := vault.NewTestCluster(t, conf, &opts)
 	cluster.Start()
 	vault.TestWaitActive(t, cluster.Cores[0].Core)
 	return cluster
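raftCluster now seeds the test cores with the userpass credential backend. A hedged sketch of the kind of flow that enables in these external tests, using vault/api calls that exist independently of this diff; the mount path, user name, and helper shape are illustrative:

package rafttest

import (
	"fmt"

	"github.com/hashicorp/vault/api"
)

// loginUserpass mounts the userpass auth method, creates a user, and logs in,
// returning the client token from the login response.
func loginUserpass(client *api.Client) (string, error) {
	if err := client.Sys().EnableAuthWithOptions("userpass", &api.EnableAuthOptions{Type: "userpass"}); err != nil {
		return "", err
	}
	if _, err := client.Logical().Write("auth/userpass/users/tester", map[string]interface{}{
		"password": "testpassword",
	}); err != nil {
		return "", err
	}
	secret, err := client.Logical().Write("auth/userpass/login/tester", map[string]interface{}{
		"password": "testpassword",
	})
	if err != nil {
		return "", err
	}
	if secret == nil || secret.Auth == nil {
		return "", fmt.Errorf("no auth data in login response")
	}
	return secret.Auth.ClientToken, nil
}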
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // protoc-gen-go v1.23.0
-// protoc v3.12.2
+// protoc v3.13.0
 // source: vault/request_forwarding_service.proto

 package vault
@@ -29,7 +29,7 @@ import (
 )

 const (
-	replTimeout = 10 * time.Second
+	replTimeout = 1 * time.Second
 )

 var (
@@ -862,19 +862,6 @@ func (c *Core) handleRequest(ctx context.Context, req *logical.Request) (retResp
 		leaseGenerated = true
 		resp.Secret.LeaseID = leaseID

-		// Get the actual time of the lease
-		le, err := c.expiration.FetchLeaseTimes(ctx, leaseID)
-		if err != nil {
-			c.logger.Error("failed to fetch updated lease time", "request_path", req.Path, "error", err)
-			retErr = multierror.Append(retErr, ErrInternalError)
-			return nil, auth, retErr
-		}
-		// We round here because the clock will have already started
-		// ticking, so we'll end up always returning 299 instead of 300 or
-		// 26399 instead of 26400, say, even if it's just a few
-		// microseconds. This provides a nicer UX.
-		resp.Secret.TTL = le.ExpireTime.Sub(time.Now()).Round(time.Second)
-
 		// Count the lease creation
 		ttl_label := metricsutil.TTLBucket(resp.Secret.TTL)
 		mountPointWithoutNs := ns.TrimmedPath(req.MountPoint)
@@ -1459,6 +1459,8 @@ func NewTestCluster(t testing.T, base *CoreConfig, opts *TestClusterOptions) *Te
 		coreConfig.DevToken = base.DevToken
 		coreConfig.CounterSyncInterval = base.CounterSyncInterval
 		coreConfig.RecoveryMode = base.RecoveryMode
+
+		testApplyEntBaseConfig(coreConfig, base)
 	}

 	if coreConfig.ClusterHeartbeatInterval == 0 {
@@ -12,3 +12,4 @@ func testExtraTestCoreSetup(testing.T, interface{}, *TestClusterCore) {}
 func testAdjustUnderlyingStorage(tcc *TestClusterCore) {
 	tcc.UnderlyingStorage = tcc.physical
 }
+func testApplyEntBaseConfig(coreConfig, base *CoreConfig) {}