Three code-level optimizations to Iterator::Next()

Summary:
Three small optimizations:
(1) Avoid calling iter_->IsKeyPinned() unless read_options.pin_data is true; the call may be forwarded all the way down the iterator tree (see the short-circuit sketch below).
(2) Reuse the internal key object in DBIter::FindNextUserEntryInternal(); constructing a new ParsedInternalKey on every loop iteration has some overhead.
(3) Move the direction-switching logic in MergingIterator::Next() into a separate function, SwitchToForward().

Together, these three changes improve readseq performance by about 3% in my benchmark setting.
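
A minimal sketch of the idea behind (1), assuming a made-up Iter type and NeedCopy() helper (illustrative code only, not the RocksDB implementation): by checking the cheap pin_thru_lifetime_ flag first, short-circuit evaluation skips the potentially expensive virtual IsKeyPinned() call whenever pinning is disabled.

#include <cstdio>

// Stand-in for the iterator tree; IsKeyPinned() models a virtual call that
// may be forwarded through several child iterators before it returns.
struct Iter {
  virtual ~Iter() = default;
  virtual bool IsKeyPinned() const {
    std::puts("IsKeyPinned() called");
    return true;
  }
};

// Decide whether the user key must be copied.
// Old operand order:  !iter.IsKeyPinned() || !pin_thru_lifetime  -> always pays the call.
// New operand order:  !pin_thru_lifetime || !iter.IsKeyPinned()  -> the call is skipped
//                     entirely when pinning (read_options.pin_data) is off.
bool NeedCopy(bool pin_thru_lifetime, const Iter& iter) {
  return !pin_thru_lifetime || !iter.IsKeyPinned();
}

int main() {
  Iter it;
  std::printf("copy=%d\n", NeedCopy(false, it));  // no virtual call made
  std::printf("copy=%d\n", NeedCopy(true, it));   // one virtual call made
  return 0;
}
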
Closes https://github.com/facebook/rocksdb/pull/2880

Differential Revision: D5829252

Pulled By: siying

fbshipit-source-id: 991aea10c6d6c3b43769cb4db168db62954ad1e3
Author: Siying Dong
Date: 2017-09-14 17:43:41 -07:00
Committed by: Facebook Github Bot
Parent: 885b1c682e
Commit: edcbb36944
2 changed files with 45 additions and 38 deletions


@@ -264,6 +264,10 @@ class DBIter final: public Iterator {
   Status status_;
   IterKey saved_key_;
+  // Reusable internal key data structure. This is only used inside one
+  // function and should not be used across functions. Reusing this object
+  // can reduce the overhead of constructing it on every call.
+  ParsedInternalKey ikey_;
   std::string saved_value_;
   Slice pinned_value_;
   Direction direction_;
@@ -377,22 +381,20 @@ void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) {
   uint64_t num_skipped = 0;
   do {
-    ParsedInternalKey ikey;
-    if (!ParseKey(&ikey)) {
+    if (!ParseKey(&ikey_)) {
       // Skip corrupted keys.
       iter_->Next();
       continue;
     }
     if (iterate_upper_bound_ != nullptr &&
-        user_comparator_->Compare(ikey.user_key, *iterate_upper_bound_) >= 0) {
+        user_comparator_->Compare(ikey_.user_key, *iterate_upper_bound_) >= 0) {
       break;
     }
     if (prefix_extractor_ && prefix_check &&
-        prefix_extractor_->Transform(ikey.user_key)
-                .compare(prefix_start_key_) != 0) {
+        prefix_extractor_->Transform(ikey_.user_key)
+                .compare(prefix_start_key_) != 0) {
       break;
     }
@@ -400,32 +402,31 @@ void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) {
       return;
     }
-    if (ikey.sequence <= sequence_) {
-      if (skipping &&
-          user_comparator_->Compare(ikey.user_key, saved_key_.GetUserKey()) <=
-              0) {
+    if (ikey_.sequence <= sequence_) {
+      if (skipping && user_comparator_->Compare(ikey_.user_key,
+                                                saved_key_.GetUserKey()) <= 0) {
         num_skipped++;  // skip this entry
         PERF_COUNTER_ADD(internal_key_skipped_count, 1);
       } else {
         num_skipped = 0;
-        switch (ikey.type) {
+        switch (ikey_.type) {
           case kTypeDeletion:
           case kTypeSingleDeletion:
             // Arrange to skip all upcoming entries for this key since
             // they are hidden by this deletion.
             saved_key_.SetUserKey(
-                ikey.user_key,
-                !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
+                ikey_.user_key,
+                !pin_thru_lifetime_ || !iter_->IsKeyPinned() /* copy */);
             skipping = true;
             PERF_COUNTER_ADD(internal_delete_skipped_count, 1);
             break;
           case kTypeValue:
             saved_key_.SetUserKey(
-                ikey.user_key,
-                !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
+                ikey_.user_key,
+                !pin_thru_lifetime_ || !iter_->IsKeyPinned() /* copy */);
             if (range_del_agg_.ShouldDelete(
-                    ikey, RangeDelAggregator::RangePositioningMode::
-                              kForwardTraversal)) {
+                    ikey_, RangeDelAggregator::RangePositioningMode::
+                               kForwardTraversal)) {
               // Arrange to skip all upcoming entries for this key since
               // they are hidden by this deletion.
               skipping = true;
@@ -438,11 +439,11 @@ void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) {
             break;
           case kTypeMerge:
             saved_key_.SetUserKey(
-                ikey.user_key,
-                !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
+                ikey_.user_key,
+                !pin_thru_lifetime_ || !iter_->IsKeyPinned() /* copy */);
             if (range_del_agg_.ShouldDelete(
-                    ikey, RangeDelAggregator::RangePositioningMode::
-                              kForwardTraversal)) {
+                    ikey_, RangeDelAggregator::RangePositioningMode::
+                               kForwardTraversal)) {
               // Arrange to skip all upcoming entries for this key since
               // they are hidden by this deletion.
               skipping = true;
@@ -468,12 +469,12 @@ void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) {
       // Here saved_key_ may contain some old key, or the default empty key, or
      // key assigned by some random other method. We don't care.
-      if (user_comparator_->Compare(ikey.user_key, saved_key_.GetUserKey()) <=
+      if (user_comparator_->Compare(ikey_.user_key, saved_key_.GetUserKey()) <=
           0) {
         num_skipped++;
       } else {
         saved_key_.SetUserKey(
-            ikey.user_key,
+            ikey_.user_key,
             !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
         skipping = false;
         num_skipped = 0;


@@ -156,21 +156,7 @@ class MergingIterator : public InternalIterator {
     // true for all of the non-current children since current_ is
     // the smallest child and key() == current_->key().
     if (direction_ != kForward) {
-      // Otherwise, advance the non-current children. We advance current_
-      // just after the if-block.
-      ClearHeaps();
-      for (auto& child : children_) {
-        if (&child != current_) {
-          child.Seek(key());
-          if (child.Valid() && comparator_->Equal(key(), child.key())) {
-            child.Next();
-          }
-        }
-        if (child.Valid()) {
-          minHeap_.push(&child);
-        }
-      }
-      direction_ = kForward;
+      SwitchToForward();
       // The loop advanced all non-current children to be > key() so current_
       // should still be strictly the smallest key.
       assert(current_ == CurrentForward());
@@ -330,6 +316,8 @@ class MergingIterator : public InternalIterator {
   std::unique_ptr<MergerMaxIterHeap> maxHeap_;
   PinnedIteratorsManager* pinned_iters_mgr_;
 
+  void SwitchToForward();
+
   IteratorWrapper* CurrentForward() const {
     assert(direction_ == kForward);
     return !minHeap_.empty() ? minHeap_.top() : nullptr;
@@ -342,6 +330,24 @@
   }
 };
 
+void MergingIterator::SwitchToForward() {
+  // Otherwise, advance the non-current children. We advance current_
+  // just after the if-block.
+  ClearHeaps();
+  for (auto& child : children_) {
+    if (&child != current_) {
+      child.Seek(key());
+      if (child.Valid() && comparator_->Equal(key(), child.key())) {
+        child.Next();
+      }
+    }
+    if (child.Valid()) {
+      minHeap_.push(&child);
+    }
+  }
+  direction_ = kForward;
+}
+
 void MergingIterator::ClearHeaps() {
   minHeap_.clear();
   if (maxHeap_) {