// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/merger.h"

#include <vector>
#include <queue>

#include "rocksdb/comparator.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "table/iter_heap.h"
#include "table/iterator_wrapper.h"
#include "util/arena.h"
#include "util/stop_watch.h"
#include "util/perf_context_imp.h"
#include "util/autovector.h"

namespace rocksdb {
namespace {
typedef std::priority_queue<IteratorWrapper*, std::vector<IteratorWrapper*>,
                            MaxIteratorComparator> MaxIterHeap;

typedef std::priority_queue<IteratorWrapper*, std::vector<IteratorWrapper*>,
                            MinIteratorComparator> MinIterHeap;

// Returns a new max-heap of IteratorWrappers using the provided Comparator.
MaxIterHeap NewMaxIterHeap(const Comparator* comparator) {
  return MaxIterHeap(MaxIteratorComparator(comparator));
}

// Returns a new min-heap of IteratorWrappers using the provided Comparator.
MinIterHeap NewMinIterHeap(const Comparator* comparator) {
  return MinIterHeap(MinIteratorComparator(comparator));
}
} // namespace
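
// Inline capacity for children_: autovector keeps up to this many
// IteratorWrappers in place before spilling to a heap allocation.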
const size_t kNumIterReserve = 4;
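
// Merges the output of its child iterators into a single sorted stream,
// ordered by the given comparator. Forward iteration is driven by a
// min-heap of children, reverse iteration by a max-heap.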
class MergingIterator : public Iterator {
 public:
  MergingIterator(const Comparator* comparator, Iterator** children, int n,
                  bool is_arena_mode)
      : is_arena_mode_(is_arena_mode),
        comparator_(comparator),
        current_(nullptr),
        use_heap_(true),
        direction_(kForward),
        maxHeap_(NewMaxIterHeap(comparator_)),
        minHeap_(NewMinIterHeap(comparator_)) {
    children_.resize(n);
    for (int i = 0; i < n; i++) {
      children_[i].Set(children[i]);
    }
    for (auto& child : children_) {
      if (child.Valid()) {
        minHeap_.push(&child);
      }
    }
  }
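
  // Adds a child after construction; MergeIteratorBuilder uses this to grow
  // the merge set incrementally. Only legal while iterating forward.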
  virtual void AddIterator(Iterator* iter) {
    assert(direction_ == kForward);
    children_.emplace_back(iter);
    // Bind a reference to the wrapper just stored in children_; taking the
    // address of a local copy would push a dangling pointer into the heap.
    auto& new_wrapper = children_.back();
    if (new_wrapper.Valid()) {
      minHeap_.push(&new_wrapper);
    }
  }
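
  // DeleteIter() runs each child's destructor in place when it was
  // arena-allocated, and deletes it normally otherwise.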
  virtual ~MergingIterator() {
    for (auto& child : children_) {
      child.DeleteIter(is_arena_mode_);
    }
  }

  virtual bool Valid() const {
    return (current_ != nullptr);
  }

  virtual void SeekToFirst() {
    ClearHeaps();
    for (auto& child : children_) {
      child.SeekToFirst();
      if (child.Valid()) {
        minHeap_.push(&child);
      }
    }
    FindSmallest();
    direction_ = kForward;
  }

  virtual void SeekToLast() {
    ClearHeaps();
    for (auto& child : children_) {
      child.SeekToLast();
      if (child.Valid()) {
        maxHeap_.push(&child);
      }
    }
    FindLargest();
    direction_ = kReverse;
  }
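
  // Seek() avoids the heap while at most one child is valid: that child is
  // tracked in first_child/current_, and the heap is only rebuilt once a
  // second valid child shows up.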
  virtual void Seek(const Slice& target) {
    // Invalidate the heap.
    use_heap_ = false;
    IteratorWrapper* first_child = nullptr;
    PERF_TIMER_DECLARE();

    for (auto& child : children_) {
      PERF_TIMER_START(seek_child_seek_time);
      child.Seek(target);
      PERF_TIMER_STOP(seek_child_seek_time);
      PERF_COUNTER_ADD(seek_child_seek_count, 1);

      if (child.Valid()) {
        // This child has a valid key.
        if (!use_heap_) {
          if (first_child == nullptr) {
            // This is the first child with a valid key. Only put it in
            // current_; the values in the heap are invalid at this point.
            first_child = &child;
          } else {
            // We have more than one child with a valid key. Initialize
            // the heap and put the first child into it.
            PERF_TIMER_START(seek_min_heap_time);
            ClearHeaps();
            minHeap_.push(first_child);
            PERF_TIMER_STOP(seek_min_heap_time);
          }
        }
        if (use_heap_) {
          PERF_TIMER_START(seek_min_heap_time);
          minHeap_.push(&child);
          PERF_TIMER_STOP(seek_min_heap_time);
        }
      }
    }
    if (use_heap_) {
      // The heap is valid, so put the smallest key into current_.
      PERF_TIMER_START(seek_min_heap_time);
      FindSmallest();
      PERF_TIMER_STOP(seek_min_heap_time);
    } else {
      // The heap is not valid; current_ is the first valid child, or
      // nullptr if no child is valid.
      current_ = first_child;
    }
    direction_ = kForward;
  }

  virtual void Next() {
    assert(Valid());

    // Ensure that all children are positioned after key().
    // If we are moving in the forward direction, it is already
    // true for all of the non-current_ children since current_ is
    // the smallest child and key() == current_->key(). Otherwise,
    // we explicitly position the non-current_ children.
    if (direction_ != kForward) {
      ClearHeaps();
      for (auto& child : children_) {
        if (&child != current_) {
          child.Seek(key());
          if (child.Valid() &&
              comparator_->Compare(key(), child.key()) == 0) {
            child.Next();
          }
          if (child.Valid()) {
            minHeap_.push(&child);
          }
        }
      }
      direction_ = kForward;
    }

    // current_ points to the current record; advance it, and if it is
    // still valid, put it back into the heap.
    current_->Next();
    if (use_heap_) {
      if (current_->Valid()) {
        minHeap_.push(current_);
      }
      FindSmallest();
    } else if (!current_->Valid()) {
      current_ = nullptr;
    }
  }

  virtual void Prev() {
    assert(Valid());
    // Ensure that all children are positioned before key().
    // If we are moving in the reverse direction, it is already
    // true for all of the non-current_ children since current_ is
    // the largest child and key() == current_->key(). Otherwise,
    // we explicitly position the non-current_ children.
    if (direction_ != kReverse) {
      ClearHeaps();
      for (auto& child : children_) {
        if (&child != current_) {
          child.Seek(key());
          if (child.Valid()) {
            // Child is at first entry >= key(). Step back one to be < key().
            child.Prev();
          } else {
            // Child has no entries >= key(). Position at last entry.
            child.SeekToLast();
          }
          if (child.Valid()) {
            maxHeap_.push(&child);
          }
        }
      }
      direction_ = kReverse;
    }

    current_->Prev();
    if (current_->Valid()) {
      maxHeap_.push(current_);
    }
    FindLargest();
  }

  virtual Slice key() const {
    assert(Valid());
    return current_->key();
  }

  virtual Slice value() const {
    assert(Valid());
    return current_->value();
  }

  virtual Status status() const {
    Status status;
    for (auto& child : children_) {
      status = child.status();
      if (!status.ok()) {
        break;
      }
    }
    return status;
  }

 private:
  void FindSmallest();
  void FindLargest();
  void ClearHeaps();

  bool is_arena_mode_;
  const Comparator* comparator_;
  autovector<IteratorWrapper, kNumIterReserve> children_;
  IteratorWrapper* current_;
  // If true, both the iterators in the heap and current_ contain valid rows.
  // If false, only current_ can possibly contain valid rows.
  // This flag is always true in the reverse direction, as we always use the
  // heap when iterating in reverse.
  bool use_heap_;
  // Which direction is the iterator moving?
  enum Direction {
    kForward,
    kReverse
  };
  Direction direction_;
  MaxIterHeap maxHeap_;
  MinIterHeap minHeap_;
};
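
// Pops the smallest child off the min-heap into current_, or sets current_
// to nullptr when the heap is exhausted.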
void MergingIterator::FindSmallest() {
  assert(use_heap_);
  if (minHeap_.empty()) {
    current_ = nullptr;
  } else {
    current_ = minHeap_.top();
    assert(current_->Valid());
    minHeap_.pop();
  }
}
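
// Pops the largest child off the max-heap into current_, or sets current_
// to nullptr when the heap is exhausted.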
void MergingIterator::FindLargest() {
  assert(use_heap_);
  if (maxHeap_.empty()) {
    current_ = nullptr;
  } else {
    current_ = maxHeap_.top();
    assert(current_->Valid());
    maxHeap_.pop();
  }
}
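
// Besides emptying both heaps, this re-arms use_heap_, which Seek() may
// have cleared.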
void MergingIterator::ClearHeaps() {
  use_heap_ = true;
  maxHeap_ = NewMaxIterHeap(comparator_);
  minHeap_ = NewMinIterHeap(comparator_);
}
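
// Builds an iterator that merges list[0..n-1]. n == 0 yields an empty
// iterator and n == 1 returns list[0] directly. When arena is non-null, the
// MergingIterator is placement-constructed in the arena, so its memory is
// reclaimed with the arena rather than through delete.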
Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n,
                             Arena* arena) {
  assert(n >= 0);
  if (n == 0) {
    return NewEmptyIterator(arena);
  } else if (n == 1) {
    return list[0];
  } else {
    if (arena == nullptr) {
      return new MergingIterator(cmp, list, n, false);
    } else {
      auto mem = arena->AllocateAligned(sizeof(MergingIterator));
      return new (mem) MergingIterator(cmp, list, n, true);
    }
  }
}
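
// The builder allocates its MergingIterator from the arena up front;
// Finish() simply abandons it if only a single child was ever added.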
MergeIteratorBuilder::MergeIteratorBuilder(const Comparator* comparator,
                                           Arena* a)
    : first_iter(nullptr), use_merging_iter(false), arena(a) {
  auto mem = arena->AllocateAligned(sizeof(MergingIterator));
  merge_iter = new (mem) MergingIterator(comparator, nullptr, 0, true);
}
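
// A single child bypasses merging entirely: it is remembered in first_iter
// and only folded into merge_iter once a second child arrives.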
void MergeIteratorBuilder::AddIterator(Iterator* iter) {
  if (!use_merging_iter && first_iter != nullptr) {
    merge_iter->AddIterator(first_iter);
    use_merging_iter = true;
  }
  if (use_merging_iter) {
    merge_iter->AddIterator(iter);
  } else {
    first_iter = iter;
  }
}

Iterator* MergeIteratorBuilder::Finish() {
  if (!use_merging_iter) {
    return first_iter;
  } else {
    auto ret = merge_iter;
    merge_iter = nullptr;
    return ret;
  }
}
} // namespace rocksdb