// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/iterator.h"
#include "table/internal_iterator.h"
#include "table/iterator_wrapper.h"
#include "util/arena.h"

namespace rocksdb {

Cleanable::Cleanable() {
  cleanup_.function = nullptr;
  cleanup_.next = nullptr;
}

Cleanable::~Cleanable() { DoCleanup(); }

void Cleanable::Reset() {
  DoCleanup();
  cleanup_.function = nullptr;
  cleanup_.next = nullptr;
}

// Invokes every registered cleanup function and frees the heap-allocated
// list nodes. The embedded head itself is not cleared here; callers that
// reuse the object (e.g. Reset()) clear it afterwards.
void Cleanable::DoCleanup() {
  if (cleanup_.function != nullptr) {
    (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2);
    for (Cleanup* c = cleanup_.next; c != nullptr;) {
      (*c->function)(c->arg1, c->arg2);
      Cleanup* next = c->next;
      delete c;
      c = next;
    }
  }
}

// If the entire linked list were on the heap, we could simply attach one
// linked list to another. However, the head is an embedded object, to avoid
// the cost of allocating heap objects in the most common case, where the
// Cleanable has only one Cleanup to do. We could put everything on the heap
// if benchmarks show no negative impact on performance.
// Also, we need to iterate over the linked list since there is no pointer to
// the tail. We could add a tail pointer, but maintaining it might negatively
// impact performance in the common case of a single cleanup, where the tail
// pointer is not needed. Again, benchmarks could clarify that.
// Even without a tail pointer we could iterate over the list, find the tail,
// and update only that node, without inserting the Cleanups one by one. This,
// however, would be redundant when the source Cleanable has one or only a few
// Cleanups, which is the case most of the time.
// TODO(myabandeh): if the list is too long, maintain a tail pointer and merge
// the entire list (minus the head, which has to be inserted separately) into
// the target linked list at once.
void Cleanable::DelegateCleanupsTo(Cleanable* other) {
  assert(other != nullptr);
  if (cleanup_.function == nullptr) {
    return;
  }
  Cleanup* c = &cleanup_;
  other->RegisterCleanup(c->function, c->arg1, c->arg2);
  c = c->next;
  while (c != nullptr) {
    Cleanup* next = c->next;
    other->RegisterCleanup(c);
    c = next;
  }
  cleanup_.function = nullptr;
  cleanup_.next = nullptr;
}
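
// Illustrative sketch (editor's addition, not part of the original source):
// after delegation, the source Cleanable is empty and every registered
// function runs when the target is cleaned up instead. `FreeArg1` and `ptr`
// are hypothetical.
//
//   Cleanable source;
//   source.RegisterCleanup(&FreeArg1, ptr, nullptr);
//   Cleanable target;
//   source.DelegateCleanupsTo(&target);
//   // `source` now has no cleanups; destroying `target` calls
//   // FreeArg1(ptr, nullptr) exactly once.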

// Moves the contents of the heap-allocated node `c` into this Cleanable and
// takes ownership of it: the node is either freed immediately (when it can
// be folded into the embedded head) or linked into the list.
void Cleanable::RegisterCleanup(Cleanable::Cleanup* c) {
  assert(c != nullptr);
  if (cleanup_.function == nullptr) {
    cleanup_.function = c->function;
    cleanup_.arg1 = c->arg1;
    cleanup_.arg2 = c->arg2;
    delete c;
  } else {
    c->next = cleanup_.next;
    cleanup_.next = c;
  }
}

void Cleanable::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
  assert(func != nullptr);
  Cleanup* c;
  if (cleanup_.function == nullptr) {
    c = &cleanup_;
  } else {
    c = new Cleanup;
    c->next = cleanup_.next;
    cleanup_.next = c;
  }
  c->function = func;
  c->arg1 = arg1;
  c->arg2 = arg2;
}
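
// Illustrative usage sketch (editor's addition, not part of the original
// source): a typical client pins a heap object to the lifetime of a
// Cleanable, e.g. so that key()/value() slices stay valid. `BlockContents`
// is a hypothetical type here.
//
//   static void DeleteBlockContents(void* arg1, void* /*arg2*/) {
//     delete reinterpret_cast<BlockContents*>(arg1);
//   }
//
//   void PinBlock(Cleanable* cleanable, BlockContents* block) {
//     // Runs when `cleanable` is destroyed, Reset(), or delegated.
//     cleanable->RegisterCleanup(&DeleteBlockContents, block, nullptr);
//   }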

Status Iterator::GetProperty(std::string prop_name, std::string* prop) {
  if (prop == nullptr) {
    return Status::InvalidArgument("prop is nullptr");
  }
  if (prop_name == "rocksdb.iterator.is-key-pinned") {
    *prop = "0";
    return Status::OK();
  }
  return Status::InvalidArgument("Unidentified property.");
}
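
// Illustrative usage sketch (editor's addition): querying the one property
// this base implementation understands; subclasses may answer "1" when the
// Slice returned by key() points into pinned (stable) memory.
//
//   std::string is_pinned;
//   Status s = iter->GetProperty("rocksdb.iterator.is-key-pinned", &is_pinned);
//   if (s.ok() && is_pinned == "1") {
//     // key() remains valid until the iterator is deleted.
//   }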

namespace {
class EmptyIterator : public Iterator {
 public:
  explicit EmptyIterator(const Status& s) : status_(s) {}
  virtual bool Valid() const override { return false; }
  virtual void Seek(const Slice& target) override {}
  virtual void SeekForPrev(const Slice& target) override {}
  virtual void SeekToFirst() override {}
  virtual void SeekToLast() override {}
  virtual void Next() override { assert(false); }
  virtual void Prev() override { assert(false); }
  Slice key() const override {
    assert(false);
    return Slice();
  }
  Slice value() const override {
    assert(false);
    return Slice();
  }
  virtual Status status() const override { return status_; }

 private:
  Status status_;
};

class EmptyInternalIterator : public InternalIterator {
 public:
  explicit EmptyInternalIterator(const Status& s) : status_(s) {}
  virtual bool Valid() const override { return false; }
  virtual void Seek(const Slice& target) override {}
  virtual void SeekForPrev(const Slice& target) override {}
  virtual void SeekToFirst() override {}
  virtual void SeekToLast() override {}
  virtual void Next() override { assert(false); }
  virtual void Prev() override { assert(false); }
  Slice key() const override {
    assert(false);
    return Slice();
  }
  Slice value() const override {
    assert(false);
    return Slice();
  }
  virtual Status status() const override { return status_; }

 private:
  Status status_;
};
} // namespace

Iterator* NewEmptyIterator() {
  return new EmptyIterator(Status::OK());
}

Iterator* NewErrorIterator(const Status& status) {
  return new EmptyIterator(status);
}
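
// Illustrative sketch (editor's addition): returning an error iterator is the
// conventional way for a factory to surface a failure while still returning a
// non-null iterator. `OpenTable` is hypothetical.
//
//   Iterator* OpenTable(bool ok) {
//     if (!ok) {
//       return NewErrorIterator(Status::IOError("open failed"));
//     }
//     // ... build and return a real iterator ...
//   }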

InternalIterator* NewEmptyInternalIterator() {
  return new EmptyInternalIterator(Status::OK());
}

InternalIterator* NewEmptyInternalIterator(Arena* arena) {
  if (arena == nullptr) {
    return NewEmptyInternalIterator();
  } else {
    // Size the allocation by the type actually constructed below
    // (EmptyInternalIterator, not EmptyIterator).
    auto mem = arena->AllocateAligned(sizeof(EmptyInternalIterator));
    return new (mem) EmptyInternalIterator(Status::OK());
  }
}
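
// Illustrative sketch (editor's addition): when an Arena is supplied, the
// iterator is placement-constructed in arena memory, so it must not be passed
// to delete; destroy it explicitly (if needed) and let the arena reclaim the
// memory.
//
//   Arena arena;
//   InternalIterator* it = NewEmptyInternalIterator(&arena);
//   assert(!it->Valid());
//   it->~InternalIterator();  // no `delete it`: the arena owns the memory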

InternalIterator* NewErrorInternalIterator(const Status& status) {
  return new EmptyInternalIterator(status);
}

InternalIterator* NewErrorInternalIterator(const Status& status, Arena* arena) {
  if (arena == nullptr) {
    return NewErrorInternalIterator(status);
  } else {
    // As above, size the allocation by the constructed type.
    auto mem = arena->AllocateAligned(sizeof(EmptyInternalIterator));
    return new (mem) EmptyInternalIterator(status);
  }
}

} // namespace rocksdb