2016-02-09 23:12:00 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-15 23:03:42 +00:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2013-10-16 21:59:46 +00:00
|
|
|
//
|
2014-04-15 20:39:26 +00:00
|
|
|
#ifndef ROCKSDB_LITE
|
2013-08-23 06:10:02 +00:00
|
|
|
#include <algorithm>
|
2021-09-08 14:45:59 +00:00
|
|
|
#include <memory>
|
|
|
|
#include <set>
|
2013-08-23 06:10:02 +00:00
|
|
|
#include <type_traits>
|
2021-09-08 14:45:59 +00:00
|
|
|
#include <unordered_set>
|
2013-08-23 06:10:02 +00:00
|
|
|
|
2013-11-21 03:49:27 +00:00
|
|
|
#include "db/memtable.h"
|
2019-05-31 00:39:43 +00:00
|
|
|
#include "memory/arena.h"
|
2015-10-16 21:10:33 +00:00
|
|
|
#include "memtable/stl_wrappers.h"
|
2013-08-23 06:10:02 +00:00
|
|
|
#include "port/port.h"
|
2021-09-08 14:45:59 +00:00
|
|
|
#include "rocksdb/memtablerep.h"
|
|
|
|
#include "rocksdb/utilities/options_type.h"
|
2013-08-23 06:10:02 +00:00
|
|
|
#include "util/mutexlock.h"
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2013-08-23 06:10:02 +00:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
// An append-only MemTableRep backed by a std::vector of encoded keys.
// Insert appends in O(1) amortized time; the vector is sorted lazily, the
// first time an iterator reads from it.  bucket_, immutable_ and sorted_
// are guarded by rwlock_.
class VectorRep : public MemTableRep {
 public:
  // `count` is the capacity reserved up front in the backing vector.
  VectorRep(const KeyComparator& compare, Allocator* allocator, size_t count);

  // Insert key into the collection. (The caller will pack key and value into a
  // single buffer and pass that in as the parameter to Insert)
  // REQUIRES: nothing that compares equal to key is currently in the
  // collection.
  void Insert(KeyHandle handle) override;

  // Returns true iff an entry that compares equal to key is in the collection.
  bool Contains(const char* key) const override;

  // Freezes the rep: Insert() must not be called afterwards (asserted).
  void MarkReadOnly() override;

  // Size of the bucket's control structures plus one pointer per entry.
  size_t ApproximateMemoryUsage() override;

  // Calls callback_func on each entry starting at the first entry >= k,
  // in order, until the callback returns false.
  void Get(const LookupKey& k, void* callback_args,
           bool (*callback_func)(void* arg, const char* entry)) override;

  ~VectorRep() override {}

  // Iterator over either the rep's own bucket (immutable reps) or a private
  // snapshot of it (mutable reps).  Sorting happens lazily in DoSort().
  class Iterator : public MemTableRep::Iterator {
    // Non-null iff we iterate an immutable rep's shared bucket; used to
    // coordinate the in-place sort through the rep's lock and sorted_ flag.
    class VectorRep* vrep_;
    std::shared_ptr<std::vector<const char*>> bucket_;
    // Current position; mutable because const accessors may trigger DoSort().
    std::vector<const char*>::const_iterator mutable cit_;
    const KeyComparator& compare_;
    std::string tmp_;  // For passing to EncodeKey
    // True once this iterator has observed the bucket in sorted order.
    bool mutable sorted_;
    // Sorts the bucket (at most once) and rewinds cit_ to the beginning.
    void DoSort() const;

   public:
    explicit Iterator(class VectorRep* vrep,
                      std::shared_ptr<std::vector<const char*>> bucket,
                      const KeyComparator& compare);

    // Initialize an iterator over the specified collection.
    // The returned iterator is not valid.
    // explicit Iterator(const MemTableRep* collection);
    ~Iterator() override{};

    // Returns true iff the iterator is positioned at a valid node.
    bool Valid() const override;

    // Returns the key at the current position.
    // REQUIRES: Valid()
    const char* key() const override;

    // Advances to the next position.
    // REQUIRES: Valid()
    void Next() override;

    // Advances to the previous position.
    // REQUIRES: Valid()
    void Prev() override;

    // Advance to the first entry with a key >= target
    void Seek(const Slice& user_key, const char* memtable_key) override;

    // Advance to the first entry with a key <= target.
    // Not supported by this rep (asserts in debug builds).
    void SeekForPrev(const Slice& user_key, const char* memtable_key) override;

    // Position at the first entry in collection.
    // Final state of iterator is Valid() iff collection is not empty.
    void SeekToFirst() override;

    // Position at the last entry in collection.
    // Final state of iterator is Valid() iff collection is not empty.
    void SeekToLast() override;
  };

  // Return an iterator over the keys in this representation.
  MemTableRep::Iterator* GetIterator(Arena* arena) override;

 private:
  friend class Iterator;
  using Bucket = std::vector<const char*>;
  std::shared_ptr<Bucket> bucket_;  // shared with iterators of immutable reps
  mutable port::RWMutex rwlock_;    // guards bucket_ / immutable_ / sorted_
  bool immutable_;                  // set by MarkReadOnly()
  bool sorted_;                     // bucket_ has been sorted in place
  const KeyComparator& compare_;
};
|
|
|
|
|
2018-02-16 01:12:48 +00:00
|
|
|
// Appends the encoded entry; the bucket stays unsorted until an iterator
// triggers the lazy sort.
void VectorRep::Insert(KeyHandle handle) {
  char* const key = static_cast<char*>(handle);
  WriteLock guard(&rwlock_);
  assert(!immutable_);  // no inserts after MarkReadOnly()
  bucket_->push_back(key);
}
|
|
|
|
|
|
|
|
// Returns true iff an entry that compares equal to key is in the collection.
|
|
|
|
bool VectorRep::Contains(const char* key) const {
|
|
|
|
ReadLock l(&rwlock_);
|
|
|
|
return std::find(bucket_->begin(), bucket_->end(), key) != bucket_->end();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Freezes the rep.  The write lock orders the flag flip with any in-flight
// Insert() calls.
void VectorRep::MarkReadOnly() {
  WriteLock guard(&rwlock_);
  immutable_ = true;
}
|
|
|
|
|
|
|
|
size_t VectorRep::ApproximateMemoryUsage() {
|
|
|
|
return
|
|
|
|
sizeof(bucket_) + sizeof(*bucket_) +
|
|
|
|
bucket_->size() *
|
|
|
|
sizeof(
|
|
|
|
std::remove_reference<decltype(*bucket_)>::type::value_type
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2017-06-02 21:13:59 +00:00
|
|
|
// Constructs an empty, mutable, unsorted rep and pre-sizes the bucket so the
// first `count` inserts do not reallocate.
VectorRep::VectorRep(const KeyComparator& compare, Allocator* allocator,
                     size_t count)
    : MemTableRep(allocator),
      bucket_(std::make_shared<Bucket>()),
      immutable_(false),
      sorted_(false),
      compare_(compare) {
  bucket_->reserve(count);
}
|
2013-08-23 06:10:02 +00:00
|
|
|
|
2013-09-17 21:11:04 +00:00
|
|
|
// Builds an iterator over `bucket`.  `vrep` is non-null only when the bucket
// is an immutable rep's own (shared) storage.
VectorRep::Iterator::Iterator(class VectorRep* vrep,
                              std::shared_ptr<std::vector<const char*>> bucket,
                              const KeyComparator& compare)
    : vrep_(vrep),
      bucket_(std::move(bucket)),
      compare_(compare),
      sorted_(false) {
  // Not positioned anywhere yet: past-the-end means !Valid().
  cit_ = bucket_->end();
}
|
|
|
|
|
|
|
|
void VectorRep::Iterator::DoSort() const {
|
|
|
|
// vrep is non-null means that we are working on an immutable memtable
|
|
|
|
if (!sorted_ && vrep_ != nullptr) {
|
|
|
|
WriteLock l(&vrep_->rwlock_);
|
|
|
|
if (!vrep_->sorted_) {
|
2022-01-12 17:28:09 +00:00
|
|
|
std::sort(bucket_->begin(), bucket_->end(),
|
|
|
|
stl_wrappers::Compare(compare_));
|
2013-09-17 21:11:04 +00:00
|
|
|
cit_ = bucket_->begin();
|
|
|
|
vrep_->sorted_ = true;
|
|
|
|
}
|
|
|
|
sorted_ = true;
|
|
|
|
}
|
|
|
|
if (!sorted_) {
|
2022-01-12 17:28:09 +00:00
|
|
|
std::sort(bucket_->begin(), bucket_->end(),
|
|
|
|
stl_wrappers::Compare(compare_));
|
2013-09-17 21:11:04 +00:00
|
|
|
cit_ = bucket_->begin();
|
|
|
|
sorted_ = true;
|
|
|
|
}
|
|
|
|
assert(sorted_);
|
|
|
|
assert(vrep_ == nullptr || vrep_->sorted_);
|
|
|
|
}
|
2013-08-23 06:10:02 +00:00
|
|
|
|
|
|
|
// Returns true iff the iterator is positioned at a valid node.
|
|
|
|
bool VectorRep::Iterator::Valid() const {
|
2013-09-17 21:11:04 +00:00
|
|
|
DoSort();
|
2013-08-23 06:10:02 +00:00
|
|
|
return cit_ != bucket_->end();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns the key at the current position.
// REQUIRES: Valid()
const char* VectorRep::Iterator::key() const {
  // DoSort() must already have run (via Valid()/Seek*/SeekTo*); otherwise
  // cit_ could point into an unsorted bucket.
  assert(sorted_);
  return *cit_;
}
|
|
|
|
|
|
|
|
// Advances to the next position.
|
|
|
|
// REQUIRES: Valid()
|
|
|
|
void VectorRep::Iterator::Next() {
|
2015-04-14 00:33:24 +00:00
|
|
|
assert(sorted_);
|
2013-08-23 06:10:02 +00:00
|
|
|
if (cit_ == bucket_->end()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
++cit_;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Advances to the previous position.
|
|
|
|
// REQUIRES: Valid()
|
|
|
|
void VectorRep::Iterator::Prev() {
|
2015-04-14 00:33:24 +00:00
|
|
|
assert(sorted_);
|
2013-08-23 06:10:02 +00:00
|
|
|
if (cit_ == bucket_->begin()) {
|
|
|
|
// If you try to go back from the first element, the iterator should be
|
|
|
|
// invalidated. So we set it to past-the-end. This means that you can
|
|
|
|
// treat the container circularly.
|
|
|
|
cit_ = bucket_->end();
|
|
|
|
} else {
|
|
|
|
--cit_;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Advance to the first entry with a key >= target
|
2013-11-21 03:49:27 +00:00
|
|
|
void VectorRep::Iterator::Seek(const Slice& user_key,
|
|
|
|
const char* memtable_key) {
|
2013-09-17 21:11:04 +00:00
|
|
|
DoSort();
|
2013-08-23 06:10:02 +00:00
|
|
|
// Do binary search to find first value not less than the target
|
2013-11-21 03:49:27 +00:00
|
|
|
const char* encoded_key =
|
|
|
|
(memtable_key != nullptr) ? memtable_key : EncodeKey(&tmp_, user_key);
|
2013-08-23 06:10:02 +00:00
|
|
|
cit_ = std::equal_range(bucket_->begin(),
|
|
|
|
bucket_->end(),
|
2013-11-21 03:49:27 +00:00
|
|
|
encoded_key,
|
2013-08-23 06:10:02 +00:00
|
|
|
[this] (const char* a, const char* b) {
|
|
|
|
return compare_(a, b) < 0;
|
|
|
|
}).first;
|
|
|
|
}
|
|
|
|
|
2016-09-28 01:20:57 +00:00
|
|
|
// Advance to the first entry with a key <= target
void VectorRep::Iterator::SeekForPrev(const Slice& /*user_key*/,
                                      const char* /*memtable_key*/) {
  // Backward seek is intentionally unsupported by the vector rep; callers
  // must not issue SeekForPrev against iterators from this rep.
  assert(false);
}
|
|
|
|
|
2013-08-23 06:10:02 +00:00
|
|
|
// Position at the first entry in collection.
|
|
|
|
// Final state of iterator is Valid() iff collection is not empty.
|
|
|
|
void VectorRep::Iterator::SeekToFirst() {
|
2013-09-17 21:11:04 +00:00
|
|
|
DoSort();
|
2013-08-23 06:10:02 +00:00
|
|
|
cit_ = bucket_->begin();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Position at the last entry in collection.
|
|
|
|
// Final state of iterator is Valid() iff collection is not empty.
|
|
|
|
void VectorRep::Iterator::SeekToLast() {
|
2013-09-17 21:11:04 +00:00
|
|
|
DoSort();
|
2013-08-23 06:10:02 +00:00
|
|
|
cit_ = bucket_->end();
|
|
|
|
if (bucket_->size() != 0) {
|
|
|
|
--cit_;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-11 17:46:30 +00:00
|
|
|
// Calls callback_func on each entry >= k, in order, until it returns false.
void VectorRep::Get(const LookupKey& k, void* callback_args,
                    bool (*callback_func)(void* arg, const char* entry)) {
  rwlock_.ReadLock();
  VectorRep* vector_rep;
  std::shared_ptr<Bucket> bucket;
  if (immutable_) {
    // Immutable rep: iterate the shared bucket directly; the iterator will
    // sort it in place (coordinated via rwlock_/sorted_) on first use.
    vector_rep = this;
  } else {
    // Mutable rep: snapshot the bucket while still holding the read lock so
    // concurrent Insert() calls cannot invalidate the iteration.
    vector_rep = nullptr;
    bucket.reset(new Bucket(*bucket_)); // make a copy
  }
  VectorRep::Iterator iter(vector_rep, immutable_ ? bucket_ : bucket, compare_);
  // Drop the lock before seeking: the lazy sort may take vrep_'s write lock,
  // and user callbacks should not run under this rep's read lock.
  rwlock_.ReadUnlock();

  for (iter.Seek(k.user_key(), k.memtable_key().data());
       iter.Valid() && callback_func(callback_args, iter.key()); iter.Next()) {
  }
}
|
|
|
|
|
In DB::NewIterator(), try to allocate the whole iterator tree in an arena
Summary:
In this patch, try to allocate the whole iterator tree starting from DBIter from an arena
1. ArenaWrappedDBIter is created when serves as the entry point of an iterator tree, with an arena in it.
2. Add an option to create iterator from arena for following iterators: DBIter, MergingIterator, MemtableIterator, all mem table's iterators, all table reader's iterators and two level iterator.
3. MergeIteratorBuilder is created to incrementally build the tree of internal iterators. It is passed to mem table list and version set and add iterators to it.
Limitations:
(1) Only DB::NewIterator() without tailing uses the arena. Other cases, including readonly DB and compactions are still from malloc
(2) Two level iterator itself is allocated in arena, but not iterators inside it.
Test Plan: make all check
Reviewers: ljin, haobo
Reviewed By: haobo
Subscribers: leveldb, dhruba, yhchiang, igor
Differential Revision: https://reviews.facebook.net/D18513
2014-06-02 23:38:00 +00:00
|
|
|
// Return an iterator over the keys in this representation.  When `arena` is
// supplied, the iterator object itself is placement-constructed on it.
MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) {
  char* mem = nullptr;
  if (arena != nullptr) {
    mem = arena->AllocateAligned(sizeof(Iterator));
  }
  ReadLock guard(&rwlock_);
  // Constructs the iterator on the arena when one was given, else on the heap.
  auto make_iter = [arena, mem](VectorRep* vrep,
                                const std::shared_ptr<Bucket>& bucket,
                                const KeyComparator& cmp)
      -> MemTableRep::Iterator* {
    if (arena == nullptr) {
      return new Iterator(vrep, bucket, cmp);
    }
    return new (mem) Iterator(vrep, bucket, cmp);
  };
  // Do not sort here. The sorting would be done the first time
  // a Seek is performed on the iterator.
  if (immutable_) {
    // Immutable rep: share the bucket and let the iterator sort it in place.
    return make_iter(this, bucket_, compare_);
  }
  // Mutable rep: hand the iterator a private snapshot so concurrent inserts
  // are never observed.
  std::shared_ptr<Bucket> snapshot(new Bucket(*bucket_));  // make a copy
  return make_iter(nullptr, snapshot, compare_);
}
|
|
|
|
} // anon namespace
|
|
|
|
|
2021-09-08 14:45:59 +00:00
|
|
|
// Option schema for VectorRepFactory: maps the user-visible "count" option
// onto the registered count_ field (offset 0 because count_ itself is the
// struct handed to RegisterOptions).
static std::unordered_map<std::string, OptionTypeInfo> vector_rep_table_info = {
    {"count",
     {0, OptionType::kSizeT, OptionVerificationType::kNormal,
      OptionTypeFlags::kNone}},
};
|
|
|
|
|
|
|
|
// `count` is the per-memtable reserve size passed to each VectorRep created.
VectorRepFactory::VectorRepFactory(size_t count) : count_(count) {
  // Expose count_ through the options framework so "count" can be configured
  // and serialized by name.
  RegisterOptions("VectorRepFactoryOptions", &count_, &vector_rep_table_info);
}
|
|
|
|
|
2014-01-16 02:17:58 +00:00
|
|
|
// Factory hook: builds a fresh VectorRep for a new memtable.  The prefix
// extractor and logger parameters are unused — the vector rep keeps one flat,
// lazily sorted array and needs neither prefix bucketing nor logging.
MemTableRep* VectorRepFactory::CreateMemTableRep(
    const MemTableRep::KeyComparator& compare, Allocator* allocator,
    const SliceTransform*, Logger* /*logger*/) {
  return new VectorRep(compare, allocator, count_);
}
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2014-04-15 20:39:26 +00:00
|
|
|
#endif // ROCKSDB_LITE
|