mirror of https://github.com/facebook/rocksdb.git
Run clang format against files under example/, memory/ and memtable/ folders (#10893)
Summary:
**Context/Summary:** Run the following to format:
```
find ./examples -iname *.h -o -iname *.cc | xargs clang-format -i
find ./memory -iname *.h -o -iname *.cc | xargs clang-format -i
find ./memtable -iname *.h -o -iname *.cc | xargs clang-format -i
```
**Test**
- Manual inspection to ensure changes are cosmetic only
- CI

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10893
Reviewed By: jay-zhuang
Differential Revision: D40779187
Pulled By: hx235
fbshipit-source-id: 529cbb0f0fbd698d95817e8c42fe3ce32254d9b0
This commit is contained in:
parent 7867a1112b
commit 08a63ad10b
examples/column_families_example.cc
```diff
@@ -7,8 +7,8 @@
 #include <vector>
 
 #include "rocksdb/db.h"
-#include "rocksdb/slice.h"
 #include "rocksdb/options.h"
+#include "rocksdb/slice.h"
 
 #if defined(OS_WIN)
 std::string kDBPath = "C:\\Windows\\TEMP\\rocksdb_column_families_example";
@@ -52,8 +52,8 @@ int main() {
   column_families.push_back(ColumnFamilyDescriptor(
       ROCKSDB_NAMESPACE::kDefaultColumnFamilyName, ColumnFamilyOptions()));
   // open the new one, too
-  column_families.push_back(ColumnFamilyDescriptor(
-      "new_cf", ColumnFamilyOptions()));
+  column_families.push_back(
+      ColumnFamilyDescriptor("new_cf", ColumnFamilyOptions()));
   std::vector<ColumnFamilyHandle*> handles;
   s = DB::Open(DBOptions(), kDBPath, column_families, &handles, &db);
   assert(s.ok());
```
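The hunk above is purely cosmetic (include reordering and a rewrapped `push_back`). For orientation, a minimal sketch of the column-family open pattern this example file demonstrates; the path and the create-then-reopen scaffolding are assumptions for illustration, not part of the commit:

```cpp
#include <cassert>
#include <string>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

using ROCKSDB_NAMESPACE::ColumnFamilyDescriptor;
using ROCKSDB_NAMESPACE::ColumnFamilyHandle;
using ROCKSDB_NAMESPACE::ColumnFamilyOptions;
using ROCKSDB_NAMESPACE::DB;
using ROCKSDB_NAMESPACE::DBOptions;
using ROCKSDB_NAMESPACE::Options;
using ROCKSDB_NAMESPACE::Status;

int main() {
  const std::string kPath = "/tmp/rocksdb_cf_sketch";  // hypothetical path

  // First run only: create the DB and a second column family so the
  // reopen below has something to list (assumes a fresh directory).
  DB* db = nullptr;
  Options options;
  options.create_if_missing = true;
  Status s = DB::Open(options, kPath, &db);
  assert(s.ok());
  ColumnFamilyHandle* cf = nullptr;
  s = db->CreateColumnFamily(ColumnFamilyOptions(), "new_cf", &cf);
  assert(s.ok());
  s = db->DestroyColumnFamilyHandle(cf);
  assert(s.ok());
  delete db;

  // Reopen: every existing column family must be listed, exactly as in
  // the hunk above (default CF plus "new_cf").
  std::vector<ColumnFamilyDescriptor> column_families;
  column_families.push_back(ColumnFamilyDescriptor(
      ROCKSDB_NAMESPACE::kDefaultColumnFamilyName, ColumnFamilyOptions()));
  column_families.push_back(
      ColumnFamilyDescriptor("new_cf", ColumnFamilyOptions()));
  std::vector<ColumnFamilyHandle*> handles;
  s = DB::Open(DBOptions(), kPath, column_families, &handles, &db);
  assert(s.ok());

  for (auto* h : handles) {
    s = db->DestroyColumnFamilyHandle(h);
    assert(s.ok());
  }
  delete db;
  return 0;
}
```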
examples/compact_files_example.cc
```diff
@@ -8,6 +8,7 @@
 
 #include <mutex>
 #include <string>
+
 #include "rocksdb/db.h"
 #include "rocksdb/env.h"
 #include "rocksdb/options.h"
@@ -39,22 +40,20 @@ class Compactor : public EventListener {
   // and column family. It is the caller's responsibility to
   // destroy the returned CompactionTask. Returns "nullptr"
   // if it cannot find a proper compaction task.
-  virtual CompactionTask* PickCompaction(
-      DB* db, const std::string& cf_name) = 0;
+  virtual CompactionTask* PickCompaction(DB* db,
+                                         const std::string& cf_name) = 0;
 
   // Schedule and run the specified compaction task in background.
-  virtual void ScheduleCompaction(CompactionTask *task) = 0;
+  virtual void ScheduleCompaction(CompactionTask* task) = 0;
 };
 
 // Example structure that describes a compaction task.
 struct CompactionTask {
-  CompactionTask(
-      DB* _db, Compactor* _compactor,
+  CompactionTask(DB* _db, Compactor* _compactor,
                  const std::string& _column_family_name,
                  const std::vector<std::string>& _input_file_names,
                  const int _output_level,
-                 const CompactionOptions& _compact_options,
-                 bool _retry_on_fail)
+                 const CompactionOptions& _compact_options, bool _retry_on_fail)
       : db(_db),
         compactor(_compactor),
         column_family_name(_column_family_name),
@@ -77,15 +76,13 @@ class FullCompactor : public Compactor {
  public:
  explicit FullCompactor(const Options options) : options_(options) {
     compact_options_.compression = options_.compression;
-    compact_options_.output_file_size_limit =
-        options_.target_file_size_base;
+    compact_options_.output_file_size_limit = options_.target_file_size_base;
   }
 
   // When flush happens, it determines whether to trigger compaction. If
   // triggered_writes_stop is true, it will also set the retry flag of
   // compaction-task to true.
-  void OnFlushCompleted(
-      DB* db, const FlushJobInfo& info) override {
+  void OnFlushCompleted(DB* db, const FlushJobInfo& info) override {
     CompactionTask* task = PickCompaction(db, info.cf_name);
     if (task != nullptr) {
       if (info.triggered_writes_stop) {
@@ -97,8 +94,7 @@ class FullCompactor : public Compactor {
   }
 
   // Always pick a compaction which includes all files whenever possible.
-  CompactionTask* PickCompaction(
-      DB* db, const std::string& cf_name) override {
+  CompactionTask* PickCompaction(DB* db, const std::string& cf_name) override {
     ColumnFamilyMetaData cf_meta;
     db->GetColumnFamilyMetaData(&cf_meta);
 
@@ -111,8 +107,7 @@ class FullCompactor : public Compactor {
         input_file_names.push_back(file.name);
       }
     }
-    return new CompactionTask(
-        db, this, cf_name, input_file_names,
+    return new CompactionTask(db, this, cf_name, input_file_names,
                               options_.num_levels - 1, compact_options_, false);
   }
 
@@ -127,16 +122,14 @@ class FullCompactor : public Compactor {
     assert(task);
     assert(task->db);
     Status s = task->db->CompactFiles(
-        task->compact_options,
-        task->input_file_names,
-        task->output_level);
+        task->compact_options, task->input_file_names, task->output_level);
     printf("CompactFiles() finished with status %s\n", s.ToString().c_str());
     if (!s.ok() && !s.IsIOError() && task->retry_on_fail) {
       // If a compaction task with its retry_on_fail=true failed,
       // try to schedule another compaction in case the reason
       // is not an IO error.
-      CompactionTask* new_task = task->compactor->PickCompaction(
-          task->db, task->column_family_name);
+      CompactionTask* new_task =
+          task->compactor->PickCompaction(task->db, task->column_family_name);
       task->compactor->ScheduleCompaction(new_task);
     }
   }
@@ -173,8 +166,7 @@ int main() {
   // verify the values are still there
   std::string value;
   for (int i = 1000; i < 99999; ++i) {
-    db->Get(ReadOptions(), std::to_string(i),
-            &value);
+    db->Get(ReadOptions(), std::to_string(i), &value);
     assert(value == std::string(500, 'a' + (i % 26)));
   }
 
```
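Most of the hunks above rewrap the `Compactor`/`FullCompactor` listener from this example. For readers skimming past the formatting noise, this is the wiring the example relies on, reduced to a hedged sketch: it assumes the `FullCompactor` class from the hunks above is in scope, and the DB path is hypothetical.

```cpp
#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

using ROCKSDB_NAMESPACE::DB;
using ROCKSDB_NAMESPACE::Options;
using ROCKSDB_NAMESPACE::Status;

int main() {
  Options options;
  options.create_if_missing = true;
  // Disable RocksDB's own background compaction: the example drives
  // compaction manually via CompactFiles() from OnFlushCompleted().
  options.compaction_style = ROCKSDB_NAMESPACE::kCompactionStyleNone;
  // FullCompactor (from the hunks above) is assumed to be in scope.
  options.listeners.emplace_back(new FullCompactor(options));

  DB* db = nullptr;
  Status s = DB::Open(options, "/tmp/rocksdb_compact_files_sketch", &db);
  assert(s.ok());
  // Writes and flushes from here on trigger FullCompactor::OnFlushCompleted(),
  // which picks all live SST files and schedules one CompactFiles() call.
  delete db;
  return 0;
}
```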
examples/optimistic_transaction_example.cc
```diff
@@ -8,8 +8,8 @@
 #include "rocksdb/db.h"
 #include "rocksdb/options.h"
 #include "rocksdb/slice.h"
-#include "rocksdb/utilities/transaction.h"
 #include "rocksdb/utilities/optimistic_transaction_db.h"
+#include "rocksdb/utilities/transaction.h"
 
 using ROCKSDB_NAMESPACE::DB;
 using ROCKSDB_NAMESPACE::OptimisticTransactionDB;
```
examples/simple_example.cc
```diff
@@ -7,8 +7,8 @@
 #include <string>
 
 #include "rocksdb/db.h"
-#include "rocksdb/slice.h"
 #include "rocksdb/options.h"
+#include "rocksdb/slice.h"
 
 using ROCKSDB_NAMESPACE::DB;
 using ROCKSDB_NAMESPACE::Options;
```
memory/allocator.h
```diff
@@ -13,6 +13,7 @@
 #pragma once
 #include <cerrno>
 #include <cstddef>
+
 #include "rocksdb/write_buffer_manager.h"
 
 namespace ROCKSDB_NAMESPACE {
```
memory/concurrent_arena.cc
```diff
@@ -8,7 +8,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "memory/concurrent_arena.h"
+
 #include <thread>
+
 #include "port/port.h"
 #include "util/random.h"
 
```
memory/concurrent_arena.h
```diff
@@ -11,6 +11,7 @@
 #include <atomic>
 #include <memory>
 #include <utility>
+
 #include "memory/allocator.h"
 #include "memory/arena.h"
 #include "port/lang.h"
```
memtable/alloc_tracker.cc
```diff
@@ -8,6 +8,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include <assert.h>
+
 #include "memory/allocator.h"
 #include "memory/arena.h"
 #include "rocksdb/write_buffer_manager.h"
```
memtable/hash_linklist_rep.cc
```diff
@@ -77,9 +77,7 @@ struct Node {
     next_.store(x, std::memory_order_release);
   }
   // No-barrier variants that can be safely used in a few locations.
-  Node* NoBarrier_Next() {
-    return next_.load(std::memory_order_relaxed);
-  }
+  Node* NoBarrier_Next() { return next_.load(std::memory_order_relaxed); }
 
   void NoBarrier_SetNext(Node* x) { next_.store(x, std::memory_order_relaxed); }
 
@@ -296,9 +294,9 @@ class HashLinkListRep : public MemTableRep {
 
   // Advance to the first entry with a key >= target
   void Seek(const Slice& internal_key, const char* memtable_key) override {
-    const char* encoded_key =
-        (memtable_key != nullptr) ?
-            memtable_key : EncodeKey(&tmp_, internal_key);
+    const char* encoded_key = (memtable_key != nullptr)
+                                  ? memtable_key
+                                  : EncodeKey(&tmp_, internal_key);
     iter_.Seek(encoded_key);
   }
 
@@ -365,8 +363,8 @@ class HashLinkListRep : public MemTableRep {
   // Advance to the first entry with a key >= target
   void Seek(const Slice& internal_key,
             const char* /*memtable_key*/) override {
-    node_ = hash_link_list_rep_->FindGreaterOrEqualInBucket(head_,
-                                                            internal_key);
+    node_ =
+        hash_link_list_rep_->FindGreaterOrEqualInBucket(head_, internal_key);
   }
 
   // Retreat to the last entry with a key <= target
@@ -398,15 +396,14 @@ class HashLinkListRep : public MemTableRep {
     head_ = head;
     node_ = nullptr;
   }
 
  private:
   friend class HashLinkListRep;
   const HashLinkListRep* const hash_link_list_rep_;
   Node* head_;
   Node* node_;
 
-  virtual void SeekToHead() {
-    node_ = head_;
-  }
+  virtual void SeekToHead() { node_ = head_; }
 };
 
 class DynamicIterator : public HashLinkListRep::LinkListIterator {
@@ -486,7 +483,7 @@ class HashLinkListRep : public MemTableRep {
   // This is used when there wasn't a bucket. It is cheaper than
   // instantiating an empty bucket over which to iterate.
  public:
-  EmptyIterator() { }
+  EmptyIterator() {}
   bool Valid() const override { return false; }
   const char* key() const override {
     assert(false);
@@ -530,8 +527,7 @@ HashLinkListRep::HashLinkListRep(
   }
 }
 
-HashLinkListRep::~HashLinkListRep() {
-}
+HashLinkListRep::~HashLinkListRep() {}
 
 KeyHandle HashLinkListRep::Allocate(const size_t len, char** buf) {
   char* mem = allocator_->AllocateAligned(sizeof(Node) + len);
@@ -633,7 +629,8 @@ void HashLinkListRep::Insert(KeyHandle handle) {
   if (bucket_entries_logging_threshold_ > 0 &&
       header->GetNumEntries() ==
           static_cast<uint32_t>(bucket_entries_logging_threshold_)) {
-    Info(logger_, "HashLinkedList bucket %" ROCKSDB_PRIszt
+    Info(logger_,
+         "HashLinkedList bucket %" ROCKSDB_PRIszt
          " has more than %d "
          "entries. Key to insert: %s",
          GetHash(transformed), header->GetNumEntries(),
```
memtable/hash_skiplist_rep.cc
```diff
@@ -118,9 +118,9 @@ class HashSkipListRep : public MemTableRep {
   // Advance to the first entry with a key >= target
   void Seek(const Slice& internal_key, const char* memtable_key) override {
     if (list_ != nullptr) {
-      const char* encoded_key =
-          (memtable_key != nullptr) ?
-              memtable_key : EncodeKey(&tmp_, internal_key);
+      const char* encoded_key = (memtable_key != nullptr)
+                                    ? memtable_key
+                                    : EncodeKey(&tmp_, internal_key);
       iter_.Seek(encoded_key);
     }
   }
@@ -158,6 +158,7 @@ class HashSkipListRep : public MemTableRep {
     iter_.SetList(list);
     own_list_ = false;
   }
+
  private:
   // if list_ is nullptr, we should NEVER call any methods on iter_
   // if list_ is nullptr, this Iterator is not Valid()
@@ -208,7 +209,7 @@ class HashSkipListRep : public MemTableRep {
   // This is used when there wasn't a bucket. It is cheaper than
   // instantiating an empty bucket over which to iterate.
  public:
-  EmptyIterator() { }
+  EmptyIterator() {}
   bool Valid() const override { return false; }
   const char* key() const override {
     assert(false);
@@ -239,8 +240,8 @@ HashSkipListRep::HashSkipListRep(const MemTableRep::KeyComparator& compare,
       transform_(transform),
       compare_(compare),
       allocator_(allocator) {
-  auto mem = allocator->AllocateAligned(
-      sizeof(std::atomic<void*>) * bucket_size);
+  auto mem =
+      allocator->AllocateAligned(sizeof(std::atomic<void*>) * bucket_size);
   buckets_ = new (mem) std::atomic<Bucket*>[bucket_size];
 
   for (size_t i = 0; i < bucket_size_; ++i) {
@@ -248,8 +249,7 @@ HashSkipListRep::HashSkipListRep(const MemTableRep::KeyComparator& compare,
   }
 }
 
-HashSkipListRep::~HashSkipListRep() {
-}
+HashSkipListRep::~HashSkipListRep() {}
 
 HashSkipListRep::Bucket* HashSkipListRep::GetInitializedBucket(
     const Slice& transformed) {
@@ -281,9 +281,7 @@ bool HashSkipListRep::Contains(const char* key) const {
   return bucket->Contains(key);
 }
 
-size_t HashSkipListRep::ApproximateMemoryUsage() {
-  return 0;
-}
+size_t HashSkipListRep::ApproximateMemoryUsage() { return 0; }
 
 void HashSkipListRep::Get(const LookupKey& k, void* callback_args,
                           bool (*callback_func)(void* arg, const char* entry)) {
```
memtable/inlineskiplist.h
```diff
@@ -43,9 +43,11 @@
 #pragma once
 #include <assert.h>
 #include <stdlib.h>
+
 #include <algorithm>
 #include <atomic>
 #include <type_traits>
+
 #include "memory/allocator.h"
 #include "port/likely.h"
 #include "port/port.h"
@@ -62,7 +64,7 @@ class InlineSkipList {
   struct Splice;
 
  public:
-  using DecodedKey = \
+  using DecodedKey =
       typename std::remove_reference<Comparator>::type::DecodedType;
 
   static const uint16_t kMaxPossibleHeight = 32;
@@ -264,9 +266,9 @@ class InlineSkipList {
   // point to a node that is before the key, and after should point to
   // a node that is after the key. after should be nullptr if a good after
   // node isn't conveniently available.
-  template<bool prefetch_before>
-  void FindSpliceForLevel(const DecodedKey& key, Node* before, Node* after, int level,
-                          Node** out_prev, Node** out_next);
+  template <bool prefetch_before>
+  void FindSpliceForLevel(const DecodedKey& key, Node* before, Node* after,
+                          int level, Node** out_prev, Node** out_next);
 
   // Recomputes Splice levels from highest_level (inclusive) down to
   // lowest_level (inclusive).
@@ -766,8 +768,8 @@ void InlineSkipList<Comparator>::FindSpliceForLevel(const DecodedKey& key,
       PREFETCH(next->Next(level), 0, 1);
     }
     if (prefetch_before == true) {
-      if (next != nullptr && level>0) {
-        PREFETCH(next->Next(level-1), 0, 1);
+      if (next != nullptr && level > 0) {
+        PREFETCH(next->Next(level - 1), 0, 1);
       }
     }
     assert(before == head_ || next == nullptr ||
@@ -881,8 +883,7 @@ bool InlineSkipList<Comparator>::Insert(const char* key, Splice* splice,
         // we're pessimistic, recompute everything
         recompute_height = max_height;
       }
-    } else if (KeyIsAfterNode(key_decoded,
-                              splice->next_[recompute_height])) {
+    } else if (KeyIsAfterNode(key_decoded, splice->next_[recompute_height])) {
       // key is from after splice
       if (allow_partial_splice_fix) {
         Node* bad = splice->next_[recompute_height];
```
memtable/inlineskiplist_test.cc
```diff
@@ -8,8 +8,10 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "memtable/inlineskiplist.h"
+
 #include <set>
 #include <unordered_set>
+
 #include "memory/concurrent_arena.h"
 #include "rocksdb/env.h"
 #include "test_util/testharness.h"
@@ -34,9 +36,7 @@ static Key Decode(const char* key) {
 struct TestComparator {
   using DecodedType = Key;
 
-  static DecodedType decode_key(const char* b) {
-    return Decode(b);
-  }
+  static DecodedType decode_key(const char* b) { return Decode(b); }
 
   int operator()(const char* a, const char* b) const {
     if (Decode(a) < Decode(b)) {
```
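The `TestComparator` hunk above is a compact statement of the comparator contract `InlineSkipList` expects: a `DecodedType`, a static `decode_key()`, and three-way `operator()` overloads over encoded keys. A standalone sketch of that contract, with a `memcpy`-based decode mirroring the test's `Decode()`; the helper names are illustrative, not RocksDB API:

```cpp
#include <cstdint>
#include <cstring>

using Key = uint64_t;

// Shape of the comparator InlineSkipList<Comparator> is instantiated with,
// modeled on TestComparator in the hunk above.
struct TestComparator {
  using DecodedType = Key;  // InlineSkipList derives DecodedKey from this

  static DecodedType decode_key(const char* b) {
    Key k;
    std::memcpy(&k, b, sizeof(k));  // keys are stored encoded in node memory
    return k;
  }

  // Three-way comparison over two encoded keys.
  int operator()(const char* a, const char* b) const {
    const Key ka = decode_key(a);
    const Key kb = decode_key(b);
    return (ka < kb) ? -1 : (ka > kb) ? +1 : 0;
  }

  // Three-way comparison against an already-decoded key, used on the
  // splice fast paths.
  int operator()(const char* a, const DecodedType b) const {
    const Key ka = decode_key(a);
    return (ka < b) ? -1 : (ka > b) ? +1 : 0;
  }
};
```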
memtable/memtablerep_bench.cc
```diff
@@ -467,8 +467,8 @@ class FillBenchmark : public Benchmark {
     num_write_ops_per_thread_ = FLAGS_num_operations;
   }
 
-  void RunThreads(std::vector<port::Thread>* /*threads*/, uint64_t* bytes_written,
-                  uint64_t* bytes_read, bool /*write*/,
+  void RunThreads(std::vector<port::Thread>* /*threads*/,
+                  uint64_t* bytes_written, uint64_t* bytes_read, bool /*write*/,
                   uint64_t* read_hits) override {
     FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_,
                         num_write_ops_per_thread_, read_hits)();
```
memtable/skiplist.h
```diff
@@ -33,14 +33,16 @@
 #pragma once
 #include <assert.h>
 #include <stdlib.h>
+
 #include <atomic>
+
 #include "memory/allocator.h"
 #include "port/port.h"
 #include "util/random.h"
 
 namespace ROCKSDB_NAMESPACE {
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 class SkipList {
  private:
   struct Node;
@@ -164,9 +166,9 @@ class SkipList {
 };
 
 // Implementation details follow
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 struct SkipList<Key, Comparator>::Node {
-  explicit Node(const Key& k) : key(k) { }
+  explicit Node(const Key& k) : key(k) {}
 
   Key const key;
 
@@ -200,43 +202,43 @@ struct SkipList<Key, Comparator>::Node {
   std::atomic<Node*> next_[1];
 };
 
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node*
-SkipList<Key, Comparator>::NewNode(const Key& key, int height) {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::NewNode(
+    const Key& key, int height) {
   char* mem = allocator_->AllocateAligned(
       sizeof(Node) + sizeof(std::atomic<Node*>) * (height - 1));
   return new (mem) Node(key);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline SkipList<Key, Comparator>::Iterator::Iterator(const SkipList* list) {
   SetList(list);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::SetList(const SkipList* list) {
   list_ = list;
   node_ = nullptr;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline bool SkipList<Key, Comparator>::Iterator::Valid() const {
   return node_ != nullptr;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline const Key& SkipList<Key, Comparator>::Iterator::key() const {
   assert(Valid());
   return node_->key;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Next() {
   assert(Valid());
   node_ = node_->Next(0);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Prev() {
   // Instead of using explicit "prev" links, we just search for the
   // last node that falls before key.
@@ -247,7 +249,7 @@ inline void SkipList<Key, Comparator>::Iterator::Prev() {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::Seek(const Key& target) {
   node_ = list_->FindGreaterOrEqual(target);
 }
@@ -269,7 +271,7 @@ inline void SkipList<Key, Comparator>::Iterator::SeekToFirst() {
   node_ = list_->head_->Next(0);
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
   node_ = list_->FindLast();
   if (node_ == list_->head_) {
@@ -277,7 +279,7 @@ inline void SkipList<Key, Comparator>::Iterator::SeekToLast() {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 int SkipList<Key, Comparator>::RandomHeight() {
   auto rnd = Random::GetTLSInstance();
 
@@ -291,15 +293,15 @@ int SkipList<Key, Comparator>::RandomHeight() {
   return height;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 bool SkipList<Key, Comparator>::KeyIsAfterNode(const Key& key, Node* n) const {
   // nullptr n is considered infinite
   return (n != nullptr) && (compare_(n->key, key) < 0);
 }
 
-template<typename Key, class Comparator>
-typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::
-  FindGreaterOrEqual(const Key& key) const {
+template <typename Key, class Comparator>
+typename SkipList<Key, Comparator>::Node*
+SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key) const {
   // Note: It looks like we could reduce duplication by implementing
   // this function as FindLessThan(key)->Next(0), but we wouldn't be able
   // to exit early on equality and the result wouldn't even be correct.
@@ -315,8 +317,8 @@ typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::
     assert(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x));
     // Make sure we haven't overshot during our search
     assert(x == head_ || KeyIsAfterNode(key, x));
-    int cmp = (next == nullptr || next == last_bigger)
-        ? 1 : compare_(next->key, key);
+    int cmp =
+        (next == nullptr || next == last_bigger) ? 1 : compare_(next->key, key);
     if (cmp == 0 || (cmp > 0 && level == 0)) {
       return next;
     } else if (cmp < 0) {
@@ -330,7 +332,7 @@ typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 typename SkipList<Key, Comparator>::Node*
 SkipList<Key, Comparator>::FindLessThan(const Key& key, Node** prev) const {
   Node* x = head_;
@@ -360,7 +362,7 @@ SkipList<Key, Comparator>::FindLessThan(const Key& key, Node** prev) const {
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 typename SkipList<Key, Comparator>::Node* SkipList<Key, Comparator>::FindLast()
     const {
   Node* x = head_;
@@ -431,7 +433,7 @@ SkipList<Key, Comparator>::SkipList(const Comparator cmp, Allocator* allocator,
   }
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 void SkipList<Key, Comparator>::Insert(const Key& key) {
   // fast path for sequential insertion
   if (!KeyIsAfterNode(key, prev_[0]->NoBarrier_Next(0)) &&
@@ -460,7 +462,7 @@ void SkipList<Key, Comparator>::Insert(const Key& key) {
   for (int i = GetMaxHeight(); i < height; i++) {
     prev_[i] = head_;
   }
-  //fprintf(stderr, "Change height from %d to %d\n", max_height_, height);
+  // fprintf(stderr, "Change height from %d to %d\n", max_height_, height);
 
   // It is ok to mutate max_height_ without any synchronization
   // with concurrent readers. A concurrent reader that observes
@@ -483,7 +485,7 @@ void SkipList<Key, Comparator>::Insert(const Key& key) {
   prev_height_ = height;
 }
 
-template<typename Key, class Comparator>
+template <typename Key, class Comparator>
 bool SkipList<Key, Comparator>::Contains(const Key& key) const {
   Node* x = FindGreaterOrEqual(key);
   if (x != nullptr && Equal(key, x->key)) {
```
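The `template<...>` to `template <...>` churn above touches nearly every definition in this header without changing behavior. For context, a sketch of how this internal skip list is used, modeled on the test file that follows; it assumes the RocksDB source tree is on the include path, and the comparator and keys are illustrative:

```cpp
#include <cassert>
#include <cstdint>

#include "memory/arena.h"
#include "memtable/skiplist.h"

namespace ROCKSDB_NAMESPACE {
namespace {

using Key = uint64_t;

// Minimal three-way comparator, as in skiplist_test.cc.
struct TestComparator {
  int operator()(const Key& a, const Key& b) const {
    if (a < b) return -1;
    if (a > b) return +1;
    return 0;
  }
};

void Demo() {
  Arena arena;  // nodes are placement-new'd into arena memory
  TestComparator cmp;
  SkipList<Key, TestComparator> list(cmp, &arena);

  list.Insert(3);
  list.Insert(1);
  list.Insert(2);
  assert(list.Contains(2));

  SkipList<Key, TestComparator>::Iterator iter(&list);
  iter.Seek(2);  // first entry with key >= 2
  assert(iter.Valid() && iter.key() == 2);
  iter.Next();  // iteration is in sorted order: 1, 2, 3
  assert(iter.key() == 3);
}

}  // namespace
}  // namespace ROCKSDB_NAMESPACE

int main() {
  ROCKSDB_NAMESPACE::Demo();
  return 0;
}
```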
memtable/skiplist_test.cc
```diff
@@ -8,7 +8,9 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "memtable/skiplist.h"
+
 #include <set>
+
 #include "memory/arena.h"
 #include "rocksdb/env.h"
 #include "test_util/testharness.h"
@@ -169,7 +171,7 @@ class ConcurrentTest {
   static uint64_t hash(Key key) { return key & 0xff; }
 
   static uint64_t HashNumbers(uint64_t k, uint64_t g) {
-    uint64_t data[2] = { k, g };
+    uint64_t data[2] = {k, g};
     return Hash(reinterpret_cast<char*>(data), sizeof(data), 0);
   }
 
@@ -311,11 +313,7 @@ class TestState {
   int seed_;
   std::atomic<bool> quit_flag_;
 
-  enum ReaderState {
-    STARTING,
-    RUNNING,
-    DONE
-  };
+  enum ReaderState { STARTING, RUNNING, DONE };
 
   explicit TestState(int s)
       : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {}
```
memtable/skiplistrep.cc
```diff
@@ -21,7 +21,8 @@ class SkipListRep : public MemTableRep {
   const size_t lookahead_;
 
   friend class LookaheadIterator;
-public:
+
+ public:
   explicit SkipListRep(const MemTableRep::KeyComparator& compare,
                        Allocator* allocator, const SliceTransform* transform,
                        const size_t lookahead)
@@ -86,7 +87,8 @@ public:
     SkipListRep::Iterator iter(&skip_list_);
     Slice dummy_slice;
     for (iter.Seek(dummy_slice, k.memtable_key().data());
-         iter.Valid() && callback_func(callback_args, iter.key()); iter.Next()) {
+         iter.Valid() && callback_func(callback_args, iter.key());
+         iter.Next()) {
     }
   }
 
@@ -227,8 +229,8 @@ public:
   // the target key hasn't been found.
   class LookaheadIterator : public MemTableRep::Iterator {
    public:
-    explicit LookaheadIterator(const SkipListRep& rep) :
-      rep_(rep), iter_(&rep_.skip_list_), prev_(iter_) {}
+    explicit LookaheadIterator(const SkipListRep& rep)
+        : rep_(rep), iter_(&rep_.skip_list_), prev_(iter_) {}
 
     ~LookaheadIterator() override {}
 
@@ -271,9 +273,9 @@ public:
     }
 
     void Seek(const Slice& internal_key, const char* memtable_key) override {
-      const char *encoded_key =
-        (memtable_key != nullptr) ?
-          memtable_key : EncodeKey(&tmp_, internal_key);
+      const char* encoded_key = (memtable_key != nullptr)
+                                    ? memtable_key
+                                    : EncodeKey(&tmp_, internal_key);
 
       if (prev_.Valid() && rep_.cmp_(encoded_key, prev_.key()) >= 0) {
         // prev_.key() is smaller or equal to our target key; do a quick
@@ -323,19 +325,20 @@ public:
 
   MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override {
     if (lookahead_ > 0) {
-      void *mem =
+      void* mem =
           arena ? arena->AllocateAligned(sizeof(SkipListRep::LookaheadIterator))
-                : operator new(sizeof(SkipListRep::LookaheadIterator));
+                :
+                operator new(sizeof(SkipListRep::LookaheadIterator));
       return new (mem) SkipListRep::LookaheadIterator(*this);
     } else {
-      void *mem =
-          arena ? arena->AllocateAligned(sizeof(SkipListRep::Iterator))
-                : operator new(sizeof(SkipListRep::Iterator));
+      void* mem = arena ? arena->AllocateAligned(sizeof(SkipListRep::Iterator))
+                        :
+                        operator new(sizeof(SkipListRep::Iterator));
       return new (mem) SkipListRep::Iterator(&skip_list_);
     }
   }
 };
-}
+}  // namespace
 
 static std::unordered_map<std::string, OptionTypeInfo> skiplist_factory_info = {
 #ifndef ROCKSDB_LITE
```
memtable/stl_wrappers.h
```diff
@@ -29,5 +29,5 @@ struct Compare : private Base {
   }
 };
 
-}
+}  // namespace stl_wrappers
 }  // namespace ROCKSDB_NAMESPACE
```
memtable/vectorrep.cc
```diff
@@ -51,6 +51,7 @@ class VectorRep : public MemTableRep {
     std::string tmp_;  // For passing to EncodeKey
     bool mutable sorted_;
     void DoSort() const;
+
    public:
     explicit Iterator(class VectorRep* vrep,
                       std::shared_ptr<std::vector<const char*>> bucket,
@@ -123,12 +124,10 @@ void VectorRep::MarkReadOnly() {
 }
 
 size_t VectorRep::ApproximateMemoryUsage() {
-  return
-    sizeof(bucket_) + sizeof(*bucket_) +
-    bucket_->size() *
-    sizeof(
-      std::remove_reference<decltype(*bucket_)>::type::value_type
-    );
+  return sizeof(bucket_) + sizeof(*bucket_) +
+         bucket_->size() *
+             sizeof(
+                 std::remove_reference<decltype(*bucket_)>::type::value_type);
 }
 
 VectorRep::VectorRep(const KeyComparator& compare, Allocator* allocator,
@@ -144,11 +143,11 @@ VectorRep::VectorRep(const KeyComparator& compare, Allocator* allocator,
 VectorRep::Iterator::Iterator(class VectorRep* vrep,
                               std::shared_ptr<std::vector<const char*>> bucket,
                               const KeyComparator& compare)
-  : vrep_(vrep),
+    : vrep_(vrep),
       bucket_(bucket),
       cit_(bucket_->end()),
       compare_(compare),
-      sorted_(false) { }
+      sorted_(false) {}
 
 void VectorRep::Iterator::DoSort() const {
   // vrep is non-null means that we are working on an immutable memtable
@@ -216,12 +215,11 @@ void VectorRep::Iterator::Seek(const Slice& user_key,
   // Do binary search to find first value not less than the target
   const char* encoded_key =
       (memtable_key != nullptr) ? memtable_key : EncodeKey(&tmp_, user_key);
-  cit_ = std::equal_range(bucket_->begin(),
-                          bucket_->end(),
-                          encoded_key,
-                          [this] (const char* a, const char* b) {
+  cit_ = std::equal_range(bucket_->begin(), bucket_->end(), encoded_key,
+                          [this](const char* a, const char* b) {
                             return compare_(a, b) < 0;
-                          }).first;
+                          })
+             .first;
 }
 
 // Advance to the first entry with a key <= target
@@ -290,7 +288,7 @@ MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) {
   }
 }
 }
-}  // anon namespace
+}  // namespace
 
 static std::unordered_map<std::string, OptionTypeInfo> vector_rep_table_info = {
     {"count",
```
memtable/write_buffer_manager_test.cc
```diff
@@ -8,6 +8,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
 #include "rocksdb/write_buffer_manager.h"
+
 #include "test_util/testharness.h"
 
 namespace ROCKSDB_NAMESPACE {
```