Mirror of https://github.com/facebook/rocksdb.git
synced 2024-12-04 20:02:50 +00:00
4720ba4391
Summary: We haven't been actively maintaining RocksDB LITE recently and its size must have gone up significantly. We are removing the support. Most of the changes were done through the following command: unifdef -m -UROCKSDB_LITE `git grep -l ROCKSDB_LITE | egrep '[.](cc|h)'` by Peter Dillinger. Other changes were manually applied to build scripts, CircleCI manifests, places where ROCKSDB_LITE is used in an expression, and the file db_stress_test_base.cc. Pull Request resolved: https://github.com/facebook/rocksdb/pull/11147 Test Plan: See CI Reviewed By: pdillinger Differential Revision: D42796341 fbshipit-source-id: 4920e15fc2060c2cd2221330a6d0e5e65d4b7fe2
688 lines
24 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef GFLAGS
#include <cstdio>
int main() {
  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
  return 1;
}
#else

#include <atomic>
#include <iostream>
#include <memory>
#include <thread>
#include <type_traits>
#include <vector>

#include "db/dbformat.h"
#include "db/memtable.h"
#include "memory/arena.h"
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/comparator.h"
#include "rocksdb/convenience.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/system_clock.h"
#include "rocksdb/write_buffer_manager.h"
#include "test_util/testutil.h"
#include "util/gflags_compat.h"
#include "util/mutexlock.h"
#include "util/stop_watch.h"

using GFLAGS_NAMESPACE::ParseCommandLineFlags;
using GFLAGS_NAMESPACE::RegisterFlagValidator;
using GFLAGS_NAMESPACE::SetUsageMessage;

DEFINE_string(benchmarks, "fillrandom",
              "Comma-separated list of benchmarks to run. Options:\n"
              "\tfillrandom -- write N random values\n"
              "\tfillseq -- write N values in sequential order\n"
              "\treadrandom -- read N values in random order\n"
              "\treadseq -- scan the DB\n"
              "\treadwrite -- 1 thread writes while N - 1 threads "
              "do random\n"
              "\t reads\n"
              "\tseqreadwrite -- 1 thread writes while N - 1 threads "
              "do scans\n");

DEFINE_string(memtablerep, "skiplist",
              "Which implementation of memtablerep to use. See "
              "include/memtablerep.h for\n"
              " more details. Options:\n"
              "\tskiplist -- backed by a skiplist\n"
              "\tvector -- backed by an std::vector\n"
              "\thashskiplist -- backed by a hash skip list\n"
              "\thashlinklist -- backed by a hash linked list\n"
              "\tcuckoo -- backed by a cuckoo hash table");

DEFINE_int64(bucket_count, 1000000,
             "bucket_count parameter to pass into NewHashSkiplistRepFactory or "
             "NewHashLinkListRepFactory");

DEFINE_int32(
    hashskiplist_height, 4,
    "skiplist_height parameter to pass into NewHashSkiplistRepFactory");

DEFINE_int32(
    hashskiplist_branching_factor, 4,
    "branching_factor parameter to pass into NewHashSkiplistRepFactory");

DEFINE_int32(
    huge_page_tlb_size, 0,
    "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory");

DEFINE_int32(bucket_entries_logging_threshold, 4096,
             "bucket_entries_logging_threshold parameter to pass into "
             "NewHashLinkListRepFactory");

DEFINE_bool(if_log_bucket_dist_when_flash, true,
            "if_log_bucket_dist_when_flash parameter to pass into "
            "NewHashLinkListRepFactory");

DEFINE_int32(
    threshold_use_skiplist, 256,
    "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory");

DEFINE_int64(write_buffer_size, 256,
             "write_buffer_size parameter to pass into WriteBufferManager");

DEFINE_int32(
    num_threads, 1,
    "Number of concurrent threads to run. If the benchmark includes writes,\n"
    "then at most one thread will be a writer");

DEFINE_int32(num_operations, 1000000,
             "Number of operations to do for write and random read benchmarks");

DEFINE_int32(num_scans, 10,
             "Number of times for each thread to scan the memtablerep for "
             "sequential read "
             "benchmarks");

DEFINE_int32(item_size, 100, "Number of bytes each item should be");

DEFINE_int32(prefix_length, 8,
             "Prefix length to pass into NewFixedPrefixTransform");

/* VectorRep settings */
DEFINE_int64(vectorrep_count, 0,
             "Number of entries to reserve on VectorRep initialization");

DEFINE_int64(seed, 0,
             "Seed base for random number generators. "
             "When 0 it is deterministic.");

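// Example invocation (illustrative sketch, not part of this file; it assumes
// the binary built from this source is named memtablerep_bench and uses only
// the flags defined above):
//
//   ./memtablerep_bench --benchmarks=fillrandom,readrandom \
//       --memtablerep=skiplist --num_operations=1000000 --num_threads=1
//
// readwrite and seqreadwrite run one writer thread plus num_threads - 1
// reader threads.
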
namespace ROCKSDB_NAMESPACE {

namespace {
struct CallbackVerifyArgs {
  bool found;
  LookupKey* key;
  MemTableRep* table;
  InternalKeyComparator* comparator;
};
}  // namespace

// Helper for quickly generating random data.
class RandomGenerator {
 private:
  std::string data_;
  unsigned int pos_;

 public:
  RandomGenerator() {
    Random rnd(301);
    auto size = (unsigned)std::max(1048576, FLAGS_item_size);
    data_ = rnd.RandomString(size);
    pos_ = 0;
  }

  Slice Generate(unsigned int len) {
    assert(len <= data_.size());
    if (pos_ + len > data_.size()) {
      pos_ = 0;
    }
    pos_ += len;
    return Slice(data_.data() + pos_ - len, len);
  }
};

enum WriteMode { SEQUENTIAL, RANDOM, UNIQUE_RANDOM };

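// Produces the user keys consumed by the benchmark threads. As implemented in
// Next() below: SEQUENTIAL yields 0, 1, 2, ...; RANDOM yields rand % num and
// may repeat keys; UNIQUE_RANDOM yields a pre-shuffled permutation of
// [0, num).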
class KeyGenerator {
 public:
  KeyGenerator(Random64* rand, WriteMode mode, uint64_t num)
      : rand_(rand), mode_(mode), num_(num), next_(0) {
    if (mode_ == UNIQUE_RANDOM) {
      // NOTE: if memory consumption of this approach becomes a concern,
      // we can either break it into pieces and only random shuffle a section
      // each time. Alternatively, use a bit map implementation
      // (https://reviews.facebook.net/differential/diff/54627/)
      values_.resize(num_);
      for (uint64_t i = 0; i < num_; ++i) {
        values_[i] = i;
      }
      RandomShuffle(values_.begin(), values_.end(),
                    static_cast<uint32_t>(FLAGS_seed));
    }
  }

  uint64_t Next() {
    switch (mode_) {
      case SEQUENTIAL:
        return next_++;
      case RANDOM:
        return rand_->Next() % num_;
      case UNIQUE_RANDOM:
        return values_[next_++];
    }
    assert(false);
    return std::numeric_limits<uint64_t>::max();
  }

 private:
  Random64* rand_;
  WriteMode mode_;
  const uint64_t num_;
  uint64_t next_;
  std::vector<uint64_t> values_;
};

class BenchmarkThread {
 public:
  explicit BenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                           uint64_t* bytes_written, uint64_t* bytes_read,
                           uint64_t* sequence, uint64_t num_ops,
                           uint64_t* read_hits)
      : table_(table),
        key_gen_(key_gen),
        bytes_written_(bytes_written),
        bytes_read_(bytes_read),
        sequence_(sequence),
        num_ops_(num_ops),
        read_hits_(read_hits) {}

  virtual void operator()() = 0;
  virtual ~BenchmarkThread() {}

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* bytes_written_;
  uint64_t* bytes_read_;
  uint64_t* sequence_;
  uint64_t num_ops_;
  uint64_t* read_hits_;
  RandomGenerator generator_;
};

class FillBenchmarkThread : public BenchmarkThread {
 public:
  FillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

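  // FillOne() builds one entry directly in memory handed out by
  // MemTableRep::Allocate(). Layout: varint32 internal key size (16),
  // 8-byte user key, 8-byte sequence number, then FLAGS_item_size value
  // bytes. This is a simplified encoding for benchmarking; a real MemTable
  // entry also length-prefixes the value and packs a value type tag together
  // with the sequence number.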
  void FillOne() {
    char* buf = nullptr;
    auto internal_key_size = 16;
    auto encoded_len =
        FLAGS_item_size + VarintLength(internal_key_size) + internal_key_size;
    KeyHandle handle = table_->Allocate(encoded_len, &buf);
    assert(buf != nullptr);
    char* p = EncodeVarint32(buf, internal_key_size);
    auto key = key_gen_->Next();
    EncodeFixed64(p, key);
    p += 8;
    EncodeFixed64(p, ++(*sequence_));
    p += 8;
    Slice bytes = generator_.Generate(FLAGS_item_size);
    memcpy(p, bytes.data(), FLAGS_item_size);
    p += FLAGS_item_size;
    assert(p == buf + encoded_len);
    table_->Insert(handle);
    *bytes_written_ += encoded_len;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      FillOne();
    }
  }
};

class ConcurrentFillBenchmarkThread : public FillBenchmarkThread {
 public:
  ConcurrentFillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : FillBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    // # of read threads will be total threads - write threads (always 1).
    // Keep writing until all reads complete.
    while ((*threads_done_).load() < (FLAGS_num_threads - 1)) {
      FillOne();
    }
  }

 private:
  std::atomic_int* threads_done_;
};

class ReadBenchmarkThread : public BenchmarkThread {
 public:
  ReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                      uint64_t* bytes_written, uint64_t* bytes_read,
                      uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

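  // Callback passed to MemTableRep::Get(). Each candidate entry starts with a
  // varint32 key length followed by the internal key, whose last 8 bytes are
  // the sequence/type tag, so the user key is the first key_length - 8 bytes.
  // Returning false asks Get() to stop scanning, so only the first candidate
  // entry is checked against the lookup key.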
  static bool callback(void* arg, const char* entry) {
    CallbackVerifyArgs* callback_args = static_cast<CallbackVerifyArgs*>(arg);
    assert(callback_args != nullptr);
    uint32_t key_length;
    const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length);
    if ((callback_args->comparator)
            ->user_comparator()
            ->Equal(Slice(key_ptr, key_length - 8),
                    callback_args->key->user_key())) {
      callback_args->found = true;
    }
    return false;
  }

  void ReadOne() {
    std::string user_key;
    auto key = key_gen_->Next();
    PutFixed64(&user_key, key);
    LookupKey lookup_key(user_key, *sequence_);
    InternalKeyComparator internal_key_comp(BytewiseComparator());
    CallbackVerifyArgs verify_args;
    verify_args.found = false;
    verify_args.key = &lookup_key;
    verify_args.table = table_;
    verify_args.comparator = &internal_key_comp;
    table_->Get(lookup_key, &verify_args, callback);
    if (verify_args.found) {
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
      ++*read_hits_;
    }
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
  }
};

class SeqReadBenchmarkThread : public BenchmarkThread {
 public:
  SeqReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* bytes_written, uint64_t* bytes_read,
                         uint64_t* sequence, uint64_t num_ops,
                         uint64_t* read_hits)
      : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                        num_ops, read_hits) {}

  void ReadOneSeq() {
    std::unique_ptr<MemTableRep::Iterator> iter(table_->GetIterator());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      // pretend to read the value
      *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size;
    }
    ++*read_hits_;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
  }
};

class ConcurrentReadBenchmarkThread : public ReadBenchmarkThread {
 public:
  ConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                uint64_t* bytes_written, uint64_t* bytes_read,
                                uint64_t* sequence, uint64_t num_ops,
                                uint64_t* read_hits,
                                std::atomic_int* threads_done)
      : ReadBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence,
                            num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOne();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

class SeqConcurrentReadBenchmarkThread : public SeqReadBenchmarkThread {
 public:
  SeqConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen,
                                   uint64_t* bytes_written,
                                   uint64_t* bytes_read, uint64_t* sequence,
                                   uint64_t num_ops, uint64_t* read_hits,
                                   std::atomic_int* threads_done)
      : SeqReadBenchmarkThread(table, key_gen, bytes_written, bytes_read,
                               sequence, num_ops, read_hits) {
    threads_done_ = threads_done;
  }

  void operator()() override {
    for (unsigned int i = 0; i < num_ops_; ++i) {
      ReadOneSeq();
    }
    ++*threads_done_;
  }

 private:
  std::atomic_int* threads_done_;
};

class Benchmark {
 public:
  explicit Benchmark(MemTableRep* table, KeyGenerator* key_gen,
                     uint64_t* sequence, uint32_t num_threads)
      : table_(table),
        key_gen_(key_gen),
        sequence_(sequence),
        num_threads_(num_threads) {}

  virtual ~Benchmark() {}
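  // Runs the benchmark threads and reports elapsed time in microseconds, MiB
  // written/read, throughput in MiB/s, and us/op. Note that us/op divides the
  // total elapsed time by the per-thread operation count.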
  virtual void Run() {
    std::cout << "Number of threads: " << num_threads_ << std::endl;
    std::vector<port::Thread> threads;
    uint64_t bytes_written = 0;
    uint64_t bytes_read = 0;
    uint64_t read_hits = 0;
    StopWatchNano timer(SystemClock::Default().get(), true);
    RunThreads(&threads, &bytes_written, &bytes_read, true, &read_hits);
    auto elapsed_time = static_cast<double>(timer.ElapsedNanos() / 1000);
    std::cout << "Elapsed time: " << static_cast<int>(elapsed_time) << " us"
              << std::endl;

    if (bytes_written > 0) {
      auto MiB_written = static_cast<double>(bytes_written) / (1 << 20);
      auto write_throughput = MiB_written / (elapsed_time / 1000000);
      std::cout << "Total bytes written: " << MiB_written << " MiB"
                << std::endl;
      std::cout << "Write throughput: " << write_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_write_ops_per_thread_;
      std::cout << "write us/op: " << us_per_op << std::endl;
    }
    if (bytes_read > 0) {
      auto MiB_read = static_cast<double>(bytes_read) / (1 << 20);
      auto read_throughput = MiB_read / (elapsed_time / 1000000);
      std::cout << "Total bytes read: " << MiB_read << " MiB" << std::endl;
      std::cout << "Read throughput: " << read_throughput << " MiB/s"
                << std::endl;
      auto us_per_op = elapsed_time / num_read_ops_per_thread_;
      std::cout << "read us/op: " << us_per_op << std::endl;
    }
  }

  virtual void RunThreads(std::vector<port::Thread>* threads,
                          uint64_t* bytes_written, uint64_t* bytes_read,
                          bool write, uint64_t* read_hits) = 0;

 protected:
  MemTableRep* table_;
  KeyGenerator* key_gen_;
  uint64_t* sequence_;
  uint64_t num_write_ops_per_thread_ = 0;
  uint64_t num_read_ops_per_thread_ = 0;
  const uint32_t num_threads_;
};

class FillBenchmark : public Benchmark {
 public:
  explicit FillBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, 1) {
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* /*threads*/,
                  uint64_t* bytes_written, uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_,
                        num_write_ops_per_thread_, read_hits)();
  }
};

class ReadBenchmark : public Benchmark {
 public:
  explicit ReadBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                         uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_operations / FLAGS_num_threads;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadBenchmarkThread(table_, key_gen_, bytes_written, bytes_read,
                              sequence_, num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
    std::cout << "read hit%: "
              << (static_cast<double>(*read_hits) / FLAGS_num_operations) * 100
              << std::endl;
  }
};

class SeqReadBenchmark : public Benchmark {
 public:
  explicit SeqReadBenchmark(MemTableRep* table, uint64_t* sequence)
      : Benchmark(table, nullptr, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ = FLAGS_num_scans;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    for (int i = 0; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(SeqReadBenchmarkThread(
          table_, key_gen_, bytes_written, bytes_read, sequence_,
          num_read_ops_per_thread_, read_hits));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

template <class ReadThreadType>
class ReadWriteBenchmark : public Benchmark {
 public:
  explicit ReadWriteBenchmark(MemTableRep* table, KeyGenerator* key_gen,
                              uint64_t* sequence)
      : Benchmark(table, key_gen, sequence, FLAGS_num_threads) {
    num_read_ops_per_thread_ =
        FLAGS_num_threads <= 1
            ? 0
            : (FLAGS_num_operations / (FLAGS_num_threads - 1));
    num_write_ops_per_thread_ = FLAGS_num_operations;
  }

  void RunThreads(std::vector<port::Thread>* threads, uint64_t* bytes_written,
                  uint64_t* bytes_read, bool /*write*/,
                  uint64_t* read_hits) override {
    std::atomic_int threads_done;
    threads_done.store(0);
    threads->emplace_back(ConcurrentFillBenchmarkThread(
        table_, key_gen_, bytes_written, bytes_read, sequence_,
        num_write_ops_per_thread_, read_hits, &threads_done));
    for (int i = 1; i < FLAGS_num_threads; ++i) {
      threads->emplace_back(
          ReadThreadType(table_, key_gen_, bytes_written, bytes_read, sequence_,
                         num_read_ops_per_thread_, read_hits, &threads_done));
    }
    for (auto& thread : *threads) {
      thread.join();
    }
  }
};

}  // namespace ROCKSDB_NAMESPACE

void PrintWarnings() {
#if defined(__GNUC__) && !defined(__OPTIMIZE__)
  fprintf(stdout,
          "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n");
#endif
#ifndef NDEBUG
  fprintf(stdout,
          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
#endif
}

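// Benchmarks run in the order listed in --benchmarks. The fill benchmarks
// create a fresh memtablerep; readrandom and readseq do not, so they operate
// on whatever a preceding fill benchmark left behind (for example,
// --benchmarks=fillrandom,readrandom).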
int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
                  " [OPTIONS]...");
  ParseCommandLineFlags(&argc, &argv, true);

  PrintWarnings();

  ROCKSDB_NAMESPACE::Options options;

  std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRepFactory> factory;
  if (FLAGS_memtablerep == "skiplist") {
    factory.reset(new ROCKSDB_NAMESPACE::SkipListFactory);
  } else if (FLAGS_memtablerep == "vector") {
    factory.reset(new ROCKSDB_NAMESPACE::VectorRepFactory);
  } else if (FLAGS_memtablerep == "hashskiplist" ||
             FLAGS_memtablerep == "prefix_hash") {
    factory.reset(ROCKSDB_NAMESPACE::NewHashSkipListRepFactory(
        FLAGS_bucket_count, FLAGS_hashskiplist_height,
        FLAGS_hashskiplist_branching_factor));
    options.prefix_extractor.reset(
        ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
  } else if (FLAGS_memtablerep == "hashlinklist" ||
             FLAGS_memtablerep == "hash_linkedlist") {
    factory.reset(ROCKSDB_NAMESPACE::NewHashLinkListRepFactory(
        FLAGS_bucket_count, FLAGS_huge_page_tlb_size,
        FLAGS_bucket_entries_logging_threshold,
        FLAGS_if_log_bucket_dist_when_flash, FLAGS_threshold_use_skiplist));
    options.prefix_extractor.reset(
        ROCKSDB_NAMESPACE::NewFixedPrefixTransform(FLAGS_prefix_length));
  } else {
    ROCKSDB_NAMESPACE::ConfigOptions config_options;
    config_options.ignore_unsupported_options = false;

    ROCKSDB_NAMESPACE::Status s =
        ROCKSDB_NAMESPACE::MemTableRepFactory::CreateFromString(
            config_options, FLAGS_memtablerep, &factory);
    if (!s.ok()) {
      fprintf(stdout, "Unknown memtablerep: %s\n", s.ToString().c_str());
      exit(1);
    }
  }

  ROCKSDB_NAMESPACE::InternalKeyComparator internal_key_comp(
      ROCKSDB_NAMESPACE::BytewiseComparator());
  ROCKSDB_NAMESPACE::MemTable::KeyComparator key_comp(internal_key_comp);
  ROCKSDB_NAMESPACE::Arena arena;
  ROCKSDB_NAMESPACE::WriteBufferManager wb(FLAGS_write_buffer_size);
  uint64_t sequence;
  auto createMemtableRep = [&] {
    sequence = 0;
    return factory->CreateMemTableRep(key_comp, &arena,
                                      options.prefix_extractor.get(),
                                      options.info_log.get());
  };
  std::unique_ptr<ROCKSDB_NAMESPACE::MemTableRep> memtablerep;
  ROCKSDB_NAMESPACE::Random64 rng(FLAGS_seed);
  const char* benchmarks = FLAGS_benchmarks.c_str();
  while (benchmarks != nullptr) {
    std::unique_ptr<ROCKSDB_NAMESPACE::KeyGenerator> key_gen;
    const char* sep = strchr(benchmarks, ',');
    ROCKSDB_NAMESPACE::Slice name;
    if (sep == nullptr) {
      name = benchmarks;
      benchmarks = nullptr;
    } else {
      name = ROCKSDB_NAMESPACE::Slice(benchmarks, sep - benchmarks);
      benchmarks = sep + 1;
    }
    std::unique_ptr<ROCKSDB_NAMESPACE::Benchmark> benchmark;
    if (name == ROCKSDB_NAMESPACE::Slice("fillseq")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("fillrandom")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::UNIQUE_RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::FillBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readrandom")) {
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadBenchmark(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readseq")) {
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::SEQUENTIAL, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::SeqReadBenchmark(memtablerep.get(),
                                                              &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("readwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
                      ROCKSDB_NAMESPACE::ConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else if (name == ROCKSDB_NAMESPACE::Slice("seqreadwrite")) {
      memtablerep.reset(createMemtableRep());
      key_gen.reset(new ROCKSDB_NAMESPACE::KeyGenerator(
          &rng, ROCKSDB_NAMESPACE::RANDOM, FLAGS_num_operations));
      benchmark.reset(new ROCKSDB_NAMESPACE::ReadWriteBenchmark<
                      ROCKSDB_NAMESPACE::SeqConcurrentReadBenchmarkThread>(
          memtablerep.get(), key_gen.get(), &sequence));
    } else {
      std::cout << "WARNING: skipping unknown benchmark '" << name.ToString()
                << "'" << std::endl;
      continue;
    }
    std::cout << "Running " << name.ToString() << std::endl;
    benchmark->Run();
  }

  return 0;
}

#endif  // GFLAGS