Add more micro-benchmark tests (#9436)
Summary:
* Add more micro-benchmark tests
* Expose an API in DBImpl for waiting for compactions (still not visible to the user)
* Add argument name for ribbon_bench
* Remove benchmark run from CI, as it runs too long

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9436

Test Plan: CI

Reviewed By: riversand963

Differential Revision: D33777836

Pulled By: jay-zhuang

fbshipit-source-id: c05de3bc082cc05b5d019f00b324e774bf4bbd96
parent f6d7ec1d02
commit 980b9ff385
@@ -323,7 +323,7 @@ jobs:
       - pre-steps
       - install-gflags
       - install-benchmark
-      - run: (mkdir build && cd build && cmake -DWITH_GFLAGS=1 -DWITH_BENCHMARK=1 .. && make V=1 -j20 && ctest -j20 && make microbench) 2>&1 | .circleci/cat_ignore_eagain
+      - run: (mkdir build && cd build && cmake -DWITH_GFLAGS=1 -DWITH_BENCHMARK=1 .. && make V=1 -j20 && ctest -j20) 2>&1 | .circleci/cat_ignore_eagain
       - post-steps

   build-linux-unity-and-headers:
@@ -952,6 +952,11 @@ class DBImpl : public DB {
 
   VersionSet* GetVersionSet() const { return versions_.get(); }
 
+  // Wait for any compaction to finish.
+  // The bool parameter additionally waits for unscheduled_compactions_ == 0,
+  // but this is only for the special CancelledCompactions test.
+  Status WaitForCompact(bool waitUnscheduled = false);
+
 #ifndef NDEBUG
   // Compact any files in the named level that overlap [*begin, *end]
   Status TEST_CompactRange(int level, const Slice* begin, const Slice* end,
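Judging by the class context, this hunk is the declaration side of the new API in db/db_impl/db_impl.h. Below is a minimal sketch of the two call forms and what each one waits for, assuming internal code that already holds a DBImpl*; DrainBackgroundWork is a hypothetical helper, not part of this PR.

// Hypothetical sketch (not part of this PR): the two call forms and what each
// waits for. Assumes code that already holds a DBImpl* (internal header).
#include "db/db_impl/db_impl.h"

namespace ROCKSDB_NAMESPACE {

Status DrainBackgroundWork(DBImpl* impl, bool include_unscheduled) {
  if (include_unscheduled) {
    // Additionally waits until unscheduled_compactions_ drops to 0; per the
    // header comment this is only meant for the CancelledCompactions test.
    return impl->WaitForCompact(/*waitUnscheduled=*/true);
  }
  // Default form: wait for scheduled compactions and flushes to drain.
  return impl->WaitForCompact();
}

}  // namespace ROCKSDB_NAMESPACE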
@@ -3721,4 +3721,22 @@ void DBImpl::GetSnapshotContext(
   }
   *snapshot_seqs = snapshots_.GetAll(earliest_write_conflict_snapshot);
 }
+
+Status DBImpl::WaitForCompact(bool wait_unscheduled) {
+  // Wait until the compaction completes.
+
+  // TODO: there is a bug here. This function does not necessarily wait for a
+  // compaction: it actually waits for a scheduled compaction OR flush to
+  // finish.
+
+  InstrumentedMutexLock l(&mutex_);
+  while ((bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
+          bg_flush_scheduled_ ||
+          (wait_unscheduled && unscheduled_compactions_)) &&
+         (error_handler_.GetBGError().ok())) {
+    bg_cv_.Wait();
+  }
+  return error_handler_.GetBGError();
+}
+
 }  // namespace ROCKSDB_NAMESPACE
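The TODO above notes that WaitForCompact may return once a scheduled flush, rather than a compaction, has finished. A hedged workaround sketch follows: it polls the public properties DB::Properties::kCompactionPending and kNumRunningCompactions until compactions are genuinely drained. WaitUntilCompactionsDrained and the 100 ms poll interval are assumptions, not RocksDB code.

// Hypothetical workaround sketch (not part of this PR): poll public DB
// properties until no compaction is pending or running.
#include <chrono>
#include <cstdint>
#include <thread>

#include "rocksdb/db.h"

rocksdb::Status WaitUntilCompactionsDrained(rocksdb::DB* db) {
  uint64_t pending = 1;
  uint64_t running = 1;
  while (true) {
    if (!db->GetIntProperty(rocksdb::DB::Properties::kCompactionPending,
                            &pending) ||
        !db->GetIntProperty(rocksdb::DB::Properties::kNumRunningCompactions,
                            &running)) {
      return rocksdb::Status::NotSupported("property not available");
    }
    if (pending == 0 && running == 0) {
      return rocksdb::Status::OK();
    }
    // Arbitrary poll interval; an assumption for this sketch.
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }
}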
@@ -175,19 +175,7 @@ Status DBImpl::TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family) {
 
 Status DBImpl::TEST_WaitForCompact(bool wait_unscheduled) {
-  // Wait until the compaction completes
-
-  // TODO: a bug here. This function actually does not necessarily
-  // wait for compact. It actually waits for scheduled compaction
-  // OR flush to finish.
-
-  InstrumentedMutexLock l(&mutex_);
-  while ((bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
-          bg_flush_scheduled_ ||
-          (wait_unscheduled && unscheduled_compactions_)) &&
-         (error_handler_.GetBGError().ok())) {
-    bg_cv_.Wait();
-  }
-  return error_handler_.GetBGError();
+  return WaitForCompact(wait_unscheduled);
 }
 
 Status DBImpl::TEST_WaitForPurge() {
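With the real logic moved into DBImpl::WaitForCompact, the debug helper TEST_WaitForCompact (this hunk sits among the other TEST_ functions, so it appears to be db_impl_debug.cc) becomes a one-line wrapper. Below is a hypothetical debug-build usage sketch; it assumes the DB* returned by DB::Open is the root DBImpl with no wrapper stacked on top, and FlushAndDrain is an invented name.

// Hypothetical debug-build-only sketch: trigger a flush, then block until the
// background work it schedules has drained. Assumes `db` is the root DBImpl.
#include "db/db_impl/db_impl.h"
#include "rocksdb/db.h"

#ifndef NDEBUG
rocksdb::Status FlushAndDrain(rocksdb::DB* db) {
  rocksdb::Status s = db->Flush(rocksdb::FlushOptions());
  if (!s.ok()) {
    return s;
  }
  auto* impl = static_cast<rocksdb::DBImpl*>(db->GetRootDB());
  // Equivalent to impl->WaitForCompact(false) after this change.
  return impl->TEST_WaitForCompact();
}
#endif  // NDEBUG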
File diff suppressed because it is too large
@@ -5,8 +5,7 @@
 
 // this is a simple micro-benchmark comparing the ribbon filter vs. other filters;
 // for a more comprehensive benchmark, please check the dedicated util/filter_bench.
-#include <benchmark/benchmark.h>
-
+#include "benchmark/benchmark.h"
 #include "table/block_based/filter_policy_internal.h"
 #include "table/block_based/mock_block_based_table.h"
 
@@ -53,18 +52,18 @@ struct KeyMaker {
 // 2. average data key length
 // 3. data entry number
 static void CustomArguments(benchmark::internal::Benchmark *b) {
-  for (int filterMode :
+  for (int filter_mode :
        {BloomFilterPolicy::kLegacyBloom, BloomFilterPolicy::kFastLocalBloom,
         BloomFilterPolicy::kStandard128Ribbon}) {
     // for (int bits_per_key : {4, 10, 20, 30}) {
     for (int bits_per_key : {10, 20}) {
       for (int key_len_avg : {10, 100}) {
         for (int64_t entry_num : {1 << 10, 1 << 20}) {
-          b->Args({filterMode, bits_per_key, key_len_avg, entry_num});
+          b->Args({filter_mode, bits_per_key, key_len_avg, entry_num});
         }
       }
     }
   }
+  b->ArgNames({"filter_mode", "bits_per_key", "key_len_avg", "entry_num"});
 }
 
 static void FilterBuild(benchmark::State &state) {
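CustomArguments registers the Cartesian product of filter mode, bits-per-key, average key length, and entry count, and the new b->ArgNames(...) call labels each argument position in the reported benchmark name. The sketch below shows how such a generator plugs into Google Benchmark via Apply() and how the values come back out through state.range(); it assumes it sits in the same translation unit as CustomArguments above, and FilterBuildSketch with its body is illustrative, not the PR's actual FilterBuild.

// Sketch of the registration/consumption pattern used by the file above.
#include <benchmark/benchmark.h>

static void FilterBuildSketch(benchmark::State &state) {
  // Positions match CustomArguments/ArgNames:
  //   0: filter_mode, 1: bits_per_key, 2: key_len_avg, 3: entry_num
  const int filter_mode = static_cast<int>(state.range(0));
  const int bits_per_key = static_cast<int>(state.range(1));
  const int key_len_avg = static_cast<int>(state.range(2));
  const int64_t entry_num = state.range(3);
  for (auto _ : state) {
    // ... build a filter of `entry_num` keys of ~`key_len_avg` bytes here ...
    benchmark::DoNotOptimize(filter_mode + bits_per_key + key_len_avg +
                             entry_num);
  }
}
// Registers one benchmark per Args() combination; with ArgNames the reported
// names read like
//   FilterBuildSketch/filter_mode:0/bits_per_key:10/key_len_avg:10/entry_num:1024
BENCHMARK(FilterBuildSketch)->Apply(CustomArguments);

BENCHMARK_MAIN();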