// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/db_bench_tool.h"

#include "db/db_impl/db_impl.h"
#include "options/options_parser.h"
#include "rocksdb/utilities/options_util.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/random.h"

#ifdef GFLAGS
#include "util/gflags_compat.h"

namespace ROCKSDB_NAMESPACE {
namespace {
static const int kMaxArgCount = 100;
static const size_t kArgBufferSize = 100000;
}  // namespace

class DBBenchTest : public testing::Test {
 public:
  DBBenchTest() : rnd_(0xFB) {
    test_path_ = test::PerThreadDBPath("db_bench_test");
    Env::Default()->CreateDir(test_path_);
    db_path_ = test_path_ + "/db";
    wal_path_ = test_path_ + "/wal";
  }

  ~DBBenchTest() {
    // DestroyDB(db_path_, Options());
  }
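
  // Resets the in-memory command line built by AppendArgs() so a test can
  // assemble a fresh one.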
  void ResetArgs() {
    argc_ = 0;
    cursor_ = 0;
    memset(arg_buffer_, 0, kArgBufferSize);
  }
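
  // Copies each string into arg_buffer_ and records a pointer to it in
  // argv_, building an (argc, argv) pair that can be handed to
  // db_bench_tool() as if it came from a real command line.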
  void AppendArgs(const std::vector<std::string>& args) {
    for (const auto& arg : args) {
      ASSERT_LE(cursor_ + arg.size() + 1, kArgBufferSize);
      ASSERT_LE(argc_ + 1, kMaxArgCount);
      snprintf(arg_buffer_ + cursor_, arg.size() + 1, "%s", arg.c_str());

      argv_[argc_++] = arg_buffer_ + cursor_;
      cursor_ += arg.size() + 1;
    }
  }

  // Gets the default options for this test/db_bench.
  // Note that db_bench may change some of the default option values, and so
  // may the database itself. The options changed by db_bench are specified
  // here; the ones changed by the DB are applied via SanitizeOptions.
  Options GetDefaultOptions(CompactionStyle style = kCompactionStyleLevel,
                            int levels = 7) const {
    Options opt;

    opt.create_if_missing = true;
    opt.max_open_files = 256;
    opt.max_background_compactions = 10;
    opt.dump_malloc_stats = true;  // db_bench uses a different default
    opt.compaction_style = style;
    opt.num_levels = levels;
    opt.compression = kNoCompression;
    opt.arena_block_size = 8388608;

    return SanitizeOptions(db_path_, opt);
  }

  void RunDbBench(const std::string& options_file_name) {
    AppendArgs({"./db_bench", "--benchmarks=fillseq", "--use_existing_db=0",
                "--num=1000", "--compression_type=none", "--db=" + db_path_,
                "--wal_dir=" + wal_path_,
                "--options_file=" + options_file_name});
    ASSERT_EQ(0, db_bench_tool(argc(), argv()));
  }
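
  // For reference, the AppendArgs() call above builds roughly the same
  // command line as invoking db_bench directly (paths are illustrative):
  //
  //   ./db_bench --benchmarks=fillseq --use_existing_db=0 --num=1000 \
  //     --compression_type=none --db=<test_path>/db \
  //     --wal_dir=<test_path>/wal --options_file=<options_file>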
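
  // Loads the OPTIONS file persisted by the last benchmark run and checks
  // that it matches `opt` exactly; also verifies that default-constructed
  // options do NOT match, guarding against a vacuous comparison.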
  void VerifyOptions(const Options& opt) {
    DBOptions loaded_db_opts;
    ConfigOptions config_opts;
    config_opts.ignore_unknown_options = false;
    config_opts.input_strings_escaped = true;
    config_opts.env = Env::Default();
    std::vector<ColumnFamilyDescriptor> cf_descs;
    ASSERT_OK(
        LoadLatestOptions(config_opts, db_path_, &loaded_db_opts, &cf_descs));

    ConfigOptions exact;
    exact.input_strings_escaped = false;
    exact.sanity_level = ConfigOptions::kSanityLevelExactMatch;
    ASSERT_OK(RocksDBOptionsParser::VerifyDBOptions(exact, DBOptions(opt),
                                                    loaded_db_opts));
    ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(
        exact, ColumnFamilyOptions(opt), cf_descs[0].options));

    // Check with the default RocksDB options and expect failure.
    ASSERT_NOK(RocksDBOptionsParser::VerifyDBOptions(exact, DBOptions(),
                                                     loaded_db_opts));
    ASSERT_NOK(RocksDBOptionsParser::VerifyCFOptions(
        exact, ColumnFamilyOptions(), cf_descs[0].options));
  }

  char** argv() { return argv_; }

  int argc() { return argc_; }

  std::string db_path_;
  std::string test_path_;
  std::string wal_path_;

  char arg_buffer_[kArgBufferSize];
  char* argv_[kMaxArgCount];
  int argc_ = 0;
  int cursor_ = 0;
  Random rnd_;
};
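
// Persist GetDefaultOptions() to an OPTIONS file, run db_bench against it,
// and verify that the options the DB writes back match what was persisted.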
TEST_F(DBBenchTest, OptionsFile) {
  const std::string kOptionsFileName = test_path_ + "/OPTIONS_test";
  Options opt = GetDefaultOptions();
  ASSERT_OK(PersistRocksDBOptions(WriteOptions(), DBOptions(opt), {"default"},
                                  {ColumnFamilyOptions(opt)}, kOptionsFileName,
                                  opt.env->GetFileSystem().get()));

  // Override the following options, as db_bench will not take them from the
  // options file.
  opt.wal_dir = wal_path_;

  RunDbBench(kOptionsFileName);
  opt.delayed_write_rate = 16 * 1024 * 1024;  // Set by SanitizeOptions

  VerifyOptions(opt);
}

TEST_F(DBBenchTest, OptionsFileUniversal) {
  const std::string kOptionsFileName = test_path_ + "/OPTIONS_test";

  Options opt = GetDefaultOptions(kCompactionStyleUniversal, 1);

  ASSERT_OK(PersistRocksDBOptions(WriteOptions(), DBOptions(opt), {"default"},
                                  {ColumnFamilyOptions(opt)}, kOptionsFileName,
                                  opt.env->GetFileSystem().get()));

  // Override the following options, as db_bench will not take them from the
  // options file.
  opt.wal_dir = wal_path_;
  RunDbBench(kOptionsFileName);

  VerifyOptions(opt);
}

TEST_F(DBBenchTest, OptionsFileMultiLevelUniversal) {
  const std::string kOptionsFileName = test_path_ + "/OPTIONS_test";

  Options opt = GetDefaultOptions(kCompactionStyleUniversal, 12);

  ASSERT_OK(PersistRocksDBOptions(WriteOptions(), DBOptions(opt), {"default"},
                                  {ColumnFamilyOptions(opt)}, kOptionsFileName,
                                  opt.env->GetFileSystem().get()));

  // Override the following options, as db_bench will not take them from the
  // options file.
  opt.wal_dir = wal_path_;

  RunDbBench(kOptionsFileName);
  VerifyOptions(opt);
}
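
// A hand-written OPTIONS file in RocksDB's INI-style options format, used by
// the OptionsFileFromFile test below.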
const std::string options_file_content = R"OPTIONS_FILE(
[Version]
rocksdb_version=4.3.1
options_file_version=1.1

[DBOptions]
wal_bytes_per_sync=1048576
delete_obsolete_files_period_micros=0
WAL_ttl_seconds=0
WAL_size_limit_MB=0
db_write_buffer_size=0
max_subcompactions=1
table_cache_numshardbits=4
max_open_files=-1
max_file_opening_threads=10
max_background_compactions=5
use_fsync=false
use_adaptive_mutex=false
max_total_wal_size=18446744073709551615
compaction_readahead_size=0
keep_log_file_num=10
skip_stats_update_on_db_open=false
max_manifest_file_size=18446744073709551615
db_log_dir=
writable_file_max_buffer_size=1048576
paranoid_checks=true
is_fd_close_on_exec=true
bytes_per_sync=1048576
enable_thread_tracking=true
recycle_log_file_num=0
create_missing_column_families=false
log_file_time_to_roll=0
max_background_flushes=1
create_if_missing=true
error_if_exists=false
delayed_write_rate=1048576
manifest_preallocation_size=4194304
allow_mmap_reads=false
allow_mmap_writes=false
use_direct_reads=false
use_direct_io_for_flush_and_compaction=false
stats_dump_period_sec=600
allow_fallocate=true
max_log_file_size=83886080
random_access_max_buffer_size=1048576
advise_random_on_open=true
dump_malloc_stats=true

[CFOptions "default"]
compaction_filter_factory=nullptr
table_factory=BlockBasedTable
prefix_extractor=nullptr
comparator=leveldb.BytewiseComparator
compression_per_level=
max_bytes_for_level_base=104857600
bloom_locality=0
target_file_size_base=10485760
memtable_huge_page_size=0
max_successive_merges=1000
max_sequential_skip_in_iterations=8
arena_block_size=52428800
target_file_size_multiplier=1
source_compaction_factor=1
min_write_buffer_number_to_merge=1
max_write_buffer_number=2
write_buffer_size=419430400
max_grandparent_overlap_factor=10
max_bytes_for_level_multiplier=10
memtable_factory=SkipListFactory
compression=kNoCompression
min_partial_merge_operands=2
level0_stop_writes_trigger=100
num_levels=1
level0_slowdown_writes_trigger=50
level0_file_num_compaction_trigger=10
expanded_compaction_factor=25
max_write_buffer_number_to_maintain=0
max_write_buffer_size_to_maintain=0
verify_checksums_in_compaction=true
merge_operator=nullptr
memtable_prefix_bloom_bits=0
memtable_whole_key_filtering=true
paranoid_file_checks=false
inplace_update_num_locks=10000
optimize_filters_for_hits=false
level_compaction_dynamic_level_bytes=false
inplace_update_support=false
compaction_style=kCompactionStyleUniversal
memtable_prefix_bloom_probes=6
filter_deletes=false
hard_pending_compaction_bytes_limit=0
disable_auto_compactions=false
compaction_measure_io_stats=false
enable_blob_files=true
min_blob_size=16
blob_file_size=10485760
blob_compression_type=kNoCompression
enable_blob_garbage_collection=true
blob_garbage_collection_age_cutoff=0.5
blob_garbage_collection_force_threshold=0.75
blob_compaction_readahead_size=262144
blob_file_starting_level=0
prepopulate_blob_cache=kDisable

[TableOptions/BlockBasedTable "default"]
format_version=0
skip_table_builder_flush=false
cache_index_and_filter_blocks=false
flush_block_policy_factory=FlushBlockBySizePolicyFactory
index_type=kBinarySearch
whole_key_filtering=true
checksum=kCRC32c
no_block_cache=false
block_size=32768
block_size_deviation=10
block_restart_interval=16
filter_policy=rocksdb.BuiltinBloomFilter
)OPTIONS_FILE";
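
// Write options_file_content to disk, load it back with
// LoadOptionsFromFile(), run db_bench against it, and verify that the
// options round-trip.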
TEST_F(DBBenchTest, OptionsFileFromFile) {
  const std::string kOptionsFileName = test_path_ + "/OPTIONS_flash";
  std::unique_ptr<WritableFile> writable;
  ASSERT_OK(Env::Default()->NewWritableFile(kOptionsFileName, &writable,
                                            EnvOptions()));
  ASSERT_OK(writable->Append(options_file_content));
  ASSERT_OK(writable->Close());

  DBOptions db_opt;
  ConfigOptions config_opt;
  config_opt.ignore_unknown_options = false;
  config_opt.input_strings_escaped = true;
  config_opt.env = Env::Default();
  std::vector<ColumnFamilyDescriptor> cf_descs;
  ASSERT_OK(
      LoadOptionsFromFile(config_opt, kOptionsFileName, &db_opt, &cf_descs));
  Options opt(db_opt, cf_descs[0].options);
  opt.create_if_missing = true;

  // Override the following options, as db_bench will not take them from the
  // options file.
  opt.wal_dir = wal_path_;

  RunDbBench(kOptionsFileName);

  VerifyOptions(SanitizeOptions(db_path_, opt));
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  GFLAGS_NAMESPACE::ParseCommandLineFlags(&argc, &argv, true);
  return RUN_ALL_TESTS();
}

#else

int main(int argc, char** argv) {
  printf(
      "Skip db_bench_tool_test as the required library GFLAGS is missing.\n");
  return 0;
}

#endif  // #ifdef GFLAGS