2016-02-09 23:12:00 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-15 23:03:42 +00:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2014-11-14 19:35:48 +00:00
|
|
|
|
2015-10-13 17:32:05 +00:00
|
|
|
|
2021-01-29 06:08:46 +00:00
|
|
|
#include "db/compaction/compaction_job.h"
|
|
|
|
|
2015-08-08 04:59:51 +00:00
|
|
|
#include <algorithm>
|
2019-05-04 00:26:20 +00:00
|
|
|
#include <array>
|
2019-09-20 19:00:55 +00:00
|
|
|
#include <cinttypes>
|
2014-11-14 19:35:48 +00:00
|
|
|
#include <map>
|
|
|
|
#include <string>
|
2015-08-08 04:59:51 +00:00
|
|
|
#include <tuple>
|
2014-11-14 19:35:48 +00:00
|
|
|
|
2020-03-12 17:58:27 +00:00
|
|
|
#include "db/blob/blob_index.h"
|
2014-11-14 19:35:48 +00:00
|
|
|
#include "db/column_family.h"
|
2019-09-03 15:50:47 +00:00
|
|
|
#include "db/db_impl/db_impl.h"
|
2018-06-28 19:23:57 +00:00
|
|
|
#include "db/error_handler.h"
|
2014-11-14 19:35:48 +00:00
|
|
|
#include "db/version_set.h"
|
2022-09-15 04:59:56 +00:00
|
|
|
#include "file/random_access_file_reader.h"
|
2019-09-16 17:31:27 +00:00
|
|
|
#include "file/writable_file_writer.h"
|
2022-09-15 04:59:56 +00:00
|
|
|
#include "options/options_helper.h"
|
2014-11-14 19:35:48 +00:00
|
|
|
#include "rocksdb/cache.h"
|
2021-11-08 19:04:01 +00:00
|
|
|
#include "rocksdb/convenience.h"
|
2014-11-14 19:35:48 +00:00
|
|
|
#include "rocksdb/db.h"
|
2021-01-29 06:08:46 +00:00
|
|
|
#include "rocksdb/file_system.h"
|
2015-08-08 04:59:51 +00:00
|
|
|
#include "rocksdb/options.h"
|
2016-06-21 01:01:03 +00:00
|
|
|
#include "rocksdb/write_buffer_manager.h"
|
2015-08-08 04:59:51 +00:00
|
|
|
#include "table/mock_table.h"
|
2022-05-19 18:04:21 +00:00
|
|
|
#include "table/unique_id_impl.h"
|
2019-05-30 18:21:38 +00:00
|
|
|
#include "test_util/testharness.h"
|
|
|
|
#include "test_util/testutil.h"
|
2019-05-31 00:39:43 +00:00
|
|
|
#include "util/string_util.h"
|
2015-08-08 04:59:51 +00:00
|
|
|
#include "utilities/merge_operators.h"
|
2014-11-14 19:35:48 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2014-11-14 19:35:48 +00:00
|
|
|
|
2015-07-16 16:18:35 +00:00
|
|
|
namespace {
|
2015-08-08 04:59:51 +00:00
|
|
|
|
2015-07-16 16:18:35 +00:00
|
|
|
// Verifies that a CompactionJobStats instance is in its freshly-initialized
// state: all counters and byte totals are zero, key-prefix buffers start with
// a NUL byte, and the manual/remote flags are set as the tests in this file
// expect (manual == true, remote == false).
// The whole body is compiled out for iOS cross-compilation builds.
void VerifyInitializationOfCompactionJobStats(
    const CompactionJobStats& compaction_job_stats) {
#if !defined(IOS_CROSS_COMPILE)
  // Timing.
  ASSERT_EQ(compaction_job_stats.elapsed_micros, 0U);

  // Input/output record and file counters.
  ASSERT_EQ(compaction_job_stats.num_input_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_input_files, 0U);
  ASSERT_EQ(compaction_job_stats.num_input_files_at_output_level, 0U);

  ASSERT_EQ(compaction_job_stats.num_output_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_output_files, 0U);

  // The compactions driven by this test are manual and run locally.
  ASSERT_TRUE(compaction_job_stats.is_manual_compaction);
  ASSERT_FALSE(compaction_job_stats.is_remote_compaction);

  // Byte totals.
  ASSERT_EQ(compaction_job_stats.total_input_bytes, 0U);
  ASSERT_EQ(compaction_job_stats.total_output_bytes, 0U);

  ASSERT_EQ(compaction_job_stats.total_input_raw_key_bytes, 0U);
  ASSERT_EQ(compaction_job_stats.total_input_raw_value_bytes, 0U);

  // Key-prefix buffers should be empty (leading NUL).
  ASSERT_EQ(compaction_job_stats.smallest_output_key_prefix[0], 0);
  ASSERT_EQ(compaction_job_stats.largest_output_key_prefix[0], 0);

  // Replacement / deletion bookkeeping.
  ASSERT_EQ(compaction_job_stats.num_records_replaced, 0U);

  ASSERT_EQ(compaction_job_stats.num_input_deletion_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_expired_deletion_records, 0U);

  ASSERT_EQ(compaction_job_stats.num_corrupt_keys, 0U);

#endif  // !defined(IOS_CROSS_COMPILE)
}
|
|
|
|
|
2022-06-07 18:57:12 +00:00
|
|
|
// Mock FSWritableFile for testing io priority.
|
|
|
|
// Only override the essential functions for testing compaction io priority.
|
|
|
|
class MockTestWritableFile : public FSWritableFileOwnerWrapper {
|
|
|
|
public:
|
|
|
|
MockTestWritableFile(std::unique_ptr<FSWritableFile>&& file,
|
|
|
|
Env::IOPriority io_priority)
|
|
|
|
: FSWritableFileOwnerWrapper(std::move(file)),
|
|
|
|
write_io_priority_(io_priority) {}
|
|
|
|
IOStatus Append(const Slice& data, const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
|
|
|
|
return target()->Append(data, options, dbg);
|
|
|
|
}
|
|
|
|
IOStatus Append(const Slice& data, const IOOptions& options,
|
|
|
|
const DataVerificationInfo& verification_info,
|
|
|
|
IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
|
|
|
|
return target()->Append(data, options, verification_info, dbg);
|
|
|
|
}
|
|
|
|
IOStatus Close(const IOOptions& options, IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
|
|
|
|
return target()->Close(options, dbg);
|
|
|
|
}
|
|
|
|
IOStatus Flush(const IOOptions& options, IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
|
|
|
|
return target()->Flush(options, dbg);
|
|
|
|
}
|
|
|
|
IOStatus Sync(const IOOptions& options, IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
|
|
|
|
return target()->Sync(options, dbg);
|
|
|
|
}
|
|
|
|
IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
|
|
|
|
return target()->Fsync(options, dbg);
|
|
|
|
}
|
|
|
|
uint64_t GetFileSize(const IOOptions& options, IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
|
|
|
|
return target()->GetFileSize(options, dbg);
|
|
|
|
}
|
|
|
|
IOStatus RangeSync(uint64_t offset, uint64_t nbytes, const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
|
|
|
|
return target()->RangeSync(offset, nbytes, options, dbg);
|
|
|
|
}
|
|
|
|
|
|
|
|
void PrepareWrite(size_t offset, size_t len, const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
|
|
|
|
target()->PrepareWrite(offset, len, options, dbg);
|
|
|
|
}
|
|
|
|
|
|
|
|
IOStatus Allocate(uint64_t offset, uint64_t len, const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
|
|
|
|
return target()->Allocate(offset, len, options, dbg);
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
Env::IOPriority write_io_priority_;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Mock FSRandomAccessFile for testing io priority.
|
|
|
|
// Only override the essential functions for testing compaction io priority.
|
|
|
|
class MockTestRandomAccessFile : public FSRandomAccessFileOwnerWrapper {
|
|
|
|
public:
|
|
|
|
MockTestRandomAccessFile(std::unique_ptr<FSRandomAccessFile>&& file,
|
|
|
|
Env::IOPriority io_priority)
|
|
|
|
: FSRandomAccessFileOwnerWrapper(std::move(file)),
|
|
|
|
read_io_priority_(io_priority) {}
|
|
|
|
|
|
|
|
IOStatus Read(uint64_t offset, size_t n, const IOOptions& options,
|
|
|
|
Slice* result, char* scratch,
|
|
|
|
IODebugContext* dbg) const override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, read_io_priority_);
|
|
|
|
return target()->Read(offset, n, options, result, scratch, dbg);
|
|
|
|
}
|
|
|
|
IOStatus Prefetch(uint64_t offset, size_t n, const IOOptions& options,
|
|
|
|
IODebugContext* dbg) override {
|
|
|
|
EXPECT_EQ(options.rate_limiter_priority, read_io_priority_);
|
|
|
|
return target()->Prefetch(offset, n, options, dbg);
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
Env::IOPriority read_io_priority_;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Mock FileSystem for testing io priority.
|
|
|
|
class MockTestFileSystem : public FileSystemWrapper {
|
|
|
|
public:
|
|
|
|
explicit MockTestFileSystem(const std::shared_ptr<FileSystem>& base,
|
|
|
|
Env::IOPriority read_io_priority,
|
|
|
|
Env::IOPriority write_io_priority)
|
|
|
|
: FileSystemWrapper(base),
|
|
|
|
read_io_priority_(read_io_priority),
|
|
|
|
write_io_priority_(write_io_priority) {}
|
|
|
|
|
|
|
|
static const char* kClassName() { return "MockTestFileSystem"; }
|
|
|
|
const char* Name() const override { return kClassName(); }
|
|
|
|
|
|
|
|
IOStatus NewRandomAccessFile(const std::string& fname,
|
|
|
|
const FileOptions& file_opts,
|
|
|
|
std::unique_ptr<FSRandomAccessFile>* result,
|
|
|
|
IODebugContext* dbg) override {
|
|
|
|
IOStatus s = target()->NewRandomAccessFile(fname, file_opts, result, dbg);
|
|
|
|
EXPECT_OK(s);
|
|
|
|
result->reset(
|
|
|
|
new MockTestRandomAccessFile(std::move(*result), read_io_priority_));
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
IOStatus NewWritableFile(const std::string& fname,
|
|
|
|
const FileOptions& file_opts,
|
|
|
|
std::unique_ptr<FSWritableFile>* result,
|
|
|
|
IODebugContext* dbg) override {
|
|
|
|
IOStatus s = target()->NewWritableFile(fname, file_opts, result, dbg);
|
|
|
|
EXPECT_OK(s);
|
|
|
|
result->reset(
|
|
|
|
new MockTestWritableFile(std::move(*result), write_io_priority_));
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
Env::IOPriority read_io_priority_;
|
|
|
|
Env::IOPriority write_io_priority_;
|
|
|
|
};
|
|
|
|
|
2022-09-15 04:59:56 +00:00
|
|
|
// Selects which table format the test fixture writes SST files with:
// the in-memory mock table factory or the real block-based table factory
// (see CompactionJobTestBase::SetUp).
enum TableTypeForTest : uint8_t { kMockTable = 0, kBlockBasedTable = 1 };
|
|
|
|
|
2015-07-16 16:18:35 +00:00
|
|
|
} // namespace
|
|
|
|
|
2020-11-12 19:40:52 +00:00
|
|
|
class CompactionJobTestBase : public testing::Test {
|
|
|
|
protected:
|
|
|
|
  // Builds the shared fixture state for compaction-job tests: DB/CF options,
  // a table cache, a write buffer manager, a fresh VersionSet, and the Env /
  // FileSystem used by all file operations.
  //
  // Parameters:
  //   dbname           - directory all generated files live under.
  //   ucmp             - user comparator; installed into cf_options_ by
  //                      SetUp().
  //   encode_u64_ts    - encodes a uint64_t timestamp into the key suffix
  //                      appended by KeyStr().
  //   test_io_priority - whether this instantiation exercises io-priority
  //                      checking (stored for the tests to consult).
  //   table_type       - which table factory SetUp() installs
  //                      (mock or block-based).
  CompactionJobTestBase(std::string dbname, const Comparator* ucmp,
                        std::function<std::string(uint64_t)> encode_u64_ts,
                        bool test_io_priority, TableTypeForTest table_type)
      : dbname_(std::move(dbname)),
        ucmp_(ucmp),
        db_options_(),
        mutable_cf_options_(cf_options_),
        mutable_db_options_(),
        // Small table cache is plenty for unit tests.
        table_cache_(NewLRUCache(50000, 16)),
        write_buffer_manager_(db_options_.db_write_buffer_size),
        versions_(new VersionSet(
            dbname_, &db_options_, env_options_, table_cache_.get(),
            &write_buffer_manager_, &write_controller_,
            /*block_cache_tracer=*/nullptr,
            /*io_tracer=*/nullptr, /*db_id=*/"", /*db_session_id=*/"",
            /*daily_offpeak_time_utc=*/"",
            /*error_handler=*/nullptr, /*read_only=*/false)),
        shutting_down_(false),
        mock_table_factory_(new mock::MockTableFactory()),
        error_handler_(nullptr, db_options_, &mutex_),
        encode_u64_ts_(std::move(encode_u64_ts)),
        test_io_priority_(test_io_priority),
        table_type_(table_type) {
    // Resolve the Env for this run; CreateEnvFromSystem starts from
    // Env::Default() and presumably may substitute a configured test Env --
    // see test::CreateEnvFromSystem for the exact selection rules.
    Env* base_env = Env::Default();
    EXPECT_OK(
        test::CreateEnvFromSystem(ConfigOptions(), &base_env, &env_guard_));
    env_ = base_env;
    fs_ = env_->GetFileSystem();
    // set default for the tests
    mutable_cf_options_.target_file_size_base = 1024 * 1024;
    mutable_cf_options_.max_compaction_bytes = 10 * 1024 * 1024;
  }
|
2020-11-12 19:40:52 +00:00
|
|
|
|
|
|
|
void SetUp() override {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 03:52:32 +00:00
|
|
|
EXPECT_OK(env_->CreateDirIfMissing(dbname_));
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
2019-12-13 22:47:08 +00:00
|
|
|
db_options_.env = env_;
|
|
|
|
db_options_.fs = fs_;
|
2014-11-14 19:35:48 +00:00
|
|
|
db_options_.db_paths.emplace_back(dbname_,
|
|
|
|
std::numeric_limits<uint64_t>::max());
|
2020-11-12 19:40:52 +00:00
|
|
|
cf_options_.comparator = ucmp_;
|
2022-09-15 04:59:56 +00:00
|
|
|
if (table_type_ == TableTypeForTest::kBlockBasedTable) {
|
2022-06-07 18:57:12 +00:00
|
|
|
BlockBasedTableOptions table_options;
|
|
|
|
cf_options_.table_factory.reset(NewBlockBasedTableFactory(table_options));
|
2022-09-15 04:59:56 +00:00
|
|
|
} else if (table_type_ == TableTypeForTest::kMockTable) {
|
2022-06-07 18:57:12 +00:00
|
|
|
cf_options_.table_factory = mock_table_factory_;
|
2022-09-15 04:59:56 +00:00
|
|
|
} else {
|
|
|
|
assert(false);
|
2022-06-07 18:57:12 +00:00
|
|
|
}
|
2024-10-17 21:13:20 +00:00
|
|
|
mutable_cf_options_.table_factory = cf_options_.table_factory;
|
2014-11-14 19:35:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
std::string GenerateFileName(uint64_t file_number) {
|
|
|
|
FileMetaData meta;
|
|
|
|
std::vector<DbPath> db_paths;
|
|
|
|
db_paths.emplace_back(dbname_, std::numeric_limits<uint64_t>::max());
|
|
|
|
meta.fd = FileDescriptor(file_number, 0, 0);
|
|
|
|
return TableFileName(db_paths, meta.fd.GetNumber(), meta.fd.GetPathId());
|
|
|
|
}
|
|
|
|
|
2020-11-12 19:40:52 +00:00
|
|
|
std::string KeyStr(const std::string& user_key, const SequenceNumber seq_num,
|
|
|
|
const ValueType t, uint64_t ts = 0) {
|
|
|
|
std::string user_key_with_ts = user_key + encode_u64_ts_(ts);
|
|
|
|
return InternalKey(user_key_with_ts, seq_num, t).Encode().ToString();
|
2015-08-08 04:59:51 +00:00
|
|
|
}
|
|
|
|
|
2019-10-14 22:19:31 +00:00
|
|
|
static std::string BlobStr(uint64_t blob_file_number, uint64_t offset,
|
|
|
|
uint64_t size) {
|
|
|
|
std::string blob_index;
|
|
|
|
BlobIndex::EncodeBlob(&blob_index, blob_file_number, offset, size,
|
|
|
|
kNoCompression);
|
|
|
|
return blob_index;
|
|
|
|
}
|
|
|
|
|
|
|
|
static std::string BlobStrTTL(uint64_t blob_file_number, uint64_t offset,
|
|
|
|
uint64_t size, uint64_t expiration) {
|
|
|
|
std::string blob_index;
|
|
|
|
BlobIndex::EncodeBlobTTL(&blob_index, expiration, blob_file_number, offset,
|
|
|
|
size, kNoCompression);
|
|
|
|
return blob_index;
|
|
|
|
}
|
|
|
|
|
|
|
|
static std::string BlobStrInlinedTTL(const Slice& value,
|
|
|
|
uint64_t expiration) {
|
|
|
|
std::string blob_index;
|
|
|
|
BlobIndex::EncodeInlinedTTL(&blob_index, expiration, value);
|
|
|
|
return blob_index;
|
|
|
|
}
|
|
|
|
|
2022-06-07 18:57:12 +00:00
|
|
|
  // Creates a table with the specified key value pairs.
  //
  // Writes `contents` (serialized internal key -> value pairs) to a new
  // table file `table_name` using the configured table factory, and returns
  // the resulting file size through `file_size`. Entries are added in the
  // order given; presumably `contents` is already sorted as TableBuilder
  // requires -- confirm against callers.
  void CreateTable(const std::string& table_name,
                   const mock::KVVector& contents, uint64_t& file_size) {
    std::unique_ptr<WritableFileWriter> file_writer;
    Status s = WritableFileWriter::Create(fs_, table_name, FileOptions(),
                                          &file_writer, nullptr);
    ASSERT_OK(s);
    const ReadOptions read_options;
    const WriteOptions write_options;
    // Build with no compression and the default column family's metadata.
    std::unique_ptr<TableBuilder> table_builder(
        cf_options_.table_factory->NewTableBuilder(
            TableBuilderOptions(
                *cfd_->ioptions(), mutable_cf_options_, read_options,
                write_options, cfd_->internal_comparator(),
                cfd_->internal_tbl_prop_coll_factories(),
                CompressionType::kNoCompression, CompressionOptions(),
                0 /* column_family_id */, kDefaultColumnFamilyName,
                -1 /* level */, kUnknownNewestKeyTime),
            file_writer.get()));
    // Build table.
    for (const auto& kv : contents) {
      std::string key;
      std::string value;
      std::tie(key, value) = kv;
      table_builder->Add(key, value);
    }
    ASSERT_OK(table_builder->Finish());
    file_size = table_builder->FileSize();
  }
|
|
|
|
|
2020-10-01 17:08:52 +00:00
|
|
|
// Registers `contents` as an SST file at `level` of the default column
// family, creating either a real block-based table on disk or an in-memory
// mock table depending on `table_type_`, and applies the corresponding
// VersionEdit so the file becomes part of the current LSM tree.
//
// `contents` must be non-empty and hold encoded internal keys paired with
// values. The file's metadata (smallest/largest key, seqno range, oldest
// referenced blob file) is derived from the contents in a single pass.
void AddMockFile(const mock::KVVector& contents, int level = 0) {
  assert(contents.size() > 0);

  bool first_key = true;
  std::string smallest, largest;
  InternalKey smallest_key, largest_key;
  SequenceNumber smallest_seqno = kMaxSequenceNumber;
  SequenceNumber largest_seqno = 0;
  uint64_t oldest_blob_file_number = kInvalidBlobFileNumber;
  for (const auto& kv : contents) {
    ParsedInternalKey key;
    std::string skey;
    std::string value;
    std::tie(skey, value) = kv;
    const Status pik_status =
        ParseInternalKey(skey, &key, true /* log_err_key */);

    // NOTE: pik_status is intentionally NOT asserted here. Some callers
    // (e.g. CreateTwoFiles with gen_corrupted_keys) feed keys whose type
    // byte has been corrupted on purpose; a failed parse leaves `key` with
    // whatever ParseInternalKey extracted, which is still what the metadata
    // bookkeeping below should see. pik_status is only consulted before the
    // blob-index decode, where a valid type byte actually matters.
    smallest_seqno = std::min(smallest_seqno, key.sequence);
    largest_seqno = std::max(largest_seqno, key.sequence);

    // Track the smallest/largest user key (and the corresponding full
    // internal keys) for the file's FileMetaData boundaries.
    if (first_key ||
        cfd_->user_comparator()->Compare(key.user_key, smallest) < 0) {
      smallest.assign(key.user_key.data(), key.user_key.size());
      smallest_key.DecodeFrom(skey);
    }
    if (first_key ||
        cfd_->user_comparator()->Compare(key.user_key, largest) > 0) {
      largest.assign(key.user_key.data(), key.user_key.size());
      largest_key.DecodeFrom(skey);
    }

    first_key = false;

    // For blob references, remember the oldest blob file this table points
    // at; inlined/TTL/invalid references do not pin any blob file.
    if (pik_status.ok() && key.type == kTypeBlobIndex) {
      BlobIndex blob_index;
      const Status s = blob_index.DecodeFrom(value);
      if (!s.ok()) {
        continue;
      }

      if (blob_index.IsInlined() || blob_index.HasTTL() ||
          blob_index.file_number() == kInvalidBlobFileNumber) {
        continue;
      }

      if (oldest_blob_file_number == kInvalidBlobFileNumber ||
          oldest_blob_file_number > blob_index.file_number()) {
        oldest_blob_file_number = blob_index.file_number();
      }
    }
  }

  uint64_t file_number = versions_->NewFileNumber();

  // Materialize the table. Block-based tables are written for real (and
  // report their true size); mock tables live in mock_table_factory_ and
  // get a nominal size of 10 bytes.
  uint64_t file_size = 0;
  if (table_type_ == TableTypeForTest::kBlockBasedTable) {
    CreateTable(GenerateFileName(file_number), contents, file_size);
  } else if (table_type_ == TableTypeForTest::kMockTable) {
    file_size = 10;
    EXPECT_OK(mock_table_factory_->CreateMockTable(
        env_, GenerateFileName(file_number), contents));
  } else {
    assert(false);
  }

  // Install the file into the LSM tree via a VersionEdit. A fresh epoch
  // number is drawn so L0 ordering by epoch_number stays consistent.
  VersionEdit edit;
  edit.AddFile(
      level, file_number, 0, file_size, smallest_key, largest_key,
      smallest_seqno, largest_seqno, false, Temperature::kUnknown,
      oldest_blob_file_number, kUnknownOldestAncesterTime,
      kUnknownFileCreationTime,
      versions_->GetColumnFamilySet()->GetDefault()->NewEpochNumber(),
      kUnknownFileChecksum, kUnknownFileChecksumFuncName, kNullUniqueId64x2,
      /*compensated_range_deletion_size=*/0, /*tail_size=*/0,
      /*user_defined_timestamps_persisted=*/true);

  // LogAndApply requires the DB mutex to be held by the caller.
  mutex_.Lock();
  EXPECT_OK(versions_->LogAndApply(
      versions_->GetColumnFamilySet()->GetDefault(), mutable_cf_options_,
      read_options_, write_options_, &edit, &mutex_, nullptr));
  mutex_.Unlock();
}
|
|
|
|
|
2022-09-15 04:59:56 +00:00
|
|
|
void VerifyTables(int output_level,
|
|
|
|
const std::vector<mock::KVVector>& expected_results,
|
|
|
|
std::vector<uint64_t> expected_oldest_blob_file_numbers) {
|
|
|
|
if (expected_results.empty()) {
|
|
|
|
ASSERT_EQ(compaction_job_stats_.num_output_files, 0U);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
int expected_output_file_num = 0;
|
|
|
|
for (const auto& e : expected_results) {
|
|
|
|
if (!e.empty()) {
|
|
|
|
++expected_output_file_num;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ASSERT_EQ(expected_output_file_num, compaction_job_stats_.num_output_files);
|
|
|
|
if (expected_output_file_num == 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (expected_oldest_blob_file_numbers.empty()) {
|
|
|
|
expected_oldest_blob_file_numbers.resize(expected_output_file_num,
|
|
|
|
kInvalidBlobFileNumber);
|
|
|
|
}
|
|
|
|
|
|
|
|
auto cfd = versions_->GetColumnFamilySet()->GetDefault();
|
|
|
|
if (table_type_ == TableTypeForTest::kMockTable) {
|
Align compaction output file boundaries to the next level ones (#10655)
Summary:
Try to align the compaction output file boundaries to the next level ones
(grandparent level), to reduce the level compaction write-amplification.
In level compaction, there are "wasted" data at the beginning and end of the
output level files. Align the file boundary can avoid such "wasted" compaction.
With this PR, it tries to align the non-bottommost level file boundaries to its
next level ones. It may cut file when the file size is large enough (at least
50% of target_file_size) and not too large (2x target_file_size).
db_bench shows about 12.56% compaction reduction:
```
TEST_TMPDIR=/data/dbbench2 ./db_bench --benchmarks=fillrandom,readrandom -max_background_jobs=12 -num=400000000 -target_file_size_base=33554432
# baseline:
Flush(GB): cumulative 25.882, interval 7.216
Cumulative compaction: 285.90 GB write, 162.36 MB/s write, 269.68 GB read, 153.15 MB/s read, 2926.7 seconds
# with this change:
Flush(GB): cumulative 25.882, interval 7.753
Cumulative compaction: 249.97 GB write, 141.96 MB/s write, 233.74 GB read, 132.74 MB/s read, 2534.9 seconds
```
The compaction simulator shows a similar result (14% with 100G random data).
As a side effect, with this PR, the SST file size can exceed the
target_file_size, but is capped at 2x target_file_size. And there will be
smaller files. Here are file size statistics when loading 100GB with the target
file size 32MB:
```
baseline this_PR
count 1.656000e+03 1.705000e+03
mean 3.116062e+07 3.028076e+07
std 7.145242e+06 8.046139e+06
```
The feature is enabled by default, to revert to the old behavior disable it
with `AdvancedColumnFamilyOptions.level_compaction_dynamic_file_size = false`
Also includes https://github.com/facebook/rocksdb/issues/1963 to cut file before skippable grandparent file. Which is for
use case like user adding 2 or more non-overlapping data range at the same
time, it can reduce the overlapping of 2 datasets in the lower levels.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10655
Reviewed By: cbi42
Differential Revision: D39552321
Pulled By: jay-zhuang
fbshipit-source-id: 640d15f159ab0cd973f2426cfc3af266fc8bdde2
2022-09-30 02:43:55 +00:00
|
|
|
ASSERT_EQ(compaction_job_stats_.num_output_files,
|
|
|
|
expected_results.size());
|
|
|
|
mock_table_factory_->AssertLatestFiles(expected_results);
|
2022-09-15 04:59:56 +00:00
|
|
|
} else {
|
|
|
|
assert(table_type_ == TableTypeForTest::kBlockBasedTable);
|
|
|
|
}
|
|
|
|
|
|
|
|
auto output_files =
|
|
|
|
cfd->current()->storage_info()->LevelFiles(output_level);
|
|
|
|
ASSERT_EQ(expected_output_file_num, output_files.size());
|
|
|
|
|
|
|
|
if (table_type_ == TableTypeForTest::kMockTable) {
|
Align compaction output file boundaries to the next level ones (#10655)
Summary:
Try to align the compaction output file boundaries to the next level ones
(grandparent level), to reduce the level compaction write-amplification.
In level compaction, there are "wasted" data at the beginning and end of the
output level files. Align the file boundary can avoid such "wasted" compaction.
With this PR, it tries to align the non-bottommost level file boundaries to its
next level ones. It may cut file when the file size is large enough (at least
50% of target_file_size) and not too large (2x target_file_size).
db_bench shows about 12.56% compaction reduction:
```
TEST_TMPDIR=/data/dbbench2 ./db_bench --benchmarks=fillrandom,readrandom -max_background_jobs=12 -num=400000000 -target_file_size_base=33554432
# baseline:
Flush(GB): cumulative 25.882, interval 7.216
Cumulative compaction: 285.90 GB write, 162.36 MB/s write, 269.68 GB read, 153.15 MB/s read, 2926.7 seconds
# with this change:
Flush(GB): cumulative 25.882, interval 7.753
Cumulative compaction: 249.97 GB write, 141.96 MB/s write, 233.74 GB read, 132.74 MB/s read, 2534.9 seconds
```
The compaction simulator shows a similar result (14% with 100G random data).
As a side effect, with this PR, the SST file size can exceed the
target_file_size, but is capped at 2x target_file_size. And there will be
smaller files. Here are file size statistics when loading 100GB with the target
file size 32MB:
```
baseline this_PR
count 1.656000e+03 1.705000e+03
mean 3.116062e+07 3.028076e+07
std 7.145242e+06 8.046139e+06
```
The feature is enabled by default, to revert to the old behavior disable it
with `AdvancedColumnFamilyOptions.level_compaction_dynamic_file_size = false`
Also includes https://github.com/facebook/rocksdb/issues/1963 to cut file before skippable grandparent file. Which is for
use case like user adding 2 or more non-overlapping data range at the same
time, it can reduce the overlapping of 2 datasets in the lower levels.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10655
Reviewed By: cbi42
Differential Revision: D39552321
Pulled By: jay-zhuang
fbshipit-source-id: 640d15f159ab0cd973f2426cfc3af266fc8bdde2
2022-09-30 02:43:55 +00:00
|
|
|
assert(output_files.size() ==
|
|
|
|
static_cast<size_t>(expected_output_file_num));
|
2022-09-15 04:59:56 +00:00
|
|
|
const FileMetaData* const output_file = output_files[0];
|
|
|
|
ASSERT_EQ(output_file->oldest_blob_file_number,
|
|
|
|
expected_oldest_blob_file_numbers[0]);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (size_t i = 0; i < expected_results.size(); ++i) {
|
|
|
|
const FileMetaData* const output_file = output_files[i];
|
|
|
|
std::string file_name = GenerateFileName(output_file->fd.GetNumber());
|
|
|
|
const auto& fs = env_->GetFileSystem();
|
|
|
|
std::unique_ptr<RandomAccessFileReader> freader;
|
|
|
|
IOStatus ios = RandomAccessFileReader::Create(
|
|
|
|
fs, file_name, FileOptions(), &freader, nullptr);
|
|
|
|
ASSERT_OK(ios);
|
|
|
|
std::unique_ptr<TableReader> table_reader;
|
|
|
|
uint64_t file_size = output_file->fd.GetFileSize();
|
|
|
|
ReadOptions read_opts;
|
|
|
|
Status s = cf_options_.table_factory->NewTableReader(
|
|
|
|
read_opts,
|
|
|
|
TableReaderOptions(*cfd->ioptions(), nullptr, FileOptions(),
|
2023-04-25 19:08:23 +00:00
|
|
|
cfd_->internal_comparator(),
|
|
|
|
0 /* block_protection_bytes_per_key */),
|
2022-09-15 04:59:56 +00:00
|
|
|
std::move(freader), file_size, &table_reader, false);
|
|
|
|
ASSERT_OK(s);
|
|
|
|
assert(table_reader);
|
|
|
|
std::unique_ptr<InternalIterator> iiter(
|
|
|
|
table_reader->NewIterator(read_opts, nullptr, nullptr, true,
|
|
|
|
TableReaderCaller::kUncategorized));
|
|
|
|
assert(iiter);
|
|
|
|
|
|
|
|
mock::KVVector from_db;
|
|
|
|
for (iiter->SeekToFirst(); iiter->Valid(); iiter->Next()) {
|
|
|
|
const Slice key = iiter->key();
|
|
|
|
const Slice value = iiter->value();
|
|
|
|
from_db.emplace_back(
|
|
|
|
make_pair(key.ToString(false), value.ToString(false)));
|
|
|
|
}
|
|
|
|
ASSERT_EQ(expected_results[i], from_db);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-08-08 04:59:51 +00:00
|
|
|
void SetLastSequence(const SequenceNumber sequence_number) {
|
2017-11-11 01:18:01 +00:00
|
|
|
versions_->SetLastAllocatedSequence(sequence_number + 1);
|
2017-12-01 07:39:56 +00:00
|
|
|
versions_->SetLastPublishedSequence(sequence_number + 1);
|
2015-08-08 04:59:51 +00:00
|
|
|
versions_->SetLastSequence(sequence_number + 1);
|
|
|
|
}
|
|
|
|
|
2014-11-14 19:35:48 +00:00
|
|
|
// returns expected result after compaction
|
2020-10-01 17:08:52 +00:00
|
|
|
mock::KVVector CreateTwoFiles(bool gen_corrupted_keys) {
|
|
|
|
stl_wrappers::KVMap expected_results;
|
2020-11-12 19:40:52 +00:00
|
|
|
constexpr int kKeysPerFile = 10000;
|
|
|
|
constexpr int kCorruptKeysPerFile = 200;
|
|
|
|
constexpr int kMatchingKeys = kKeysPerFile / 2;
|
2014-11-14 19:35:48 +00:00
|
|
|
SequenceNumber sequence_number = 0;
|
2015-07-16 16:18:35 +00:00
|
|
|
|
|
|
|
auto corrupt_id = [&](int id) {
|
|
|
|
return gen_corrupted_keys && id > 0 && id <= kCorruptKeysPerFile;
|
|
|
|
};
|
|
|
|
|
2014-11-14 19:35:48 +00:00
|
|
|
for (int i = 0; i < 2; ++i) {
|
2015-09-02 20:58:22 +00:00
|
|
|
auto contents = mock::MakeMockFile();
|
2014-11-14 19:35:48 +00:00
|
|
|
for (int k = 0; k < kKeysPerFile; ++k) {
|
2022-05-06 20:03:58 +00:00
|
|
|
auto key = std::to_string(i * kMatchingKeys + k);
|
|
|
|
auto value = std::to_string(i * kKeysPerFile + k);
|
2014-11-14 19:35:48 +00:00
|
|
|
InternalKey internal_key(key, ++sequence_number, kTypeValue);
|
2015-12-10 01:28:46 +00:00
|
|
|
|
Make Compaction class easier to use
Summary:
The goal of this diff is to make Compaction class easier to use. This should also make new compaction algorithms easier to write (like CompactFiles from @yhchiang and dynamic leveled and multi-leveled universal from @sdong).
Here are couple of things demonstrating that Compaction class is hard to use:
1. we have two constructors of Compaction class
2. there's this thing called grandparents_, but it appears to only be setup for leveled compaction and not compactfiles
3. it's easy to introduce a subtle and dangerous bug like this: D36225
4. SetupBottomMostLevel() is hard to understand and it shouldn't be. See this comment: https://github.com/facebook/rocksdb/blob/afbafeaeaebfd27a0f3e992fee8e0c57d07658fa/db/compaction.cc#L236-L241. It also made it harder for @yhchiang to write CompactFiles, as evidenced by this: https://github.com/facebook/rocksdb/blob/afbafeaeaebfd27a0f3e992fee8e0c57d07658fa/db/compaction_picker.cc#L204-L210
The problem is that we create Compaction object, which holds a lot of state, and then pass it around to some functions. After those functions are done mutating, then we call couple of functions on Compaction object, like SetupBottommostLevel() and MarkFilesBeingCompacted(). It is very hard to see what's happening with all that Compaction's state while it's travelling across different functions. If you're writing a new PickCompaction() function you need to try really hard to understand what are all the functions you need to run on Compaction object and what state you need to setup.
My proposed solution is to make important parts of Compaction immutable after construction. PickCompaction() should calculate compaction inputs and then pass them onto Compaction object once they are finalized. That makes it easy to create a new compaction -- just provide all the parameters to the constructor and you're done. No need to call confusing functions after you created your object.
This diff doesn't fully achieve that goal, but it comes pretty close. Here are some of the changes:
* have one Compaction constructor instead of two.
* inputs_ is constant after construction
* MarkFilesBeingCompacted() is now private to Compaction class and automatically called on construction/destruction.
* SetupBottommostLevel() is gone. Compaction figures it out on its own based on the input.
* CompactionPicker's functions are not passing around Compaction object anymore. They are only passing around the state that they need.
Test Plan:
make check
make asan_check
make valgrind_check
Reviewers: rven, anthony, sdong, yhchiang
Reviewed By: yhchiang
Subscribers: sdong, yhchiang, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D36687
2015-04-10 22:01:54 +00:00
|
|
|
// This is how the key will look like once it's written in bottommost
|
|
|
|
// file
|
2022-10-26 19:35:12 +00:00
|
|
|
InternalKey bottommost_internal_key(key, 0, kTypeValue);
|
2015-12-10 01:28:46 +00:00
|
|
|
|
2015-07-16 16:18:35 +00:00
|
|
|
if (corrupt_id(k)) {
|
Simplify querying of merge results
Summary:
While working on supporting mixing merge operators with
single deletes ( https://reviews.facebook.net/D43179 ),
I realized that returning and dealing with merge results
can be made simpler. Submitting this as a separate diff
because it is not directly related to single deletes.
Before, callers of merge helper had to retrieve the merge
result in one of two ways depending on whether the merge
was successful or not (success = result of merge was single
kTypeValue). For successful merges, the caller could query
the resulting key/value pair and for unsuccessful merges,
the result could be retrieved in the form of two deques of
keys and values. However, with single deletes, a successful merge
does not return a single key/value pair (if merge
operands are merged with a single delete, we have to generate
a value and keep the original single delete around to make
sure that we are not accidentially producing a key overwrite).
In addition, the two existing call sites of the merge
helper were taking the same actions independently from whether
the merge was successful or not, so this patch simplifies that.
Test Plan: make clean all check
Reviewers: rven, sdong, yhchiang, anthony, igor
Reviewed By: igor
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43353
2015-08-18 00:34:38 +00:00
|
|
|
test::CorruptKeyType(&internal_key);
|
|
|
|
test::CorruptKeyType(&bottommost_internal_key);
|
2015-07-16 16:18:35 +00:00
|
|
|
}
|
2020-10-01 17:08:52 +00:00
|
|
|
contents.push_back({internal_key.Encode().ToString(), value});
|
2015-07-16 16:18:35 +00:00
|
|
|
if (i == 1 || k < kMatchingKeys || corrupt_id(k - kMatchingKeys)) {
|
2015-07-06 18:14:08 +00:00
|
|
|
expected_results.insert(
|
2020-10-01 17:08:52 +00:00
|
|
|
{bottommost_internal_key.Encode().ToString(), value});
|
2014-11-14 19:35:48 +00:00
|
|
|
}
|
|
|
|
}
|
2020-11-12 19:40:52 +00:00
|
|
|
mock::SortKVVector(&contents, ucmp_);
|
2014-11-14 19:35:48 +00:00
|
|
|
|
2015-08-08 04:59:51 +00:00
|
|
|
AddMockFile(contents);
|
|
|
|
}
|
2014-11-14 19:35:48 +00:00
|
|
|
|
2015-08-08 04:59:51 +00:00
|
|
|
SetLastSequence(sequence_number);
|
2014-11-14 19:35:48 +00:00
|
|
|
|
2020-10-01 17:08:52 +00:00
|
|
|
mock::KVVector expected_results_kvvector;
|
|
|
|
for (auto& kv : expected_results) {
|
|
|
|
expected_results_kvvector.push_back({kv.first, kv.second});
|
|
|
|
}
|
|
|
|
|
|
|
|
return expected_results_kvvector;
|
2014-11-14 19:35:48 +00:00
|
|
|
}
|
|
|
|
|
Compaction filter on merge operands
Summary:
Since Andres' internship is over, I took over https://reviews.facebook.net/D42555 and rebased and simplified it a bit.
The behavior in this diff is a bit simpler than in D42555:
* only merge operators are passed through FilterMergeValue(). If fitler function returns true, the merge operator is ignored
* compaction filter is *not* called on: 1) results of merge operations and 2) base values that are getting merged with merge operands (the second case was also true in previous diff)
Do we also need a compaction filter to get called on merge results?
Test Plan: make && make check
Reviewers: lovro, tnovak, rven, yhchiang, sdong
Reviewed By: sdong
Subscribers: noetzli, kolmike, leveldb, dhruba, sdong
Differential Revision: https://reviews.facebook.net/D47847
2015-10-07 16:30:03 +00:00
|
|
|
// Creates a fresh database under dbname_ for a test run: destroys any
// previous on-disk state, writes a new (empty) MANIFEST + CURRENT file
// pair by hand, recovers a VersionSet from it, and caches the default
// column family in cfd_.
void NewDB() {
  // Start from a clean slate on disk.
  EXPECT_OK(DestroyDB(dbname_, Options()));
  EXPECT_OK(env_->CreateDirIfMissing(dbname_));

  // Build a real info log so the components constructed below can log.
  std::shared_ptr<Logger> info_log;
  DBOptions db_opts = BuildDBOptions(db_options_, mutable_db_options_);
  Status s = CreateLoggerFromOptions(dbname_, db_opts, &info_log);
  ASSERT_OK(s);
  db_options_.info_log = info_log;

  versions_.reset(
      new VersionSet(dbname_, &db_options_, env_options_, table_cache_.get(),
                     &write_buffer_manager_, &write_controller_,
                     /*block_cache_tracer=*/nullptr, /*io_tracer=*/nullptr,
                     test::kUnitTestDbId, /*db_session_id=*/"",
                     /*daily_offpeak_time_utc=*/"",
                     /*error_handler=*/nullptr, /*read_only=*/false));
  compaction_job_stats_.Reset();

  // Describe an empty DB: log number 0, next file number 2 (file number 1
  // is taken by the manifest written below), last sequence 0.
  VersionEdit new_db;
  new_db.SetLogNumber(0);
  new_db.SetNextFile(2);
  new_db.SetLastSequence(0);

  // Write the VersionEdit as the first record of MANIFEST-000001.
  const std::string manifest = DescriptorFileName(dbname_, 1);
  std::unique_ptr<WritableFileWriter> file_writer;
  const auto& fs = env_->GetFileSystem();
  s = WritableFileWriter::Create(fs, manifest,
                                 fs->OptimizeForManifestWrite(env_options_),
                                 &file_writer, nullptr);

  ASSERT_OK(s);
  {
    // Scoped so the log::Writer (which takes ownership of the file
    // writer) is destroyed before the manifest is made live via CURRENT.
    log::Writer log(std::move(file_writer), 0, false);
    std::string record;
    new_db.EncodeTo(&record);
    s = log.AddRecord(WriteOptions(), record);
  }
  ASSERT_OK(s);
  // Make "CURRENT" file that points to the new manifest file.
  s = SetCurrentFile(WriteOptions(), fs_.get(), dbname_, 1,
                     Temperature::kUnknown, nullptr);

  ASSERT_OK(s);

  // Install the test's merge operator and compaction filter before
  // recovery so the recovered default column family picks them up.
  cf_options_.merge_operator = merge_op_;
  cf_options_.compaction_filter = compaction_filter_.get();
  std::vector<ColumnFamilyDescriptor> column_families;
  column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);

  ASSERT_OK(versions_->Recover(column_families, false));
  cfd_ = versions_->GetColumnFamilySet()->GetDefault();
}
|
|
|
|
|
2022-09-15 04:59:56 +00:00
|
|
|
// input_files[i] on input_levels[i]
|
2022-07-14 03:54:49 +00:00
|
|
|
void RunLastLevelCompaction(
|
|
|
|
const std::vector<std::vector<FileMetaData*>>& input_files,
|
2022-09-15 04:59:56 +00:00
|
|
|
const std::vector<int> input_levels,
|
2022-07-14 03:54:49 +00:00
|
|
|
std::function<void(Compaction& comp)>&& verify_func,
|
|
|
|
const std::vector<SequenceNumber>& snapshots = {}) {
|
|
|
|
const int kLastLevel = cf_options_.num_levels - 1;
|
|
|
|
verify_per_key_placement_ = std::move(verify_func);
|
|
|
|
mock::KVVector empty_map;
|
2022-09-15 04:59:56 +00:00
|
|
|
RunCompaction(input_files, input_levels, {empty_map}, snapshots,
|
|
|
|
kMaxSequenceNumber, kLastLevel, false);
|
2022-07-14 03:54:49 +00:00
|
|
|
}
|
|
|
|
|
2022-09-15 04:59:56 +00:00
|
|
|
// input_files[i] on input_levels[i]
// Builds a Compaction over `input_files` (each vector placed on the
// corresponding entry of `input_levels`), runs it synchronously through a
// CompactionJob into `output_level`, installs the result into the
// VersionSet, and (when `verify` is true) checks the produced tables
// against `expected_results` / `expected_oldest_blob_file_numbers`.
// Optional knobs: `snapshots` / `earliest_write_conflict_snapshot` feed
// the job's snapshot list; `check_get_priority` exercises
// CheckGetRateLimiterPriority(); the IO priorities are asserted by a
// MockTestFileSystem when test_io_priority_ is set.
void RunCompaction(
    const std::vector<std::vector<FileMetaData*>>& input_files,
    const std::vector<int>& input_levels,
    const std::vector<mock::KVVector>& expected_results,
    const std::vector<SequenceNumber>& snapshots = {},
    SequenceNumber earliest_write_conflict_snapshot = kMaxSequenceNumber,
    int output_level = 1, bool verify = true,
    std::vector<uint64_t> expected_oldest_blob_file_numbers = {},
    bool check_get_priority = false,
    Env::IOPriority read_io_priority = Env::IO_TOTAL,
    Env::IOPriority write_io_priority = Env::IO_TOTAL,
    int max_subcompactions = 0) {
  // For compaction, set fs as MockTestFileSystem to check the io_priority.
  if (test_io_priority_) {
    db_options_.fs.reset(
        new MockTestFileSystem(fs_, read_io_priority, write_io_priority));
  }

  auto cfd = versions_->GetColumnFamilySet()->GetDefault();

  // Translate the per-level file lists into CompactionInputFiles and
  // count the total input files for the stats check below.
  size_t num_input_files = 0;
  std::vector<CompactionInputFiles> compaction_input_files;
  for (size_t i = 0; i < input_files.size(); ++i) {
    // NOTE(review): this copies the level's file-pointer vector; a
    // const auto& would avoid the copy.
    auto level_files = input_files[i];
    CompactionInputFiles compaction_level;
    compaction_level.level = input_levels[i];
    compaction_level.files.insert(compaction_level.files.end(),
                                  level_files.begin(), level_files.end());
    compaction_input_files.push_back(compaction_level);
    num_input_files += level_files.size();
  }

  // Grandparent files (level below the output) are used for output-file
  // boundary alignment.
  std::vector<FileMetaData*> grandparents;
  // it should actually be the next non-empty level
  const int kGrandparentsLevel = output_level + 1;
  if (kGrandparentsLevel < cf_options_.num_levels) {
    grandparents =
        cfd_->current()->storage_info()->LevelFiles(kGrandparentsLevel);
  }

  Compaction compaction(
      cfd->current()->storage_info(), *cfd->ioptions(),
      *cfd->GetLatestMutableCFOptions(), mutable_db_options_,
      compaction_input_files, output_level,
      mutable_cf_options_.target_file_size_base,
      mutable_cf_options_.max_compaction_bytes, 0, kNoCompression,
      cfd->GetLatestMutableCFOptions()->compression_opts,
      Temperature::kUnknown, max_subcompactions, grandparents,
      /*earliest_snapshot*/ std::nullopt, /*snapshot_checker*/ nullptr, true);
  compaction.FinalizeInputInfo(cfd->current());

  assert(db_options_.info_log);
  LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, db_options_.info_log.get());
  mutex_.Lock();
  EventLogger event_logger(db_options_.info_log.get());
  // TODO(yiwu) add a mock snapshot checker and add test for it.
  SnapshotChecker* snapshot_checker = nullptr;
  // If a full-history timestamp low watermark is set, it must match the
  // user comparator's timestamp size.
  ASSERT_TRUE(full_history_ts_low_.empty() ||
              ucmp_->timestamp_size() == full_history_ts_low_.size());
  const std::atomic<bool> kManualCompactionCanceledFalse{false};
  JobContext job_context(1, false /* create_superversion */);
  CompactionJob compaction_job(
      0, &compaction, db_options_, mutable_db_options_, env_options_,
      versions_.get(), &shutting_down_, &log_buffer, nullptr, nullptr,
      nullptr, nullptr, &mutex_, &error_handler_, snapshots,
      earliest_write_conflict_snapshot, snapshot_checker, &job_context,
      table_cache_, &event_logger, false, false, dbname_,
      &compaction_job_stats_, Env::Priority::USER, nullptr /* IOTracer */,
      /*manual_compaction_canceled=*/kManualCompactionCanceledFalse,
      env_->GenerateUniqueId(), DBImpl::GenerateDbSessionId(nullptr),
      full_history_ts_low_);
  VerifyInitializationOfCompactionJobStats(compaction_job_stats_);

  // Prepare under the mutex, run without it (Run() does the heavy IO),
  // then re-acquire it for Install(), mirroring DBImpl's locking pattern.
  compaction_job.Prepare();
  mutex_.Unlock();
  Status s = compaction_job.Run();
  ASSERT_OK(s);
  ASSERT_OK(compaction_job.io_status());
  mutex_.Lock();
  bool compaction_released = false;
  ASSERT_OK(compaction_job.Install(*cfd->GetLatestMutableCFOptions(),
                                   &compaction_released));
  ASSERT_OK(compaction_job.io_status());
  mutex_.Unlock();
  log_buffer.FlushBufferToLog();

  if (verify) {
    ASSERT_GE(compaction_job_stats_.elapsed_micros, 0U);
    ASSERT_EQ(compaction_job_stats_.num_input_files, num_input_files);

    VerifyTables(output_level, expected_results,
                 expected_oldest_blob_file_numbers);
  }

  if (check_get_priority) {
    CheckGetRateLimiterPriority(compaction_job);
  }

  if (verify_per_key_placement_) {
    // Verify per_key_placement compaction
    assert(compaction.SupportsPerKeyPlacement());
    verify_per_key_placement_(compaction);
  }
}
|
|
|
|
|
|
|
|
// Verifies that CompactionJob::GetRateLimiterPriority() follows the
// WriteController state: IO_LOW while writes proceed normally, and
// IO_USER whenever the controller reports a delayed or stopped state.
void CheckGetRateLimiterPriority(CompactionJob& compaction_job) {
  // Normal state: no delay/stop tokens outstanding, so compaction
  // writes run at the regular background priority.
  ASSERT_EQ(compaction_job.GetRateLimiterPriority(), Env::IO_LOW);

  WriteController* wc =
      compaction_job.versions_->GetColumnFamilySet()->write_controller();

  {
    // Holding a delay token switches the controller to the Delayed
    // state; the priority is boosted to IO_USER for the token's
    // lifetime.
    std::unique_ptr<WriteControllerToken> delay_token =
        wc->GetDelayToken(1000000);
    ASSERT_EQ(compaction_job.GetRateLimiterPriority(), Env::IO_USER);
  }

  {
    // A stop token switches the controller to the Stopped state; the
    // priority is likewise raised to IO_USER.
    std::unique_ptr<WriteControllerToken> stop_token = wc->GetStopToken();
    ASSERT_EQ(compaction_job.GetRateLimiterPriority(), Env::IO_USER);
  }
}
|
|
|
|
|
2021-11-08 19:04:01 +00:00
|
|
|
std::shared_ptr<Env> env_guard_;
|
2014-11-14 19:35:48 +00:00
|
|
|
Env* env_;
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
2019-12-13 22:47:08 +00:00
|
|
|
std::shared_ptr<FileSystem> fs_;
|
2014-11-14 19:35:48 +00:00
|
|
|
std::string dbname_;
|
2020-11-12 19:40:52 +00:00
|
|
|
const Comparator* const ucmp_;
|
2014-11-14 19:35:48 +00:00
|
|
|
EnvOptions env_options_;
|
2016-09-23 23:34:04 +00:00
|
|
|
ImmutableDBOptions db_options_;
|
|
|
|
ColumnFamilyOptions cf_options_;
|
2014-11-14 19:35:48 +00:00
|
|
|
MutableCFOptions mutable_cf_options_;
|
2020-07-23 01:31:25 +00:00
|
|
|
MutableDBOptions mutable_db_options_;
|
2023-04-21 16:07:18 +00:00
|
|
|
const ReadOptions read_options_;
|
Group SST write in flush, compaction and db open with new stats (#11910)
Summary:
## Context/Summary
Similar to https://github.com/facebook/rocksdb/pull/11288, https://github.com/facebook/rocksdb/pull/11444, categorizing SST/blob file write according to different io activities allows more insight into the activity.
For that, this PR does the following:
- Tag different write IOs by passing down and converting WriteOptions to IOOptions
- Add new SST_WRITE_MICROS histogram in WritableFileWriter::Append() and breakdown FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS
Some related code refactory to make implementation cleaner:
- Blob stats
- Replace high-level write measurement with low-level WritableFileWriter::Append() measurement for BLOB_DB_BLOB_FILE_WRITE_MICROS. This is to make FILE_WRITE_{FLUSH|COMPACTION|DB_OPEN}_MICROS include blob file. As a consequence, this introduces some behavioral changes on it, see HISTORY and db bench test plan below for more info.
- Fix bugs where BLOB_DB_BLOB_FILE_SYNCED/BLOB_DB_BLOB_FILE_BYTES_WRITTEN include file failed to sync and bytes failed to write.
- Refactor WriteOptions constructor for easier construction with io_activity and rate_limiter_priority
- Refactor DBImpl::~DBImpl()/BlobDBImpl::Close() to bypass thread op verification
- Build table
- TableBuilderOptions now includes Read/WriteOpitons so BuildTable() do not need to take these two variables
- Replace the io_priority passed into BuildTable() with TableBuilderOptions::WriteOpitons::rate_limiter_priority. Similar for BlobFileBuilder.
This parameter is used for dynamically changing file io priority for flush, see https://github.com/facebook/rocksdb/pull/9988?fbclid=IwAR1DtKel6c-bRJAdesGo0jsbztRtciByNlvokbxkV6h_L-AE9MACzqRTT5s for more
- Update ThreadStatus::FLUSH_BYTES_WRITTEN to use io_activity to track flush IO in flush job and db open instead of io_priority
## Test
### db bench
Flush
```
./db_bench --statistics=1 --benchmarks=fillseq --num=100000 --write_buffer_size=100
rocksdb.sst.write.micros P50 : 1.830863 P95 : 4.094720 P99 : 6.578947 P100 : 26.000000 COUNT : 7875 SUM : 20377
rocksdb.file.write.flush.micros P50 : 1.830863 P95 : 4.094720 P99 : 6.578947 P100 : 26.000000 COUNT : 7875 SUM : 20377
rocksdb.file.write.compaction.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
rocksdb.file.write.db.open.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
```
compaction, db oopen
```
Setup: ./db_bench --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
Run:./db_bench --statistics=1 --benchmarks=compact --db=../db_bench --use_existing_db=1
rocksdb.sst.write.micros P50 : 2.675325 P95 : 9.578788 P99 : 18.780000 P100 : 314.000000 COUNT : 638 SUM : 3279
rocksdb.file.write.flush.micros P50 : 0.000000 P95 : 0.000000 P99 : 0.000000 P100 : 0.000000 COUNT : 0 SUM : 0
rocksdb.file.write.compaction.micros P50 : 2.757353 P95 : 9.610687 P99 : 19.316667 P100 : 314.000000 COUNT : 615 SUM : 3213
rocksdb.file.write.db.open.micros P50 : 2.055556 P95 : 3.925000 P99 : 9.000000 P100 : 9.000000 COUNT : 23 SUM : 66
```
blob stats - just to make sure they aren't broken by this PR
```
Integrated Blob DB
Setup: ./db_bench --enable_blob_files=1 --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
Run:./db_bench --enable_blob_files=1 --statistics=1 --benchmarks=compact --db=../db_bench --use_existing_db=1
pre-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 7.298246 P95 : 9.771930 P99 : 9.991813 P100 : 16.000000 COUNT : 235 SUM : 1600
rocksdb.blobdb.blob.file.synced COUNT : 1
rocksdb.blobdb.blob.file.bytes.written COUNT : 34842
post-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 2.000000 P95 : 2.829360 P99 : 2.993779 P100 : 9.000000 COUNT : 707 SUM : 1614
- COUNT is higher and values are smaller as it includes header and footer write
- COUNT is 3X higher due to each Append() count as one post-PR, while in pre-PR, 3 Append()s counts as one. See https://github.com/facebook/rocksdb/pull/11910/files#diff-32b811c0a1c000768cfb2532052b44dc0b3bf82253f3eab078e15ff201a0dabfL157-L164
rocksdb.blobdb.blob.file.synced COUNT : 1 (stay the same)
rocksdb.blobdb.blob.file.bytes.written COUNT : 34842 (stay the same)
```
```
Stacked Blob DB
Run: ./db_bench --use_blob_db=1 --statistics=1 --benchmarks=fillseq --num=10000 --disable_auto_compactions=1 -write_buffer_size=100 --db=../db_bench
pre-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 12.808042 P95 : 19.674497 P99 : 28.539683 P100 : 51.000000 COUNT : 10000 SUM : 140876
rocksdb.blobdb.blob.file.synced COUNT : 8
rocksdb.blobdb.blob.file.bytes.written COUNT : 1043445
post-PR:
rocksdb.blobdb.blob.file.write.micros P50 : 1.657370 P95 : 2.952175 P99 : 3.877519 P100 : 24.000000 COUNT : 30001 SUM : 67924
- COUNT is higher and values are smaller as it includes header and footer write
- COUNT is 3X higher due to each Append() count as one post-PR, while in pre-PR, 3 Append()s counts as one. See https://github.com/facebook/rocksdb/pull/11910/files#diff-32b811c0a1c000768cfb2532052b44dc0b3bf82253f3eab078e15ff201a0dabfL157-L164
rocksdb.blobdb.blob.file.synced COUNT : 8 (stay the same)
rocksdb.blobdb.blob.file.bytes.written COUNT : 1043445 (stay the same)
```
### Rehearsal CI stress test
Trigger 3 full runs of all our CI stress tests
### Performance
Flush
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_pre_pr --benchmark_filter=ManualFlush/key_num:524288/per_key_size:256 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark; enable_statistics = true
Pre-pr: avg 507515519.3 ns
497686074,499444327,500862543,501389862,502994471,503744435,504142123,504224056,505724198,506610393,506837742,506955122,507695561,507929036,508307733,508312691,508999120,509963561,510142147,510698091,510743096,510769317,510957074,511053311,511371367,511409911,511432960,511642385,511691964,511730908,
Post-pr: avg 511971266.5 ns, regressed 0.88%
502744835,506502498,507735420,507929724,508313335,509548582,509994942,510107257,510715603,511046955,511352639,511458478,512117521,512317380,512766303,512972652,513059586,513804934,513808980,514059409,514187369,514389494,514447762,514616464,514622882,514641763,514666265,514716377,514990179,515502408,
```
Compaction
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_{pre|post}_pr --benchmark_filter=ManualCompaction/comp_style:0/max_data:134217728/per_key_size:256/enable_statistics:1 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark
Pre-pr: avg 495346098.30 ns
492118301,493203526,494201411,494336607,495269217,495404950,496402598,497012157,497358370,498153846
Post-pr: avg 504528077.20, regressed 1.85%. "ManualCompaction" include flush so the isolated regression for compaction should be around 1.85-0.88 = 0.97%
502465338,502485945,502541789,502909283,503438601,504143885,506113087,506629423,507160414,507393007
```
Put with WAL (in case passing WriteOptions slows down this path even without collecting SST write stats)
```
TEST_TMPDIR=/dev/shm ./db_basic_bench_pre_pr --benchmark_filter=DBPut/comp_style:0/max_data:107374182400/per_key_size:256/enable_statistics:1/wal:1 --benchmark_repetitions=1000
-- default: 1 thread is used to run benchmark
Pre-pr: avg 3848.10 ns
3814,3838,3839,3848,3854,3854,3854,3860,3860,3860
Post-pr: avg 3874.20 ns, regressed 0.68%
3863,3867,3871,3874,3875,3877,3877,3877,3880,3881
```
Pull Request resolved: https://github.com/facebook/rocksdb/pull/11910
Reviewed By: ajkr
Differential Revision: D49788060
Pulled By: hx235
fbshipit-source-id: 79e73699cda5be3b66461687e5147c2484fc5eff
2023-12-29 23:29:23 +00:00
|
|
|
const WriteOptions write_options_;
|
2014-11-14 19:35:48 +00:00
|
|
|
std::shared_ptr<Cache> table_cache_;
|
|
|
|
WriteController write_controller_;
|
2016-06-21 01:01:03 +00:00
|
|
|
WriteBufferManager write_buffer_manager_;
|
2014-11-14 19:35:48 +00:00
|
|
|
std::unique_ptr<VersionSet> versions_;
|
2015-02-05 05:39:45 +00:00
|
|
|
InstrumentedMutex mutex_;
|
2014-11-14 19:35:48 +00:00
|
|
|
std::atomic<bool> shutting_down_;
|
|
|
|
std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
|
2015-07-28 23:41:40 +00:00
|
|
|
CompactionJobStats compaction_job_stats_;
|
2015-08-08 04:59:51 +00:00
|
|
|
ColumnFamilyData* cfd_;
|
Compaction filter on merge operands
Summary:
Since Andres' internship is over, I took over https://reviews.facebook.net/D42555 and rebased and simplified it a bit.
The behavior in this diff is a bit simpler than in D42555:
* only merge operators are passed through FilterMergeValue(). If fitler function returns true, the merge operator is ignored
* compaction filter is *not* called on: 1) results of merge operations and 2) base values that are getting merged with merge operands (the second case was also true in previous diff)
Do we also need a compaction filter to get called on merge results?
Test Plan: make && make check
Reviewers: lovro, tnovak, rven, yhchiang, sdong
Reviewed By: sdong
Subscribers: noetzli, kolmike, leveldb, dhruba, sdong
Differential Revision: https://reviews.facebook.net/D47847
2015-10-07 16:30:03 +00:00
|
|
|
std::unique_ptr<CompactionFilter> compaction_filter_;
|
|
|
|
std::shared_ptr<MergeOperator> merge_op_;
|
2018-06-28 19:23:57 +00:00
|
|
|
ErrorHandler error_handler_;
|
2020-11-12 19:40:52 +00:00
|
|
|
std::string full_history_ts_low_;
|
|
|
|
const std::function<std::string(uint64_t)> encode_u64_ts_;
|
2022-09-15 04:59:56 +00:00
|
|
|
const bool test_io_priority_;
|
2022-07-14 03:54:49 +00:00
|
|
|
std::function<void(Compaction& comp)> verify_per_key_placement_;
|
2022-09-15 04:59:56 +00:00
|
|
|
const TableTypeForTest table_type_ = kMockTable;
|
2020-11-12 19:40:52 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
// TODO(icanadi) Make it simpler once we mock out VersionSet
|
|
|
|
// Default CompactionJob test fixture: per-thread DB path, bytewise
// comparator, no user-defined timestamps (the encoder always returns an
// empty string), IO-priority testing disabled, and the mock table
// factory for reading/writing table files.
class CompactionJobTest : public CompactionJobTestBase {
 public:
  CompactionJobTest()
      : CompactionJobTestBase(
            test::PerThreadDBPath("compaction_job_test"), BytewiseComparator(),
            [](uint64_t /*ts*/) { return ""; }, /*test_io_priority=*/false,
            TableTypeForTest::kMockTable) {}
};
|
|
|
|
|
2015-03-17 21:08:00 +00:00
|
|
|
TEST_F(CompactionJobTest, Simple) {
  NewDB();

  // CreateTwoFiles() seeds two L0 files and returns the expected merged
  // output of compacting them together.
  auto expected = CreateTwoFiles(false);
  auto default_cfd = versions_->GetColumnFamilySet()->GetDefault();
  constexpr int kInputLevel = 0;
  auto level0_files =
      default_cfd->current()->storage_info()->LevelFiles(kInputLevel);
  ASSERT_EQ(2U, level0_files.size());
  RunCompaction({level0_files}, {kInputLevel}, {expected});
}
|
|
|
|
|
2020-07-15 00:16:18 +00:00
|
|
|
TEST_F(CompactionJobTest, DISABLED_SimpleCorrupted) {
  NewDB();

  // CreateTwoFiles(true) injects corrupt keys into the two input files.
  auto expected = CreateTwoFiles(true);
  auto default_cfd = versions_->GetColumnFamilySet()->GetDefault();
  constexpr int kInputLevel = 0;
  auto level0_files =
      default_cfd->current()->storage_info()->LevelFiles(kInputLevel);
  RunCompaction({level0_files}, {kInputLevel}, {expected});
  // All 400 injected corrupt keys should be reflected in the job stats.
  ASSERT_EQ(compaction_job_stats_.num_corrupt_keys, 400U);
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTest, SimpleDeletion) {
  NewDB();

  // Newer file: a deletion of "c" shadowing an older value of "c".
  AddMockFile(mock::MakeMockFile({{KeyStr("c", 4U, kTypeDeletion), ""},
                                  {KeyStr("c", 3U, kTypeValue), "val"}}));

  // Older file: two versions of "b".
  AddMockFile(mock::MakeMockFile({{KeyStr("b", 2U, kTypeValue), "val"},
                                  {KeyStr("b", 1U, kTypeValue), "val"}}));

  // "c" is dropped entirely; only the newest "b" survives, with its
  // sequence number zeroed out.
  auto expected = mock::MakeMockFile({{KeyStr("b", 0U, kTypeValue), "val"}});

  SetLastSequence(4U);
  constexpr int kInputLevel = 0;
  auto level0_files = cfd_->current()->storage_info()->LevelFiles(kInputLevel);
  RunCompaction({level0_files}, {kInputLevel}, {expected});
}
|
|
|
|
|
2017-06-29 22:13:02 +00:00
|
|
|
TEST_F(CompactionJobTest, OutputNothing) {
  NewDB();

  // A value of "a" fully cancelled by a newer deletion of "a".
  AddMockFile(mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"}}));
  AddMockFile(mock::MakeMockFile({{KeyStr("a", 2U, kTypeDeletion), ""}}));

  // Nothing should survive the compaction.
  auto expected = mock::MakeMockFile();

  SetLastSequence(4U);
  constexpr int kInputLevel = 0;
  auto level0_files = cfd_->current()->storage_info()->LevelFiles(kInputLevel);
  RunCompaction({level0_files}, {kInputLevel}, {expected});
}
|
|
|
|
|
2015-08-08 04:59:51 +00:00
|
|
|
TEST_F(CompactionJobTest, SimpleOverwrite) {
  NewDB();

  // Newer file overwrites both keys from the older file.
  AddMockFile(mock::MakeMockFile({
      {KeyStr("a", 3U, kTypeValue), "val2"},
      {KeyStr("b", 4U, kTypeValue), "val3"},
  }));
  AddMockFile(mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"},
                                  {KeyStr("b", 2U, kTypeValue), "val"}}));

  // Only the newest versions survive, with sequence numbers zeroed.
  auto expected = mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "val2"},
                                      {KeyStr("b", 0U, kTypeValue), "val3"}});

  SetLastSequence(4U);
  constexpr int kInputLevel = 0;
  auto level0_files = cfd_->current()->storage_info()->LevelFiles(kInputLevel);
  RunCompaction({level0_files}, {kInputLevel}, {expected});
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTest, SimpleNonLastLevel) {
  NewDB();

  // L0: newest versions of "a" and "b".
  AddMockFile(mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeValue), "val2"},
      {KeyStr("b", 6U, kTypeValue), "val3"},
  }));
  // L1: older versions, compacted together with L0.
  AddMockFile(mock::MakeMockFile({{KeyStr("a", 3U, kTypeValue), "val"},
                                  {KeyStr("b", 4U, kTypeValue), "val"}}),
              1);
  // L2: still-older versions that sit below the compaction output level.
  AddMockFile(mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"},
                                  {KeyStr("b", 2U, kTypeValue), "val"}}),
              2);

  // Because level 1 is not the last level, the sequence numbers of a and b
  // cannot be set to 0.
  auto expected = mock::MakeMockFile({{KeyStr("a", 5U, kTypeValue), "val2"},
                                      {KeyStr("b", 6U, kTypeValue), "val3"}});

  SetLastSequence(6U);
  const std::vector<int> input_levels = {0, 1};
  auto lvl0_files =
      cfd_->current()->storage_info()->LevelFiles(input_levels[0]);
  auto lvl1_files =
      cfd_->current()->storage_info()->LevelFiles(input_levels[1]);
  RunCompaction({lvl0_files, lvl1_files}, input_levels, {expected});
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTest, SimpleMerge) {
  merge_op_ = MergeOperators::CreateStringAppendOperator();
  NewDB();

  // "a": two merge operands stacked on a base value.
  AddMockFile(mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeMerge), "5"},
      {KeyStr("a", 4U, kTypeMerge), "4"},
      {KeyStr("a", 3U, kTypeValue), "3"},
  }));
  // "b": one merge operand on a base value.
  AddMockFile(mock::MakeMockFile({{KeyStr("b", 2U, kTypeMerge), "2"},
                                  {KeyStr("b", 1U, kTypeValue), "1"}}));

  // The string-append operator joins operands oldest-to-newest with commas.
  auto expected = mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "3,4,5"},
                                      {KeyStr("b", 0U, kTypeValue), "1,2"}});

  SetLastSequence(5U);
  constexpr int kInputLevel = 0;
  auto level0_files = cfd_->current()->storage_info()->LevelFiles(kInputLevel);
  RunCompaction({level0_files}, {kInputLevel}, {expected});
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTest, NonAssocMerge) {
  merge_op_ = MergeOperators::CreateStringAppendTESTOperator();
  NewDB();

  // "a": three merge operands and no base value.
  AddMockFile(mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeMerge), "5"},
      {KeyStr("a", 4U, kTypeMerge), "4"},
      {KeyStr("a", 3U, kTypeMerge), "3"},
  }));
  // "b": two merge operands and no base value.
  AddMockFile(mock::MakeMockFile({{KeyStr("b", 2U, kTypeMerge), "2"},
                                  {KeyStr("b", 1U, kTypeMerge), "1"}}));

  // Operand stacks collapse to final comma-joined values.
  auto expected = mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "3,4,5"},
                                      {KeyStr("b", 0U, kTypeValue), "1,2"}});

  SetLastSequence(5U);
  constexpr int kInputLevel = 0;
  auto level0_files = cfd_->current()->storage_info()->LevelFiles(kInputLevel);
  RunCompaction({level0_files}, {kInputLevel}, {expected});
}
|
|
|
|
|
|
|
|
// Filters merge operands with value 10.
|
|
|
|
TEST_F(CompactionJobTest, MergeOperandFilter) {
  merge_op_ = MergeOperators::CreateUInt64AddOperator();
  compaction_filter_.reset(new test::FilterNumber(10U));
  NewDB();

  // "a": the operand equal to 10 is dropped by the filter, leaving 5 + 3.
  AddMockFile(mock::MakeMockFile(
      {{KeyStr("a", 5U, kTypeMerge), test::EncodeInt(5U)},
       {KeyStr("a", 4U, kTypeMerge), test::EncodeInt(10U)},  // Filtered
       {KeyStr("a", 3U, kTypeMerge), test::EncodeInt(3U)}}));

  // "b": one operand filtered, leaving just 2.
  AddMockFile(mock::MakeMockFile({
      {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(2U)},
      {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)}  // Filtered
  }));

  auto expected =
      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), test::EncodeInt(8U)},
                          {KeyStr("b", 0U, kTypeValue), test::EncodeInt(2U)}});

  SetLastSequence(5U);
  constexpr int kInputLevel = 0;
  auto level0_files = cfd_->current()->storage_info()->LevelFiles(kInputLevel);
  RunCompaction({level0_files}, {kInputLevel}, {expected});
}
|
|
|
|
|
|
|
|
// Only some merge operands carry the filtered value (10): the uint64-add
// merges must still produce correct sums from the surviving operands and
// base values across three levels.
TEST_F(CompactionJobTest, FilterSomeMergeOperands) {
  merge_op_ = MergeOperators::CreateUInt64AddOperator();
  compaction_filter_.reset(new test::FilterNumber(10U));
  NewDB();

  auto file1 = mock::MakeMockFile(
      {{KeyStr("a", 5U, kTypeMerge), test::EncodeInt(5U)},
       {KeyStr("a", 4U, kTypeMerge), test::EncodeInt(10U)},  // Filtered
       {KeyStr("a", 3U, kTypeValue), test::EncodeInt(5U)},
       {KeyStr("d", 8U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file1);

  auto file2 =
      mock::MakeMockFile({{KeyStr("b", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("c", 2U, kTypeMerge), test::EncodeInt(3U)},
                          {KeyStr("c", 1U, kTypeValue), test::EncodeInt(7U)},
                          {KeyStr("d", 1U, kTypeValue), test::EncodeInt(6U)}});
  AddMockFile(file2);

  // An older operand for "a" on level 2, below the compaction.
  auto file3 =
      mock::MakeMockFile({{KeyStr("a", 1U, kTypeMerge), test::EncodeInt(3U)}});
  AddMockFile(file3, 2);

  // Expected: a = 5 + 5 (its 10-operand filtered), c = 3 + 7, and d keeps
  // its base value 6 (its merge operand 10 is filtered away).
  auto expected_results = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeValue), test::EncodeInt(10U)},
      {KeyStr("c", 2U, kTypeValue), test::EncodeInt(10U)},
      {KeyStr("d", 1U, kTypeValue), test::EncodeInt(6U)}
      // b does not appear because the operands are filtered
  });

  SetLastSequence(5U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}
|
|
|
|
|
|
|
|
// Test where all operands/merge results are filtered out.
|
|
|
|
// Test where all operands/merge results are filtered out: every merge
// operand equals the filtered value (10), so the compaction output is
// expected to be completely empty.
TEST_F(CompactionJobTest, FilterAllMergeOperands) {
  merge_op_ = MergeOperators::CreateUInt64AddOperator();
  compaction_filter_.reset(new test::FilterNumber(10U));
  NewDB();

  auto file1 =
      mock::MakeMockFile({{KeyStr("a", 11U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("a", 10U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("a", 9U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file1);

  auto file2 =
      mock::MakeMockFile({{KeyStr("b", 8U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 7U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 6U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 5U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 4U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 3U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("c", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("c", 1U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file2);

  // Level-2 operands for "a" and "b" are also the filtered value.
  auto file3 =
      mock::MakeMockFile({{KeyStr("a", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file3, 2);

  SetLastSequence(11U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);

  // Every operand is filtered, so the compaction should emit no keys.
  mock::KVVector empty_map;
  RunCompaction({files}, {input_level}, {empty_map});
}
|
|
|
|
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed to have this
behavior on the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (and older version of
this patch supported this so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 18:42:56 +00:00
|
|
|
// Basic SingleDelete behavior: a SingleDelete and the value it covers are
// both dropped during compaction, while a regular Delete tombstone must be
// kept (there is still a value for "a" on a lower level).
TEST_F(CompactionJobTest, SimpleSingleDelete) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeDeletion), ""},
      {KeyStr("b", 6U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("a", 3U, kTypeValue), "val"},
                                   {KeyStr("b", 4U, kTypeValue), "val"}});
  AddMockFile(file2);

  auto file3 = mock::MakeMockFile({
      {KeyStr("a", 1U, kTypeValue), "val"},
  });
  AddMockFile(file3, 2);

  // "b" (SingleDelete + value) disappears entirely; the Delete for "a"
  // survives because "a"@1 still exists on level 2.
  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 5U, kTypeDeletion), ""}});

  SetLastSequence(6U);

  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}
|
|
|
|
|
|
|
|
// SingleDelete interaction with snapshots (snapshots at seq 10 and 20,
// earliest write-conflict snapshot 10): a SingleDelete/value pair may only
// be elided when no snapshot separates the two entries.
TEST_F(CompactionJobTest, SingleDeleteSnapshots) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("A", 12U, kTypeSingleDeletion), ""},
      {KeyStr("a", 12U, kTypeSingleDeletion), ""},
      {KeyStr("b", 21U, kTypeSingleDeletion), ""},
      {KeyStr("c", 22U, kTypeSingleDeletion), ""},
      {KeyStr("d", 9U, kTypeSingleDeletion), ""},
      {KeyStr("f", 21U, kTypeSingleDeletion), ""},
      {KeyStr("j", 11U, kTypeSingleDeletion), ""},
      {KeyStr("j", 9U, kTypeSingleDeletion), ""},
      {KeyStr("k", 12U, kTypeSingleDeletion), ""},
      {KeyStr("k", 11U, kTypeSingleDeletion), ""},
      {KeyStr("l", 3U, kTypeSingleDeletion), ""},
      {KeyStr("l", 2U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("0", 2U, kTypeSingleDeletion), ""},
      {KeyStr("a", 11U, kTypeValue), "val1"},
      {KeyStr("b", 11U, kTypeValue), "val2"},
      {KeyStr("c", 21U, kTypeValue), "val3"},
      {KeyStr("d", 8U, kTypeValue), "val4"},
      {KeyStr("e", 2U, kTypeSingleDeletion), ""},
      {KeyStr("f", 1U, kTypeValue), "val1"},
      {KeyStr("g", 11U, kTypeSingleDeletion), ""},
      {KeyStr("h", 2U, kTypeSingleDeletion), ""},
      {KeyStr("m", 12U, kTypeValue), "val1"},
      {KeyStr("m", 11U, kTypeSingleDeletion), ""},
      {KeyStr("m", 8U, kTypeValue), "val2"},
  });
  AddMockFile(file2);

  auto file3 = mock::MakeMockFile({
      {KeyStr("A", 1U, kTypeValue), "val"},
      {KeyStr("e", 1U, kTypeValue), "val"},
  });
  AddMockFile(file3, 2);

  auto expected_results = mock::MakeMockFile({
      {KeyStr("A", 12U, kTypeSingleDeletion), ""},
      {KeyStr("a", 12U, kTypeSingleDeletion), ""},
      {KeyStr("a", 11U, kTypeValue), ""},
      {KeyStr("b", 21U, kTypeSingleDeletion), ""},
      {KeyStr("b", 11U, kTypeValue), "val2"},
      {KeyStr("c", 22U, kTypeSingleDeletion), ""},
      {KeyStr("c", 21U, kTypeValue), ""},
      {KeyStr("e", 2U, kTypeSingleDeletion), ""},
      {KeyStr("f", 21U, kTypeSingleDeletion), ""},
      {KeyStr("f", 1U, kTypeValue), "val1"},
      {KeyStr("g", 11U, kTypeSingleDeletion), ""},
      {KeyStr("j", 11U, kTypeSingleDeletion), ""},
      {KeyStr("k", 11U, kTypeSingleDeletion), ""},
      {KeyStr("m", 12U, kTypeValue), "val1"},
      {KeyStr("m", 11U, kTypeSingleDeletion), ""},
      {KeyStr("m", 8U, kTypeValue), "val2"},
  });

  SetLastSequence(22U);

  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results}, {10U, 20U}, 10U);
}
|
|
|
|
|
|
|
|
// Test multiple snapshots ({10, 20, 30}) where the earliest snapshot (10) is
// not the earliest write-conflict snapshot (20): SingleDelete/value pairs
// newer than the write-conflict snapshot must be preserved even though no
// regular snapshot separates them.
TEST_F(CompactionJobTest, EarliestWriteConflictSnapshot) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("A", 24U, kTypeSingleDeletion), ""},
      {KeyStr("A", 23U, kTypeValue), "val"},
      {KeyStr("B", 24U, kTypeSingleDeletion), ""},
      {KeyStr("B", 23U, kTypeValue), "val"},
      {KeyStr("D", 24U, kTypeSingleDeletion), ""},
      {KeyStr("G", 32U, kTypeSingleDeletion), ""},
      {KeyStr("G", 31U, kTypeValue), "val"},
      {KeyStr("G", 24U, kTypeSingleDeletion), ""},
      {KeyStr("G", 23U, kTypeValue), "val2"},
      {KeyStr("H", 31U, kTypeValue), "val"},
      {KeyStr("H", 24U, kTypeSingleDeletion), ""},
      {KeyStr("H", 23U, kTypeValue), "val"},
      {KeyStr("I", 35U, kTypeSingleDeletion), ""},
      {KeyStr("I", 34U, kTypeValue), "val2"},
      {KeyStr("I", 33U, kTypeSingleDeletion), ""},
      {KeyStr("I", 32U, kTypeValue), "val3"},
      {KeyStr("I", 31U, kTypeSingleDeletion), ""},
      {KeyStr("J", 34U, kTypeValue), "val"},
      {KeyStr("J", 33U, kTypeSingleDeletion), ""},
      {KeyStr("J", 25U, kTypeValue), "val2"},
      {KeyStr("J", 24U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("A", 14U, kTypeSingleDeletion), ""},
      {KeyStr("A", 13U, kTypeValue), "val2"},
      {KeyStr("C", 14U, kTypeSingleDeletion), ""},
      {KeyStr("C", 13U, kTypeValue), "val"},
      {KeyStr("E", 12U, kTypeSingleDeletion), ""},
      {KeyStr("F", 4U, kTypeSingleDeletion), ""},
      {KeyStr("F", 3U, kTypeValue), "val"},
      {KeyStr("G", 14U, kTypeSingleDeletion), ""},
      {KeyStr("G", 13U, kTypeValue), "val3"},
      {KeyStr("H", 14U, kTypeSingleDeletion), ""},
      {KeyStr("H", 13U, kTypeValue), "val2"},
      {KeyStr("I", 13U, kTypeValue), "val4"},
      {KeyStr("I", 12U, kTypeSingleDeletion), ""},
      {KeyStr("I", 11U, kTypeValue), "val5"},
      {KeyStr("J", 15U, kTypeValue), "val3"},
      {KeyStr("J", 14U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file2);

  auto expected_results = mock::MakeMockFile({
      {KeyStr("A", 24U, kTypeSingleDeletion), ""},
      {KeyStr("A", 23U, kTypeValue), ""},
      {KeyStr("B", 24U, kTypeSingleDeletion), ""},
      {KeyStr("B", 23U, kTypeValue), ""},
      {KeyStr("D", 24U, kTypeSingleDeletion), ""},
      {KeyStr("E", 12U, kTypeSingleDeletion), ""},
      {KeyStr("G", 32U, kTypeSingleDeletion), ""},
      {KeyStr("G", 31U, kTypeValue), ""},
      {KeyStr("H", 31U, kTypeValue), "val"},
      {KeyStr("I", 35U, kTypeSingleDeletion), ""},
      {KeyStr("I", 34U, kTypeValue), ""},
      {KeyStr("I", 31U, kTypeSingleDeletion), ""},
      {KeyStr("I", 13U, kTypeValue), "val4"},
      {KeyStr("J", 34U, kTypeValue), "val"},
      {KeyStr("J", 33U, kTypeSingleDeletion), ""},
      {KeyStr("J", 25U, kTypeValue), "val2"},
      {KeyStr("J", 24U, kTypeSingleDeletion), ""},
      {KeyStr("J", 15U, kTypeValue), "val3"},
      {KeyStr("J", 14U, kTypeSingleDeletion), ""},
  });

  SetLastSequence(24U);

  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results}, {10U, 20U, 30U},
                20U);
}
|
|
|
|
|
|
|
|
// SingleDelete covering a value with sequence number zero: the pair is still
// removed, and the surviving key's sequence number is zeroed out in the
// bottommost output.
TEST_F(CompactionJobTest, SingleDeleteZeroSeq) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("A", 10U, kTypeSingleDeletion), ""},
      {KeyStr("dummy", 5U, kTypeValue), "val2"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("A", 0U, kTypeValue), "val"},
  });
  AddMockFile(file2);

  auto expected_results = mock::MakeMockFile({
      // "dummy" is output with its sequence number zeroed.
      {KeyStr("dummy", 0U, kTypeValue), "val2"},
  });

  SetLastSequence(22U);

  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  // No snapshots: the SingleDelete for "A" and the covered value both vanish.
  RunCompaction({files}, {input_level}, {expected_results}, {});
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTest, MultiSingleDelete) {
|
|
|
|
// Tests three scenarios involving multiple single delete/put pairs:
|
|
|
|
//
|
|
|
|
// A: Put Snapshot SDel Put SDel -> Put Snapshot SDel
|
2015-11-05 04:52:22 +00:00
|
|
|
// B: Snapshot Put SDel Put SDel Snapshot -> Snapshot SDel Snapshot
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed to have this
behavior on the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (and older version of
this patch supported this so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 18:42:56 +00:00
|
|
|
// C: SDel Put SDel Snapshot Put -> Snapshot Put
|
2015-11-05 04:52:22 +00:00
|
|
|
// D: (Put) SDel Snapshot Put SDel -> (Put) SDel Snapshot SDel
|
|
|
|
// E: Put SDel Snapshot Put SDel -> Snapshot SDel
|
|
|
|
// F: Put SDel Put Sdel Snapshot -> removed
|
|
|
|
// G: Snapshot SDel Put SDel Put -> Snapshot Put SDel
|
|
|
|
// H: (Put) Put SDel Put Sdel Snapshot -> Removed
|
|
|
|
// I: (Put) Snapshot Put SDel Put SDel -> SDel
|
|
|
|
// J: Put Put SDel Put SDel SDel Snapshot Put Put SDel SDel Put
|
|
|
|
// -> Snapshot Put
|
|
|
|
// K: SDel SDel Put SDel Put Put Snapshot SDel Put SDel SDel Put SDel
|
|
|
|
// -> Snapshot Put Snapshot SDel
|
2022-04-28 21:48:27 +00:00
|
|
|
// L: SDel Put SDel Put SDel Snapshot SDel Put SDel SDel Put SDel
|
|
|
|
// -> Snapshot SDel Put SDel
|
|
|
|
// M: (Put) SDel Put SDel Put SDel Snapshot Put SDel SDel Put SDel SDel
|
|
|
|
// -> SDel Snapshot Put SDel
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed to have this
behavior on the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (and older version of
this patch supported this so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 18:42:56 +00:00
|
|
|
NewDB();
|
|
|
|
|
|
|
|
auto file1 = mock::MakeMockFile({
|
|
|
|
{KeyStr("A", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("A", 13U, kTypeValue), "val5"},
|
|
|
|
{KeyStr("A", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("B", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("B", 13U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("C", 14U, kTypeValue), "val3"},
|
|
|
|
{KeyStr("D", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("D", 11U, kTypeValue), "val4"},
|
2015-11-05 04:52:22 +00:00
|
|
|
{KeyStr("G", 15U, kTypeValue), "val"},
|
|
|
|
{KeyStr("G", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("G", 13U, kTypeValue), "val"},
|
|
|
|
{KeyStr("I", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("I", 13U, kTypeValue), "val"},
|
|
|
|
{KeyStr("J", 15U, kTypeValue), "val"},
|
|
|
|
{KeyStr("J", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("J", 13U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("J", 12U, kTypeValue), "val"},
|
|
|
|
{KeyStr("J", 11U, kTypeValue), "val"},
|
|
|
|
{KeyStr("K", 16U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("K", 15U, kTypeValue), "val1"},
|
|
|
|
{KeyStr("K", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("K", 13U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("K", 12U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("K", 11U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("L", 16U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("L", 15U, kTypeValue), "val"},
|
|
|
|
{KeyStr("L", 14U, kTypeSingleDeletion), ""},
|
2022-04-28 21:48:27 +00:00
|
|
|
{KeyStr("L", 13U, kTypeSingleDeletion), ""},
|
2015-11-05 04:52:22 +00:00
|
|
|
{KeyStr("L", 12U, kTypeValue), "val"},
|
2022-04-28 21:48:27 +00:00
|
|
|
{KeyStr("L", 11U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("M", 16U, kTypeSingleDeletion), ""},
|
2015-11-05 04:52:22 +00:00
|
|
|
{KeyStr("M", 15U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("M", 14U, kTypeValue), "val"},
|
|
|
|
{KeyStr("M", 13U, kTypeSingleDeletion), ""},
|
2022-04-28 21:48:27 +00:00
|
|
|
{KeyStr("M", 12U, kTypeSingleDeletion), ""},
|
2015-11-05 04:52:22 +00:00
|
|
|
{KeyStr("M", 11U, kTypeValue), "val"},
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed to have this
behavior on the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (and older version of
this patch supported this so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 18:42:56 +00:00
|
|
|
});
|
|
|
|
AddMockFile(file1);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile({
|
|
|
|
{KeyStr("A", 10U, kTypeValue), "val"},
|
|
|
|
{KeyStr("B", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("B", 11U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("C", 10U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("C", 9U, kTypeValue), "val6"},
|
|
|
|
{KeyStr("C", 8U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("D", 10U, kTypeSingleDeletion), ""},
|
2015-11-05 04:52:22 +00:00
|
|
|
{KeyStr("E", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("E", 11U, kTypeValue), "val"},
|
|
|
|
{KeyStr("E", 5U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("E", 4U, kTypeValue), "val"},
|
|
|
|
{KeyStr("F", 6U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("F", 5U, kTypeValue), "val"},
|
|
|
|
{KeyStr("F", 4U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("F", 3U, kTypeValue), "val"},
|
|
|
|
{KeyStr("G", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("H", 6U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("H", 5U, kTypeValue), "val"},
|
|
|
|
{KeyStr("H", 4U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("H", 3U, kTypeValue), "val"},
|
|
|
|
{KeyStr("I", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("I", 11U, kTypeValue), "val"},
|
|
|
|
{KeyStr("J", 6U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("J", 5U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("J", 4U, kTypeValue), "val"},
|
|
|
|
{KeyStr("J", 3U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("J", 2U, kTypeValue), "val"},
|
|
|
|
{KeyStr("K", 8U, kTypeValue), "val3"},
|
|
|
|
{KeyStr("K", 7U, kTypeValue), "val4"},
|
|
|
|
{KeyStr("K", 6U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("K", 5U, kTypeValue), "val5"},
|
|
|
|
{KeyStr("K", 2U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("K", 1U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("L", 5U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("L", 4U, kTypeValue), "val"},
|
2022-04-28 21:48:27 +00:00
|
|
|
{KeyStr("L", 3U, kTypeSingleDeletion), ""},
|
2015-11-05 04:52:22 +00:00
|
|
|
{KeyStr("L", 2U, kTypeValue), "val"},
|
|
|
|
{KeyStr("L", 1U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("M", 10U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("M", 7U, kTypeValue), "val"},
|
2022-04-28 21:48:27 +00:00
|
|
|
{KeyStr("M", 5U, kTypeSingleDeletion), ""},
|
2015-11-05 04:52:22 +00:00
|
|
|
{KeyStr("M", 4U, kTypeValue), "val"},
|
|
|
|
{KeyStr("M", 3U, kTypeSingleDeletion), ""},
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed to have this
behavior on the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (and older version of
this patch supported this so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 18:42:56 +00:00
|
|
|
});
|
|
|
|
AddMockFile(file2);
|
|
|
|
|
|
|
|
auto file3 = mock::MakeMockFile({
|
2015-11-05 04:52:22 +00:00
|
|
|
{KeyStr("D", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("H", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("I", 2U, kTypeValue), "val"},
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed to have this
behavior on the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (and older version of
this patch supported this so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 18:42:56 +00:00
|
|
|
});
|
|
|
|
AddMockFile(file3, 2);
|
|
|
|
|
2015-11-05 04:52:22 +00:00
|
|
|
auto file4 = mock::MakeMockFile({
|
|
|
|
{KeyStr("M", 1U, kTypeValue), "val"},
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed to have this
behavior on the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (and older version of
this patch supported this so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 18:42:56 +00:00
|
|
|
});
|
2015-11-05 04:52:22 +00:00
|
|
|
AddMockFile(file4, 2);
|
|
|
|
|
|
|
|
auto expected_results =
|
|
|
|
mock::MakeMockFile({{KeyStr("A", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("A", 13U, kTypeValue), ""},
|
|
|
|
{KeyStr("A", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("A", 10U, kTypeValue), "val"},
|
|
|
|
{KeyStr("B", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("B", 13U, kTypeValue), ""},
|
|
|
|
{KeyStr("C", 14U, kTypeValue), "val3"},
|
|
|
|
{KeyStr("D", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("D", 11U, kTypeValue), ""},
|
|
|
|
{KeyStr("D", 10U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("E", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("E", 11U, kTypeValue), ""},
|
|
|
|
{KeyStr("G", 15U, kTypeValue), "val"},
|
|
|
|
{KeyStr("G", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("I", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("I", 13U, kTypeValue), ""},
|
|
|
|
{KeyStr("J", 15U, kTypeValue), "val"},
|
|
|
|
{KeyStr("K", 16U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("K", 15U, kTypeValue), ""},
|
|
|
|
{KeyStr("K", 11U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("K", 8U, kTypeValue), "val3"},
|
|
|
|
{KeyStr("L", 16U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("L", 15U, kTypeValue), ""},
|
2022-04-28 21:48:27 +00:00
|
|
|
{KeyStr("L", 11U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("M", 15U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("M", 14U, kTypeValue), ""},
|
2015-11-05 04:52:22 +00:00
|
|
|
{KeyStr("M", 3U, kTypeSingleDeletion), ""}});
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed to have this
behavior on the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (and older version of
this patch supported this so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 18:42:56 +00:00
|
|
|
|
|
|
|
SetLastSequence(22U);
|
2022-09-15 04:59:56 +00:00
|
|
|
constexpr int input_level = 0;
|
|
|
|
auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
|
|
|
|
RunCompaction({files}, {input_level}, {expected_results}, {10U}, 10U);
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed to have this
behavior on the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (and older version of
this patch supported this so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 18:42:56 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// This test documents the behavior where a corrupt key follows a deletion or a
// single deletion and the (single) deletion gets removed while the corrupt key
// gets written out. TODO(noetzli): We probably want a better way to treat
// corrupt keys.
TEST_F(CompactionJobTest, DISABLED_CorruptionAfterDeletion) {
  NewDB();

  // First input file: a valid value for "A", a deletion for "a", and a value
  // entry for "a" flagged as corrupt (the extra `true` argument to
  // test::KeyStr presumably marks the entry corrupt — matches the test's
  // stated purpose; confirm against KeyStr's signature).
  auto file1 =
      mock::MakeMockFile({{test::KeyStr("A", 6U, kTypeValue), "val3"},
                          {test::KeyStr("a", 5U, kTypeDeletion), ""},
                          {test::KeyStr("a", 4U, kTypeValue, true), "val"}});
  AddMockFile(file1);

  // Second input file: a single deletion for "b" followed by a corrupt value
  // for "b", plus an ordinary value for "c".
  auto file2 =
      mock::MakeMockFile({{test::KeyStr("b", 3U, kTypeSingleDeletion), ""},
                          {test::KeyStr("b", 2U, kTypeValue, true), "val"},
                          {test::KeyStr("c", 1U, kTypeValue), "val2"}});
  AddMockFile(file2);

  // Expected output: the deletion of "a" and the single deletion of "b" are
  // dropped by the compaction, yet the corrupt entries they covered survive
  // (sequence numbers are zeroed in the bottom-most output).
  auto expected_results =
      mock::MakeMockFile({{test::KeyStr("A", 0U, kTypeValue), "val3"},
                          {test::KeyStr("a", 0U, kTypeValue, true), "val"},
                          {test::KeyStr("b", 0U, kTypeValue, true), "val"},
                          {test::KeyStr("c", 0U, kTypeValue), "val2"}});

  SetLastSequence(6U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}
|
|
|
|
|
2019-10-14 22:19:31 +00:00
|
|
|
TEST_F(CompactionJobTest, OldestBlobFileNumber) {
  NewDB();

  // Note: blob1 is inlined TTL, so it will not be considered for the purposes
  // of identifying the oldest referenced blob file. Similarly, blob6 will be
  // ignored because it has TTL and hence refers to a TTL blob file.
  const stl_wrappers::KVMap::value_type blob1(
      KeyStr("a", 1U, kTypeBlobIndex), BlobStrInlinedTTL("foo", 1234567890ULL));
  // blob2 and blob3 reference blob files 59 and 138 respectively (assumed:
  // the first BlobStr argument is the blob file number).
  const stl_wrappers::KVMap::value_type blob2(KeyStr("b", 2U, kTypeBlobIndex),
                                              BlobStr(59, 123456, 999));
  const stl_wrappers::KVMap::value_type blob3(KeyStr("c", 3U, kTypeBlobIndex),
                                              BlobStr(138, 1000, 1 << 8));
  auto file1 = mock::MakeMockFile({blob1, blob2, blob3});
  AddMockFile(file1);

  const stl_wrappers::KVMap::value_type blob4(KeyStr("d", 4U, kTypeBlobIndex),
                                              BlobStr(199, 3 << 10, 1 << 20));
  // blob5 references blob file 19 — the smallest (i.e. oldest) eligible blob
  // file number among the inputs, which the compaction is expected to report
  // at the bottom of the test.
  const stl_wrappers::KVMap::value_type blob5(KeyStr("e", 5U, kTypeBlobIndex),
                                              BlobStr(19, 6789, 333));
  const stl_wrappers::KVMap::value_type blob6(
      KeyStr("f", 6U, kTypeBlobIndex),
      BlobStrTTL(5, 2048, 1 << 7, 1234567890ULL));
  auto file2 = mock::MakeMockFile({blob4, blob5, blob6});
  AddMockFile(file2);

  // The blob references themselves pass through the compaction unchanged;
  // only the sequence numbers are zeroed out in the output.
  const stl_wrappers::KVMap::value_type expected_blob1(
      KeyStr("a", 0U, kTypeBlobIndex), blob1.second);
  const stl_wrappers::KVMap::value_type expected_blob2(
      KeyStr("b", 0U, kTypeBlobIndex), blob2.second);
  const stl_wrappers::KVMap::value_type expected_blob3(
      KeyStr("c", 0U, kTypeBlobIndex), blob3.second);
  const stl_wrappers::KVMap::value_type expected_blob4(
      KeyStr("d", 0U, kTypeBlobIndex), blob4.second);
  const stl_wrappers::KVMap::value_type expected_blob5(
      KeyStr("e", 0U, kTypeBlobIndex), blob5.second);
  const stl_wrappers::KVMap::value_type expected_blob6(
      KeyStr("f", 0U, kTypeBlobIndex), blob6.second);
  auto expected_results =
      mock::MakeMockFile({expected_blob1, expected_blob2, expected_blob3,
                          expected_blob4, expected_blob5, expected_blob6});

  SetLastSequence(6U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  // Expect oldest referenced (non-TTL, non-inlined) blob file number == 19.
  RunCompaction({files}, {input_level}, {expected_results},
                std::vector<SequenceNumber>(), kMaxSequenceNumber,
                /* output_level */ 1, /* verify */ true,
                /* expected_oldest_blob_file_numbers */ {19});
}
|
|
|
|
|
2022-07-14 03:54:49 +00:00
|
|
|
TEST_F(CompactionJobTest, VerifyPenultimateLevelOutput) {
  // Make the last level cold so per-key placement between the penultimate
  // level and the last level is in play for this compaction.
  cf_options_.last_level_temperature = Temperature::kCold;
  // Force per-key placement support regardless of the mock compaction setup.
  SyncPoint::GetInstance()->SetCallBack(
      "Compaction::SupportsPerKeyPlacement:Enabled", [&](void* arg) {
        auto supports_per_key_placement = static_cast<bool*>(arg);
        *supports_per_key_placement = true;
      });

  // Threshold sequence number: entries with seq_num greater than this are
  // routed to the penultimate level. Left at 0, so every live key goes there.
  std::atomic_uint64_t latest_cold_seq = 0;

  SyncPoint::GetInstance()->SetCallBack(
      "CompactionIterator::PrepareOutput.context", [&](void* arg) {
        auto context = static_cast<PerKeyPlacementContext*>(arg);
        context->output_to_penultimate_level =
            context->seq_num > latest_cold_seq;
      });
  SyncPoint::GetInstance()->EnableProcessing();

  NewDB();

  // Add files on different levels that may overlap
  auto file0_1 = mock::MakeMockFile({{KeyStr("z", 12U, kTypeValue), "val"}});
  AddMockFile(file0_1);

  auto file1_1 = mock::MakeMockFile({{KeyStr("b", 10U, kTypeValue), "val"},
                                     {KeyStr("f", 11U, kTypeValue), "val"}});
  AddMockFile(file1_1, 1);
  auto file1_2 = mock::MakeMockFile({{KeyStr("j", 12U, kTypeValue), "val"},
                                     {KeyStr("k", 13U, kTypeValue), "val"}});
  AddMockFile(file1_2, 1);
  auto file1_3 = mock::MakeMockFile({{KeyStr("p", 14U, kTypeValue), "val"},
                                     {KeyStr("u", 15U, kTypeValue), "val"}});
  AddMockFile(file1_3, 1);

  auto file2_1 = mock::MakeMockFile({{KeyStr("f", 8U, kTypeValue), "val"},
                                     {KeyStr("h", 9U, kTypeValue), "val"}});
  AddMockFile(file2_1, 2);
  auto file2_2 = mock::MakeMockFile({{KeyStr("m", 6U, kTypeValue), "val"},
                                     {KeyStr("p", 7U, kTypeValue), "val"}});
  AddMockFile(file2_2, 2);

  auto file3_1 = mock::MakeMockFile({{KeyStr("g", 2U, kTypeValue), "val"},
                                     {KeyStr("k", 3U, kTypeValue), "val"}});
  AddMockFile(file3_1, 3);
  auto file3_2 = mock::MakeMockFile({{KeyStr("v", 4U, kTypeValue), "val"},
                                     {KeyStr("x", 5U, kTypeValue), "val"}});
  AddMockFile(file3_2, 3);

  // Compact all four levels (L0-L3) into the last level.
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  const std::vector<int> input_levels = {0, 1, 2, 3};
  auto files0 = cfd->current()->storage_info()->LevelFiles(input_levels[0]);
  auto files1 = cfd->current()->storage_info()->LevelFiles(input_levels[1]);
  auto files2 = cfd->current()->storage_info()->LevelFiles(input_levels[2]);
  auto files3 = cfd->current()->storage_info()->LevelFiles(input_levels[3]);

  RunLastLevelCompaction(
      {files0, files1, files2, files3}, input_levels,
      /*verify_func=*/[&](Compaction& comp) {
        // The compaction's key span is "b".."z", so the penultimate-level
        // output range should contain every letter except "a".
        for (char c = 'a'; c <= 'z'; c++) {
          if (c == 'a') {
            ParsedInternalKey pik("a", 0U, kTypeValue);
            ASSERT_FALSE(comp.WithinPenultimateLevelOutputRange(pik));
          } else {
            std::string c_str{c};
            // WithinPenultimateLevelOutputRange checks internal key range.
            // 'z' is the last key, so set seqno properly.
            ParsedInternalKey pik(c_str, c == 'z' ? 12U : 0U, kTypeValue);
            ASSERT_TRUE(comp.WithinPenultimateLevelOutputRange(pik));
          }
        }
      });
}
|
|
|
|
|
2022-05-16 22:44:59 +00:00
|
|
|
// With enforce_single_del_contracts disabled, a SingleDelete stacked on top
// of a regular Delete for the same key must not fail the compaction; both
// tombstones are simply compacted away, producing an empty output.
TEST_F(CompactionJobTest, NoEnforceSingleDeleteContract) {
  db_options_.enforce_single_del_contracts = false;
  NewDB();

  // SingleDelete over Delete for key "a" — a contract violation when
  // enforcement is on.
  auto input_file =
      mock::MakeMockFile({{KeyStr("a", 4U, kTypeSingleDeletion), ""},
                          {KeyStr("a", 3U, kTypeDeletion), "dontcare"}});
  AddMockFile(input_file);
  SetLastSequence(4U);

  // Everything should be dropped, so the expected output file is empty.
  auto empty_expected = mock::MakeMockFile();
  constexpr int kInputLevel = 0;
  auto level_files = cfd_->current()->storage_info()->LevelFiles(kInputLevel);
  RunCompaction({level_files}, {kInputLevel}, {empty_expected});
}
|
|
|
|
|
2021-05-12 19:34:22 +00:00
|
|
|
// Exercises serialization/deserialization of CompactionServiceInput:
// round trip, mismatch reporting, unknown-field tolerance, missing-field
// detection, and rejection of an unsupported data version.
TEST_F(CompactionJobTest, InputSerialization) {
  // Populate a CompactionServiceInput with randomized contents.
  constexpr int kStrMaxLen = 1000;
  CompactionServiceInput input;
  Random rand32(static_cast<uint32_t>(time(nullptr)));
  Random64 rand64(time(nullptr));
  input.cf_name = rand32.RandomString(rand32.Uniform(kStrMaxLen));
  while (!rand32.OneIn(10)) {
    input.snapshots.emplace_back(rand64.Uniform(UINT64_MAX));
  }
  while (!rand32.OneIn(10)) {
    // An input file name must contain at least one character.
    input.input_files.emplace_back(
        rand32.RandomString(rand32.Uniform(kStrMaxLen - 1) + 1));
  }
  input.output_level = 4;
  input.has_begin = rand32.OneIn(2);
  if (input.has_begin) {
    input.begin = rand32.RandomBinaryString(rand32.Uniform(kStrMaxLen));
  }
  input.has_end = rand32.OneIn(2);
  if (input.has_end) {
    input.end = rand32.RandomBinaryString(rand32.Uniform(kStrMaxLen));
  }

  std::string output;
  ASSERT_OK(input.Write(&output));

  // Round trip: deserializing the bytes must reproduce the original.
  CompactionServiceInput round_trip;
  ASSERT_OK(CompactionServiceInput::Read(output, &round_trip));
  ASSERT_TRUE(round_trip.TEST_Equals(&input));

  // A field mismatch must be detected and reported by name.
  round_trip.output_level += 10;
  std::string mismatch;
  ASSERT_FALSE(round_trip.TEST_Equals(&input, &mismatch));
  ASSERT_EQ(mismatch, "output_level");

  // An unknown trailing field must be ignored (forward compatibility).
  CompactionServiceInput with_unknown_field;
  output.clear();
  ASSERT_OK(input.Write(&output));
  output.append("new_field=123;");

  ASSERT_OK(CompactionServiceInput::Read(output, &with_unknown_field));
  ASSERT_TRUE(with_unknown_field.TEST_Equals(&input));

  // A missing field keeps its pre-set value, which then fails comparison.
  CompactionServiceInput missing_field;
  missing_field.output_level = 0;
  const std::string to_remove = "output_level=4;";
  const size_t pos = output.find(to_remove);
  ASSERT_TRUE(pos != std::string::npos);
  output.erase(pos, to_remove.length());
  ASSERT_OK(CompactionServiceInput::Read(output, &missing_field));
  mismatch.clear();
  ASSERT_FALSE(missing_field.TEST_Equals(&input, &mismatch));
  ASSERT_EQ(mismatch, "output_level");

  // Restoring the value manually makes the structures compare equal again.
  missing_field.output_level = 4;
  ASSERT_TRUE(missing_field.TEST_Equals(&input));

  // An unrecognized data version must be rejected as NotSupported.
  output.clear();
  ASSERT_OK(input.Write(&output));

  const uint32_t data_version = DecodeFixed32(output.data());
  const size_t kDataVersionSize = sizeof(data_version);
  ASSERT_EQ(data_version,
            1U);  // Update once the default data version is changed
  char buf[kDataVersionSize];
  EncodeFixed32(buf, data_version + 10);  // make sure it's not valid
  output.replace(0, kDataVersionSize, buf, kDataVersionSize);
  const Status s = CompactionServiceInput::Read(output, &missing_field);
  ASSERT_TRUE(s.IsNotSupported());
}
|
|
|
|
|
|
|
|
// Round-trips a randomized CompactionServiceResult through Write()/Read() and
// verifies: exact equality after deserialization, mismatch reporting via
// TEST_Equals, tolerance of unknown fields, detection of missing fields, and
// rejection of an unsupported data version.
TEST_F(CompactionJobTest, ResultSerialization) {
  // Setup a random CompactionServiceResult
  CompactionServiceResult result;
  const int kStrMaxLen = 1000;
  // NOTE: a single Random/Random64 stream feeds every field below, so the
  // order of the calls determines the generated values.
  Random rnd(static_cast<uint32_t>(time(nullptr)));
  Random64 rnd64(time(nullptr));
  std::vector<Status> status_list = {
      Status::OK(),
      Status::InvalidArgument("invalid option"),
      Status::Aborted("failed to run"),
      Status::NotSupported("not supported option"),
  };
  result.status =
      status_list.at(rnd.Uniform(static_cast<int>(status_list.size())));

  std::string file_checksum = rnd.RandomBinaryString(rnd.Uniform(kStrMaxLen));
  std::string file_checksum_func_name = "MyAwesomeChecksumGenerator";
  // Generate a random number of output files (geometric: stop with p=1/10).
  while (!rnd.OneIn(10)) {
    TableProperties tp;
    tp.user_collected_properties.emplace(
        "UCP_Key1", rnd.RandomString(rnd.Uniform(kStrMaxLen)));
    tp.user_collected_properties.emplace(
        "UCP_Key2", rnd.RandomString(rnd.Uniform(kStrMaxLen)));
    tp.readable_properties.emplace("RP_Key1",
                                   rnd.RandomString(rnd.Uniform(kStrMaxLen)));
    tp.readable_properties.emplace("RP_K2y2",
                                   rnd.RandomString(rnd.Uniform(kStrMaxLen)));

    UniqueId64x2 id{rnd64.Uniform(UINT64_MAX), rnd64.Uniform(UINT64_MAX)};
    result.output_files.emplace_back(
        rnd.RandomString(rnd.Uniform(kStrMaxLen)) /* file_name */,
        rnd64.Uniform(UINT64_MAX) /* smallest_seqno */,
        rnd64.Uniform(UINT64_MAX) /* largest_seqno */,
        rnd.RandomBinaryString(
            rnd.Uniform(kStrMaxLen)) /* smallest_internal_key */,
        rnd.RandomBinaryString(
            rnd.Uniform(kStrMaxLen)) /* largest_internal_key */,
        rnd64.Uniform(UINT64_MAX) /* oldest_ancester_time */,
        rnd64.Uniform(UINT64_MAX) /* file_creation_time */,
        rnd64.Uniform(UINT64_MAX) /* epoch_number */,
        file_checksum /* file_checksum */,
        file_checksum_func_name /* file_checksum_func_name */,
        rnd64.Uniform(UINT64_MAX) /* paranoid_hash */,
        rnd.OneIn(2) /* marked_for_compaction */, id /* unique_id */, tp);
  }
  result.output_level = rnd.Uniform(10);
  result.output_path = rnd.RandomString(rnd.Uniform(kStrMaxLen));
  result.stats.num_output_records = rnd64.Uniform(UINT64_MAX);
  // bytes_read is fixed at 123 so the "missing field" test below can find
  // and remove the exact substring "bytes_read=123;" from the serialization.
  result.bytes_read = 123;
  result.bytes_written = rnd64.Uniform(UINT64_MAX);
  result.stats.elapsed_micros = rnd64.Uniform(UINT64_MAX);
  result.stats.num_output_files = rnd.Uniform(1000);
  result.stats.is_full_compaction = rnd.OneIn(2);
  result.stats.num_single_del_mismatch = rnd64.Uniform(UINT64_MAX);
  result.stats.num_input_files = 9;

  std::string output;
  ASSERT_OK(result.Write(&output));

  // Test deserialization
  CompactionServiceResult deserialized1;
  ASSERT_OK(CompactionServiceResult::Read(output, &deserialized1));
  ASSERT_TRUE(deserialized1.TEST_Equals(&result));

  // Table properties are checked entry-by-entry in addition to TEST_Equals.
  for (size_t i = 0; i < result.output_files.size(); i++) {
    for (const auto& prop :
         result.output_files[i].table_properties.user_collected_properties) {
      ASSERT_EQ(deserialized1.output_files[i]
                    .table_properties.user_collected_properties[prop.first],
                prop.second);
    }
    for (const auto& prop :
         result.output_files[i].table_properties.readable_properties) {
      ASSERT_EQ(deserialized1.output_files[i]
                    .table_properties.readable_properties[prop.first],
                prop.second);
    }
  }

  // Test mismatch
  deserialized1.stats.num_input_files += 10;
  std::string mismatch;
  ASSERT_FALSE(deserialized1.TEST_Equals(&result, &mismatch));
  ASSERT_EQ(mismatch, "stats.num_input_files");

  // Test unique id mismatch
  if (!result.output_files.empty()) {
    CompactionServiceResult deserialized_tmp;
    ASSERT_OK(CompactionServiceResult::Read(output, &deserialized_tmp));
    deserialized_tmp.output_files[0].unique_id[0] += 1;
    ASSERT_FALSE(deserialized_tmp.TEST_Equals(&result, &mismatch));
    ASSERT_EQ(mismatch, "output_files.unique_id");
    // The status was never examined on this copy; mark it checked so the
    // Status destructor does not complain in assertion-enabled builds.
    deserialized_tmp.status.PermitUncheckedError();

    ASSERT_EQ(deserialized_tmp.output_files[0].file_checksum, file_checksum);
    ASSERT_EQ(deserialized_tmp.output_files[0].file_checksum_func_name,
              file_checksum_func_name);
  }

  // Test unknown field
  CompactionServiceResult deserialized2;
  output.clear();
  ASSERT_OK(result.Write(&output));
  // An unrecognized trailing field must be ignored by Read().
  output.append("new_field=123;");
  ASSERT_OK(CompactionServiceResult::Read(output, &deserialized2));
  ASSERT_TRUE(deserialized2.TEST_Equals(&result));

  // Test missing field
  CompactionServiceResult deserialized3;
  deserialized3.bytes_read = 0;
  std::string to_remove = "bytes_read=123;";
  size_t pos = output.find(to_remove);
  ASSERT_TRUE(pos != std::string::npos);
  output.erase(pos, to_remove.length());
  ASSERT_OK(CompactionServiceResult::Read(output, &deserialized3));
  mismatch.clear();
  ASSERT_FALSE(deserialized3.TEST_Equals(&result, &mismatch));
  ASSERT_EQ(mismatch, "bytes_read");

  // Manually restoring the removed value should make the structures match.
  deserialized3.bytes_read = 123;
  ASSERT_TRUE(deserialized3.TEST_Equals(&result));

  // Test invalid version
  output.clear();
  ASSERT_OK(result.Write(&output));

  // The serialization starts with a fixed32 data version; corrupt it in
  // place and expect Read() to refuse with NotSupported.
  uint32_t data_version = DecodeFixed32(output.data());
  const size_t kDataVersionSize = sizeof(data_version);
  ASSERT_EQ(data_version,
            1U);  // Update once the default data version is changed
  char buf[kDataVersionSize];
  EncodeFixed32(buf, data_version + 10);  // make sure it's not valid
  output.replace(0, kDataVersionSize, buf, kDataVersionSize);
  Status s = CompactionServiceResult::Read(output, &deserialized3);
  ASSERT_TRUE(s.IsNotSupported());
  // Statuses in status_list were copied, not consumed; mark them checked.
  for (const auto& item : status_list) {
    item.PermitUncheckedError();
  }
}
|
|
|
|
|
2024-02-02 23:37:40 +00:00
|
|
|
// Verifies that an L0+L1 compaction output is cut so that no single L1 output
// file overlaps more than max_compaction_bytes of grandparent (L2) data.
TEST_F(CompactionJobTest, CutForMaxCompactionBytes) {
  // dynamic_file_size option should have no impact on cutting for max
  // compaction bytes.
  NewDB();
  mutable_cf_options_.target_file_size_base = 80;
  mutable_cf_options_.max_compaction_bytes = 21;

  auto l0_file = mock::MakeMockFile({
      {KeyStr("c", 5U, kTypeValue), "val2"},
      {KeyStr("n", 6U, kTypeValue), "val3"},
  });
  AddMockFile(l0_file);

  auto l1_file = mock::MakeMockFile({{KeyStr("h", 3U, kTypeValue), "val"},
                                     {KeyStr("j", 4U, kTypeValue), "val"}});
  AddMockFile(l1_file, 1);

  // Three grandparent (L2) files, each of size 10. With
  // max_compaction_bytes = 21 the L1 compaction output has to be split into
  // at least two files.
  auto l2_file_a = mock::MakeMockFile({{KeyStr("b", 1U, kTypeValue), "val"},
                                       {KeyStr("c", 1U, kTypeValue), "val"},
                                       {KeyStr("c1", 1U, kTypeValue), "val"},
                                       {KeyStr("c2", 1U, kTypeValue), "val"},
                                       {KeyStr("c3", 1U, kTypeValue), "val"},
                                       {KeyStr("c4", 1U, kTypeValue), "val"},
                                       {KeyStr("d", 1U, kTypeValue), "val"},
                                       {KeyStr("e", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_a, 2);

  auto l2_file_b = mock::MakeMockFile({{KeyStr("h", 1U, kTypeValue), "val"},
                                       {KeyStr("i", 1U, kTypeValue), "val"},
                                       {KeyStr("i1", 1U, kTypeValue), "val"},
                                       {KeyStr("i2", 1U, kTypeValue), "val"},
                                       {KeyStr("i3", 1U, kTypeValue), "val"},
                                       {KeyStr("i4", 1U, kTypeValue), "val"},
                                       {KeyStr("j", 1U, kTypeValue), "val"},
                                       {KeyStr("k", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_b, 2);

  auto l2_file_c = mock::MakeMockFile({{KeyStr("l", 1U, kTypeValue), "val"},
                                       {KeyStr("m", 1U, kTypeValue), "val"},
                                       {KeyStr("m1", 1U, kTypeValue), "val"},
                                       {KeyStr("m2", 1U, kTypeValue), "val"},
                                       {KeyStr("m3", 1U, kTypeValue), "val"},
                                       {KeyStr("m4", 1U, kTypeValue), "val"},
                                       {KeyStr("n", 1U, kTypeValue), "val"},
                                       {KeyStr("o", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_c, 2);

  // Expected layout after compaction:
  //   L1: [c, h, j] [n]
  //   L2: [b ... e] [h ... k] [l ... o]
  // "j" belongs in the first output file because it already overlaps the
  // second L2 file anyway.
  // (Note: before this change the cut happened at "h", because the internal
  // comparator considers L1 "h"@seqno3 smaller than L2 "h"@seqno1, even
  // though the compaction picker treats them as overlapping.)
  auto expected_out1 =
      mock::MakeMockFile({{KeyStr("c", 5U, kTypeValue), "val2"},
                          {KeyStr("h", 3U, kTypeValue), "val"},
                          {KeyStr("j", 4U, kTypeValue), "val"}});
  auto expected_out2 =
      mock::MakeMockFile({{KeyStr("n", 6U, kTypeValue), "val3"}});

  SetLastSequence(6U);

  const std::vector<int> compaction_input_levels = {0, 1};
  auto level0_inputs = cfd_->current()->storage_info()->LevelFiles(0);
  auto level1_inputs = cfd_->current()->storage_info()->LevelFiles(1);

  RunCompaction({level0_inputs, level1_inputs}, compaction_input_levels,
                {expected_out1, expected_out2});
}
|
|
|
|
|
2024-02-02 23:37:40 +00:00
|
|
|
// Verifies that the compaction output is cut early so that a small,
// non-overlapped grandparent (L2) file can be skipped entirely by future
// compactions.
TEST_F(CompactionJobTest, CutToSkipGrandparentFile) {
  NewDB();
  // Make sure the grandparent level file size (10) qualifies skipping.
  // Currently, it has to be > 1/8 of target file size.
  mutable_cf_options_.target_file_size_base = 70;

  auto l0_file = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeValue), "val2"},
      {KeyStr("z", 6U, kTypeValue), "val3"},
  });
  AddMockFile(l0_file);

  auto l1_file = mock::MakeMockFile({{KeyStr("c", 3U, kTypeValue), "val"},
                                     {KeyStr("x", 4U, kTypeValue), "val"}});
  AddMockFile(l1_file, 1);

  auto l2_file_a = mock::MakeMockFile({{KeyStr("b", 1U, kTypeValue), "val"},
                                       {KeyStr("d", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_a, 2);

  auto l2_file_b = mock::MakeMockFile({{KeyStr("h", 1U, kTypeValue), "val"},
                                       {KeyStr("i", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_b, 2);

  auto l2_file_c = mock::MakeMockFile({{KeyStr("v", 1U, kTypeValue), "val"},
                                       {KeyStr("y", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_c, 2);

  // The output is expected to be cut between "c" and "x" so that the middle
  // L2 file [h, i] is not overlapped by either output file.
  auto expected_out1 =
      mock::MakeMockFile({{KeyStr("a", 5U, kTypeValue), "val2"},
                          {KeyStr("c", 3U, kTypeValue), "val"}});
  auto expected_out2 =
      mock::MakeMockFile({{KeyStr("x", 4U, kTypeValue), "val"},
                          {KeyStr("z", 6U, kTypeValue), "val3"}});

  SetLastSequence(6U);
  const std::vector<int> compaction_input_levels = {0, 1};
  auto level0_inputs = cfd_->current()->storage_info()->LevelFiles(0);
  auto level1_inputs = cfd_->current()->storage_info()->LevelFiles(1);
  RunCompaction({level0_inputs, level1_inputs}, compaction_input_levels,
                {expected_out1, expected_out2});
}
|
|
|
|
|
2024-02-02 23:37:40 +00:00
|
|
|
// Verifies that a compaction output file is cut earlier than the target file
// size in order to align its end key with a grandparent (L2) file boundary.
TEST_F(CompactionJobTest, CutToAlignGrandparentBoundary) {
  NewDB();

  // MockTable has 1 byte per entry by default and each file is 10 bytes.
  // When the file size is smaller than 100, it won't cut file earlier to align
  // with its grandparent boundary.
  const size_t kKeyValueSize = 10000;
  mock_table_factory_->SetKeyValueSize(kKeyValueSize);

  mutable_cf_options_.target_file_size_base = 10 * kKeyValueSize;

  // Build an L0 file covering keys d -> o (12 consecutive keys).
  mock::KVVector l0_file;
  char base_key = 'd';
  for (char offset = 0; offset < 12; offset++) {
    l0_file.emplace_back(
        KeyStr(std::string(1, base_key + offset), offset + 10, kTypeValue),
        "val" + std::to_string(offset));
  }

  AddMockFile(l0_file);

  auto l1_file = mock::MakeMockFile({{KeyStr("e", 3U, kTypeValue), "val"},
                                     {KeyStr("s", 4U, kTypeValue), "val"}});
  AddMockFile(l1_file, 1);

  // the 1st grandparent file should be skipped
  auto l2_file_a = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"},
                                       {KeyStr("b", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_a, 2);

  auto l2_file_b = mock::MakeMockFile({{KeyStr("c", 1U, kTypeValue), "val"},
                                       {KeyStr("e", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_b, 2);

  auto l2_file_c = mock::MakeMockFile({{KeyStr("h", 1U, kTypeValue), "val"},
                                       {KeyStr("j", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_c, 2);

  auto l2_file_d = mock::MakeMockFile({{KeyStr("k", 1U, kTypeValue), "val"},
                                       {KeyStr("n", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_d, 2);

  auto l2_file_e = mock::MakeMockFile({{KeyStr("q", 1U, kTypeValue), "val"},
                                       {KeyStr("t", 2U, kTypeValue), "val"}});
  AddMockFile(l2_file_e, 2);

  // The expected outputs are:
  //   L1: [d,e,f,g,h,i,j] [k,l,m,n,o,s]
  //   L2: [a, b] [c, e] [h, j] [k, n] [q, t]
  // The first output cut earlier at "j", so it could be aligned with L2 files.
  // If dynamic_file_size is not enabled, it will be cut based on the
  // target_file_size.
  mock::KVVector expected_out1;
  for (char offset = 0; offset < 7; offset++) {
    expected_out1.emplace_back(
        KeyStr(std::string(1, base_key + offset), offset + 10, kTypeValue),
        "val" + std::to_string(offset));
  }

  mock::KVVector expected_out2;
  for (char offset = 7; offset < 12; offset++) {
    expected_out2.emplace_back(
        KeyStr(std::string(1, base_key + offset), offset + 10, kTypeValue),
        "val" + std::to_string(offset));
  }
  expected_out2.emplace_back(KeyStr("s", 4U, kTypeValue), "val");

  SetLastSequence(22U);
  const std::vector<int> compaction_input_levels = {0, 1};
  auto level0_inputs = cfd_->current()->storage_info()->LevelFiles(0);
  auto level1_inputs = cfd_->current()->storage_info()->LevelFiles(1);
  RunCompaction({level0_inputs, level1_inputs}, compaction_input_levels,
                {expected_out1, expected_out2});
}
|
|
|
|
|
2024-02-02 23:37:40 +00:00
|
|
|
TEST_F(CompactionJobTest, CutToAlignGrandparentBoundarySameKey) {
  NewDB();

  // MockTable has 1 byte per entry by default and each file is 10 bytes.
  // When the file size is smaller than 100, it won't cut file earlier to align
  // with its grandparent boundary.
  const size_t kKeyValueSize = 10000;
  mock_table_factory_->SetKeyValueSize(kKeyValueSize);

  mutable_cf_options_.target_file_size_base = 10 * kKeyValueSize;

  // L0 input: seven versions of "a" (seqno 100 down to 94) plus "b"@90.
  mock::KVVector level0_kvs;
  for (int seq = 100; seq >= 94; --seq) {
    level0_kvs.emplace_back(KeyStr("a", seq, kTypeValue),
                            "val" + std::to_string(seq));
  }
  level0_kvs.emplace_back(KeyStr("b", 90, kTypeValue), "valb");
  AddMockFile(level0_kvs);

  // L1 input overlaps both user keys.
  AddMockFile(mock::MakeMockFile({{KeyStr("a", 93U, kTypeValue), "val93"},
                                  {KeyStr("b", 90U, kTypeValue), "valb"}}),
              1);

  // Grandparent (L2) layout: two files covering "a", one covering "b".
  AddMockFile(mock::MakeMockFile({{KeyStr("a", 89U, kTypeValue), "val"},
                                  {KeyStr("a", 88U, kTypeValue), "val"}}),
              2);
  AddMockFile(mock::MakeMockFile({{KeyStr("a", 87U, kTypeValue), "val"},
                                  {KeyStr("a", 86U, kTypeValue), "val"}}),
              2);
  AddMockFile(mock::MakeMockFile({{KeyStr("b", 85U, kTypeValue), "val"},
                                  {KeyStr("b", 84U, kTypeValue), "val"}}),
              2);

  // Expected first output: all eight versions of "a" (seqno 100..93).
  mock::KVVector expected_file1;
  for (int seq = 100; seq >= 93; --seq) {
    expected_file1.emplace_back(KeyStr("a", seq, kTypeValue),
                                "val" + std::to_string(seq));
  }

  // make sure `b` is cut in a separated file (so internally it's not using
  // internal comparator, which will think the "b:90" (seqno 90) here is smaller
  // than "b:85" on L2.)
  auto expected_file2 =
      mock::MakeMockFile({{KeyStr("b", 90U, kTypeValue), "valb"}});

  SetLastSequence(122U);
  const std::vector<int> compaction_input_levels = {0, 1};
  auto* vstorage = cfd_->current()->storage_info();
  auto level0_files = vstorage->LevelFiles(0);
  auto level1_files = vstorage->LevelFiles(1);

  // Keep every version alive by snapshotting the whole seqno range.
  std::vector<SequenceNumber> snapshots;
  for (SequenceNumber snap = 80; snap <= 100; ++snap) {
    snapshots.emplace_back(snap);
  }
  RunCompaction({level0_files, level1_files}, compaction_input_levels,
                {expected_file1, expected_file2}, snapshots);
}
|
|
|
|
|
2024-02-02 23:37:40 +00:00
|
|
|
TEST_F(CompactionJobTest, CutForMaxCompactionBytesSameKey) {
  // dynamic_file_size option should have no impact on cutting for max
  // compaction bytes.

  NewDB();
  mutable_cf_options_.target_file_size_base = 80;
  // Tiny limit so overlapping grandparent bytes force output cuts.
  mutable_cf_options_.max_compaction_bytes = 20;

  // L0 and L1 inputs together cover user keys "a", "b" and "c".
  auto file1 = mock::MakeMockFile({{KeyStr("a", 104U, kTypeValue), "val1"},
                                   {KeyStr("b", 103U, kTypeValue), "val"}});
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("a", 102U, kTypeValue), "val2"},
                                   {KeyStr("c", 101U, kTypeValue), "val"}});
  AddMockFile(file2, 1);

  // 10 grandparent (L2) files overlapping key "a".
  for (int i = 0; i < 10; i++) {
    auto file =
        mock::MakeMockFile({{KeyStr("a", 100 - (i * 2), kTypeValue), "val"},
                            {KeyStr("a", 99 - (i * 2), kTypeValue), "val"}});
    AddMockFile(file, 2);
  }

  // 10 grandparent (L2) files overlapping key "b".
  for (int i = 0; i < 10; i++) {
    auto file =
        mock::MakeMockFile({{KeyStr("b", 80 - (i * 2), kTypeValue), "val"},
                            {KeyStr("b", 79 - (i * 2), kTypeValue), "val"}});
    AddMockFile(file, 2);
  }

  auto file5 = mock::MakeMockFile({{KeyStr("c", 60U, kTypeValue), "valc"},
                                   {KeyStr("c", 59U, kTypeValue), "valc"}});
  // BUGFIX: file5 was constructed but never registered, leaving "c" with no
  // overlapping grandparent file, so the "c cut separately" expectation below
  // was not actually exercised against grandparent overlap.
  AddMockFile(file5, 2);

  // "a" has 10 overlapped grandparent files (each size 10), which is far
  // exceeded the `max_compaction_bytes`, but make sure 2 "a" are not separated,
  // as splitting them won't help reducing the compaction size.
  // also make sure "b" and "c" are cut separately.
  mock::KVVector expected_file1 =
      mock::MakeMockFile({{KeyStr("a", 104U, kTypeValue), "val1"},
                          {KeyStr("a", 102U, kTypeValue), "val2"}});
  mock::KVVector expected_file2 =
      mock::MakeMockFile({{KeyStr("b", 103U, kTypeValue), "val"}});
  mock::KVVector expected_file3 =
      mock::MakeMockFile({{KeyStr("c", 101U, kTypeValue), "val"}});

  SetLastSequence(122U);
  const std::vector<int> input_levels = {0, 1};
  auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
  auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);

  // Just keep all the history
  std::vector<SequenceNumber> snapshots;
  for (int i = 80; i <= 105; i++) {
    snapshots.emplace_back(i);
  }
  RunCompaction({lvl0_files, lvl1_files}, input_levels,
                {expected_file1, expected_file2, expected_file3}, snapshots);
}
|
|
|
|
|
2020-11-12 19:40:52 +00:00
|
|
|
// Fixture for compaction tests using a user-defined (u64) timestamp
// comparator with the mock table format. Individual tests set
// full_history_ts_low_ to control how much timestamp history compaction
// retains (left unset, no timestamp-based GC is requested).
class CompactionJobTimestampTest : public CompactionJobTestBase {
 public:
  CompactionJobTimestampTest()
      : CompactionJobTestBase(test::PerThreadDBPath("compaction_job_ts_test"),
                              test::BytewiseComparatorWithU64TsWrapper(),
                              test::EncodeInt, /*test_io_priority=*/false,
                              TableTypeForTest::kMockTable) {}
};
|
|
|
|
|
|
|
|
// full_history_ts_low_ is not set in this test, so the compaction performs no
// timestamp-based garbage collection: every version and deletion marker from
// the inputs must appear in the output, merged into user-key order.
TEST_F(CompactionJobTimestampTest, GCDisabled) {
  NewDB();

  auto file1 =
      mock::MakeMockFile({{KeyStr("a", 10, ValueType::kTypeValue, 100), "a10"},
                          {KeyStr("a", 9, ValueType::kTypeValue, 99), "a9"},
                          {KeyStr("b", 8, ValueType::kTypeValue, 98), "b8"},
                          {KeyStr("d", 7, ValueType::kTypeValue, 97), "d7"}});
  AddMockFile(file1);

  // Older file with timestamped deletions for "b"/"c" and a single deletion
  // for "d". NOTE(review): the value "c5" paired with c@seq4 looks like a
  // naming slip but is intentional — the expected output below matches it.
  auto file2 = mock::MakeMockFile(
      {{KeyStr("b", 6, ValueType::kTypeDeletionWithTimestamp, 96), ""},
       {KeyStr("c", 5, ValueType::kTypeDeletionWithTimestamp, 95), ""},
       {KeyStr("c", 4, ValueType::kTypeValue, 94), "c5"},
       {KeyStr("d", 3, ValueType::kTypeSingleDeletion, 93), ""}});
  AddMockFile(file2);

  SetLastSequence(10);

  // All entries survive, ordered by user key then descending seqno.
  auto expected_results = mock::MakeMockFile(
      {{KeyStr("a", 10, ValueType::kTypeValue, 100), "a10"},
       {KeyStr("a", 9, ValueType::kTypeValue, 99), "a9"},
       {KeyStr("b", 8, ValueType::kTypeValue, 98), "b8"},
       {KeyStr("b", 6, ValueType::kTypeDeletionWithTimestamp, 96), ""},
       {KeyStr("c", 5, ValueType::kTypeDeletionWithTimestamp, 95), ""},
       {KeyStr("c", 4, ValueType::kTypeValue, 94), "c5"},
       {KeyStr("d", 7, ValueType::kTypeValue, 97), "d7"},
       {KeyStr("d", 3, ValueType::kTypeSingleDeletion, 93), ""}});
  constexpr int input_level = 0;
  const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}
|
|
|
|
|
|
|
|
// full_history_ts_low_ is set to 0, below every write timestamp here, so no
// version falls out of the retained history window and all entries survive.
TEST_F(CompactionJobTimestampTest, NoKeyExpired) {
  NewDB();

  auto file1 =
      mock::MakeMockFile({{KeyStr("a", 6, ValueType::kTypeValue, 100), "a6"},
                          {KeyStr("b", 7, ValueType::kTypeValue, 101), "b7"},
                          {KeyStr("c", 5, ValueType::kTypeValue, 99), "c5"}});
  AddMockFile(file1);

  auto file2 =
      mock::MakeMockFile({{KeyStr("a", 4, ValueType::kTypeValue, 98), "a4"},
                          {KeyStr("c", 3, ValueType::kTypeValue, 97), "c3"}});
  AddMockFile(file2);

  SetLastSequence(101);

  // Both versions of "a" and "c" are kept, interleaved in key order.
  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 6, ValueType::kTypeValue, 100), "a6"},
                          {KeyStr("a", 4, ValueType::kTypeValue, 98), "a4"},
                          {KeyStr("b", 7, ValueType::kTypeValue, 101), "b7"},
                          {KeyStr("c", 5, ValueType::kTypeValue, 99), "c5"},
                          {KeyStr("c", 3, ValueType::kTypeValue, 97), "c3"}});
  constexpr int input_level = 0;
  const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);

  full_history_ts_low_ = encode_u64_ts_(0);
  RunCompaction({files}, {input_level}, {expected_results});
}
|
|
|
|
|
|
|
|
// full_history_ts_low_ is set to uint64 max, so every timestamp is below the
// retained-history threshold: deletions take effect and are dropped, and the
// single surviving value has its seqno and timestamp stripped to 0.
TEST_F(CompactionJobTimestampTest, AllKeysExpired) {
  NewDB();

  auto file1 = mock::MakeMockFile(
      {{KeyStr("a", 5, ValueType::kTypeDeletionWithTimestamp, 100), ""},
       {KeyStr("b", 6, ValueType::kTypeSingleDeletion, 99), ""},
       {KeyStr("c", 7, ValueType::kTypeValue, 98), "c7"}});
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile(
      {{KeyStr("a", 4, ValueType::kTypeValue, 97), "a4"},
       {KeyStr("b", 3, ValueType::kTypeValue, 96), "b3"},
       {KeyStr("c", 2, ValueType::kTypeDeletionWithTimestamp, 95), ""},
       {KeyStr("c", 1, ValueType::kTypeValue, 94), "c1"}});
  AddMockFile(file2);

  SetLastSequence(7);

  // "a" and "b" are fully deleted; only the newest "c" survives, rewritten
  // with (seqno 0, ts 0).
  auto expected_results =
      mock::MakeMockFile({{KeyStr("c", 0, ValueType::kTypeValue, 0), "c7"}});
  constexpr int input_level = 0;
  const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);

  full_history_ts_low_ = encode_u64_ts_(std::numeric_limits<uint64_t>::max());
  RunCompaction({files}, {input_level}, {expected_results});
}
|
|
|
|
|
|
|
|
// full_history_ts_low_ = 49: versions with timestamps below 49 are outside
// the retained history window. The latest below-threshold version of each key
// is kept (stripped to seqno 0 / ts 0); anything older is dropped.
TEST_F(CompactionJobTimestampTest, SomeKeysExpired) {
  NewDB();

  auto file1 =
      mock::MakeMockFile({{KeyStr("a", 5, ValueType::kTypeValue, 50), "a5"},
                          {KeyStr("b", 6, ValueType::kTypeValue, 49), "b6"}});
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile(
      {{KeyStr("a", 3, ValueType::kTypeValue, 48), "a3"},
       {KeyStr("a", 2, ValueType::kTypeValue, 46), "a2"},
       {KeyStr("b", 4, ValueType::kTypeDeletionWithTimestamp, 47), ""}});
  AddMockFile(file2);

  SetLastSequence(6);

  // "a5"@50 and "b6"@49 are at/above the threshold and kept intact; "a3"@48
  // survives as the stripped latest expired version; "a2"@46 and the expired
  // deletion of "b"@47 are dropped.
  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 5, ValueType::kTypeValue, 50), "a5"},
                          {KeyStr("a", 0, ValueType::kTypeValue, 0), "a3"},
                          {KeyStr("b", 6, ValueType::kTypeValue, 49), "b6"}});
  constexpr int input_level = 0;
  const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);

  full_history_ts_low_ = encode_u64_ts_(49);
  RunCompaction({files}, {input_level}, {expected_results});
}
|
|
|
|
|
|
|
|
// Timestamp-aware fixture backed by the real block-based table format
// instead of the mock table.
class CompactionJobTimestampTestWithBbTable : public CompactionJobTestBase {
 public:
  // Block-based table is needed if we want to test subcompaction partitioning
  // with anchors.
  explicit CompactionJobTimestampTestWithBbTable()
      : CompactionJobTestBase(
            test::PerThreadDBPath("compaction_job_ts_bbt_test"),
            test::BytewiseComparatorWithU64TsWrapper(), test::EncodeInt,
            /*test_io_priority=*/false, TableTypeForTest::kBlockBasedTable) {}
};
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTimestampTestWithBbTable, SubcompactionAnchorL1) {
  // Small target file size so the compaction produces multiple outputs.
  cf_options_.target_file_size_base = 20;
  mutable_cf_options_.target_file_size_base = 20;
  NewDB();

  // Parallel arrays: keys[i] pairs with values[i].
  const std::vector<std::string> keys = {
      KeyStr("a", 20, ValueType::kTypeValue, 200),
      KeyStr("b", 21, ValueType::kTypeValue, 210),
      KeyStr("b", 20, ValueType::kTypeValue, 200),
      KeyStr("b", 18, ValueType::kTypeValue, 180),
      KeyStr("c", 17, ValueType::kTypeValue, 170),
      KeyStr("c", 16, ValueType::kTypeValue, 160),
      KeyStr("c", 15, ValueType::kTypeValue, 150)};
  const std::vector<std::string> values = {"a20", "b21", "b20", "b18",
                                           "c17", "c16", "c15"};

  constexpr int input_level = 1;

  // Three L1 input files holding entries [0..2], [3..5] and [6].
  AddMockFile(mock::MakeMockFile({{keys[0], values[0]},
                                  {keys[1], values[1]},
                                  {keys[2], values[2]}}),
              input_level);
  AddMockFile(mock::MakeMockFile({{keys[3], values[3]},
                                  {keys[4], values[4]},
                                  {keys[5], values[5]}}),
              input_level);
  AddMockFile(mock::MakeMockFile({{keys[6], values[6]}}), input_level);

  SetLastSequence(20);

  // The same seven entries, re-partitioned across three output files.
  std::vector<mock::KVVector> expected_results = {
      mock::MakeMockFile({{keys[0], values[0]}}),
      mock::MakeMockFile(
          {{keys[1], values[1]}, {keys[2], values[2]}, {keys[3], values[3]}}),
      mock::MakeMockFile(
          {{keys[4], values[4]}, {keys[5], values[5]}, {keys[6], values[6]}})};

  const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);

  constexpr int output_level = 2;
  constexpr int max_subcompactions = 4;
  RunCompaction({files}, {input_level}, expected_results, /*snapshots=*/{},
                /*earliest_write_conflict_snapshot=*/kMaxSequenceNumber,
                output_level, /*verify=*/true, {kInvalidBlobFileNumber},
                /*check_get_priority=*/false, Env::IO_TOTAL, Env::IO_TOTAL,
                max_subcompactions);
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTimestampTestWithBbTable, SubcompactionL0) {
  // Small target file size so the compaction produces multiple outputs.
  cf_options_.target_file_size_base = 20;
  mutable_cf_options_.target_file_size_base = 20;
  NewDB();

  // Parallel arrays: keys[i] pairs with values[i].
  const std::vector<std::string> keys = {
      KeyStr("a", 20, ValueType::kTypeValue, 200),
      KeyStr("b", 20, ValueType::kTypeValue, 200),
      KeyStr("b", 19, ValueType::kTypeValue, 190),
      KeyStr("b", 18, ValueType::kTypeValue, 180),
      KeyStr("c", 17, ValueType::kTypeValue, 170),
      KeyStr("c", 16, ValueType::kTypeValue, 160),
      KeyStr("c", 15, ValueType::kTypeValue, 150)};
  const std::vector<std::string> values = {"a20", "b20", "b19", "b18",
                                           "c17", "c16", "c15"};

  constexpr int input_level = 0;

  // L0 files are added oldest-first: entries [5..6], then [3..4], then [0..2].
  AddMockFile(mock::MakeMockFile({{keys[5], values[5]}, {keys[6], values[6]}}),
              input_level);
  AddMockFile(mock::MakeMockFile({{keys[3], values[3]}, {keys[4], values[4]}}),
              input_level);
  AddMockFile(mock::MakeMockFile({{keys[0], values[0]},
                                  {keys[1], values[1]},
                                  {keys[2], values[2]}}),
              input_level);

  SetLastSequence(20);

  // The same seven entries, re-partitioned across three output files.
  std::vector<mock::KVVector> expected_results = {
      mock::MakeMockFile({{keys[0], values[0]}}),
      mock::MakeMockFile(
          {{keys[1], values[1]}, {keys[2], values[2]}, {keys[3], values[3]}}),
      mock::MakeMockFile(
          {{keys[4], values[4]}, {keys[5], values[5]}, {keys[6], values[6]}})};

  const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);

  constexpr int output_level = 1;
  constexpr int max_subcompactions = 4;
  RunCompaction({files}, {input_level}, expected_results, /*snapshots=*/{},
                /*earliest_write_conflict_snapshot=*/kMaxSequenceNumber,
                output_level, /*verify=*/true, {kInvalidBlobFileNumber},
                /*check_get_priority=*/false, Env::IO_TOTAL, Env::IO_TOTAL,
                max_subcompactions);
}
|
|
|
|
|
2022-06-07 18:57:12 +00:00
|
|
|
// The io priority of the compaction reads and writes are different from
|
|
|
|
// other DB reads and writes. To prepare the compaction input files, use the
|
|
|
|
// default filesystem from Env. To test the io priority of the compaction
|
|
|
|
// reads and writes, db_options_.fs is set as MockTestFileSystem.
|
|
|
|
class CompactionJobIOPriorityTest : public CompactionJobTestBase {
 public:
  CompactionJobIOPriorityTest()
      : CompactionJobTestBase(
            test::PerThreadDBPath("compaction_job_io_priority_test"),
            // No user-defined timestamp: the encoder maps every ts to "".
            BytewiseComparator(), [](uint64_t /*ts*/) { return ""; },
            /*test_io_priority=*/true, TableTypeForTest::kBlockBasedTable) {}
};
|
|
|
|
|
|
|
|
TEST_F(CompactionJobIOPriorityTest, WriteControllerStateNormal) {
  // When the WriteController state is normal, compaction I/O is expected to
  // run at Env::IO_LOW priority.
  NewDB();
  mock::KVVector expected = CreateTwoFiles(false);
  auto* default_cfd = versions_->GetColumnFamilySet()->GetDefault();
  constexpr int input_level = 0;
  auto l0_files =
      default_cfd->current()->storage_info()->LevelFiles(input_level);
  ASSERT_EQ(2U, l0_files.size());
  RunCompaction({l0_files}, {input_level}, {expected}, {}, kMaxSequenceNumber,
                1, false, {kInvalidBlobFileNumber}, false, Env::IO_LOW,
                Env::IO_LOW);
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobIOPriorityTest, WriteControllerStateDelayed) {
  // When the WriteController state is Delayed, compaction I/O is expected to
  // be promoted to Env::IO_USER priority.
  NewDB();
  mock::KVVector expected = CreateTwoFiles(false);
  auto* default_cfd = versions_->GetColumnFamilySet()->GetDefault();
  constexpr int input_level = 0;
  auto l0_files =
      default_cfd->current()->storage_info()->LevelFiles(input_level);
  ASSERT_EQ(2U, l0_files.size());
  {
    // Hold a delay token for the duration of the compaction run.
    std::unique_ptr<WriteControllerToken> token =
        write_controller_.GetDelayToken(1000000);
    RunCompaction({l0_files}, {input_level}, {expected}, {}, kMaxSequenceNumber,
                  1, false, {kInvalidBlobFileNumber}, false, Env::IO_USER,
                  Env::IO_USER);
  }
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobIOPriorityTest, WriteControllerStateStalled) {
  // When the WriteController state is Stalled, compaction I/O is expected to
  // be promoted to Env::IO_USER priority.
  NewDB();
  mock::KVVector expected = CreateTwoFiles(false);
  auto* default_cfd = versions_->GetColumnFamilySet()->GetDefault();
  constexpr int input_level = 0;
  auto l0_files =
      default_cfd->current()->storage_info()->LevelFiles(input_level);
  ASSERT_EQ(2U, l0_files.size());
  {
    // Hold a stop token for the duration of the compaction run.
    std::unique_ptr<WriteControllerToken> token =
        write_controller_.GetStopToken();
    RunCompaction({l0_files}, {input_level}, {expected}, {}, kMaxSequenceNumber,
                  1, false, {kInvalidBlobFileNumber}, false, Env::IO_USER,
                  Env::IO_USER);
  }
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobIOPriorityTest, GetRateLimiterPriority) {
  NewDB();
  mock::KVVector expected = CreateTwoFiles(false);
  auto* default_cfd = versions_->GetColumnFamilySet()->GetDefault();
  constexpr int input_level = 0;
  auto l0_files =
      default_cfd->current()->storage_info()->LevelFiles(input_level);
  ASSERT_EQ(2U, l0_files.size());
  // check_get_priority is true here: verify the rate-limiter priority the
  // compaction's reads and writes report, expected to be Env::IO_LOW.
  RunCompaction({l0_files}, {input_level}, {expected}, {}, kMaxSequenceNumber,
                1, false, {kInvalidBlobFileNumber}, true, Env::IO_LOW,
                Env::IO_LOW);
}
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2014-11-14 19:35:48 +00:00
|
|
|
|
2015-03-17 21:08:00 +00:00
|
|
|
int main(int argc, char** argv) {
  // Install the crash handler first so any failure below prints a stack trace.
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  // Registers extension objects from remaining command-line args (see
  // rocksdb/convenience.h) before any test runs.
  RegisterCustomObjects(argc, argv);
  return RUN_ALL_TESTS();
}
|