// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE

#include "db/compaction/compaction_job.h"

#include <algorithm>
#include <array>
#include <cinttypes>
#include <map>
#include <string>
#include <tuple>

#include "db/blob/blob_index.h"
#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "db/error_handler.h"
#include "db/version_set.h"
#include "file/random_access_file_reader.h"
#include "file/writable_file_writer.h"
#include "options/options_helper.h"
#include "rocksdb/cache.h"
#include "rocksdb/convenience.h"
#include "rocksdb/db.h"
#include "rocksdb/file_system.h"
#include "rocksdb/options.h"
#include "rocksdb/write_buffer_manager.h"
#include "table/mock_table.h"
#include "table/unique_id_impl.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/string_util.h"
#include "utilities/merge_operators.h"

namespace ROCKSDB_NAMESPACE {

namespace {

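// Asserts that a freshly constructed CompactionJobStats has every counter
// zeroed out; is_manual_compaction is expected to default to true.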
void VerifyInitializationOfCompactionJobStats(
    const CompactionJobStats& compaction_job_stats) {
#if !defined(IOS_CROSS_COMPILE)
  ASSERT_EQ(compaction_job_stats.elapsed_micros, 0U);

  ASSERT_EQ(compaction_job_stats.num_input_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_input_files, 0U);
  ASSERT_EQ(compaction_job_stats.num_input_files_at_output_level, 0U);

  ASSERT_EQ(compaction_job_stats.num_output_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_output_files, 0U);

  ASSERT_EQ(compaction_job_stats.is_manual_compaction, true);

  ASSERT_EQ(compaction_job_stats.total_input_bytes, 0U);
  ASSERT_EQ(compaction_job_stats.total_output_bytes, 0U);

  ASSERT_EQ(compaction_job_stats.total_input_raw_key_bytes, 0U);
  ASSERT_EQ(compaction_job_stats.total_input_raw_value_bytes, 0U);

  ASSERT_EQ(compaction_job_stats.smallest_output_key_prefix[0], 0);
  ASSERT_EQ(compaction_job_stats.largest_output_key_prefix[0], 0);

  ASSERT_EQ(compaction_job_stats.num_records_replaced, 0U);

  ASSERT_EQ(compaction_job_stats.num_input_deletion_records, 0U);
  ASSERT_EQ(compaction_job_stats.num_expired_deletion_records, 0U);

  ASSERT_EQ(compaction_job_stats.num_corrupt_keys, 0U);
#endif  // !defined(IOS_CROSS_COMPILE)
}

// Mock FSWritableFile for testing io priority.
// Only override the essential functions for testing compaction io priority.
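// Note: EXPECT_* rather than ASSERT_* is used in the overrides below because
// gtest's ASSERT_* macros `return` on failure and so cannot be used in
// functions that return a value such as IOStatus.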
class MockTestWritableFile : public FSWritableFileOwnerWrapper {
 public:
  MockTestWritableFile(std::unique_ptr<FSWritableFile>&& file,
                       Env::IOPriority io_priority)
      : FSWritableFileOwnerWrapper(std::move(file)),
        write_io_priority_(io_priority) {}
  IOStatus Append(const Slice& data, const IOOptions& options,
                  IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
    return target()->Append(data, options, dbg);
  }
  IOStatus Append(const Slice& data, const IOOptions& options,
                  const DataVerificationInfo& verification_info,
                  IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
    return target()->Append(data, options, verification_info, dbg);
  }
  IOStatus Close(const IOOptions& options, IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
    return target()->Close(options, dbg);
  }
  IOStatus Flush(const IOOptions& options, IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
    return target()->Flush(options, dbg);
  }
  IOStatus Sync(const IOOptions& options, IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
    return target()->Sync(options, dbg);
  }
  IOStatus Fsync(const IOOptions& options, IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
    return target()->Fsync(options, dbg);
  }
  uint64_t GetFileSize(const IOOptions& options,
                       IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
    return target()->GetFileSize(options, dbg);
  }
  IOStatus RangeSync(uint64_t offset, uint64_t nbytes, const IOOptions& options,
                     IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
    return target()->RangeSync(offset, nbytes, options, dbg);
  }

  void PrepareWrite(size_t offset, size_t len, const IOOptions& options,
                    IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
    target()->PrepareWrite(offset, len, options, dbg);
  }

  IOStatus Allocate(uint64_t offset, uint64_t len, const IOOptions& options,
                    IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, write_io_priority_);
    return target()->Allocate(offset, len, options, dbg);
  }

 private:
  Env::IOPriority write_io_priority_;
};

// Mock FSRandomAccessFile for testing io priority.
// Only override the essential functions for testing compaction io priority.
class MockTestRandomAccessFile : public FSRandomAccessFileOwnerWrapper {
 public:
  MockTestRandomAccessFile(std::unique_ptr<FSRandomAccessFile>&& file,
                           Env::IOPriority io_priority)
      : FSRandomAccessFileOwnerWrapper(std::move(file)),
        read_io_priority_(io_priority) {}

  IOStatus Read(uint64_t offset, size_t n, const IOOptions& options,
                Slice* result, char* scratch,
                IODebugContext* dbg) const override {
    EXPECT_EQ(options.rate_limiter_priority, read_io_priority_);
    return target()->Read(offset, n, options, result, scratch, dbg);
  }
  IOStatus Prefetch(uint64_t offset, size_t n, const IOOptions& options,
                    IODebugContext* dbg) override {
    EXPECT_EQ(options.rate_limiter_priority, read_io_priority_);
    return target()->Prefetch(offset, n, options, dbg);
  }

 private:
  Env::IOPriority read_io_priority_;
};

// Mock FileSystem for testing io priority.
class MockTestFileSystem : public FileSystemWrapper {
 public:
  explicit MockTestFileSystem(const std::shared_ptr<FileSystem>& base,
                              Env::IOPriority read_io_priority,
                              Env::IOPriority write_io_priority)
      : FileSystemWrapper(base),
        read_io_priority_(read_io_priority),
        write_io_priority_(write_io_priority) {}

  static const char* kClassName() { return "MockTestFileSystem"; }
  const char* Name() const override { return kClassName(); }

  IOStatus NewRandomAccessFile(const std::string& fname,
                               const FileOptions& file_opts,
                               std::unique_ptr<FSRandomAccessFile>* result,
                               IODebugContext* dbg) override {
    IOStatus s = target()->NewRandomAccessFile(fname, file_opts, result, dbg);
    EXPECT_OK(s);
    result->reset(
        new MockTestRandomAccessFile(std::move(*result), read_io_priority_));
    return s;
  }
  IOStatus NewWritableFile(const std::string& fname,
                           const FileOptions& file_opts,
                           std::unique_ptr<FSWritableFile>* result,
                           IODebugContext* dbg) override {
    IOStatus s = target()->NewWritableFile(fname, file_opts, result, dbg);
    EXPECT_OK(s);
    result->reset(
        new MockTestWritableFile(std::move(*result), write_io_priority_));
    return s;
  }

 private:
  Env::IOPriority read_io_priority_;
  Env::IOPriority write_io_priority_;
};
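
// Selects which table implementation the parameterized tests exercise: the
// mock table factory or a real block-based table.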
enum TableTypeForTest : uint8_t { kMockTable = 0, kBlockBasedTable = 1 };

}  // namespace

class CompactionJobTestBase : public testing::Test {
 protected:
  CompactionJobTestBase(std::string dbname, const Comparator* ucmp,
                        std::function<std::string(uint64_t)> encode_u64_ts,
                        bool test_io_priority, TableTypeForTest table_type)
      : dbname_(std::move(dbname)),
        ucmp_(ucmp),
        db_options_(),
        mutable_cf_options_(cf_options_),
        mutable_db_options_(),
        table_cache_(NewLRUCache(50000, 16)),
        write_buffer_manager_(db_options_.db_write_buffer_size),
        versions_(new VersionSet(
            dbname_, &db_options_, env_options_, table_cache_.get(),
            &write_buffer_manager_, &write_controller_,
            /*block_cache_tracer=*/nullptr,
            /*io_tracer=*/nullptr, /*db_id*/ "", /*db_session_id*/ "")),
        shutting_down_(false),
        mock_table_factory_(new mock::MockTableFactory()),
        error_handler_(nullptr, db_options_, &mutex_),
        encode_u64_ts_(std::move(encode_u64_ts)),
        test_io_priority_(test_io_priority),
        table_type_(table_type) {
    Env* base_env = Env::Default();
    EXPECT_OK(
        test::CreateEnvFromSystem(ConfigOptions(), &base_env, &env_guard_));
    env_ = base_env;
    fs_ = env_->GetFileSystem();
    // set default for the tests
    mutable_cf_options_.target_file_size_base = 1024 * 1024;
    mutable_cf_options_.max_compaction_bytes = 10 * 1024 * 1024;
  }

  void SetUp() override {
    EXPECT_OK(env_->CreateDirIfMissing(dbname_));
    db_options_.env = env_;
    db_options_.fs = fs_;
    db_options_.db_paths.emplace_back(dbname_,
                                      std::numeric_limits<uint64_t>::max());
    cf_options_.comparator = ucmp_;
    if (table_type_ == TableTypeForTest::kBlockBasedTable) {
      BlockBasedTableOptions table_options;
      cf_options_.table_factory.reset(
          NewBlockBasedTableFactory(table_options));
    } else if (table_type_ == TableTypeForTest::kMockTable) {
      cf_options_.table_factory = mock_table_factory_;
    } else {
      assert(false);
    }
  }
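
  // Builds the on-disk table file name for `file_number` under dbname_.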
  std::string GenerateFileName(uint64_t file_number) {
    FileMetaData meta;
    std::vector<DbPath> db_paths;
    db_paths.emplace_back(dbname_, std::numeric_limits<uint64_t>::max());
    meta.fd = FileDescriptor(file_number, 0, 0);
    return TableFileName(db_paths, meta.fd.GetNumber(), meta.fd.GetPathId());
  }
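
  // Builds an encoded internal key from `user_key`, appending the u64
  // timestamp produced by encode_u64_ts_ before the sequence number and
  // type tag.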
  std::string KeyStr(const std::string& user_key, const SequenceNumber seq_num,
                     const ValueType t, uint64_t ts = 0) {
    std::string user_key_with_ts = user_key + encode_u64_ts_(ts);
    return InternalKey(user_key_with_ts, seq_num, t).Encode().ToString();
  }
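
  // Helpers that build encoded blob index values (regular, TTL, and inlined
  // TTL) for entries of type kTypeBlobIndex.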
  static std::string BlobStr(uint64_t blob_file_number, uint64_t offset,
                             uint64_t size) {
    std::string blob_index;
    BlobIndex::EncodeBlob(&blob_index, blob_file_number, offset, size,
                          kNoCompression);
    return blob_index;
  }

  static std::string BlobStrTTL(uint64_t blob_file_number, uint64_t offset,
                                uint64_t size, uint64_t expiration) {
    std::string blob_index;
    BlobIndex::EncodeBlobTTL(&blob_index, expiration, blob_file_number, offset,
                             size, kNoCompression);
    return blob_index;
  }

  static std::string BlobStrInlinedTTL(const Slice& value,
                                       uint64_t expiration) {
    std::string blob_index;
    BlobIndex::EncodeInlinedTTL(&blob_index, expiration, value);
    return blob_index;
  }

  // Creates a table with the specified key-value pairs.
  void CreateTable(const std::string& table_name,
                   const mock::KVVector& contents, uint64_t& file_size) {
    std::unique_ptr<WritableFileWriter> file_writer;
    Status s = WritableFileWriter::Create(fs_, table_name, FileOptions(),
                                          &file_writer, nullptr);
    ASSERT_OK(s);
    std::unique_ptr<TableBuilder> table_builder(
        cf_options_.table_factory->NewTableBuilder(
            TableBuilderOptions(*cfd_->ioptions(), mutable_cf_options_,
                                cfd_->internal_comparator(),
                                cfd_->int_tbl_prop_collector_factories(),
                                CompressionType::kNoCompression,
                                CompressionOptions(), 0 /* column_family_id */,
                                kDefaultColumnFamilyName, -1 /* level */),
            file_writer.get()));
    // Build table.
    for (auto kv : contents) {
      std::string key;
      std::string value;
      std::tie(key, value) = kv;
      table_builder->Add(key, value);
    }
    ASSERT_OK(table_builder->Finish());
    file_size = table_builder->FileSize();
  }
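
  // Adds a file containing `contents` to `level`: computes its smallest and
  // largest keys, sequence number range, and oldest referenced blob file,
  // writes the table (mock or block-based), and applies the VersionEdit.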
  void AddMockFile(const mock::KVVector& contents, int level = 0) {
    assert(contents.size() > 0);

    bool first_key = true;
    std::string smallest, largest;
    InternalKey smallest_key, largest_key;
    SequenceNumber smallest_seqno = kMaxSequenceNumber;
    SequenceNumber largest_seqno = 0;
    uint64_t oldest_blob_file_number = kInvalidBlobFileNumber;
    for (auto kv : contents) {
      ParsedInternalKey key;
      std::string skey;
      std::string value;
      std::tie(skey, value) = kv;
      const Status pik_status =
          ParseInternalKey(skey, &key, true /* log_err_key */);

      smallest_seqno = std::min(smallest_seqno, key.sequence);
      largest_seqno = std::max(largest_seqno, key.sequence);

      if (first_key ||
          cfd_->user_comparator()->Compare(key.user_key, smallest) < 0) {
        smallest.assign(key.user_key.data(), key.user_key.size());
        smallest_key.DecodeFrom(skey);
      }
      if (first_key ||
          cfd_->user_comparator()->Compare(key.user_key, largest) > 0) {
        largest.assign(key.user_key.data(), key.user_key.size());
        largest_key.DecodeFrom(skey);
      }

      first_key = false;

      if (pik_status.ok() && key.type == kTypeBlobIndex) {
        BlobIndex blob_index;
        const Status s = blob_index.DecodeFrom(value);
        if (!s.ok()) {
          continue;
        }

        if (blob_index.IsInlined() || blob_index.HasTTL() ||
            blob_index.file_number() == kInvalidBlobFileNumber) {
          continue;
        }

        if (oldest_blob_file_number == kInvalidBlobFileNumber ||
            oldest_blob_file_number > blob_index.file_number()) {
          oldest_blob_file_number = blob_index.file_number();
        }
      }
    }

    uint64_t file_number = versions_->NewFileNumber();

    uint64_t file_size = 0;
    if (table_type_ == TableTypeForTest::kBlockBasedTable) {
      CreateTable(GenerateFileName(file_number), contents, file_size);
    } else if (table_type_ == TableTypeForTest::kMockTable) {
      file_size = 10;
      EXPECT_OK(mock_table_factory_->CreateMockTable(
          env_, GenerateFileName(file_number), std::move(contents)));
    } else {
      assert(false);
    }

    VersionEdit edit;
    edit.AddFile(level, file_number, 0, file_size, smallest_key, largest_key,
                 smallest_seqno, largest_seqno, false, Temperature::kUnknown,
                 oldest_blob_file_number, kUnknownOldestAncesterTime,
                 kUnknownFileCreationTime, kUnknownFileChecksum,
                 kUnknownFileChecksumFuncName, kNullUniqueId64x2);

    mutex_.Lock();
    EXPECT_OK(
        versions_->LogAndApply(versions_->GetColumnFamilySet()->GetDefault(),
                               mutable_cf_options_, &edit, &mutex_, nullptr));
    mutex_.Unlock();
  }
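
  // Checks the compaction outputs at `output_level` against
  // `expected_results` and, when given, each output file's expected oldest
  // blob file number.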
  void VerifyTables(int output_level,
                    const std::vector<mock::KVVector>& expected_results,
                    std::vector<uint64_t> expected_oldest_blob_file_numbers) {
    if (expected_results.empty()) {
      ASSERT_EQ(compaction_job_stats_.num_output_files, 0U);
      return;
    }
    int expected_output_file_num = 0;
    for (const auto& e : expected_results) {
      if (!e.empty()) {
        ++expected_output_file_num;
      }
    }
    ASSERT_EQ(expected_output_file_num,
              compaction_job_stats_.num_output_files);
    if (expected_output_file_num == 0) {
      return;
    }

    if (expected_oldest_blob_file_numbers.empty()) {
      expected_oldest_blob_file_numbers.resize(expected_output_file_num,
                                               kInvalidBlobFileNumber);
    }

    auto cfd = versions_->GetColumnFamilySet()->GetDefault();
    if (table_type_ == TableTypeForTest::kMockTable) {
      ASSERT_EQ(compaction_job_stats_.num_output_files,
                expected_results.size());
      mock_table_factory_->AssertLatestFiles(expected_results);
    } else {
      assert(table_type_ == TableTypeForTest::kBlockBasedTable);
    }

    auto output_files =
        cfd->current()->storage_info()->LevelFiles(output_level);
    ASSERT_EQ(expected_output_file_num, output_files.size());

    if (table_type_ == TableTypeForTest::kMockTable) {
      assert(output_files.size() ==
             static_cast<size_t>(expected_output_file_num));
      const FileMetaData* const output_file = output_files[0];
      ASSERT_EQ(output_file->oldest_blob_file_number,
                expected_oldest_blob_file_numbers[0]);
      return;
    }

    for (size_t i = 0; i < expected_results.size(); ++i) {
      const FileMetaData* const output_file = output_files[i];
      std::string file_name = GenerateFileName(output_file->fd.GetNumber());
      const auto& fs = env_->GetFileSystem();
      std::unique_ptr<RandomAccessFileReader> freader;
      IOStatus ios = RandomAccessFileReader::Create(
          fs, file_name, FileOptions(), &freader, nullptr);
      ASSERT_OK(ios);
      std::unique_ptr<TableReader> table_reader;
      uint64_t file_size = output_file->fd.GetFileSize();
      ReadOptions read_opts;
      Status s = cf_options_.table_factory->NewTableReader(
          read_opts,
          TableReaderOptions(*cfd->ioptions(), nullptr, FileOptions(),
                             cfd_->internal_comparator()),
          std::move(freader), file_size, &table_reader, false);
      ASSERT_OK(s);
      assert(table_reader);
      std::unique_ptr<InternalIterator> iiter(
          table_reader->NewIterator(read_opts, nullptr, nullptr, true,
                                    TableReaderCaller::kUncategorized));
      assert(iiter);

      mock::KVVector from_db;
      for (iiter->SeekToFirst(); iiter->Valid(); iiter->Next()) {
        const Slice key = iiter->key();
        const Slice value = iiter->value();
        from_db.emplace_back(
            make_pair(key.ToString(false), value.ToString(false)));
      }
      ASSERT_EQ(expected_results[i], from_db);
    }
  }
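
  // Bumps the version set's last allocated, published, and visible sequence
  // numbers to `sequence_number` + 1.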
  void SetLastSequence(const SequenceNumber sequence_number) {
    versions_->SetLastAllocatedSequence(sequence_number + 1);
    versions_->SetLastPublishedSequence(sequence_number + 1);
    versions_->SetLastSequence(sequence_number + 1);
  }
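
  // Writes two files of kKeysPerFile keys each whose key ranges overlap by
  // kMatchingKeys ids, optionally corrupting the key type of the first
  // kCorruptKeysPerFile ids in each file.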
  // Returns the expected result after compaction.
  mock::KVVector CreateTwoFiles(bool gen_corrupted_keys) {
    stl_wrappers::KVMap expected_results;
    constexpr int kKeysPerFile = 10000;
    constexpr int kCorruptKeysPerFile = 200;
    constexpr int kMatchingKeys = kKeysPerFile / 2;
    SequenceNumber sequence_number = 0;

    auto corrupt_id = [&](int id) {
      return gen_corrupted_keys && id > 0 && id <= kCorruptKeysPerFile;
    };

    for (int i = 0; i < 2; ++i) {
      auto contents = mock::MakeMockFile();
      for (int k = 0; k < kKeysPerFile; ++k) {
        auto key = std::to_string(i * kMatchingKeys + k);
        auto value = std::to_string(i * kKeysPerFile + k);
        InternalKey internal_key(key, ++sequence_number, kTypeValue);

        // This is how the key will look once it's written in the bottommost
        // file.
        InternalKey bottommost_internal_key(key, 0, kTypeValue);

        if (corrupt_id(k)) {
          test::CorruptKeyType(&internal_key);
          test::CorruptKeyType(&bottommost_internal_key);
        }
        contents.push_back({internal_key.Encode().ToString(), value});
        if (i == 1 || k < kMatchingKeys || corrupt_id(k - kMatchingKeys)) {
          expected_results.insert(
              {bottommost_internal_key.Encode().ToString(), value});
        }
      }
      mock::SortKVVector(&contents, ucmp_);

      AddMockFile(contents);
    }

    SetLastSequence(sequence_number);

    mock::KVVector expected_results_kvvector;
    for (auto& kv : expected_results) {
      expected_results_kvvector.push_back({kv.first, kv.second});
    }

    return expected_results_kvvector;
  }

  void NewDB() {
    EXPECT_OK(DestroyDB(dbname_, Options()));
    EXPECT_OK(env_->CreateDirIfMissing(dbname_));

    std::shared_ptr<Logger> info_log;
    DBOptions db_opts = BuildDBOptions(db_options_, mutable_db_options_);
    Status s = CreateLoggerFromOptions(dbname_, db_opts, &info_log);
    ASSERT_OK(s);
    db_options_.info_log = info_log;

    versions_.reset(
        new VersionSet(dbname_, &db_options_, env_options_, table_cache_.get(),
                       &write_buffer_manager_, &write_controller_,
                       /*block_cache_tracer=*/nullptr, /*io_tracer=*/nullptr,
                       /*db_id*/ "", /*db_session_id*/ ""));
    compaction_job_stats_.Reset();
    ASSERT_OK(SetIdentityFile(env_, dbname_));

    VersionEdit new_db;
    new_db.SetLogNumber(0);
    new_db.SetNextFile(2);
    new_db.SetLastSequence(0);

    const std::string manifest = DescriptorFileName(dbname_, 1);
    std::unique_ptr<WritableFileWriter> file_writer;
    const auto& fs = env_->GetFileSystem();
    s = WritableFileWriter::Create(fs, manifest,
                                   fs->OptimizeForManifestWrite(env_options_),
                                   &file_writer, nullptr);

    ASSERT_OK(s);
    {
      log::Writer log(std::move(file_writer), 0, false);
      std::string record;
      new_db.EncodeTo(&record);
      s = log.AddRecord(record);
    }
    ASSERT_OK(s);
    // Make "CURRENT" file that points to the new manifest file.
    s = SetCurrentFile(fs_.get(), dbname_, 1, nullptr);

    ASSERT_OK(s);

    cf_options_.merge_operator = merge_op_;
    cf_options_.compaction_filter = compaction_filter_.get();
    std::vector<ColumnFamilyDescriptor> column_families;
    column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);

    ASSERT_OK(versions_->Recover(column_families, false));
    cfd_ = versions_->GetColumnFamilySet()->GetDefault();
  }

  // input_files[i] on input_levels[i]
  void RunLastLevelCompaction(
      const std::vector<std::vector<FileMetaData*>>& input_files,
      const std::vector<int> input_levels,
      std::function<void(Compaction& comp)>&& verify_func,
      const std::vector<SequenceNumber>& snapshots = {}) {
    const int kLastLevel = cf_options_.num_levels - 1;
    verify_per_key_placement_ = std::move(verify_func);
    mock::KVVector empty_map;
    RunCompaction(input_files, input_levels, {empty_map}, snapshots,
                  kMaxSequenceNumber, kLastLevel, false);
  }
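
  // Assembles a Compaction over the given inputs and runs it through
  // CompactionJob; when test_io_priority_ is set, the FileSystem is swapped
  // for MockTestFileSystem so every read and write asserts its io priority.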
|
|
|
|
|
2022-09-15 04:59:56 +00:00
|
|
|
// input_files[i] on input_levels[i]
|
2015-11-05 04:52:22 +00:00
|
|
|
void RunCompaction(
|
|
|
|
const std::vector<std::vector<FileMetaData*>>& input_files,
|
2022-09-15 04:59:56 +00:00
|
|
|
const std::vector<int>& input_levels,
|
|
|
|
const std::vector<mock::KVVector>& expected_results,
|
2015-11-05 04:52:22 +00:00
|
|
|
const std::vector<SequenceNumber>& snapshots = {},
|
2019-05-04 00:26:20 +00:00
|
|
|
SequenceNumber earliest_write_conflict_snapshot = kMaxSequenceNumber,
|
2019-10-14 22:19:31 +00:00
|
|
|
int output_level = 1, bool verify = true,
|
2022-09-15 04:59:56 +00:00
|
|
|
std::vector<uint64_t> expected_oldest_blob_file_numbers = {},
|
2022-06-07 18:57:12 +00:00
|
|
|
bool check_get_priority = false,
|
|
|
|
Env::IOPriority read_io_priority = Env::IO_TOTAL,
|
2022-09-15 04:59:56 +00:00
|
|
|
Env::IOPriority write_io_priority = Env::IO_TOTAL,
|
|
|
|
int max_subcompactions = 0) {
|
2022-06-07 18:57:12 +00:00
|
|
|
// For compaction, set fs as MockTestFileSystem to check the io_priority.
|
|
|
|
if (test_io_priority_) {
|
|
|
|
db_options_.fs.reset(
|
|
|
|
new MockTestFileSystem(fs_, read_io_priority, write_io_priority));
|
|
|
|
}
|
|
|
|
|
2015-07-16 16:18:35 +00:00
|
|
|
auto cfd = versions_->GetColumnFamilySet()->GetDefault();
|
|
|
|
|
2015-08-08 04:59:51 +00:00
|
|
|
size_t num_input_files = 0;
|
|
|
|
std::vector<CompactionInputFiles> compaction_input_files;
|
2022-09-15 04:59:56 +00:00
|
|
|
for (size_t i = 0; i < input_files.size(); ++i) {
|
|
|
|
auto level_files = input_files[i];
|
2015-08-08 04:59:51 +00:00
|
|
|
CompactionInputFiles compaction_level;
|
2022-09-15 04:59:56 +00:00
|
|
|
compaction_level.level = input_levels[i];
|
2015-08-08 04:59:51 +00:00
|
|
|
compaction_level.files.insert(compaction_level.files.end(),
|
2022-10-26 19:35:12 +00:00
|
|
|
level_files.begin(), level_files.end());
|
2015-08-08 04:59:51 +00:00
|
|
|
compaction_input_files.push_back(compaction_level);
|
|
|
|
num_input_files += level_files.size();
|
2015-07-16 16:18:35 +00:00
|
|
|
}
|
2015-08-08 04:59:51 +00:00
|
|
|
|
Align compaction output file boundaries to the next level ones (#10655)
Summary:
Try to align the compaction output file boundaries to the next level ones
(grandparent level), to reduce the level compaction write-amplification.
In level compaction, there are "wasted" data at the beginning and end of the
output level files. Align the file boundary can avoid such "wasted" compaction.
With this PR, it tries to align the non-bottommost level file boundaries to its
next level ones. It may cut file when the file size is large enough (at least
50% of target_file_size) and not too large (2x target_file_size).
db_bench shows about 12.56% compaction reduction:
```
TEST_TMPDIR=/data/dbbench2 ./db_bench --benchmarks=fillrandom,readrandom -max_background_jobs=12 -num=400000000 -target_file_size_base=33554432
# baseline:
Flush(GB): cumulative 25.882, interval 7.216
Cumulative compaction: 285.90 GB write, 162.36 MB/s write, 269.68 GB read, 153.15 MB/s read, 2926.7 seconds
# with this change:
Flush(GB): cumulative 25.882, interval 7.753
Cumulative compaction: 249.97 GB write, 141.96 MB/s write, 233.74 GB read, 132.74 MB/s read, 2534.9 seconds
```
The compaction simulator shows a similar result (14% with 100G random data).
As a side effect, with this PR, the SST file size can exceed the
target_file_size, but is capped at 2x target_file_size. And there will be
smaller files. Here are file size statistics when loading 100GB with the target
file size 32MB:
```
baseline this_PR
count 1.656000e+03 1.705000e+03
mean 3.116062e+07 3.028076e+07
std 7.145242e+06 8.046139e+06
```
The feature is enabled by default, to revert to the old behavior disable it
with `AdvancedColumnFamilyOptions.level_compaction_dynamic_file_size = false`
Also includes https://github.com/facebook/rocksdb/issues/1963 to cut file before skippable grandparent file. Which is for
use case like user adding 2 or more non-overlapping data range at the same
time, it can reduce the overlapping of 2 datasets in the lower levels.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10655
Reviewed By: cbi42
Differential Revision: D39552321
Pulled By: jay-zhuang
fbshipit-source-id: 640d15f159ab0cd973f2426cfc3af266fc8bdde2
2022-09-30 02:43:55 +00:00
|
|
|
std::vector<FileMetaData*> grandparents;
|
|
|
|
// it should actually be the next non-empty level
|
|
|
|
const int kGrandparentsLevel = output_level + 1;
|
|
|
|
if (kGrandparentsLevel < cf_options_.num_levels) {
|
|
|
|
grandparents =
|
|
|
|
cfd_->current()->storage_info()->LevelFiles(kGrandparentsLevel);
|
|
|
|
}
|
|
|
|
|
2020-03-31 19:08:41 +00:00
|
|
|

    Compaction compaction(
        cfd->current()->storage_info(), *cfd->ioptions(),
        *cfd->GetLatestMutableCFOptions(), mutable_db_options_,
        compaction_input_files, output_level,
        mutable_cf_options_.target_file_size_base,
        mutable_cf_options_.max_compaction_bytes, 0, kNoCompression,
        cfd->GetLatestMutableCFOptions()->compression_opts,
        Temperature::kUnknown, max_subcompactions, grandparents, true);
    compaction.SetInputVersion(cfd->current());

    assert(db_options_.info_log);
    LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, db_options_.info_log.get());
    mutex_.Lock();
    EventLogger event_logger(db_options_.info_log.get());
    // TODO(yiwu) add a mock snapshot checker and add test for it.
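    // A null checker corresponds to the non-txn-db / write-committed case,
    // where the compaction iterator treats every key as committed.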
    SnapshotChecker* snapshot_checker = nullptr;
    ASSERT_TRUE(full_history_ts_low_.empty() ||
                ucmp_->timestamp_size() == full_history_ts_low_.size());
    const std::atomic<bool> kManualCompactionCanceledFalse{false};

    CompactionJob compaction_job(
        0, &compaction, db_options_, mutable_db_options_, env_options_,
        versions_.get(), &shutting_down_, &log_buffer, nullptr, nullptr,
        nullptr, nullptr, &mutex_, &error_handler_, snapshots,
        earliest_write_conflict_snapshot, snapshot_checker, nullptr,
        table_cache_, &event_logger, false, false, dbname_,
        &compaction_job_stats_, Env::Priority::USER, nullptr /* IOTracer */,
        /*manual_compaction_canceled=*/kManualCompactionCanceledFalse,
        env_->GenerateUniqueId(), DBImpl::GenerateDbSessionId(nullptr),
        full_history_ts_low_);

    VerifyInitializationOfCompactionJobStats(compaction_job_stats_);

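    // Prepare() and Install() run with the DB mutex held, while the heavy
    // Run() step executes with the mutex released, mirroring how DBImpl
    // drives a compaction job.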
    compaction_job.Prepare();
    mutex_.Unlock();
    Status s = compaction_job.Run();
    ASSERT_OK(s);
    ASSERT_OK(compaction_job.io_status());
    mutex_.Lock();
    ASSERT_OK(compaction_job.Install(*cfd->GetLatestMutableCFOptions()));
    ASSERT_OK(compaction_job.io_status());
    mutex_.Unlock();
    log_buffer.FlushBufferToLog();

    if (verify) {
      ASSERT_GE(compaction_job_stats_.elapsed_micros, 0U);
      ASSERT_EQ(compaction_job_stats_.num_input_files, num_input_files);

      VerifyTables(output_level, expected_results,
                   expected_oldest_blob_file_numbers);
    }

    if (check_get_priority) {
      CheckGetRateLimiterPriority(compaction_job);
    }

    if (verify_per_key_placement_) {
      // Verify per_key_placement compaction
      assert(compaction.SupportsPerKeyPlacement());
      verify_per_key_placement_(compaction);
    }
  }
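
  // Compaction writes map the WriteController state to a rate limiter
  // priority (per the change that made the write rate limiter priority
  // dynamic): Normal -> Env::IO_LOW, Delayed or Stopped -> Env::IO_USER.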
  void CheckGetRateLimiterPriority(CompactionJob& compaction_job) {
    // When the state from WriteController is normal.
    ASSERT_EQ(compaction_job.GetRateLimiterPriority(), Env::IO_LOW);

    WriteController* write_controller =
        compaction_job.versions_->GetColumnFamilySet()->write_controller();

    {
      // When the state from WriteController is Delayed.
      std::unique_ptr<WriteControllerToken> delay_token =
          write_controller->GetDelayToken(1000000);
      ASSERT_EQ(compaction_job.GetRateLimiterPriority(), Env::IO_USER);
    }

    {
      // When the state from WriteController is Stopped.
      std::unique_ptr<WriteControllerToken> stop_token =
          write_controller->GetStopToken();
      ASSERT_EQ(compaction_job.GetRateLimiterPriority(), Env::IO_USER);
    }
  }

  std::shared_ptr<Env> env_guard_;
  Env* env_;
  std::shared_ptr<FileSystem> fs_;
  std::string dbname_;
  const Comparator* const ucmp_;
  EnvOptions env_options_;
  ImmutableDBOptions db_options_;
  ColumnFamilyOptions cf_options_;
  MutableCFOptions mutable_cf_options_;
  MutableDBOptions mutable_db_options_;
  std::shared_ptr<Cache> table_cache_;
  WriteController write_controller_;
  WriteBufferManager write_buffer_manager_;
  std::unique_ptr<VersionSet> versions_;
  InstrumentedMutex mutex_;
  std::atomic<bool> shutting_down_;
  std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
  CompactionJobStats compaction_job_stats_;
  ColumnFamilyData* cfd_;
  std::unique_ptr<CompactionFilter> compaction_filter_;
  std::shared_ptr<MergeOperator> merge_op_;
  ErrorHandler error_handler_;
  std::string full_history_ts_low_;
  const std::function<std::string(uint64_t)> encode_u64_ts_;
  const bool test_io_priority_;
  std::function<void(Compaction& comp)> verify_per_key_placement_;
  const TableTypeForTest table_type_ = kMockTable;
};

// TODO(icanadi) Make it simpler once we mock out VersionSet
class CompactionJobTest : public CompactionJobTestBase {
 public:
  CompactionJobTest()
      : CompactionJobTestBase(
            test::PerThreadDBPath("compaction_job_test"), BytewiseComparator(),
            [](uint64_t /*ts*/) { return ""; }, /*test_io_priority=*/false,
            TableTypeForTest::kMockTable) {}
};

TEST_F(CompactionJobTest, Simple) {
  NewDB();

  auto expected_results = CreateTwoFiles(false);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  constexpr int input_level = 0;
  auto files = cfd->current()->storage_info()->LevelFiles(input_level);
  ASSERT_EQ(2U, files.size());
  RunCompaction({files}, {input_level}, {expected_results});
}

TEST_F(CompactionJobTest, DISABLED_SimpleCorrupted) {
  NewDB();

  auto expected_results = CreateTwoFiles(true);
  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  constexpr int input_level = 0;
  auto files = cfd->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
  ASSERT_EQ(compaction_job_stats_.num_corrupt_keys, 400U);
}

TEST_F(CompactionJobTest, SimpleDeletion) {
  NewDB();

  auto file1 = mock::MakeMockFile({{KeyStr("c", 4U, kTypeDeletion), ""},
                                   {KeyStr("c", 3U, kTypeValue), "val"}});
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("b", 2U, kTypeValue), "val"},
                                   {KeyStr("b", 1U, kTypeValue), "val"}});
  AddMockFile(file2);

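  // Compacting to the bottommost level drops both the tombstone for "c" and
  // the value it covers, and zeroes the sequence number of the surviving "b".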
  auto expected_results =
      mock::MakeMockFile({{KeyStr("b", 0U, kTypeValue), "val"}});

  SetLastSequence(4U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}

TEST_F(CompactionJobTest, OutputNothing) {
  NewDB();

  auto file1 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"}});
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("a", 2U, kTypeDeletion), ""}});
  AddMockFile(file2);

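  // The deletion at seq 2 covers the only value of "a"; with no snapshots and
  // no data below, compaction drops both entries and the output is empty.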
  auto expected_results = mock::MakeMockFile();

  SetLastSequence(4U);

  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}

TEST_F(CompactionJobTest, SimpleOverwrite) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 3U, kTypeValue), "val2"},
      {KeyStr("b", 4U, kTypeValue), "val3"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"},
                                   {KeyStr("b", 2U, kTypeValue), "val"}});
  AddMockFile(file2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "val2"},
                          {KeyStr("b", 0U, kTypeValue), "val3"}});

  SetLastSequence(4U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}

TEST_F(CompactionJobTest, SimpleNonLastLevel) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeValue), "val2"},
      {KeyStr("b", 6U, kTypeValue), "val3"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("a", 3U, kTypeValue), "val"},
                                   {KeyStr("b", 4U, kTypeValue), "val"}});
  AddMockFile(file2, 1);

  auto file3 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"},
                                   {KeyStr("b", 2U, kTypeValue), "val"}});
  AddMockFile(file3, 2);

  // Because level 1 is not the last level, the sequence numbers of a and b
  // cannot be set to 0
  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 5U, kTypeValue), "val2"},
                          {KeyStr("b", 6U, kTypeValue), "val3"}});

  SetLastSequence(6U);
  const std::vector<int> input_levels = {0, 1};
  auto lvl0_files =
      cfd_->current()->storage_info()->LevelFiles(input_levels[0]);
  auto lvl1_files =
      cfd_->current()->storage_info()->LevelFiles(input_levels[1]);
  RunCompaction({lvl0_files, lvl1_files}, input_levels, {expected_results});
}

TEST_F(CompactionJobTest, SimpleMerge) {
  merge_op_ = MergeOperators::CreateStringAppendOperator();
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeMerge), "5"},
      {KeyStr("a", 4U, kTypeMerge), "4"},
      {KeyStr("a", 3U, kTypeValue), "3"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile(
      {{KeyStr("b", 2U, kTypeMerge), "2"}, {KeyStr("b", 1U, kTypeValue), "1"}});
  AddMockFile(file2);

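  // The string-append operator folds the base value and operands in sequence
  // order, so "a" becomes "3,4,5" and "b" becomes "1,2".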
  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "3,4,5"},
                          {KeyStr("b", 0U, kTypeValue), "1,2"}});

  SetLastSequence(5U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}

TEST_F(CompactionJobTest, NonAssocMerge) {
  merge_op_ = MergeOperators::CreateStringAppendTESTOperator();
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeMerge), "5"},
      {KeyStr("a", 4U, kTypeMerge), "4"},
      {KeyStr("a", 3U, kTypeMerge), "3"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile(
      {{KeyStr("b", 2U, kTypeMerge), "2"}, {KeyStr("b", 1U, kTypeMerge), "1"}});
  AddMockFile(file2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "3,4,5"},
                          {KeyStr("b", 0U, kTypeValue), "1,2"}});

  SetLastSequence(5U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}

// Filters merge operands with value 10.
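// The compaction filter sees each merge operand via FilterMergeValue(); when
// it returns true the operand is dropped before the merge operator runs, so
// "a" folds to 5 + 3 = 8 and "b" keeps only the operand 2.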
TEST_F(CompactionJobTest, MergeOperandFilter) {
  merge_op_ = MergeOperators::CreateUInt64AddOperator();
  compaction_filter_.reset(new test::FilterNumber(10U));
  NewDB();

  auto file1 = mock::MakeMockFile(
      {{KeyStr("a", 5U, kTypeMerge), test::EncodeInt(5U)},
       {KeyStr("a", 4U, kTypeMerge), test::EncodeInt(10U)},  // Filtered
       {KeyStr("a", 3U, kTypeMerge), test::EncodeInt(3U)}});
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(2U)},
      {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)}  // Filtered
  });
  AddMockFile(file2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), test::EncodeInt(8U)},
                          {KeyStr("b", 0U, kTypeValue), test::EncodeInt(2U)}});

  SetLastSequence(5U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}

TEST_F(CompactionJobTest, FilterSomeMergeOperands) {
  merge_op_ = MergeOperators::CreateUInt64AddOperator();
  compaction_filter_.reset(new test::FilterNumber(10U));
  NewDB();

  auto file1 = mock::MakeMockFile(
      {{KeyStr("a", 5U, kTypeMerge), test::EncodeInt(5U)},
       {KeyStr("a", 4U, kTypeMerge), test::EncodeInt(10U)},  // Filtered
       {KeyStr("a", 3U, kTypeValue), test::EncodeInt(5U)},
       {KeyStr("d", 8U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file1);

  auto file2 =
      mock::MakeMockFile({{KeyStr("b", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("c", 2U, kTypeMerge), test::EncodeInt(3U)},
                          {KeyStr("c", 1U, kTypeValue), test::EncodeInt(7U)},
                          {KeyStr("d", 1U, kTypeValue), test::EncodeInt(6U)}});
  AddMockFile(file2);

  auto file3 =
      mock::MakeMockFile({{KeyStr("a", 1U, kTypeMerge), test::EncodeInt(3U)}});
  AddMockFile(file3, 2);

  auto expected_results = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeValue), test::EncodeInt(10U)},
      {KeyStr("c", 2U, kTypeValue), test::EncodeInt(10U)},
      {KeyStr("d", 1U, kTypeValue), test::EncodeInt(6U)}
      // b does not appear because the operands are filtered
  });

  SetLastSequence(5U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}

// Test where all operands/merge results are filtered out.
TEST_F(CompactionJobTest, FilterAllMergeOperands) {
  merge_op_ = MergeOperators::CreateUInt64AddOperator();
  compaction_filter_.reset(new test::FilterNumber(10U));
  NewDB();

  auto file1 =
      mock::MakeMockFile({{KeyStr("a", 11U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("a", 10U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("a", 9U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file1);

  auto file2 =
      mock::MakeMockFile({{KeyStr("b", 8U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 7U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 6U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 5U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 4U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 3U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("c", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("c", 1U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file2);

  auto file3 =
      mock::MakeMockFile({{KeyStr("a", 2U, kTypeMerge), test::EncodeInt(10U)},
                          {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)}});
  AddMockFile(file3, 2);

  SetLastSequence(11U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);

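  // Every operand of every key equals the filtered value, so compaction
  // should produce no output at all.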
  mock::KVVector empty_map;
  RunCompaction({files}, {input_level}, {empty_map});
}
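
// SingleDelete removes the tombstone together with the single value it meets
// during compaction. Below, "b"'s single-delete cancels with b@4 entirely,
// while the regular deletion of "a" must be kept: an older a@1 still lives at
// level 2.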
TEST_F(CompactionJobTest, SimpleSingleDelete) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("a", 5U, kTypeDeletion), ""},
      {KeyStr("b", 6U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("a", 3U, kTypeValue), "val"},
                                   {KeyStr("b", 4U, kTypeValue), "val"}});
  AddMockFile(file2);

  auto file3 = mock::MakeMockFile({
      {KeyStr("a", 1U, kTypeValue), "val"},
  });
  AddMockFile(file3, 2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("a", 5U, kTypeDeletion), ""}});

  SetLastSequence(6U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}

TEST_F(CompactionJobTest, SingleDeleteSnapshots) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("A", 12U, kTypeSingleDeletion), ""},
      {KeyStr("a", 12U, kTypeSingleDeletion), ""},
      {KeyStr("b", 21U, kTypeSingleDeletion), ""},
      {KeyStr("c", 22U, kTypeSingleDeletion), ""},
      {KeyStr("d", 9U, kTypeSingleDeletion), ""},
      {KeyStr("f", 21U, kTypeSingleDeletion), ""},
      {KeyStr("j", 11U, kTypeSingleDeletion), ""},
      {KeyStr("j", 9U, kTypeSingleDeletion), ""},
      {KeyStr("k", 12U, kTypeSingleDeletion), ""},
      {KeyStr("k", 11U, kTypeSingleDeletion), ""},
      {KeyStr("l", 3U, kTypeSingleDeletion), ""},
      {KeyStr("l", 2U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("0", 2U, kTypeSingleDeletion), ""},
      {KeyStr("a", 11U, kTypeValue), "val1"},
      {KeyStr("b", 11U, kTypeValue), "val2"},
      {KeyStr("c", 21U, kTypeValue), "val3"},
      {KeyStr("d", 8U, kTypeValue), "val4"},
      {KeyStr("e", 2U, kTypeSingleDeletion), ""},
      {KeyStr("f", 1U, kTypeValue), "val1"},
      {KeyStr("g", 11U, kTypeSingleDeletion), ""},
      {KeyStr("h", 2U, kTypeSingleDeletion), ""},
      {KeyStr("m", 12U, kTypeValue), "val1"},
      {KeyStr("m", 11U, kTypeSingleDeletion), ""},
      {KeyStr("m", 8U, kTypeValue), "val2"},
  });
  AddMockFile(file2);

  auto file3 = mock::MakeMockFile({
      {KeyStr("A", 1U, kTypeValue), "val"},
      {KeyStr("e", 1U, kTypeValue), "val"},
  });
  AddMockFile(file3, 2);

  auto expected_results = mock::MakeMockFile({
      {KeyStr("A", 12U, kTypeSingleDeletion), ""},
      {KeyStr("a", 12U, kTypeSingleDeletion), ""},
      {KeyStr("a", 11U, kTypeValue), ""},
      {KeyStr("b", 21U, kTypeSingleDeletion), ""},
      {KeyStr("b", 11U, kTypeValue), "val2"},
      {KeyStr("c", 22U, kTypeSingleDeletion), ""},
      {KeyStr("c", 21U, kTypeValue), ""},
      {KeyStr("e", 2U, kTypeSingleDeletion), ""},
      {KeyStr("f", 21U, kTypeSingleDeletion), ""},
      {KeyStr("f", 1U, kTypeValue), "val1"},
      {KeyStr("g", 11U, kTypeSingleDeletion), ""},
      {KeyStr("j", 11U, kTypeSingleDeletion), ""},
      {KeyStr("k", 11U, kTypeSingleDeletion), ""},
      {KeyStr("m", 12U, kTypeValue), "val1"},
      {KeyStr("m", 11U, kTypeSingleDeletion), ""},
      {KeyStr("m", 8U, kTypeValue), "val2"},
  });

  SetLastSequence(22U);
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
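  // Snapshots at sequence 10 and 20 pin the versions visible to them, so
  // single-delete/value pairs may only cancel within the same snapshot stripe.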
  RunCompaction({files}, {input_level}, {expected_results}, {10U, 20U}, 10U);
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTest, EarliestWriteConflictSnapshot) {
|
|
|
|
NewDB();
|
|
|
|
|
|
|
|
// Test multiple snapshots where the earliest snapshot is not a
|
|
|
|
// write-conflic-snapshot.
|
|
|
|
|
|
|
|
auto file1 = mock::MakeMockFile({
|
|
|
|
{KeyStr("A", 24U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("A", 23U, kTypeValue), "val"},
|
|
|
|
{KeyStr("B", 24U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("B", 23U, kTypeValue), "val"},
|
|
|
|
{KeyStr("D", 24U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("G", 32U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("G", 31U, kTypeValue), "val"},
|
|
|
|
{KeyStr("G", 24U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("G", 23U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("H", 31U, kTypeValue), "val"},
|
|
|
|
{KeyStr("H", 24U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("H", 23U, kTypeValue), "val"},
|
|
|
|
{KeyStr("I", 35U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("I", 34U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("I", 33U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("I", 32U, kTypeValue), "val3"},
|
|
|
|
{KeyStr("I", 31U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("J", 34U, kTypeValue), "val"},
|
|
|
|
{KeyStr("J", 33U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("J", 25U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("J", 24U, kTypeSingleDeletion), ""},
|
|
|
|
});
|
|
|
|
AddMockFile(file1);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile({
|
|
|
|
{KeyStr("A", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("A", 13U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("C", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("C", 13U, kTypeValue), "val"},
|
|
|
|
{KeyStr("E", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("F", 4U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("F", 3U, kTypeValue), "val"},
|
|
|
|
{KeyStr("G", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("G", 13U, kTypeValue), "val3"},
|
|
|
|
{KeyStr("H", 14U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("H", 13U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("I", 13U, kTypeValue), "val4"},
|
|
|
|
{KeyStr("I", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("I", 11U, kTypeValue), "val5"},
|
|
|
|
{KeyStr("J", 15U, kTypeValue), "val3"},
|
|
|
|
{KeyStr("J", 14U, kTypeSingleDeletion), ""},
|
|
|
|
});
|
|
|
|
AddMockFile(file2);
|
|
|
|
|
|
|
|
auto expected_results = mock::MakeMockFile({
|
|
|
|
{KeyStr("A", 24U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("A", 23U, kTypeValue), ""},
|
|
|
|
{KeyStr("B", 24U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("B", 23U, kTypeValue), ""},
|
|
|
|
{KeyStr("D", 24U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("E", 12U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("G", 32U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("G", 31U, kTypeValue), ""},
|
|
|
|
{KeyStr("H", 31U, kTypeValue), "val"},
|
|
|
|
{KeyStr("I", 35U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("I", 34U, kTypeValue), ""},
|
|
|
|
{KeyStr("I", 31U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("I", 13U, kTypeValue), "val4"},
|
|
|
|
{KeyStr("J", 34U, kTypeValue), "val"},
|
|
|
|
{KeyStr("J", 33U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("J", 25U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("J", 24U, kTypeSingleDeletion), ""},
|
|
|
|
{KeyStr("J", 15U, kTypeValue), "val3"},
|
|
|
|
{KeyStr("J", 14U, kTypeSingleDeletion), ""},
|
|
|
|
});
|
|
|
|
|
|
|
|
SetLastSequence(24U);
|
2022-09-15 04:59:56 +00:00
|
|
|
constexpr int input_level = 0;
|
|
|
|
auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
|
|
|
|
RunCompaction({files}, {input_level}, {expected_results}, {10U, 20U, 30U},
|
|
|
|
20U);
}

TEST_F(CompactionJobTest, SingleDeleteZeroSeq) {
  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("A", 10U, kTypeSingleDeletion), ""},
      {KeyStr("dummy", 5U, kTypeValue), "val2"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("A", 0U, kTypeValue), "val"},
  });
  AddMockFile(file2);

  auto expected_results = mock::MakeMockFile({
      {KeyStr("dummy", 0U, kTypeValue), "val2"},
  });
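  // Note: the single deletion at seq 10 and the put at seq 0 cancel each
  // other out, and with no snapshots in place the surviving "dummy" entry
  // (seq 5 in the input) has its sequence number zeroed in the output, since
  // it is visible to every possible snapshot.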

  SetLastSequence(22U);

  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results}, {});
}

TEST_F(CompactionJobTest, MultiSingleDelete) {
  // Tests thirteen scenarios (A through M) involving multiple single
  // delete/put pairs; the notation is illustrated in the sketch after this
  // list:
  //
  // A: Put Snapshot SDel Put SDel -> Put Snapshot SDel
  // B: Snapshot Put SDel Put SDel Snapshot -> Snapshot SDel Snapshot
  // C: SDel Put SDel Snapshot Put -> Snapshot Put
  // D: (Put) SDel Snapshot Put SDel -> (Put) SDel Snapshot SDel
  // E: Put SDel Snapshot Put SDel -> Snapshot SDel
  // F: Put SDel Put SDel Snapshot -> removed
  // G: Snapshot SDel Put SDel Put -> Snapshot Put SDel
  // H: (Put) Put SDel Put SDel Snapshot -> removed
  // I: (Put) Snapshot Put SDel Put SDel -> SDel
  // J: Put Put SDel Put SDel SDel Snapshot Put Put SDel SDel Put
  //    -> Snapshot Put
  // K: SDel SDel Put SDel Put Put Snapshot SDel Put SDel SDel Put SDel
  //    -> Snapshot Put Snapshot SDel
  // L: SDel Put SDel Put SDel Snapshot SDel Put SDel SDel Put SDel
  //    -> Snapshot SDel Put SDel
  // M: (Put) SDel Put SDel Put SDel Snapshot Put SDel SDel Put SDel SDel
  //    -> SDel Snapshot Put SDel
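  //
  // Notation sketch (an editorial illustration, not part of the original
  // test): each scenario reads oldest-to-newest and maps onto the mock file
  // entries below. Scenario A, for instance, is assembled from
  //
  //   {KeyStr("A", 10U, kTypeValue), "val"}         // Put  (in file2)
  //   -- snapshot taken at seq 10 --
  //   {KeyStr("A", 12U, kTypeSingleDeletion), ""}   // SDel (in file1)
  //   {KeyStr("A", 13U, kTypeValue), "val5"}        // Put  (in file1)
  //   {KeyStr("A", 14U, kTypeSingleDeletion), ""}   // SDel (in file1)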

  NewDB();

  auto file1 = mock::MakeMockFile({
      {KeyStr("A", 14U, kTypeSingleDeletion), ""},
      {KeyStr("A", 13U, kTypeValue), "val5"},
      {KeyStr("A", 12U, kTypeSingleDeletion), ""},
      {KeyStr("B", 14U, kTypeSingleDeletion), ""},
      {KeyStr("B", 13U, kTypeValue), "val2"},
      {KeyStr("C", 14U, kTypeValue), "val3"},
      {KeyStr("D", 12U, kTypeSingleDeletion), ""},
      {KeyStr("D", 11U, kTypeValue), "val4"},
      {KeyStr("G", 15U, kTypeValue), "val"},
      {KeyStr("G", 14U, kTypeSingleDeletion), ""},
      {KeyStr("G", 13U, kTypeValue), "val"},
      {KeyStr("I", 14U, kTypeSingleDeletion), ""},
      {KeyStr("I", 13U, kTypeValue), "val"},
      {KeyStr("J", 15U, kTypeValue), "val"},
      {KeyStr("J", 14U, kTypeSingleDeletion), ""},
      {KeyStr("J", 13U, kTypeSingleDeletion), ""},
      {KeyStr("J", 12U, kTypeValue), "val"},
      {KeyStr("J", 11U, kTypeValue), "val"},
      {KeyStr("K", 16U, kTypeSingleDeletion), ""},
      {KeyStr("K", 15U, kTypeValue), "val1"},
      {KeyStr("K", 14U, kTypeSingleDeletion), ""},
      {KeyStr("K", 13U, kTypeSingleDeletion), ""},
      {KeyStr("K", 12U, kTypeValue), "val2"},
      {KeyStr("K", 11U, kTypeSingleDeletion), ""},
      {KeyStr("L", 16U, kTypeSingleDeletion), ""},
      {KeyStr("L", 15U, kTypeValue), "val"},
      {KeyStr("L", 14U, kTypeSingleDeletion), ""},
      {KeyStr("L", 13U, kTypeSingleDeletion), ""},
      {KeyStr("L", 12U, kTypeValue), "val"},
      {KeyStr("L", 11U, kTypeSingleDeletion), ""},
      {KeyStr("M", 16U, kTypeSingleDeletion), ""},
      {KeyStr("M", 15U, kTypeSingleDeletion), ""},
      {KeyStr("M", 14U, kTypeValue), "val"},
      {KeyStr("M", 13U, kTypeSingleDeletion), ""},
      {KeyStr("M", 12U, kTypeSingleDeletion), ""},
      {KeyStr("M", 11U, kTypeValue), "val"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({
      {KeyStr("A", 10U, kTypeValue), "val"},
      {KeyStr("B", 12U, kTypeSingleDeletion), ""},
      {KeyStr("B", 11U, kTypeValue), "val2"},
      {KeyStr("C", 10U, kTypeSingleDeletion), ""},
      {KeyStr("C", 9U, kTypeValue), "val6"},
      {KeyStr("C", 8U, kTypeSingleDeletion), ""},
      {KeyStr("D", 10U, kTypeSingleDeletion), ""},
      {KeyStr("E", 12U, kTypeSingleDeletion), ""},
      {KeyStr("E", 11U, kTypeValue), "val"},
      {KeyStr("E", 5U, kTypeSingleDeletion), ""},
      {KeyStr("E", 4U, kTypeValue), "val"},
      {KeyStr("F", 6U, kTypeSingleDeletion), ""},
      {KeyStr("F", 5U, kTypeValue), "val"},
      {KeyStr("F", 4U, kTypeSingleDeletion), ""},
      {KeyStr("F", 3U, kTypeValue), "val"},
      {KeyStr("G", 12U, kTypeSingleDeletion), ""},
      {KeyStr("H", 6U, kTypeSingleDeletion), ""},
      {KeyStr("H", 5U, kTypeValue), "val"},
      {KeyStr("H", 4U, kTypeSingleDeletion), ""},
      {KeyStr("H", 3U, kTypeValue), "val"},
      {KeyStr("I", 12U, kTypeSingleDeletion), ""},
      {KeyStr("I", 11U, kTypeValue), "val"},
      {KeyStr("J", 6U, kTypeSingleDeletion), ""},
      {KeyStr("J", 5U, kTypeSingleDeletion), ""},
      {KeyStr("J", 4U, kTypeValue), "val"},
      {KeyStr("J", 3U, kTypeSingleDeletion), ""},
      {KeyStr("J", 2U, kTypeValue), "val"},
      {KeyStr("K", 8U, kTypeValue), "val3"},
      {KeyStr("K", 7U, kTypeValue), "val4"},
      {KeyStr("K", 6U, kTypeSingleDeletion), ""},
      {KeyStr("K", 5U, kTypeValue), "val5"},
      {KeyStr("K", 2U, kTypeSingleDeletion), ""},
      {KeyStr("K", 1U, kTypeSingleDeletion), ""},
      {KeyStr("L", 5U, kTypeSingleDeletion), ""},
      {KeyStr("L", 4U, kTypeValue), "val"},
      {KeyStr("L", 3U, kTypeSingleDeletion), ""},
      {KeyStr("L", 2U, kTypeValue), "val"},
      {KeyStr("L", 1U, kTypeSingleDeletion), ""},
      {KeyStr("M", 10U, kTypeSingleDeletion), ""},
      {KeyStr("M", 7U, kTypeValue), "val"},
      {KeyStr("M", 5U, kTypeSingleDeletion), ""},
      {KeyStr("M", 4U, kTypeValue), "val"},
      {KeyStr("M", 3U, kTypeSingleDeletion), ""},
  });
  AddMockFile(file2);

  auto file3 = mock::MakeMockFile({
      {KeyStr("D", 1U, kTypeValue), "val"},
      {KeyStr("H", 1U, kTypeValue), "val"},
      {KeyStr("I", 2U, kTypeValue), "val"},
  });
  AddMockFile(file3, 2);

  auto file4 = mock::MakeMockFile({
      {KeyStr("M", 1U, kTypeValue), "val"},
  });
  AddMockFile(file4, 2);

  auto expected_results =
      mock::MakeMockFile({{KeyStr("A", 14U, kTypeSingleDeletion), ""},
                          {KeyStr("A", 13U, kTypeValue), ""},
                          {KeyStr("A", 12U, kTypeSingleDeletion), ""},
                          {KeyStr("A", 10U, kTypeValue), "val"},
                          {KeyStr("B", 14U, kTypeSingleDeletion), ""},
                          {KeyStr("B", 13U, kTypeValue), ""},
                          {KeyStr("C", 14U, kTypeValue), "val3"},
                          {KeyStr("D", 12U, kTypeSingleDeletion), ""},
                          {KeyStr("D", 11U, kTypeValue), ""},
                          {KeyStr("D", 10U, kTypeSingleDeletion), ""},
                          {KeyStr("E", 12U, kTypeSingleDeletion), ""},
                          {KeyStr("E", 11U, kTypeValue), ""},
                          {KeyStr("G", 15U, kTypeValue), "val"},
                          {KeyStr("G", 12U, kTypeSingleDeletion), ""},
                          {KeyStr("I", 14U, kTypeSingleDeletion), ""},
                          {KeyStr("I", 13U, kTypeValue), ""},
                          {KeyStr("J", 15U, kTypeValue), "val"},
                          {KeyStr("K", 16U, kTypeSingleDeletion), ""},
                          {KeyStr("K", 15U, kTypeValue), ""},
                          {KeyStr("K", 11U, kTypeSingleDeletion), ""},
                          {KeyStr("K", 8U, kTypeValue), "val3"},
                          {KeyStr("L", 16U, kTypeSingleDeletion), ""},
                          {KeyStr("L", 15U, kTypeValue), ""},
                          {KeyStr("L", 11U, kTypeSingleDeletion), ""},
                          {KeyStr("M", 15U, kTypeSingleDeletion), ""},
                          {KeyStr("M", 14U, kTypeValue), ""},
                          {KeyStr("M", 3U, kTypeSingleDeletion), ""}});

  SetLastSequence(22U);

  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
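  // The lone snapshot at seq 10 is also passed as the earliest
  // write-conflict snapshot; note in the expected output that SDel/Put pairs
  // newer than it are kept with their values cleared rather than dropped
  // outright.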
  RunCompaction({files}, {input_level}, {expected_results}, {10U}, 10U);
}

// This test documents the behavior where a corrupt key follows a deletion or a
// single deletion and the (single) deletion gets removed while the corrupt key
// gets written out. TODO(noetzli): We probably want a better way to treat
// corrupt keys.
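// Editorial note: in the mock files below, the trailing `true` passed to
// test::KeyStr is the flag that marks the generated internal key as corrupt.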
TEST_F(CompactionJobTest, DISABLED_CorruptionAfterDeletion) {
  NewDB();

  auto file1 =
      mock::MakeMockFile({{test::KeyStr("A", 6U, kTypeValue), "val3"},
                          {test::KeyStr("a", 5U, kTypeDeletion), ""},
                          {test::KeyStr("a", 4U, kTypeValue, true), "val"}});
  AddMockFile(file1);

  auto file2 =
      mock::MakeMockFile({{test::KeyStr("b", 3U, kTypeSingleDeletion), ""},
                          {test::KeyStr("b", 2U, kTypeValue, true), "val"},
                          {test::KeyStr("c", 1U, kTypeValue), "val2"}});
  AddMockFile(file2);

  auto expected_results =
      mock::MakeMockFile({{test::KeyStr("A", 0U, kTypeValue), "val3"},
                          {test::KeyStr("a", 0U, kTypeValue, true), "val"},
                          {test::KeyStr("b", 0U, kTypeValue, true), "val"},
                          {test::KeyStr("c", 0U, kTypeValue), "val2"}});

  SetLastSequence(6U);

  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}

TEST_F(CompactionJobTest, OldestBlobFileNumber) {
  NewDB();

  // Note: blob1 is inlined TTL, so it will not be considered for the purposes
  // of identifying the oldest referenced blob file. Similarly, blob6 will be
  // ignored because it has TTL and hence refers to a TTL blob file.
  const stl_wrappers::KVMap::value_type blob1(
      KeyStr("a", 1U, kTypeBlobIndex), BlobStrInlinedTTL("foo", 1234567890ULL));
  const stl_wrappers::KVMap::value_type blob2(KeyStr("b", 2U, kTypeBlobIndex),
                                              BlobStr(59, 123456, 999));
  const stl_wrappers::KVMap::value_type blob3(KeyStr("c", 3U, kTypeBlobIndex),
                                              BlobStr(138, 1000, 1 << 8));
  auto file1 = mock::MakeMockFile({blob1, blob2, blob3});
  AddMockFile(file1);

  const stl_wrappers::KVMap::value_type blob4(KeyStr("d", 4U, kTypeBlobIndex),
                                              BlobStr(199, 3 << 10, 1 << 20));
  const stl_wrappers::KVMap::value_type blob5(KeyStr("e", 5U, kTypeBlobIndex),
                                              BlobStr(19, 6789, 333));
  const stl_wrappers::KVMap::value_type blob6(
      KeyStr("f", 6U, kTypeBlobIndex),
      BlobStrTTL(5, 2048, 1 << 7, 1234567890ULL));
  auto file2 = mock::MakeMockFile({blob4, blob5, blob6});
  AddMockFile(file2);

  const stl_wrappers::KVMap::value_type expected_blob1(
      KeyStr("a", 0U, kTypeBlobIndex), blob1.second);
  const stl_wrappers::KVMap::value_type expected_blob2(
      KeyStr("b", 0U, kTypeBlobIndex), blob2.second);
  const stl_wrappers::KVMap::value_type expected_blob3(
      KeyStr("c", 0U, kTypeBlobIndex), blob3.second);
  const stl_wrappers::KVMap::value_type expected_blob4(
      KeyStr("d", 0U, kTypeBlobIndex), blob4.second);
  const stl_wrappers::KVMap::value_type expected_blob5(
      KeyStr("e", 0U, kTypeBlobIndex), blob5.second);
  const stl_wrappers::KVMap::value_type expected_blob6(
      KeyStr("f", 0U, kTypeBlobIndex), blob6.second);
  auto expected_results =
      mock::MakeMockFile({expected_blob1, expected_blob2, expected_blob3,
                          expected_blob4, expected_blob5, expected_blob6});

  SetLastSequence(6U);

  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
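  // blob2 through blob5 reference blob files 59, 138, 199 and 19; the
  // smallest of those, 19, is the expected oldest referenced blob file
  // number (blob1 is inlined and blob6 lives in a TTL blob file, so both
  // are skipped).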
  RunCompaction({files}, {input_level}, {expected_results},
                std::vector<SequenceNumber>(), kMaxSequenceNumber,
                /* output_level */ 1, /* verify */ true,
                /* expected_oldest_blob_file_numbers */ {19});
}

TEST_F(CompactionJobTest, VerifyPenultimateLevelOutput) {
  cf_options_.bottommost_temperature = Temperature::kCold;
  SyncPoint::GetInstance()->SetCallBack(
      "Compaction::SupportsPerKeyPlacement:Enabled", [&](void* arg) {
        auto supports_per_key_placement = static_cast<bool*>(arg);
        *supports_per_key_placement = true;
      });

  std::atomic_uint64_t latest_cold_seq = 0;

  SyncPoint::GetInstance()->SetCallBack(
      "CompactionIterator::PrepareOutput.context", [&](void* arg) {
        auto context = static_cast<PerKeyPlacementContext*>(arg);
        context->output_to_penultimate_level =
            context->seq_num > latest_cold_seq;
      });
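  // With latest_cold_seq pinned at 0, every key (all input sequence numbers
  // below are >= 2) is routed to the penultimate level, which is what the
  // verify_func passed to RunLastLevelCompaction below relies on.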
  SyncPoint::GetInstance()->EnableProcessing();

  NewDB();

  // Add files on different levels that may overlap
  auto file0_1 = mock::MakeMockFile({{KeyStr("z", 12U, kTypeValue), "val"}});
  AddMockFile(file0_1);

  auto file1_1 = mock::MakeMockFile({{KeyStr("b", 10U, kTypeValue), "val"},
                                     {KeyStr("f", 11U, kTypeValue), "val"}});
  AddMockFile(file1_1, 1);
  auto file1_2 = mock::MakeMockFile({{KeyStr("j", 12U, kTypeValue), "val"},
                                     {KeyStr("k", 13U, kTypeValue), "val"}});
  AddMockFile(file1_2, 1);
  auto file1_3 = mock::MakeMockFile({{KeyStr("p", 14U, kTypeValue), "val"},
                                     {KeyStr("u", 15U, kTypeValue), "val"}});
  AddMockFile(file1_3, 1);

  auto file2_1 = mock::MakeMockFile({{KeyStr("f", 8U, kTypeValue), "val"},
                                     {KeyStr("h", 9U, kTypeValue), "val"}});
  AddMockFile(file2_1, 2);
  auto file2_2 = mock::MakeMockFile({{KeyStr("m", 6U, kTypeValue), "val"},
                                     {KeyStr("p", 7U, kTypeValue), "val"}});
  AddMockFile(file2_2, 2);

  auto file3_1 = mock::MakeMockFile({{KeyStr("g", 2U, kTypeValue), "val"},
                                     {KeyStr("k", 3U, kTypeValue), "val"}});
  AddMockFile(file3_1, 3);
  auto file3_2 = mock::MakeMockFile({{KeyStr("v", 4U, kTypeValue), "val"},
                                     {KeyStr("x", 5U, kTypeValue), "val"}});
  AddMockFile(file3_2, 3);

  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
  const std::vector<int> input_levels = {0, 1, 2, 3};
  auto files0 = cfd->current()->storage_info()->LevelFiles(input_levels[0]);
  auto files1 = cfd->current()->storage_info()->LevelFiles(input_levels[1]);
  auto files2 = cfd->current()->storage_info()->LevelFiles(input_levels[2]);
  auto files3 = cfd->current()->storage_info()->LevelFiles(input_levels[3]);

  RunLastLevelCompaction(
      {files0, files1, files2, files3}, input_levels,
      /*verify_func=*/[&](Compaction& comp) {
        for (char c = 'a'; c <= 'z'; c++) {
          std::string c_str;
          c_str = c;
          const Slice key(c_str);
          if (c == 'a') {
            ASSERT_FALSE(comp.WithinPenultimateLevelOutputRange(key));
          } else {
            ASSERT_TRUE(comp.WithinPenultimateLevelOutputRange(key));
          }
        }
      });
}

TEST_F(CompactionJobTest, NoEnforceSingleDeleteContract) {
  db_options_.enforce_single_del_contracts = false;
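  // With the default (enforce_single_del_contracts = true), a SingleDelete
  // directly covering a Delete is a contract violation that is expected to
  // fail the compaction; with enforcement disabled, both tombstones are
  // simply compacted away.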
  NewDB();

  auto file =
      mock::MakeMockFile({{KeyStr("a", 4U, kTypeSingleDeletion), ""},
                          {KeyStr("a", 3U, kTypeDeletion), "dontcare"}});
  AddMockFile(file);
  SetLastSequence(4U);

  auto expected_results = mock::MakeMockFile();
  constexpr int input_level = 0;
  auto files = cfd_->current()->storage_info()->LevelFiles(input_level);
  RunCompaction({files}, {input_level}, {expected_results});
}

TEST_F(CompactionJobTest, InputSerialization) {
  // Setup a random CompactionServiceInput
  CompactionServiceInput input;
  const int kStrMaxLen = 1000;
  Random rnd(static_cast<uint32_t>(time(nullptr)));
  Random64 rnd64(time(nullptr));
  input.column_family.name = rnd.RandomString(rnd.Uniform(kStrMaxLen));
  input.column_family.options.comparator = ReverseBytewiseComparator();
  input.column_family.options.max_bytes_for_level_base =
      rnd64.Uniform(UINT64_MAX);
  input.column_family.options.disable_auto_compactions = rnd.OneIn(2);
  input.column_family.options.compression = kZSTD;
  input.column_family.options.compression_opts.level = 4;
  input.db_options.max_background_flushes = 10;
  input.db_options.paranoid_checks = rnd.OneIn(2);
  input.db_options.statistics = CreateDBStatistics();
  input.db_options.env = env_;
  while (!rnd.OneIn(10)) {
    input.snapshots.emplace_back(rnd64.Uniform(UINT64_MAX));
  }
  while (!rnd.OneIn(10)) {
    input.input_files.emplace_back(rnd.RandomString(
        rnd.Uniform(kStrMaxLen - 1) +
        1));  // input file name should have at least one character
  }
  input.output_level = 4;
  input.has_begin = rnd.OneIn(2);
  if (input.has_begin) {
    input.begin = rnd.RandomBinaryString(rnd.Uniform(kStrMaxLen));
  }
  input.has_end = rnd.OneIn(2);
  if (input.has_end) {
    input.end = rnd.RandomBinaryString(rnd.Uniform(kStrMaxLen));
  }

  std::string output;
  ASSERT_OK(input.Write(&output));
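  // The serialized form appears to be a fixed32 version prefix followed by
  // textual "name=value;" fields; the unknown-field, missing-field and
  // invalid-version sub-tests below all manipulate the string under that
  // assumption.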

  // Test deserialization
  CompactionServiceInput deserialized1;
  ASSERT_OK(CompactionServiceInput::Read(output, &deserialized1));
  ASSERT_TRUE(deserialized1.TEST_Equals(&input));

  // Test mismatch
  deserialized1.db_options.max_background_flushes += 10;
  std::string mismatch;
  ASSERT_FALSE(deserialized1.TEST_Equals(&input, &mismatch));
  ASSERT_EQ(mismatch, "db_options.max_background_flushes");

  // Test unknown field
  CompactionServiceInput deserialized2;
  output.clear();
  ASSERT_OK(input.Write(&output));
  output.append("new_field=123;");

  ASSERT_OK(CompactionServiceInput::Read(output, &deserialized2));
  ASSERT_TRUE(deserialized2.TEST_Equals(&input));

  // Test missing field
  CompactionServiceInput deserialized3;
  deserialized3.output_level = 0;
  std::string to_remove = "output_level=4;";
  size_t pos = output.find(to_remove);
  ASSERT_TRUE(pos != std::string::npos);
  output.erase(pos, to_remove.length());
  ASSERT_OK(CompactionServiceInput::Read(output, &deserialized3));
  mismatch.clear();
  ASSERT_FALSE(deserialized3.TEST_Equals(&input, &mismatch));
  ASSERT_EQ(mismatch, "output_level");

  // manually set the value back, should match the original structure
  deserialized3.output_level = 4;
  ASSERT_TRUE(deserialized3.TEST_Equals(&input));

  // Test invalid version
  output.clear();
  ASSERT_OK(input.Write(&output));

  uint32_t data_version = DecodeFixed32(output.data());
  const size_t kDataVersionSize = sizeof(data_version);
  ASSERT_EQ(data_version,
            1U);  // Update once the default data version is changed
  char buf[kDataVersionSize];
  EncodeFixed32(buf, data_version + 10);  // make sure it's not valid
  output.replace(0, kDataVersionSize, buf, kDataVersionSize);
  Status s = CompactionServiceInput::Read(output, &deserialized3);
  ASSERT_TRUE(s.IsNotSupported());
}

TEST_F(CompactionJobTest, ResultSerialization) {
  // Setup a random CompactionServiceResult
  CompactionServiceResult result;
  const int kStrMaxLen = 1000;
  Random rnd(static_cast<uint32_t>(time(nullptr)));
  Random64 rnd64(time(nullptr));
  std::vector<Status> status_list = {
      Status::OK(),
      Status::InvalidArgument("invalid option"),
      Status::Aborted("failed to run"),
      Status::NotSupported("not supported option"),
  };
  result.status =
      status_list.at(rnd.Uniform(static_cast<int>(status_list.size())));
  while (!rnd.OneIn(10)) {
    UniqueId64x2 id{rnd64.Uniform(UINT64_MAX), rnd64.Uniform(UINT64_MAX)};
    result.output_files.emplace_back(
        rnd.RandomString(rnd.Uniform(kStrMaxLen)), rnd64.Uniform(UINT64_MAX),
        rnd64.Uniform(UINT64_MAX),
        rnd.RandomBinaryString(rnd.Uniform(kStrMaxLen)),
        rnd.RandomBinaryString(rnd.Uniform(kStrMaxLen)),
        rnd64.Uniform(UINT64_MAX), rnd64.Uniform(UINT64_MAX),
        rnd64.Uniform(UINT64_MAX), rnd.OneIn(2), id);
  }
  result.output_level = rnd.Uniform(10);
  result.output_path = rnd.RandomString(rnd.Uniform(kStrMaxLen));
  result.num_output_records = rnd64.Uniform(UINT64_MAX);
  result.total_bytes = rnd64.Uniform(UINT64_MAX);
  result.bytes_read = 123;
  result.bytes_written = rnd64.Uniform(UINT64_MAX);
  result.stats.elapsed_micros = rnd64.Uniform(UINT64_MAX);
  result.stats.num_output_files = rnd.Uniform(1000);
  result.stats.is_full_compaction = rnd.OneIn(2);
  result.stats.num_single_del_mismatch = rnd64.Uniform(UINT64_MAX);
  result.stats.num_input_files = 9;

  std::string output;
  ASSERT_OK(result.Write(&output));

  // Test deserialization
  CompactionServiceResult deserialized1;
  ASSERT_OK(CompactionServiceResult::Read(output, &deserialized1));
  ASSERT_TRUE(deserialized1.TEST_Equals(&result));

  // Test mismatch
  deserialized1.stats.num_input_files += 10;
  std::string mismatch;
  ASSERT_FALSE(deserialized1.TEST_Equals(&result, &mismatch));
  ASSERT_EQ(mismatch, "stats.num_input_files");

  // Test unique id mismatch
  if (!result.output_files.empty()) {
    CompactionServiceResult deserialized_tmp;
    ASSERT_OK(CompactionServiceResult::Read(output, &deserialized_tmp));
    deserialized_tmp.output_files[0].unique_id[0] += 1;
    ASSERT_FALSE(deserialized_tmp.TEST_Equals(&result, &mismatch));
    ASSERT_EQ(mismatch, "output_files.unique_id");
    deserialized_tmp.status.PermitUncheckedError();
  }

  // Test unknown field
  CompactionServiceResult deserialized2;
  output.clear();
  ASSERT_OK(result.Write(&output));
  output.append("new_field=123;");

  ASSERT_OK(CompactionServiceResult::Read(output, &deserialized2));
  ASSERT_TRUE(deserialized2.TEST_Equals(&result));

  // Test missing field
  CompactionServiceResult deserialized3;
  deserialized3.bytes_read = 0;
  std::string to_remove = "bytes_read=123;";
  size_t pos = output.find(to_remove);
  ASSERT_TRUE(pos != std::string::npos);
  output.erase(pos, to_remove.length());
  ASSERT_OK(CompactionServiceResult::Read(output, &deserialized3));
  mismatch.clear();
  ASSERT_FALSE(deserialized3.TEST_Equals(&result, &mismatch));
  ASSERT_EQ(mismatch, "bytes_read");

  deserialized3.bytes_read = 123;
  ASSERT_TRUE(deserialized3.TEST_Equals(&result));

  // Test invalid version
  output.clear();
  ASSERT_OK(result.Write(&output));

  uint32_t data_version = DecodeFixed32(output.data());
  const size_t kDataVersionSize = sizeof(data_version);
  ASSERT_EQ(data_version,
            1U);  // Update once the default data version is changed
  char buf[kDataVersionSize];
  EncodeFixed32(buf, data_version + 10);  // make sure it's not valid
  output.replace(0, kDataVersionSize, buf, kDataVersionSize);
  Status s = CompactionServiceResult::Read(output, &deserialized3);
  ASSERT_TRUE(s.IsNotSupported());
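  // In builds that enforce status checking, every Status must be marked as
  // checked before it is destroyed; the loop below does that for the
  // status_list entries that were never consumed above.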
  for (const auto& item : status_list) {
    item.PermitUncheckedError();
  }
}

class CompactionJobDynamicFileSizeTest
    : public CompactionJobTestBase,
      public ::testing::WithParamInterface<bool> {
 public:
  CompactionJobDynamicFileSizeTest()
      : CompactionJobTestBase(
            test::PerThreadDBPath("compaction_job_dynamic_file_size_test"),
            BytewiseComparator(), [](uint64_t /*ts*/) { return ""; },
            /*test_io_priority=*/false, TableTypeForTest::kMockTable) {}
};

TEST_P(CompactionJobDynamicFileSizeTest, CutForMaxCompactionBytes) {
  // The dynamic_file_size option should have no impact on cutting for max
  // compaction bytes.
  bool enable_dynamic_file_size = GetParam();
  cf_options_.level_compaction_dynamic_file_size = enable_dynamic_file_size;

  NewDB();
  mutable_cf_options_.target_file_size_base = 80;
  mutable_cf_options_.max_compaction_bytes = 21;

  auto file1 = mock::MakeMockFile({
      {KeyStr("c", 5U, kTypeValue), "val2"},
      {KeyStr("n", 6U, kTypeValue), "val3"},
  });
  AddMockFile(file1);

  auto file2 = mock::MakeMockFile({{KeyStr("h", 3U, kTypeValue), "val"},
                                   {KeyStr("j", 4U, kTypeValue), "val"}});
  AddMockFile(file2, 1);

  // Create three L2 files, each size 10.
  // max_compaction_bytes 21 means the compaction output in L1 will
  // be cut to at least two files.
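  // (Arithmetic: with max_compaction_bytes = 21 and L2 files of size 10, an
  // L1 output file may overlap at most two grandparent files, since
  // 2 * 10 <= 21 < 3 * 10, so no single output file can span all three.)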
|
|
|
|
auto file3 = mock::MakeMockFile({{KeyStr("b", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("c", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("c1", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("c2", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("c3", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("c4", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("d", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("e", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file3, 2);
|
|
|
|
|
|
|
|
auto file4 = mock::MakeMockFile({{KeyStr("h", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("i", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("i1", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("i2", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("i3", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("i4", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("j", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("k", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file4, 2);
|
|
|
|
|
|
|
|
auto file5 = mock::MakeMockFile({{KeyStr("l", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("m", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("m1", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("m2", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("m3", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("m4", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("n", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("o", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file5, 2);
|
|
|
|
|
2022-10-06 22:54:58 +00:00
|
|
|
// The expected output should be:
|
|
|
|
// L1: [c, h, j] [n]
|
|
|
|
// L2: [b ... e] [h ... k] [l ... o]
|
|
|
|
// It's better to have "j" in the first file, because it overlaps with the
|
|
|
|
// second file on L2 anyway.
|
|
|
|
// (Note: before this PR, the cut happened at "h" because the internal
|
|
|
|
// comparator thinks L1 "h" with seqno 3 is smaller than L2 "h" with seqno 1,
|
|
|
|
// but from the compaction picker's perspective they overlap; see the
// simplified comparator sketch after this test.)
|
|
|
|
|
2022-09-30 02:43:55 +00:00
|
|
|
auto expected_file1 =
|
|
|
|
mock::MakeMockFile({{KeyStr("c", 5U, kTypeValue), "val2"},
|
2022-10-06 22:54:58 +00:00
|
|
|
{KeyStr("h", 3U, kTypeValue), "val"},
|
|
|
|
{KeyStr("j", 4U, kTypeValue), "val"}});
|
2022-09-30 02:43:55 +00:00
|
|
|
auto expected_file2 =
|
2022-10-06 22:54:58 +00:00
|
|
|
mock::MakeMockFile({{KeyStr("n", 6U, kTypeValue), "val3"}});
|
2022-09-30 02:43:55 +00:00
|
|
|
|
|
|
|
SetLastSequence(6U);
|
|
|
|
|
|
|
|
const std::vector<int> input_levels = {0, 1};
|
|
|
|
auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
|
|
|
|
auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);
|
|
|
|
|
|
|
|
RunCompaction({lvl0_files, lvl1_files}, input_levels,
|
|
|
|
{expected_file1, expected_file2});
|
|
|
|
}
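// A simplified model of the internal key ordering referenced in the comments
// above (an illustrative assumption; RocksDB's InternalKeyComparator also
// considers the value type): within one user key, a higher sequence number
// sorts first, which is why "h" with seqno 3 from L1 orders before "h" with
// seqno 1 from L2 even though both files cover user key "h".
struct SimplifiedInternalKey {
  std::string user_key;
  uint64_t seqno;
};

inline bool SimplifiedInternalLess(const SimplifiedInternalKey& a,
                                   const SimplifiedInternalKey& b) {
  if (a.user_key != b.user_key) {
    return a.user_key < b.user_key;  // ascending user key order
  }
  return a.seqno > b.seqno;  // newer (higher seqno) entries sort first
}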
|
|
|
|
|
|
|
|
TEST_P(CompactionJobDynamicFileSizeTest, CutToSkipGrandparentFile) {
|
|
|
|
bool enable_dynamic_file_size = GetParam();
|
|
|
|
cf_options_.level_compaction_dynamic_file_size = enable_dynamic_file_size;
|
|
|
|
|
|
|
|
NewDB();
|
|
|
|
// Make sure the grandparent level file size (10) qualifies for skipping.
|
|
|
|
// Currently, it has to be > 1/8 of the target file size (see the sketch
// after this test).
|
|
|
|
mutable_cf_options_.target_file_size_base = 70;
|
|
|
|
|
|
|
|
auto file1 = mock::MakeMockFile({
|
|
|
|
{KeyStr("a", 5U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("z", 6U, kTypeValue), "val3"},
|
|
|
|
});
|
|
|
|
AddMockFile(file1);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile({{KeyStr("c", 3U, kTypeValue), "val"},
|
|
|
|
{KeyStr("x", 4U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file2, 1);
|
|
|
|
|
|
|
|
auto file3 = mock::MakeMockFile({{KeyStr("b", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("d", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file3, 2);
|
|
|
|
|
|
|
|
auto file4 = mock::MakeMockFile({{KeyStr("h", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("i", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file4, 2);
|
|
|
|
|
|
|
|
auto file5 = mock::MakeMockFile({{KeyStr("v", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("y", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file5, 2);
|
|
|
|
|
|
|
|
auto expected_file1 =
|
|
|
|
mock::MakeMockFile({{KeyStr("a", 5U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("c", 3U, kTypeValue), "val"}});
|
|
|
|
auto expected_file2 =
|
|
|
|
mock::MakeMockFile({{KeyStr("x", 4U, kTypeValue), "val"},
|
|
|
|
{KeyStr("z", 6U, kTypeValue), "val3"}});
|
|
|
|
|
|
|
|
auto expected_file_disable_dynamic_file_size =
|
|
|
|
mock::MakeMockFile({{KeyStr("a", 5U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("c", 3U, kTypeValue), "val"},
|
|
|
|
{KeyStr("x", 4U, kTypeValue), "val"},
|
|
|
|
{KeyStr("z", 6U, kTypeValue), "val3"}});
|
|
|
|
|
|
|
|
SetLastSequence(6U);
|
|
|
|
const std::vector<int> input_levels = {0, 1};
|
|
|
|
auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
|
|
|
|
auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);
|
|
|
|
if (enable_dynamic_file_size) {
|
|
|
|
RunCompaction({lvl0_files, lvl1_files}, input_levels,
|
|
|
|
{expected_file1, expected_file2});
|
|
|
|
} else {
|
|
|
|
RunCompaction({lvl0_files, lvl1_files}, input_levels,
|
|
|
|
{expected_file_disable_dynamic_file_size});
|
|
|
|
}
|
|
|
|
}
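// Illustrative predicate for the skipping threshold quoted at the top of the
// test above (the 1/8 ratio comes from that comment; the helper name is an
// assumption): with target_file_size_base = 70, a grandparent file of size 10
// qualifies because 10 > 70 / 8.
inline bool GrandparentQualifiesForSkipping(uint64_t grandparent_file_size,
                                            uint64_t target_file_size) {
  return grandparent_file_size > target_file_size / 8;
}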
|
|
|
|
|
|
|
|
TEST_P(CompactionJobDynamicFileSizeTest, CutToAlignGrandparentBoundary) {
|
|
|
|
bool enable_dynamic_file_size = GetParam();
|
|
|
|
cf_options_.level_compaction_dynamic_file_size = enable_dynamic_file_size;
|
|
|
|
NewDB();
|
|
|
|
|
|
|
|
// MockTable has 1 byte per entry by default and each file is 10 bytes.
|
|
|
|
// When the file size is smaller than 100, it won't cut the file early to align
|
|
|
|
// with its grandparent boundary.
|
|
|
|
const size_t kKeyValueSize = 10000;
|
|
|
|
mock_table_factory_->SetKeyValueSize(kKeyValueSize);
|
|
|
|
|
|
|
|
mutable_cf_options_.target_file_size_base = 10 * kKeyValueSize;
|
|
|
|
|
|
|
|
mock::KVVector file1;
|
|
|
|
char ch = 'd';
|
|
|
|
// Add values from d -> o
|
|
|
|
for (char i = 0; i < 12; i++) {
|
|
|
|
file1.emplace_back(KeyStr(std::string(1, ch + i), i + 10, kTypeValue),
|
|
|
|
"val" + std::to_string(i));
|
|
|
|
}
|
|
|
|
|
|
|
|
AddMockFile(file1);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile({{KeyStr("e", 3U, kTypeValue), "val"},
|
|
|
|
{KeyStr("s", 4U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file2, 1);
|
|
|
|
|
|
|
|
// The 1st grandparent file should be skipped.
|
|
|
|
auto file3 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("b", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file3, 2);
|
|
|
|
|
|
|
|
auto file4 = mock::MakeMockFile({{KeyStr("c", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("e", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file4, 2);
|
|
|
|
|
|
|
|
auto file5 = mock::MakeMockFile({{KeyStr("h", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("j", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file5, 2);
|
|
|
|
|
|
|
|
auto file6 = mock::MakeMockFile({{KeyStr("k", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("n", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file6, 2);
|
|
|
|
|
|
|
|
auto file7 = mock::MakeMockFile({{KeyStr("q", 1U, kTypeValue), "val"},
|
|
|
|
{KeyStr("t", 2U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file7, 2);
|
|
|
|
|
|
|
|
// The expected outputs are:
|
|
|
|
// L1: [d,e,f,g,h,i,j] [k,l,m,n,o,s]
|
|
|
|
// L2: [a, b] [c, e] [h, j] [k, n] [q, t]
|
|
|
|
// The first output is cut early at "j" so it can be aligned with the L2 files.
|
|
|
|
// If dynamic_file_size is not enabled, it will be cut based on the
|
|
|
|
// target_file_size.
|
|
|
|
mock::KVVector expected_file1;
|
|
|
|
for (char i = 0; i < 7; i++) {
|
|
|
|
expected_file1.emplace_back(
|
|
|
|
KeyStr(std::string(1, ch + i), i + 10, kTypeValue),
|
|
|
|
"val" + std::to_string(i));
|
|
|
|
}
|
|
|
|
|
|
|
|
mock::KVVector expected_file2;
|
|
|
|
for (char i = 7; i < 12; i++) {
|
|
|
|
expected_file2.emplace_back(
|
|
|
|
KeyStr(std::string(1, ch + i), i + 10, kTypeValue),
|
|
|
|
"val" + std::to_string(i));
|
|
|
|
}
|
|
|
|
expected_file2.emplace_back(KeyStr("s", 4U, kTypeValue), "val");
|
|
|
|
|
|
|
|
mock::KVVector expected_file_disable_dynamic_file_size1;
|
|
|
|
for (char i = 0; i < 10; i++) {
|
|
|
|
expected_file_disable_dynamic_file_size1.emplace_back(
|
|
|
|
KeyStr(std::string(1, ch + i), i + 10, kTypeValue),
|
|
|
|
"val" + std::to_string(i));
|
|
|
|
}
|
|
|
|
|
|
|
|
mock::KVVector expected_file_disable_dynamic_file_size2;
|
|
|
|
for (char i = 10; i < 12; i++) {
|
|
|
|
expected_file_disable_dynamic_file_size2.emplace_back(
|
|
|
|
KeyStr(std::string(1, ch + i), i + 10, kTypeValue),
|
|
|
|
"val" + std::to_string(i));
|
|
|
|
}
|
|
|
|
|
|
|
|
expected_file_disable_dynamic_file_size2.emplace_back(
|
|
|
|
KeyStr("s", 4U, kTypeValue), "val");
|
|
|
|
|
|
|
|
SetLastSequence(22U);
|
|
|
|
const std::vector<int> input_levels = {0, 1};
|
|
|
|
auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
|
|
|
|
auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);
|
|
|
|
if (enable_dynamic_file_size) {
|
|
|
|
RunCompaction({lvl0_files, lvl1_files}, input_levels,
|
|
|
|
{expected_file1, expected_file2});
|
|
|
|
} else {
|
|
|
|
RunCompaction({lvl0_files, lvl1_files}, input_levels,
|
|
|
|
{expected_file_disable_dynamic_file_size1,
|
|
|
|
expected_file_disable_dynamic_file_size2});
|
|
|
|
}
|
|
|
|
}
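// Worked numbers for the test above, assuming the 50%-of-target lower bound
// from the #10655 commit message: the target is 10 * kKeyValueSize and the
// first output is cut after 7 entries (~70% of the target), which is inside
// the allowed window, so aligning with the L2 boundary at "j" is permitted.
static_assert(7 * 10000 >= (10 * 10000) / 2,
              "the early cut at 'j' is at or above 50% of the target size");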
|
|
|
|
|
2022-10-06 22:54:58 +00:00
|
|
|
TEST_P(CompactionJobDynamicFileSizeTest, CutToAlignGrandparentBoundarySameKey) {
|
|
|
|
bool enable_dynamic_file_size = GetParam();
|
|
|
|
cf_options_.level_compaction_dynamic_file_size = enable_dynamic_file_size;
|
|
|
|
NewDB();
|
|
|
|
|
|
|
|
// MockTable has 1 byte per entry by default and each file is 10 bytes.
|
|
|
|
// When the file size is smaller than 100, it won't cut the file early to align
|
|
|
|
// with its grandparent boundary.
|
|
|
|
const size_t kKeyValueSize = 10000;
|
|
|
|
mock_table_factory_->SetKeyValueSize(kKeyValueSize);
|
|
|
|
|
|
|
|
mutable_cf_options_.target_file_size_base = 10 * kKeyValueSize;
|
|
|
|
|
|
|
|
mock::KVVector file1;
|
|
|
|
for (int i = 0; i < 7; i++) {
|
|
|
|
file1.emplace_back(KeyStr("a", 100 - i, kTypeValue),
|
|
|
|
"val" + std::to_string(100 - i));
|
|
|
|
}
|
|
|
|
file1.emplace_back(KeyStr("b", 90, kTypeValue), "valb");
|
|
|
|
|
|
|
|
AddMockFile(file1);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile({{KeyStr("a", 93U, kTypeValue), "val93"},
|
|
|
|
{KeyStr("b", 90U, kTypeValue), "valb"}});
|
|
|
|
AddMockFile(file2, 1);
|
|
|
|
|
|
|
|
auto file3 = mock::MakeMockFile({{KeyStr("a", 89U, kTypeValue), "val"},
|
|
|
|
{KeyStr("a", 88U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file3, 2);
|
|
|
|
|
|
|
|
auto file4 = mock::MakeMockFile({{KeyStr("a", 87U, kTypeValue), "val"},
|
|
|
|
{KeyStr("a", 86U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file4, 2);
|
|
|
|
|
|
|
|
auto file5 = mock::MakeMockFile({{KeyStr("b", 85U, kTypeValue), "val"},
|
|
|
|
{KeyStr("b", 84U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file5, 2);
|
|
|
|
|
|
|
|
mock::KVVector expected_file1;
|
|
|
|
mock::KVVector expected_file_disable_dynamic_file_size;
|
|
|
|
|
|
|
|
for (int i = 0; i < 8; i++) {
|
|
|
|
expected_file1.emplace_back(KeyStr("a", 100 - i, kTypeValue),
|
|
|
|
"val" + std::to_string(100 - i));
|
|
|
|
expected_file_disable_dynamic_file_size.emplace_back(
|
|
|
|
KeyStr("a", 100 - i, kTypeValue), "val" + std::to_string(100 - i));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure `b` is cut into a separate file (so internally it's not using the
|
|
|
|
// internal comparator, which would think the "b:90" (seqno 90) here is smaller
|
|
|
|
// than "b:85" on L2).
|
|
|
|
auto expected_file2 =
|
|
|
|
mock::MakeMockFile({{KeyStr("b", 90U, kTypeValue), "valb"}});
|
|
|
|
|
|
|
|
expected_file_disable_dynamic_file_size.emplace_back(
|
|
|
|
KeyStr("b", 90U, kTypeValue), "valb");
|
|
|
|
|
|
|
|
SetLastSequence(122U);
|
|
|
|
const std::vector<int> input_levels = {0, 1};
|
|
|
|
auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
|
|
|
|
auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);
|
|
|
|
|
|
|
|
// Just keep all the history
|
|
|
|
std::vector<SequenceNumber> snapshots;
|
|
|
|
for (int i = 80; i <= 100; i++) {
|
|
|
|
snapshots.emplace_back(i);
|
|
|
|
}
|
|
|
|
if (enable_dynamic_file_size) {
|
|
|
|
RunCompaction({lvl0_files, lvl1_files}, input_levels,
|
|
|
|
{expected_file1, expected_file2}, snapshots);
|
|
|
|
} else {
|
|
|
|
RunCompaction({lvl0_files, lvl1_files}, input_levels,
|
|
|
|
{expected_file_disable_dynamic_file_size}, snapshots);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_P(CompactionJobDynamicFileSizeTest, CutForMaxCompactionBytesSameKey) {
|
|
|
|
// The dynamic_file_size option should have no impact on cutting for max
|
|
|
|
// compaction bytes.
|
|
|
|
bool enable_dynamic_file_size = GetParam();
|
|
|
|
cf_options_.level_compaction_dynamic_file_size = enable_dynamic_file_size;
|
|
|
|
|
|
|
|
NewDB();
|
|
|
|
mutable_cf_options_.target_file_size_base = 80;
|
|
|
|
mutable_cf_options_.max_compaction_bytes = 20;
|
|
|
|
|
|
|
|
auto file1 = mock::MakeMockFile({{KeyStr("a", 104U, kTypeValue), "val1"},
|
|
|
|
{KeyStr("b", 103U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file1);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile({{KeyStr("a", 102U, kTypeValue), "val2"},
|
|
|
|
{KeyStr("c", 101U, kTypeValue), "val"}});
|
|
|
|
AddMockFile(file2, 1);
|
|
|
|
|
|
|
|
for (int i = 0; i < 10; i++) {
|
|
|
|
auto file =
|
|
|
|
mock::MakeMockFile({{KeyStr("a", 100 - (i * 2), kTypeValue), "val"},
|
|
|
|
{KeyStr("a", 99 - (i * 2), kTypeValue), "val"}});
|
|
|
|
AddMockFile(file, 2);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (int i = 0; i < 10; i++) {
|
|
|
|
auto file =
|
|
|
|
mock::MakeMockFile({{KeyStr("b", 80 - (i * 2), kTypeValue), "val"},
|
|
|
|
{KeyStr("b", 79 - (i * 2), kTypeValue), "val"}});
|
|
|
|
AddMockFile(file, 2);
|
|
|
|
}
|
|
|
|
|
|
|
|
auto file5 = mock::MakeMockFile({{KeyStr("c", 60U, kTypeValue), "valc"},
|
|
|
|
{KeyStr("c", 59U, kTypeValue), "valc"}});
|
|
|
|
|
|
|
|
// "a" has 10 overlapped grandparent files (each size 10), which is far
|
|
|
|
// exceeded the `max_compaction_bytes`, but make sure 2 "a" are not separated,
|
|
|
|
// as splitting them won't help reducing the compaction size.
|
|
|
|
// also make sure "b" and "c" are cut separately.
|
|
|
|
mock::KVVector expected_file1 =
|
|
|
|
mock::MakeMockFile({{KeyStr("a", 104U, kTypeValue), "val1"},
|
|
|
|
{KeyStr("a", 102U, kTypeValue), "val2"}});
|
|
|
|
mock::KVVector expected_file2 =
|
|
|
|
mock::MakeMockFile({{KeyStr("b", 103U, kTypeValue), "val"}});
|
|
|
|
mock::KVVector expected_file3 =
|
|
|
|
mock::MakeMockFile({{KeyStr("c", 101U, kTypeValue), "val"}});
|
|
|
|
|
|
|
|
SetLastSequence(122U);
|
|
|
|
const std::vector<int> input_levels = {0, 1};
|
|
|
|
auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0);
|
|
|
|
auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1);
|
|
|
|
|
|
|
|
// Just keep all the history
|
|
|
|
std::vector<SequenceNumber> snapshots;
|
|
|
|
for (int i = 80; i <= 105; i++) {
|
|
|
|
snapshots.emplace_back(i);
|
|
|
|
}
|
|
|
|
RunCompaction({lvl0_files, lvl1_files}, input_levels,
|
|
|
|
{expected_file1, expected_file2, expected_file3}, snapshots);
|
|
|
|
}
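// Sketch of the invariant this test checks (phrasing assumed from the
// comments above, not taken from the RocksDB source): a cut made to respect
// max_compaction_bytes must land on a user-key boundary, so the two "a"
// entries always stay in the same output file.
inline bool CanCutBetweenUserKeys(const std::string& prev_user_key,
                                  const std::string& next_user_key) {
  return prev_user_key != next_user_key;
}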
|
|
|
|
|
2022-09-30 02:43:55 +00:00
|
|
|
INSTANTIATE_TEST_CASE_P(CompactionJobDynamicFileSizeTest,
|
|
|
|
CompactionJobDynamicFileSizeTest, testing::Bool());
|
Set Write rate limiter priority dynamically and pass it to FS (#9988)
Summary:
### Context:
Background compactions and flushes generate large reads and writes and can be long-running, especially for universal compaction. In some cases, this can impact foreground reads and writes by users.
From the RocksDB perspective, there can be two kinds of rate limiters, the internal (native) one and the external one.
- The internal (native) rate limiter is introduced in [the wiki](https://github.com/facebook/rocksdb/wiki/Rate-Limiter). Currently, only IO_LOW and IO_HIGH are used and they are set statically.
- For the external rate limiter, in FSWritableFile functions, IOOptions is open for end users to set and get rate_limiter_priority for their own rate limiter. Currently, RocksDB doesn’t pass the rate_limiter_priority through IOOptions to the file system.
### Solution
During the User Read, Flush write, Compaction read/write, the WriteController is used to determine whether DB writes are stalled or slowed down. The rate limiter priority (Env::IOPriority) can be determined accordingly. We decided to always pass the priority in IOOptions. What the file system does with it should be a contract between the user and the file system. We would like to set the rate limiter priority at file level, since the Flush/Compaction job level may be too coarse with multiple files and block IO level is too granular.
**This PR is for the Write path.** The dynamic **Write** priorities for the different states are listed as follows:
| State | Normal | Delayed | Stalled |
| ----- | ------ | ------- | ------- |
| Flush | IO_HIGH | IO_USER | IO_USER |
| Compaction | IO_LOW | IO_USER | IO_USER |
Flush and Compaction writes share the same call path through BlockBaseTableWriter, WritableFileWriter, and FSWritableFile. When a new FSWritableFile object is created, its io_priority_ can be set dynamically based on the state of the WriteController. In WritableFileWriter, before the call sites of FSWritableFile functions, WritableFileWriter::DecideRateLimiterPriority() determines the rate_limiter_priority. The options (IOOptions) argument of FSWritableFile functions will be updated with the rate_limiter_priority.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9988
Test Plan: Add unit tests.
Reviewed By: anand1976
Differential Revision: D36395159
Pulled By: gitbw95
fbshipit-source-id: a7c82fc29759139a1a07ec46c37dbf7e753474cf
2022-05-18 07:41:41 +00:00
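A minimal sketch of the state-to-priority mapping in the table above; the enum and the function are illustrative stand-ins, not the actual `WritableFileWriter::DecideRateLimiterPriority()` signature:
```
#include "rocksdb/env.h"

enum class WriteControllerState { kNormal, kDelayed, kStalled };

rocksdb::Env::IOPriority WritePriorityFor(bool is_flush,
                                          WriteControllerState state) {
  if (state != WriteControllerState::kNormal) {
    return rocksdb::Env::IO_USER;  // Delayed and Stalled both map to IO_USER
  }
  return is_flush ? rocksdb::Env::IO_HIGH : rocksdb::Env::IO_LOW;
}
```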
|
|
|
|
2020-11-12 19:40:52 +00:00
|
|
|
class CompactionJobTimestampTest : public CompactionJobTestBase {
|
|
|
|
public:
|
|
|
|
CompactionJobTimestampTest()
|
|
|
|
: CompactionJobTestBase(test::PerThreadDBPath("compaction_job_ts_test"),
|
2022-02-08 20:14:25 +00:00
|
|
|
test::BytewiseComparatorWithU64TsWrapper(),
|
2022-09-15 04:59:56 +00:00
|
|
|
test::EncodeInt, /*test_io_priority=*/false,
|
|
|
|
TableTypeForTest::kMockTable) {}
|
2020-11-12 19:40:52 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTimestampTest, GCDisabled) {
|
|
|
|
NewDB();
|
|
|
|
|
|
|
|
auto file1 =
|
|
|
|
mock::MakeMockFile({{KeyStr("a", 10, ValueType::kTypeValue, 100), "a10"},
|
|
|
|
{KeyStr("a", 9, ValueType::kTypeValue, 99), "a9"},
|
2021-09-27 18:49:35 +00:00
|
|
|
{KeyStr("b", 8, ValueType::kTypeValue, 98), "b8"},
|
|
|
|
{KeyStr("d", 7, ValueType::kTypeValue, 97), "d7"}});
|
|
|
|
|
2020-11-12 19:40:52 +00:00
|
|
|
AddMockFile(file1);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile(
|
2021-09-27 18:49:35 +00:00
|
|
|
{{KeyStr("b", 6, ValueType::kTypeDeletionWithTimestamp, 96), ""},
|
|
|
|
{KeyStr("c", 5, ValueType::kTypeDeletionWithTimestamp, 95), ""},
|
|
|
|
{KeyStr("c", 4, ValueType::kTypeValue, 94), "c5"},
|
|
|
|
{KeyStr("d", 3, ValueType::kTypeSingleDeletion, 93), ""}});
|
2020-11-12 19:40:52 +00:00
|
|
|
AddMockFile(file2);
|
|
|
|
|
|
|
|
SetLastSequence(10);
|
|
|
|
|
|
|
|
auto expected_results = mock::MakeMockFile(
|
|
|
|
{{KeyStr("a", 10, ValueType::kTypeValue, 100), "a10"},
|
|
|
|
{KeyStr("a", 9, ValueType::kTypeValue, 99), "a9"},
|
|
|
|
{KeyStr("b", 8, ValueType::kTypeValue, 98), "b8"},
|
2021-09-27 18:49:35 +00:00
|
|
|
{KeyStr("b", 6, ValueType::kTypeDeletionWithTimestamp, 96), ""},
|
|
|
|
{KeyStr("c", 5, ValueType::kTypeDeletionWithTimestamp, 95), ""},
|
|
|
|
{KeyStr("c", 4, ValueType::kTypeValue, 94), "c5"},
|
|
|
|
{KeyStr("d", 7, ValueType::kTypeValue, 97), "d7"},
|
|
|
|
{KeyStr("d", 3, ValueType::kTypeSingleDeletion, 93), ""}});
|
2022-09-15 04:59:56 +00:00
|
|
|
constexpr int input_level = 0;
|
|
|
|
const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);
|
|
|
|
RunCompaction({files}, {input_level}, {expected_results});
|
2020-11-12 19:40:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTimestampTest, NoKeyExpired) {
|
|
|
|
NewDB();
|
|
|
|
|
|
|
|
auto file1 =
|
|
|
|
mock::MakeMockFile({{KeyStr("a", 6, ValueType::kTypeValue, 100), "a6"},
|
|
|
|
{KeyStr("b", 7, ValueType::kTypeValue, 101), "b7"},
|
|
|
|
{KeyStr("c", 5, ValueType::kTypeValue, 99), "c5"}});
|
|
|
|
AddMockFile(file1);
|
|
|
|
|
|
|
|
auto file2 =
|
|
|
|
mock::MakeMockFile({{KeyStr("a", 4, ValueType::kTypeValue, 98), "a4"},
|
|
|
|
{KeyStr("c", 3, ValueType::kTypeValue, 97), "c3"}});
|
|
|
|
AddMockFile(file2);
|
|
|
|
|
|
|
|
SetLastSequence(101);
|
|
|
|
|
|
|
|
auto expected_results =
|
|
|
|
mock::MakeMockFile({{KeyStr("a", 6, ValueType::kTypeValue, 100), "a6"},
|
|
|
|
{KeyStr("a", 4, ValueType::kTypeValue, 98), "a4"},
|
|
|
|
{KeyStr("b", 7, ValueType::kTypeValue, 101), "b7"},
|
|
|
|
{KeyStr("c", 5, ValueType::kTypeValue, 99), "c5"},
|
|
|
|
{KeyStr("c", 3, ValueType::kTypeValue, 97), "c3"}});
|
2022-09-15 04:59:56 +00:00
|
|
|
constexpr int input_level = 0;
|
|
|
|
const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);
|
2020-11-12 19:40:52 +00:00
|
|
|
|
|
|
|
full_history_ts_low_ = encode_u64_ts_(0);
|
2022-09-15 04:59:56 +00:00
|
|
|
RunCompaction({files}, {input_level}, {expected_results});
|
2020-11-12 19:40:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTimestampTest, AllKeysExpired) {
|
|
|
|
NewDB();
|
|
|
|
|
|
|
|
auto file1 = mock::MakeMockFile(
|
|
|
|
{{KeyStr("a", 5, ValueType::kTypeDeletionWithTimestamp, 100), ""},
|
2021-09-27 18:49:35 +00:00
|
|
|
{KeyStr("b", 6, ValueType::kTypeSingleDeletion, 99), ""},
|
|
|
|
{KeyStr("c", 7, ValueType::kTypeValue, 98), "c7"}});
|
2020-11-12 19:40:52 +00:00
|
|
|
AddMockFile(file1);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile(
|
2021-09-27 18:49:35 +00:00
|
|
|
{{KeyStr("a", 4, ValueType::kTypeValue, 97), "a4"},
|
|
|
|
{KeyStr("b", 3, ValueType::kTypeValue, 96), "b3"},
|
|
|
|
{KeyStr("c", 2, ValueType::kTypeDeletionWithTimestamp, 95), ""},
|
|
|
|
{KeyStr("c", 1, ValueType::kTypeValue, 94), "c1"}});
|
2020-11-12 19:40:52 +00:00
|
|
|
AddMockFile(file2);
|
|
|
|
|
2021-09-27 18:49:35 +00:00
|
|
|
SetLastSequence(7);
|
2020-11-12 19:40:52 +00:00
|
|
|
|
|
|
|
auto expected_results =
|
2021-09-27 18:49:35 +00:00
|
|
|
mock::MakeMockFile({{KeyStr("c", 0, ValueType::kTypeValue, 0), "c7"}});
|
2022-09-15 04:59:56 +00:00
|
|
|
constexpr int input_level = 0;
|
|
|
|
const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);
|
2020-11-12 19:40:52 +00:00
|
|
|
|
|
|
|
full_history_ts_low_ = encode_u64_ts_(std::numeric_limits<uint64_t>::max());
|
2022-09-15 04:59:56 +00:00
|
|
|
RunCompaction({files}, {input_level}, {expected_results});
|
2020-11-12 19:40:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTimestampTest, SomeKeysExpired) {
|
|
|
|
NewDB();
|
|
|
|
|
|
|
|
auto file1 =
|
|
|
|
mock::MakeMockFile({{KeyStr("a", 5, ValueType::kTypeValue, 50), "a5"},
|
|
|
|
{KeyStr("b", 6, ValueType::kTypeValue, 49), "b6"}});
|
|
|
|
AddMockFile(file1);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile(
|
|
|
|
{{KeyStr("a", 3, ValueType::kTypeValue, 48), "a3"},
|
|
|
|
{KeyStr("a", 2, ValueType::kTypeValue, 46), "a2"},
|
|
|
|
{KeyStr("b", 4, ValueType::kTypeDeletionWithTimestamp, 47), ""}});
|
|
|
|
AddMockFile(file2);
|
|
|
|
|
|
|
|
SetLastSequence(6);
|
|
|
|
|
|
|
|
auto expected_results =
|
|
|
|
mock::MakeMockFile({{KeyStr("a", 5, ValueType::kTypeValue, 50), "a5"},
|
2021-11-09 21:07:33 +00:00
|
|
|
{KeyStr("a", 0, ValueType::kTypeValue, 0), "a3"},
|
2020-11-12 19:40:52 +00:00
|
|
|
{KeyStr("b", 6, ValueType::kTypeValue, 49), "b6"}});
|
2022-09-15 04:59:56 +00:00
|
|
|
constexpr int input_level = 0;
|
|
|
|
const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);
|
2020-11-12 19:40:52 +00:00
|
|
|
|
|
|
|
full_history_ts_low_ = encode_u64_ts_(49);
|
2022-09-15 04:59:56 +00:00
|
|
|
RunCompaction({files}, {input_level}, {expected_results});
|
|
|
|
}
|
|
|
|
|
|
|
|
class CompactionJobTimestampTestWithBbTable : public CompactionJobTestBase {
|
|
|
|
public:
|
|
|
|
// Block-based table is needed if we want to test subcompaction partitioning
|
|
|
|
// with anchors.
|
|
|
|
explicit CompactionJobTimestampTestWithBbTable()
|
|
|
|
: CompactionJobTestBase(
|
|
|
|
test::PerThreadDBPath("compaction_job_ts_bbt_test"),
|
|
|
|
test::BytewiseComparatorWithU64TsWrapper(), test::EncodeInt,
|
|
|
|
/*test_io_priority=*/false, TableTypeForTest::kBlockBasedTable) {}
|
|
|
|
};
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTimestampTestWithBbTable, SubcompactionAnchorL1) {
|
|
|
|
cf_options_.target_file_size_base = 20;
|
|
|
|
mutable_cf_options_.target_file_size_base = 20;
|
|
|
|
NewDB();
|
|
|
|
|
|
|
|
const std::vector<std::string> keys = {
|
|
|
|
KeyStr("a", 20, ValueType::kTypeValue, 200),
|
|
|
|
KeyStr("b", 21, ValueType::kTypeValue, 210),
|
|
|
|
KeyStr("b", 20, ValueType::kTypeValue, 200),
|
|
|
|
KeyStr("b", 18, ValueType::kTypeValue, 180),
|
|
|
|
KeyStr("c", 17, ValueType::kTypeValue, 170),
|
|
|
|
KeyStr("c", 16, ValueType::kTypeValue, 160),
|
|
|
|
KeyStr("c", 15, ValueType::kTypeValue, 150)};
|
|
|
|
const std::vector<std::string> values = {"a20", "b21", "b20", "b18",
|
|
|
|
"c17", "c16", "c15"};
|
|
|
|
|
|
|
|
constexpr int input_level = 1;
|
|
|
|
|
|
|
|
auto file1 = mock::MakeMockFile(
|
|
|
|
{{keys[0], values[0]}, {keys[1], values[1]}, {keys[2], values[2]}});
|
|
|
|
AddMockFile(file1, input_level);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile(
|
|
|
|
{{keys[3], values[3]}, {keys[4], values[4]}, {keys[5], values[5]}});
|
|
|
|
AddMockFile(file2, input_level);
|
|
|
|
|
|
|
|
auto file3 = mock::MakeMockFile({{keys[6], values[6]}});
|
|
|
|
AddMockFile(file3, input_level);
|
|
|
|
|
|
|
|
SetLastSequence(20);
|
|
|
|
|
|
|
|
auto output1 = mock::MakeMockFile({{keys[0], values[0]}});
|
|
|
|
auto output2 = mock::MakeMockFile(
|
|
|
|
{{keys[1], values[1]}, {keys[2], values[2]}, {keys[3], values[3]}});
|
|
|
|
auto output3 = mock::MakeMockFile(
|
|
|
|
{{keys[4], values[4]}, {keys[5], values[5]}, {keys[6], values[6]}});
|
|
|
|
|
|
|
|
auto expected_results =
|
|
|
|
std::vector<mock::KVVector>{output1, output2, output3};
|
|
|
|
const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);
|
|
|
|
|
|
|
|
constexpr int output_level = 2;
|
|
|
|
constexpr int max_subcompactions = 4;
|
|
|
|
RunCompaction({files}, {input_level}, expected_results, /*snapshots=*/{},
|
|
|
|
/*earliest_write_conflict_snapshot=*/kMaxSequenceNumber,
|
|
|
|
output_level, /*verify=*/true, {kInvalidBlobFileNumber},
|
|
|
|
/*check_get_priority=*/false, Env::IO_TOTAL, Env::IO_TOTAL,
|
|
|
|
max_subcompactions);
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobTimestampTestWithBbTable, SubcompactionL0) {
|
|
|
|
cf_options_.target_file_size_base = 20;
|
|
|
|
mutable_cf_options_.target_file_size_base = 20;
|
|
|
|
NewDB();
|
|
|
|
|
|
|
|
const std::vector<std::string> keys = {
|
|
|
|
KeyStr("a", 20, ValueType::kTypeValue, 200),
|
|
|
|
KeyStr("b", 20, ValueType::kTypeValue, 200),
|
|
|
|
KeyStr("b", 19, ValueType::kTypeValue, 190),
|
|
|
|
KeyStr("b", 18, ValueType::kTypeValue, 180),
|
|
|
|
KeyStr("c", 17, ValueType::kTypeValue, 170),
|
|
|
|
KeyStr("c", 16, ValueType::kTypeValue, 160),
|
|
|
|
KeyStr("c", 15, ValueType::kTypeValue, 150)};
|
|
|
|
const std::vector<std::string> values = {"a20", "b20", "b19", "b18",
|
|
|
|
"c17", "c16", "c15"};
|
|
|
|
|
|
|
|
constexpr int input_level = 0;
|
|
|
|
|
|
|
|
auto file1 = mock::MakeMockFile({{keys[5], values[5]}, {keys[6], values[6]}});
|
|
|
|
AddMockFile(file1, input_level);
|
|
|
|
|
|
|
|
auto file2 = mock::MakeMockFile({{keys[3], values[3]}, {keys[4], values[4]}});
|
|
|
|
AddMockFile(file2, input_level);
|
|
|
|
|
|
|
|
auto file3 = mock::MakeMockFile(
|
|
|
|
{{keys[0], values[0]}, {keys[1], values[1]}, {keys[2], values[2]}});
|
|
|
|
AddMockFile(file3, input_level);
|
|
|
|
|
|
|
|
SetLastSequence(20);
|
|
|
|
|
|
|
|
auto output1 = mock::MakeMockFile({{keys[0], values[0]}});
|
|
|
|
auto output2 = mock::MakeMockFile(
|
|
|
|
{{keys[1], values[1]}, {keys[2], values[2]}, {keys[3], values[3]}});
|
|
|
|
auto output3 = mock::MakeMockFile(
|
|
|
|
{{keys[4], values[4]}, {keys[5], values[5]}, {keys[6], values[6]}});
|
|
|
|
|
|
|
|
auto expected_results =
|
|
|
|
std::vector<mock::KVVector>{output1, output2, output3};
|
|
|
|
const auto& files = cfd_->current()->storage_info()->LevelFiles(input_level);
|
|
|
|
|
|
|
|
constexpr int output_level = 1;
|
|
|
|
constexpr int max_subcompactions = 4;
|
|
|
|
RunCompaction({files}, {input_level}, expected_results, /*snapshots=*/{},
|
|
|
|
/*earliest_write_conflict_snapshot=*/kMaxSequenceNumber,
|
|
|
|
output_level, /*verify=*/true, {kInvalidBlobFileNumber},
|
|
|
|
/*check_get_priority=*/false, Env::IO_TOTAL, Env::IO_TOTAL,
|
|
|
|
max_subcompactions);
|
2020-11-12 19:40:52 +00:00
|
|
|
}
|
|
|
|
|
2022-06-07 18:57:12 +00:00
|
|
|
// The io priority of the compaction reads and writes is different from that
|
|
|
|
// of other DB reads and writes. To prepare the compaction input files, use
|
|
|
|
// the default filesystem from Env. To test the io priority of the compaction
|
|
|
|
// reads and writes, db_options_.fs is set to MockTestFileSystem.
|
|
|
|
class CompactionJobIOPriorityTest : public CompactionJobTestBase {
|
|
|
|
public:
|
|
|
|
CompactionJobIOPriorityTest()
|
|
|
|
: CompactionJobTestBase(
|
|
|
|
test::PerThreadDBPath("compaction_job_io_priority_test"),
|
2022-09-15 04:59:56 +00:00
|
|
|
BytewiseComparator(), [](uint64_t /*ts*/) { return ""; },
|
|
|
|
/*test_io_priority=*/true, TableTypeForTest::kBlockBasedTable) {}
|
2022-06-07 18:57:12 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
TEST_F(CompactionJobIOPriorityTest, WriteControllerStateNormal) {
|
|
|
|
// When the state from WriteController is normal.
|
|
|
|
NewDB();
|
|
|
|
mock::KVVector expected_results = CreateTwoFiles(false);
|
|
|
|
auto cfd = versions_->GetColumnFamilySet()->GetDefault();
|
2022-09-15 04:59:56 +00:00
|
|
|
constexpr int input_level = 0;
|
|
|
|
auto files = cfd->current()->storage_info()->LevelFiles(input_level);
|
2022-06-07 18:57:12 +00:00
|
|
|
ASSERT_EQ(2U, files.size());
|
2022-09-15 04:59:56 +00:00
|
|
|
RunCompaction({files}, {input_level}, {expected_results}, {},
|
|
|
|
kMaxSequenceNumber, 1, false, {kInvalidBlobFileNumber}, false,
|
|
|
|
Env::IO_LOW, Env::IO_LOW);
|
2022-06-07 18:57:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobIOPriorityTest, WriteControllerStateDelayed) {
|
|
|
|
// When the state from WriteController is Delayed.
|
|
|
|
NewDB();
|
|
|
|
mock::KVVector expected_results = CreateTwoFiles(false);
|
|
|
|
auto cfd = versions_->GetColumnFamilySet()->GetDefault();
|
2022-09-15 04:59:56 +00:00
|
|
|
constexpr int input_level = 0;
|
|
|
|
auto files = cfd->current()->storage_info()->LevelFiles(input_level);
|
2022-06-07 18:57:12 +00:00
|
|
|
ASSERT_EQ(2U, files.size());
|
|
|
|
{
|
|
|
|
std::unique_ptr<WriteControllerToken> delay_token =
|
|
|
|
write_controller_.GetDelayToken(1000000);
|
2022-09-15 04:59:56 +00:00
|
|
|
RunCompaction({files}, {input_level}, {expected_results}, {},
|
|
|
|
kMaxSequenceNumber, 1, false, {kInvalidBlobFileNumber}, false,
|
|
|
|
Env::IO_USER, Env::IO_USER);
|
2022-06-07 18:57:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobIOPriorityTest, WriteControllerStateStalled) {
|
|
|
|
// When the state from WriteController is Stalled.
|
|
|
|
NewDB();
|
|
|
|
mock::KVVector expected_results = CreateTwoFiles(false);
|
|
|
|
auto cfd = versions_->GetColumnFamilySet()->GetDefault();
|
2022-09-15 04:59:56 +00:00
|
|
|
constexpr int input_level = 0;
|
|
|
|
auto files = cfd->current()->storage_info()->LevelFiles(input_level);
|
2022-06-07 18:57:12 +00:00
|
|
|
ASSERT_EQ(2U, files.size());
|
|
|
|
{
|
|
|
|
std::unique_ptr<WriteControllerToken> stop_token =
|
|
|
|
write_controller_.GetStopToken();
|
2022-09-15 04:59:56 +00:00
|
|
|
RunCompaction({files}, {input_level}, {expected_results}, {},
|
|
|
|
kMaxSequenceNumber, 1, false, {kInvalidBlobFileNumber}, false,
|
|
|
|
Env::IO_USER, Env::IO_USER);
|
2022-06-07 18:57:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CompactionJobIOPriorityTest, GetRateLimiterPriority) {
|
|
|
|
NewDB();
|
|
|
|
mock::KVVector expected_results = CreateTwoFiles(false);
|
|
|
|
auto cfd = versions_->GetColumnFamilySet()->GetDefault();
|
2022-09-15 04:59:56 +00:00
|
|
|
constexpr int input_level = 0;
|
|
|
|
auto files = cfd->current()->storage_info()->LevelFiles(input_level);
|
2022-06-07 18:57:12 +00:00
|
|
|
ASSERT_EQ(2U, files.size());
|
2022-09-15 04:59:56 +00:00
|
|
|
RunCompaction({files}, {input_level}, {expected_results}, {},
|
|
|
|
kMaxSequenceNumber, 1, false, {kInvalidBlobFileNumber}, true,
|
|
|
|
Env::IO_LOW, Env::IO_LOW);
|
2022-06-07 18:57:12 +00:00
|
|
|
}
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2014-11-14 19:35:48 +00:00
|
|
|
|
2015-03-17 21:08:00 +00:00
|
|
|
int main(int argc, char** argv) {
|
2022-10-18 07:35:35 +00:00
|
|
|
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
|
2015-03-17 21:08:00 +00:00
|
|
|
::testing::InitGoogleTest(&argc, argv);
|
2021-11-08 19:04:01 +00:00
|
|
|
RegisterCustomObjects(argc, argv);
|
2015-03-17 21:08:00 +00:00
|
|
|
return RUN_ALL_TESTS();
|
|
|
|
}
|
2015-10-13 17:32:05 +00:00
|
|
|
|
|
|
|
#else
|
|
|
|
#include <stdio.h>
|
|
|
|
|
2018-04-16 00:19:57 +00:00
|
|
|
int main(int /*argc*/, char** /*argv*/) {
|
2015-10-13 17:32:05 +00:00
|
|
|
fprintf(stderr,
|
|
|
|
"SKIPPED as CompactionJobStats is not supported in ROCKSDB_LITE\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif // ROCKSDB_LITE
|