mirror of
https://github.com/facebook/rocksdb.git
synced 2024-11-26 16:30:56 +00:00
443d8ef094
Summary:
**Context:**
Running the new test `DBMergeOperandTest.MergeOperandReadAfterFreeBug` prior to this fix surfaces the read-after-free bug of PinSelf() as below:
```
READ of size 8 at 0x60400002529d thread T0
    #5 0x7f199a in rocksdb::PinnableSlice::PinSelf(rocksdb::Slice const&) include/rocksdb/slice.h:171
    #6 0x7f199a in rocksdb::DBImpl::GetImpl(rocksdb::ReadOptions const&, rocksdb::Slice const&, rocksdb::DBImpl::GetImplOptions&) db/db_impl/db_impl.cc:1919
    #7 0x540d63 in rocksdb::DBImpl::GetMergeOperands(rocksdb::ReadOptions const&, rocksdb::ColumnFamilyHandle*, rocksdb::Slice const&, rocksdb::PinnableSlice*, rocksdb::GetMergeOperandsOptions*, int*) db/db_impl/db_impl.h:203

freed by thread T0 here:
    #3 0x1191399 in rocksdb::cache_entry_roles_detail::RegisteredDeleter<rocksdb::Block, (rocksdb::CacheEntryRole)0>::Delete(rocksdb::Slice const&, void*) cache/cache_entry_roles.h:99
    #4 0x719348 in rocksdb::LRUHandle::Free() cache/lru_cache.h:205
    #5 0x71047f in rocksdb::LRUCacheShard::Release(rocksdb::Cache::Handle*, bool) cache/lru_cache.cc:547
    #6 0xa78f0a in rocksdb::Cleanable::DoCleanup() include/rocksdb/cleanable.h:60
    #7 0xa78f0a in rocksdb::Cleanable::Reset() include/rocksdb/cleanable.h:38
    #8 0xa78f0a in rocksdb::PinnedIteratorsManager::ReleasePinnedData() db/pinned_iterators_manager.h:71
    #9 0xd0c21b in rocksdb::PinnedIteratorsManager::~PinnedIteratorsManager() db/pinned_iterators_manager.h:24
    #10 0xd0c21b in rocksdb::Version::Get(rocksdb::ReadOptions const&, rocksdb::LookupKey const&, rocksdb::PinnableSlice*, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >*, rocksdb::Status*, rocksdb::MergeContext*, unsigned long*, bool*, bool*, unsigned long*, rocksdb::ReadCallback*, bool*, bool) db/pinned_iterators_manager.h:22
    #11 0x7f0fdf in rocksdb::DBImpl::GetImpl(rocksdb::ReadOptions const&, rocksdb::Slice const&, rocksdb::DBImpl::GetImplOptions&) db/db_impl/db_impl.cc:1886
    #12 0x540d63 in rocksdb::DBImpl::GetMergeOperands(rocksdb::ReadOptions const&, rocksdb::ColumnFamilyHandle*, rocksdb::Slice const&, rocksdb::PinnableSlice*, rocksdb::GetMergeOperandsOptions*, int*) db/db_impl/db_impl.h:203

previously allocated by thread T0 here:
    #1 0x1239896 in rocksdb::AllocateBlock(unsigned long, rocksdb::MemoryAllocator*) memory/memory_allocator.h:35
    #2 0x1239896 in rocksdb::BlockFetcher::CopyBufferToHeapBuf() table/block_fetcher.cc:171
    #3 0x1239896 in rocksdb::BlockFetcher::GetBlockContents() table/block_fetcher.cc:206
    #4 0x122eae5 in rocksdb::BlockFetcher::ReadBlockContents() table/block_fetcher.cc:325
    #5 0x11b1f45 in rocksdb::Status rocksdb::BlockBasedTable::MaybeReadBlockAndLoadToCache<rocksdb::Block>(rocksdb::FilePrefetchBuffer*, rocksdb::ReadOptions const&, rocksdb::BlockHandle const&, rocksdb::UncompressionDict const&, bool, rocksdb::CachableEntry<rocksdb::Block>*, rocksdb::BlockType, rocksdb::GetContext*, rocksdb::BlockCacheLookupContext*, rocksdb::BlockContents*) const table/block_based/block_based_table_reader.cc:1503
```
Here is the analysis:
- We have [PinnedIteratorsManager](https://github.com/facebook/rocksdb/blob/6.28.fb/db/version_set.cc#L1980) with `Cleanable` capability in our `Version::Get()` path. It is responsible for managing the lifetime of pinned iterators and for invoking registered cleanup functions during its own destruction.
- For example, in the case above, the merge operands' cleanup gets associated with this manager in [GetContext::push_operand](https://github.com/facebook/rocksdb/blob/6.28.fb/table/get_context.cc#L405). During PinnedIteratorsManager's [destruction](https://github.com/facebook/rocksdb/blob/6.28.fb/db/pinned_iterators_manager.h#L67), the release function associated with the merge operands' data is invoked. **That is what we see under "freed by thread T0 here" in the ASAN report.**
- Bug 🐛: `PinnedIteratorsManager` is local to `Version::Get()`, while the merge operands' data needs to outlive `Version::Get()` and stay alive until [PinSelf()](https://github.com/facebook/rocksdb/blob/6.28.fb/db/db_impl/db_impl.cc#L1905) copies it, **which is the read-after-free in the ASAN report** (see the sketch after this list).
- This bug is likely an oversight of `PinnedIteratorsManager` when developing the API `DB::GetMergeOperands`, because the current logic works fine for the existing case of getting the *merged value*, where the operands do not need to live that long.
- This bug was rarely surfaced (even in its unit test) because the release function associated with the merge operands (which are actually blocks placed in the block cache, as you can see from `BlockBasedTable::MaybeReadBlockAndLoadToCache` under **"previously allocated by" in the ASAN report**) is a cache entry deleter. The deleter calls `Cache::Release()`, which, for the LRU cache, won't immediately deallocate the block [unless the cache is full or instructed to force-erase](https://github.com/facebook/rocksdb/blob/6.28.fb/cache/lru_cache.cc#L521-L531).
- `DBMergeOperandTest.MergeOperandReadAfterFreeBug` makes the cache extremely small to force it full.
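To make the lifetime problem concrete, here is a minimal, hypothetical C++ sketch of the buggy pattern. `PinningManager`, `LookupOperand`, and the strings are simplified stand-ins for illustration, not the actual RocksDB classes or call graph:
```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for PinnedIteratorsManager: runs registered cleanups on destruction.
struct PinningManager {
  std::vector<std::function<void()>> cleanups;
  ~PinningManager() {
    for (auto& cleanup : cleanups) cleanup();  // releases the pinned data
  }
};

// Stand-in for the Version::Get() path that pins a merge operand.
const std::string* LookupOperand() {
  PinningManager mgr;  // BUG: local to the lookup, like the manager that was
                       // local to Version::Get()
  auto* block = new std::string("operand");  // stand-in for a cached block
  mgr.cleanups.push_back([block] { delete block; });
  return block;  // dangles as soon as mgr is destroyed on return
}

int main() {
  const std::string* operand = LookupOperand();
  std::cout << *operand << "\n";  // read-after-free, like PinSelf() copying
                                  // the already-freed block
}
```
The fix in this PR is the moral equivalent of declaring the manager in the caller, so that it outlives the point where the operands are copied out.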
**Summary:**
- Fix the bug by aligning `PinnedIteratorsManager`'s lifetime with that of the merge operands

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9507

Test Plan:
- New test `DBMergeOperandTest.MergeOperandReadAfterFreeBug`
- db bench on the read path
  - Setup (LSM tree with several levels; cache the whole db to avoid read IO and warm the cache with readseq):
    `TEST_TMPDIR=/dev/shm/rocksdb ./db_bench -benchmarks="fillrandom,readseq" -num=1000000 -cache_size=100000000 -write_buffer_size=10000 -statistics=1 -max_bytes_for_level_base=10000 -level0_file_num_compaction_trigger=1`
    `TEST_TMPDIR=/dev/shm/rocksdb ./db_bench -benchmarks="readrandom" -num=1000000 -cache_size=100000000`
  - Actual command run (run a 20-run batch 20 times, then average each batch's average micros/op):
    `for j in {1..20}; do (for i in {1..20}; do rm -rf /dev/shm/rocksdb/ && TEST_TMPDIR=/dev/shm/rocksdb ./db_bench -benchmarks="fillrandom,readseq,readrandom" -num=1000000 -cache_size=100000000 -write_buffer_size=10000 -statistics=1 -max_bytes_for_level_base=10000 -level0_file_num_compaction_trigger=1 | egrep 'readrandom'; done > rr_output_pre.txt && (awk '{sum+=$3; sum_sqrt+=$3^2}END{print sum/20, sqrt(sum_sqrt/20-(sum/20)^2)}' rr_output_pre.txt) >> rr_output_pre_2.txt); done`
  - **Result: Pre-change: 3.79193 micros/op; Post-change: 3.79528 micros/op (+0.09%)**

| (pre-change) sorted avg micros/op of each 20-run | std of micros/op of each 20-run | (post-change) sorted avg micros/op of each 20-run | std of micros/op of each 20-run |
| -- | -- | -- | -- |
| 3.58355 | 0.265209 | 3.48715 | 0.382076 |
| 3.58845 | 0.519927 | 3.5832 | 0.382726 |
| 3.66415 | 0.452097 | 3.677 | 0.563831 |
| 3.68495 | 0.430897 | 3.68405 | 0.495355 |
| 3.70295 | 0.482893 | 3.68465 | 0.431438 |
| 3.719 | 0.463806 | 3.71945 | 0.457157 |
| 3.7393 | 0.453423 | 3.72795 | 0.538604 |
| 3.7806 | 0.527613 | 3.75075 | 0.444509 |
| 3.7817 | 0.426704 | 3.7683 | 0.468065 |
| 3.809 | 0.381033 | 3.8086 | 0.557378 |
| 3.80985 | 0.466011 | 3.81805 | 0.524833 |
| 3.8165 | 0.500351 | 3.83405 | 0.529339 |
| 3.8479 | 0.430326 | 3.86285 | 0.44831 |
| 3.85125 | 0.434108 | 3.8717 | 0.544098 |
| 3.8556 | 0.524602 | 3.895 | 0.411679 |
| 3.8656 | 0.476383 | 3.90965 | 0.566636 |
| 3.8911 | 0.488477 | 3.92735 | 0.608038 |
| 3.898 | 0.493978 | 3.9439 | 0.524511 |
| 3.97235 | 0.515008 | 3.9623 | 0.477416 |
| 3.9768 | 0.519993 | 3.98965 | 0.521481 |
- CI

Reviewed By: ajkr

Differential Revision: D34030519

Pulled By: hx235

fbshipit-source-id: a99ac585c11704c5ed93af033cb29ba0a7b16ae8
371 lines
13 KiB
C++
// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/db_test_util.h"
#include "port/stack_trace.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/utilities/debug.h"
#include "table/block_based/block_builder.h"
#if !defined(ROCKSDB_LITE)
#include "test_util/sync_point.h"
#endif
#include "rocksdb/merge_operator.h"
#include "utilities/fault_injection_env.h"
#include "utilities/merge_operators.h"
#include "utilities/merge_operators/sortlist.h"
#include "utilities/merge_operators/string_append/stringappend2.h"

namespace ROCKSDB_NAMESPACE {

namespace {
class LimitedStringAppendMergeOp : public StringAppendTESTOperator {
 public:
  LimitedStringAppendMergeOp(int limit, char delim)
      : StringAppendTESTOperator(delim), limit_(limit) {}

  const char* Name() const override {
    return "DBMergeOperatorTest::LimitedStringAppendMergeOp";
  }

  bool ShouldMerge(const std::vector<Slice>& operands) const override {
    if (operands.size() > 0 && limit_ > 0 && operands.size() >= limit_) {
      return true;
    }
    return false;
  }

 private:
  size_t limit_ = 0;
};
}  // namespace

class DBMergeOperandTest : public DBTestBase {
 public:
  DBMergeOperandTest()
      : DBTestBase("db_merge_operand_test", /*env_do_fsync=*/true) {}
};

TEST_F(DBMergeOperandTest, MergeOperandReadAfterFreeBug) {
  // There was a bug of reading merge operands after they are mistakenly freed
  // in DB::GetMergeOperands, which is surfaced by cache full.
  // See PR#9507 for more.
  Options options;
  options.create_if_missing = true;
  options.merge_operator = MergeOperators::CreateStringAppendOperator();
  options.env = env_;
  BlockBasedTableOptions table_options;

  // Small cache to simulate cache full
  table_options.block_cache = NewLRUCache(1);
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  Reopen(options);
  int num_records = 4;
  int number_of_operands = 0;
  std::vector<PinnableSlice> values(num_records);
  GetMergeOperandsOptions merge_operands_info;
  merge_operands_info.expected_max_number_of_operands = num_records;

  ASSERT_OK(Merge("k1", "v1"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k1", "v2"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k1", "v3"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k1", "v4"));

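  // With the tiny block cache above, releasing the operands' cache entries
  // during this lookup deallocates the blocks immediately, which is what
  // surfaced the read-after-free before the fix.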
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k1", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(number_of_operands, 4);
  ASSERT_EQ(values[0].ToString(), "v1");
  ASSERT_EQ(values[1].ToString(), "v2");
  ASSERT_EQ(values[2].ToString(), "v3");
  ASSERT_EQ(values[3].ToString(), "v4");
}

TEST_F(DBMergeOperandTest, GetMergeOperandsBasic) {
  Options options;
  options.create_if_missing = true;
  // Use only the latest two merge operands.
  options.merge_operator = std::make_shared<LimitedStringAppendMergeOp>(2, ',');
  options.env = env_;
  Reopen(options);
  int num_records = 4;
  int number_of_operands = 0;
  std::vector<PinnableSlice> values(num_records);
  GetMergeOperandsOptions merge_operands_info;
  merge_operands_info.expected_max_number_of_operands = num_records;

  // k0 value in memtable
  ASSERT_OK(Put("k0", "PutARock"));
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k0", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "PutARock");

  // k0.1 value in SST
  ASSERT_OK(Put("k0.1", "RockInSST"));
  ASSERT_OK(Flush());
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k0.1", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "RockInSST");

  // All k1 values are in memtable.
  ASSERT_OK(Merge("k1", "a"));
  ASSERT_OK(Put("k1", "x"));
  ASSERT_OK(Merge("k1", "b"));
  ASSERT_OK(Merge("k1", "c"));
  ASSERT_OK(Merge("k1", "d"));
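  // The Put of "x" terminated the earlier merge chain, so the put value comes
  // back as the first operand, followed only by the merges applied after it.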
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k1", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "x");
  ASSERT_EQ(values[1], "b");
  ASSERT_EQ(values[2], "c");
  ASSERT_EQ(values[3], "d");

  // expected_max_number_of_operands is less than number of merge operands so
  // status should be Incomplete.
  merge_operands_info.expected_max_number_of_operands = num_records - 1;
  Status status = db_->GetMergeOperands(
      ReadOptions(), db_->DefaultColumnFamily(), "k1", values.data(),
      &merge_operands_info, &number_of_operands);
  ASSERT_EQ(status.IsIncomplete(), true);
  merge_operands_info.expected_max_number_of_operands = num_records;

  // All k1.1 values are in memtable.
  ASSERT_OK(Merge("k1.1", "r"));
  ASSERT_OK(Delete("k1.1"));
  ASSERT_OK(Merge("k1.1", "c"));
  ASSERT_OK(Merge("k1.1", "k"));
  ASSERT_OK(Merge("k1.1", "s"));
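  // The Delete dropped the operand written before it; only the merges applied
  // after the Delete are returned.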
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k1.1", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "c");
  ASSERT_EQ(values[1], "k");
  ASSERT_EQ(values[2], "s");

  // All k2 values are flushed to L0 into a single file.
  ASSERT_OK(Merge("k2", "q"));
  ASSERT_OK(Merge("k2", "w"));
  ASSERT_OK(Merge("k2", "e"));
  ASSERT_OK(Merge("k2", "r"));
  ASSERT_OK(Flush());
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k2", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "q");
  ASSERT_EQ(values[1], "w");
  ASSERT_EQ(values[2], "e");
  ASSERT_EQ(values[3], "r");

  // All k2.1 values are flushed to L0 into a single file.
  ASSERT_OK(Merge("k2.1", "m"));
  ASSERT_OK(Put("k2.1", "l"));
  ASSERT_OK(Merge("k2.1", "n"));
  ASSERT_OK(Merge("k2.1", "o"));
  ASSERT_OK(Flush());
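  // The flush merged the operands into the preceding Put base, so a single
  // combined value is returned.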
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k2.1", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "l,n,o");

  // All k2.2 values are flushed to L0 into a single file.
  ASSERT_OK(Merge("k2.2", "g"));
  ASSERT_OK(Delete("k2.2"));
  ASSERT_OK(Merge("k2.2", "o"));
  ASSERT_OK(Merge("k2.2", "t"));
  ASSERT_OK(Flush());
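  // Likewise, the flush folded the operands above the Delete into a single
  // combined value.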
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k2.2", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "o,t");

  // Do some compaction that will make the following tests more predictable
  //  Slice start("PutARock");
  //  Slice end("t");
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

  // All k3 values are flushed and are in different files.
  ASSERT_OK(Merge("k3", "ab"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k3", "bc"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k3", "cd"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k3", "de"));
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k3", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "ab");
  ASSERT_EQ(values[1], "bc");
  ASSERT_EQ(values[2], "cd");
  ASSERT_EQ(values[3], "de");

  // All k3.1 values are flushed and are in different files.
  ASSERT_OK(Merge("k3.1", "ab"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("k3.1", "bc"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k3.1", "cd"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k3.1", "de"));
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k3.1", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "bc");
  ASSERT_EQ(values[1], "cd");
  ASSERT_EQ(values[2], "de");

  // All k3.2 values are flushed and are in different files.
  ASSERT_OK(Merge("k3.2", "ab"));
  ASSERT_OK(Flush());
  ASSERT_OK(Delete("k3.2"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k3.2", "cd"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k3.2", "de"));
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k3.2", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "cd");
  ASSERT_EQ(values[1], "de");

  // All K4 values are in different levels
  ASSERT_OK(Merge("k4", "ba"));
  ASSERT_OK(Flush());
  MoveFilesToLevel(4);
  ASSERT_OK(Merge("k4", "cb"));
  ASSERT_OK(Flush());
  MoveFilesToLevel(3);
  ASSERT_OK(Merge("k4", "dc"));
  ASSERT_OK(Flush());
  MoveFilesToLevel(1);
  ASSERT_OK(Merge("k4", "ed"));
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k4", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "ba");
  ASSERT_EQ(values[1], "cb");
  ASSERT_EQ(values[2], "dc");
  ASSERT_EQ(values[3], "ed");

  // First 3 k5 values are in SST and next 4 k5 values are in Immutable
  // Memtable
  ASSERT_OK(Merge("k5", "who"));
  ASSERT_OK(Merge("k5", "am"));
  ASSERT_OK(Merge("k5", "i"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("k5", "remember"));
  ASSERT_OK(Merge("k5", "i"));
  ASSERT_OK(Merge("k5", "am"));
  ASSERT_OK(Merge("k5", "rocks"));
  ASSERT_OK(dbfull()->TEST_SwitchMemtable());
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k5", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "remember");
  ASSERT_EQ(values[1], "i");
  ASSERT_EQ(values[2], "am");
}

TEST_F(DBMergeOperandTest, BlobDBGetMergeOperandsBasic) {
  Options options;
  options.create_if_missing = true;
  options.enable_blob_files = true;
  options.min_blob_size = 0;
  // Use only the latest two merge operands.
  options.merge_operator = std::make_shared<LimitedStringAppendMergeOp>(2, ',');
  options.env = env_;
  Reopen(options);
  int num_records = 4;
  int number_of_operands = 0;
  std::vector<PinnableSlice> values(num_records);
  GetMergeOperandsOptions merge_operands_info;
  merge_operands_info.expected_max_number_of_operands = num_records;

  // All k1 values are in memtable.
  ASSERT_OK(Put("k1", "x"));
  ASSERT_OK(Merge("k1", "b"));
  ASSERT_OK(Merge("k1", "c"));
  ASSERT_OK(Merge("k1", "d"));
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k1", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "x");
  ASSERT_EQ(values[1], "b");
  ASSERT_EQ(values[2], "c");
  ASSERT_EQ(values[3], "d");

  // expected_max_number_of_operands is less than number of merge operands so
  // status should be Incomplete.
  merge_operands_info.expected_max_number_of_operands = num_records - 1;
  Status status = db_->GetMergeOperands(
      ReadOptions(), db_->DefaultColumnFamily(), "k1", values.data(),
      &merge_operands_info, &number_of_operands);
  ASSERT_EQ(status.IsIncomplete(), true);
  merge_operands_info.expected_max_number_of_operands = num_records;

  // All k2 values are flushed to L0 into a single file.
  ASSERT_OK(Put("k2", "q"));
  ASSERT_OK(Merge("k2", "w"));
  ASSERT_OK(Merge("k2", "e"));
  ASSERT_OK(Merge("k2", "r"));
  ASSERT_OK(Flush());
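  // As in GetMergeOperandsBasic, the flush merged the operands into the
  // preceding Put base, producing one combined value.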
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k2", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "q,w,e,r");

  // Do some compaction that will make the following tests more predictable
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

  // All k3 values are flushed and are in different files.
  ASSERT_OK(Put("k3", "ab"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k3", "bc"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k3", "cd"));
  ASSERT_OK(Flush());
  ASSERT_OK(Merge("k3", "de"));
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k3", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "ab");
  ASSERT_EQ(values[1], "bc");
  ASSERT_EQ(values[2], "cd");
  ASSERT_EQ(values[3], "de");

  // All K4 values are in different levels
  ASSERT_OK(Put("k4", "ba"));
  ASSERT_OK(Flush());
  MoveFilesToLevel(4);
  ASSERT_OK(Merge("k4", "cb"));
  ASSERT_OK(Flush());
  MoveFilesToLevel(3);
  ASSERT_OK(Merge("k4", "dc"));
  ASSERT_OK(Flush());
  MoveFilesToLevel(1);
  ASSERT_OK(Merge("k4", "ed"));
  ASSERT_OK(db_->GetMergeOperands(ReadOptions(), db_->DefaultColumnFamily(),
                                  "k4", values.data(), &merge_operands_info,
                                  &number_of_operands));
  ASSERT_EQ(values[0], "ba");
  ASSERT_EQ(values[1], "cb");
  ASSERT_EQ(values[2], "dc");
  ASSERT_EQ(values[3], "ed");
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}