// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/range_del_aggregator.h"

#include <memory>
#include <string>
#include <vector>

#include "db/db_test_util.h"
#include "db/dbformat.h"
#include "db/range_tombstone_fragmenter.h"
#include "test_util/testutil.h"

namespace rocksdb {

class RangeDelAggregatorTest : public testing::Test {};

namespace {

static auto bytewise_icmp = InternalKeyComparator(BytewiseComparator());
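
// Wraps the given tombstones in an iterator over their serialized
// (start internal key, end user key) pairs, the input format consumed by
// FragmentedRangeTombstoneList.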
std::unique_ptr<InternalIterator> MakeRangeDelIter(
    const std::vector<RangeTombstone>& range_dels) {
  std::vector<std::string> keys, values;
  for (const auto& range_del : range_dels) {
    auto key_and_value = range_del.Serialize();
    keys.push_back(key_and_value.first.Encode().ToString());
    values.push_back(key_and_value.second.ToString());
  }
  return std::unique_ptr<test::VectorIterator>(
      new test::VectorIterator(keys, values));
}
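
// Builds one fragmented tombstone list per input vector of tombstones; each
// list stands in for the range deletions of a separate file in these tests.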
std::vector<std::unique_ptr<FragmentedRangeTombstoneList>>
MakeFragmentedTombstoneLists(
    const std::vector<std::vector<RangeTombstone>>& range_dels_list) {
  std::vector<std::unique_ptr<FragmentedRangeTombstoneList>> fragment_lists;
  for (const auto& range_dels : range_dels_list) {
    auto range_del_iter = MakeRangeDelIter(range_dels);
    fragment_lists.emplace_back(new FragmentedRangeTombstoneList(
        std::move(range_del_iter), bytewise_icmp));
  }
  return fragment_lists;
}

struct TruncatedIterScanTestCase {
  ParsedInternalKey start;
  ParsedInternalKey end;
  SequenceNumber seq;
};

struct TruncatedIterSeekTestCase {
  Slice target;
  ParsedInternalKey start;
  ParsedInternalKey end;
  SequenceNumber seq;
  bool invalid;
};

struct ShouldDeleteTestCase {
  ParsedInternalKey lookup_key;
  bool result;
};

struct IsRangeOverlappedTestCase {
  Slice start;
  Slice end;
  bool result;
};
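
// An endpoint that was not cut by a file boundary: kMaxSequenceNumber and
// kTypeRangeDeletion make it sort before any point key on the same user key.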
ParsedInternalKey UncutEndpoint(const Slice& s) {
  return ParsedInternalKey(s, kMaxSequenceNumber, kTypeRangeDeletion);
}

ParsedInternalKey InternalValue(const Slice& key, SequenceNumber seq) {
  return ParsedInternalKey(key, seq, kTypeValue);
}
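
// Scans the truncated iterator forwards and then backwards, checking each
// tombstone's endpoints and sequence number against the expected values.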
void VerifyIterator(
    TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp,
    const std::vector<TruncatedIterScanTestCase>& expected_range_dels) {
  // Test forward iteration.
  iter->SeekToFirst();
  for (size_t i = 0; i < expected_range_dels.size(); i++, iter->Next()) {
    ASSERT_TRUE(iter->Valid());
    EXPECT_EQ(0, icmp.Compare(iter->start_key(), expected_range_dels[i].start));
    EXPECT_EQ(0, icmp.Compare(iter->end_key(), expected_range_dels[i].end));
    EXPECT_EQ(expected_range_dels[i].seq, iter->seq());
  }
  EXPECT_FALSE(iter->Valid());

  // Test reverse iteration.
  iter->SeekToLast();
  std::vector<TruncatedIterScanTestCase> reverse_expected_range_dels(
      expected_range_dels.rbegin(), expected_range_dels.rend());
  for (size_t i = 0; i < reverse_expected_range_dels.size();
       i++, iter->Prev()) {
    ASSERT_TRUE(iter->Valid());
    EXPECT_EQ(0, icmp.Compare(iter->start_key(),
                              reverse_expected_range_dels[i].start));
    EXPECT_EQ(
        0, icmp.Compare(iter->end_key(), reverse_expected_range_dels[i].end));
    EXPECT_EQ(reverse_expected_range_dels[i].seq, iter->seq());
  }
  EXPECT_FALSE(iter->Valid());
}
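
// VerifySeek/VerifySeekForPrev check iterator positioning for a table of seek
// targets; cases marked invalid expect the iterator to become invalid.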
void VerifySeek(TruncatedRangeDelIterator* iter,
                const InternalKeyComparator& icmp,
                const std::vector<TruncatedIterSeekTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    iter->Seek(test_case.target);
    if (test_case.invalid) {
      ASSERT_FALSE(iter->Valid());
    } else {
      ASSERT_TRUE(iter->Valid());
      EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start));
      EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end));
      EXPECT_EQ(test_case.seq, iter->seq());
    }
  }
}

void VerifySeekForPrev(
    TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp,
    const std::vector<TruncatedIterSeekTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    iter->SeekForPrev(test_case.target);
    if (test_case.invalid) {
      ASSERT_FALSE(iter->Valid());
    } else {
      ASSERT_TRUE(iter->Valid());
      EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start));
      EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end));
      EXPECT_EQ(test_case.seq, iter->seq());
    }
  }
}
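
// Runs the lookups in order under kForwardTraversal, then in reverse under
// kBackwardTraversal, so both positioning modes see the same cases.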
void VerifyShouldDelete(RangeDelAggregator* range_del_agg,
                        const std::vector<ShouldDeleteTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    EXPECT_EQ(
        test_case.result,
        range_del_agg->ShouldDelete(
            test_case.lookup_key, RangeDelPositioningMode::kForwardTraversal));
  }
  for (auto it = test_cases.rbegin(); it != test_cases.rend(); ++it) {
    const auto& test_case = *it;
    EXPECT_EQ(
        test_case.result,
        range_del_agg->ShouldDelete(
            test_case.lookup_key, RangeDelPositioningMode::kBackwardTraversal));
  }
}

void VerifyIsRangeOverlapped(
    ReadRangeDelAggregator* range_del_agg,
    const std::vector<IsRangeOverlappedTestCase>& test_cases) {
  for (const auto& test_case : test_cases) {
    EXPECT_EQ(test_case.result,
              range_del_agg->IsRangeOverlapped(test_case.start, test_case.end));
  }
}
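
// Asserts that the iterator is positioned on the given tombstone, via both
// the generic InternalIterator accessors and the
// FragmentedRangeTombstoneIterator-specific ones.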
void CheckIterPosition(const RangeTombstone& tombstone,
                       const FragmentedRangeTombstoneIterator* iter) {
  // Test InternalIterator interface.
  EXPECT_EQ(tombstone.start_key_, ExtractUserKey(iter->key()));
  EXPECT_EQ(tombstone.end_key_, iter->value());
  EXPECT_EQ(tombstone.seq_, iter->seq());

  // Test FragmentedRangeTombstoneIterator interface.
  EXPECT_EQ(tombstone.start_key_, iter->start_key());
  EXPECT_EQ(tombstone.end_key_, iter->end_key());
  EXPECT_EQ(tombstone.seq_, GetInternalKeySeqno(iter->key()));
}
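
// Scans the iterator from the front and checks that it yields exactly the
// expected fragments, in order.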
void VerifyFragmentedRangeDels(
    FragmentedRangeTombstoneIterator* iter,
    const std::vector<RangeTombstone>& expected_tombstones) {
  iter->SeekToFirst();
  for (size_t i = 0; i < expected_tombstones.size(); i++, iter->Next()) {
    ASSERT_TRUE(iter->Valid());
    CheckIterPosition(expected_tombstones[i], iter);
  }
  EXPECT_FALSE(iter->Valid());
}

}  // namespace
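
// An empty fragment list yields a truncated iterator that is invalid after
// both SeekToFirst() and SeekToLast().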
TEST_F(RangeDelAggregatorTest, EmptyTruncatedIter) {
  auto range_del_iter = MakeRangeDelIter({});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));

  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
                                 nullptr);

  iter.SeekToFirst();
  ASSERT_FALSE(iter.Valid());

  iter.SeekToLast();
  ASSERT_FALSE(iter.Valid());
}
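
// Without smallest/largest file boundaries, truncation is a no-op and every
// endpoint stays uncut.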
TEST_F(RangeDelAggregatorTest, UntruncatedIter) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));

  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
                                 nullptr);

  VerifyIterator(&iter, bytewise_icmp,
                 {{UncutEndpoint("a"), UncutEndpoint("e"), 10},
                  {UncutEndpoint("e"), UncutEndpoint("g"), 8},
                  {UncutEndpoint("j"), UncutEndpoint("n"), 4}});

  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"", UncutEndpoint("a"), UncutEndpoint("e"), 10}});

  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"n", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
}
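
// A snapshot at sequence number 9 hides the newer [a, e) @ 10 tombstone from
// the iterator.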
TEST_F(RangeDelAggregatorTest, UntruncatedIterWithSnapshot) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           9 /* snapshot */));

  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
                                 nullptr);

  VerifyIterator(&iter, bytewise_icmp,
                 {{UncutEndpoint("e"), UncutEndpoint("g"), 8},
                  {UncutEndpoint("j"), UncutEndpoint("n"), 4}});

  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"", UncutEndpoint("e"), UncutEndpoint("g"), 8}});

  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"n", UncutEndpoint("j"), UncutEndpoint("n"), 4},
       {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
}
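
// File boundaries d@7 and m@9 clip the tombstone endpoints that extend past
// them, turning those endpoints into truncated internal keys.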
TEST_F(RangeDelAggregatorTest, TruncatedIterPartiallyCutTombstones) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));

  InternalKey smallest("d", 7, kTypeValue);
  InternalKey largest("m", 9, kTypeValue);
  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp,
                                 &smallest, &largest);

  VerifyIterator(&iter, bytewise_icmp,
                 {{InternalValue("d", 7), UncutEndpoint("e"), 10},
                  {UncutEndpoint("e"), UncutEndpoint("g"), 8},
                  {UncutEndpoint("j"), InternalValue("m", 8), 4}});

  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", InternalValue("d", 7), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("j"), InternalValue("m", 8), 4},
       {"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"", InternalValue("d", 7), UncutEndpoint("e"), 10}});

  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", InternalValue("d", 7), UncutEndpoint("e"), 10},
       {"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
       {"n", UncutEndpoint("j"), InternalValue("m", 8), 4},
       {"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
}
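
// The file boundaries fall strictly inside [e, g): only that tombstone
// survives truncation, cut on the left at f@7.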
TEST_F(RangeDelAggregatorTest, TruncatedIterFullyCutTombstones) {
  auto range_del_iter =
      MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));

  InternalKey smallest("f", 7, kTypeValue);
  InternalKey largest("i", 9, kTypeValue);
  TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp,
                                 &smallest, &largest);

  VerifyIterator(&iter, bytewise_icmp,
                 {{InternalValue("f", 7), UncutEndpoint("g"), 8}});

  VerifySeek(
      &iter, bytewise_icmp,
      {{"d", InternalValue("f", 7), UncutEndpoint("g"), 8},
       {"f", InternalValue("f", 7), UncutEndpoint("g"), 8},
       {"j", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});

  VerifySeekForPrev(
      &iter, bytewise_icmp,
      {{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
       {"f", InternalValue("f", 7), UncutEndpoint("g"), 8},
       {"j", InternalValue("f", 7), UncutEndpoint("g"), 8}});
}
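
// A single fragmented list behind a ReadRangeDelAggregator: a lookup is
// covered only if a tombstone with a higher sequence number spans its key.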
TEST_F(RangeDelAggregatorTest, SingleIterInAggregator) {
  auto range_del_iter = MakeRangeDelIter({{"a", "e", 10}, {"c", "g", 8}});
  FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
                                             bytewise_icmp);
  std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
      new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
                                           kMaxSequenceNumber));

  ReadRangeDelAggregator range_del_agg(&bytewise_icmp, kMaxSequenceNumber);
  range_del_agg.AddTombstones(std::move(input_iter));

  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), false},
                                      {InternalValue("b", 9), true},
                                      {InternalValue("d", 9), true},
                                      {InternalValue("e", 7), true},
                                      {InternalValue("g", 7), false}});

  VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
                                           {"_", "a", true},
                                           {"a", "c", true},
                                           {"d", "f", true},
                                           {"g", "l", false}});
}
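
// Tombstones from two files are aggregated; coverage at each key reflects the
// union of both files' tombstones.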
TEST_F(RangeDelAggregatorTest, MultipleItersInAggregator) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  ReadRangeDelAggregator range_del_agg(&bytewise_icmp, kMaxSequenceNumber);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), true},
                                      {InternalValue("b", 19), false},
                                      {InternalValue("b", 9), true},
                                      {InternalValue("d", 9), true},
                                      {InternalValue("e", 7), true},
                                      {InternalValue("g", 7), false},
                                      {InternalValue("h", 24), true},
                                      {InternalValue("i", 24), false},
                                      {InternalValue("ii", 14), true},
                                      {InternalValue("j", 14), false}});

  VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
                                           {"_", "a", true},
                                           {"a", "c", true},
                                           {"d", "f", true},
                                           {"g", "l", true},
                                           {"x", "y", false}});
}
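
// An aggregator upper bound of 19 hides the seqno-20 and seqno-25 tombstones,
// so a@19 and h@24 are no longer covered.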
TEST_F(RangeDelAggregatorTest, MultipleItersInAggregatorWithUpperBound) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  ReadRangeDelAggregator range_del_agg(&bytewise_icmp, 19);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             19 /* snapshot */));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), false},
                                      {InternalValue("a", 9), true},
                                      {InternalValue("b", 9), true},
                                      {InternalValue("d", 9), true},
                                      {InternalValue("e", 7), true},
                                      {InternalValue("g", 7), false},
                                      {InternalValue("h", 24), false},
                                      {InternalValue("i", 24), false},
                                      {InternalValue("ii", 14), true},
                                      {InternalValue("j", 14), false}});

  VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
                                           {"_", "a", true},
                                           {"a", "c", true},
                                           {"d", "f", true},
                                           {"g", "l", true},
                                           {"x", "y", false}});
}
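
// Each file carries the same [a, z) @ 10 tombstone truncated to that file's
// boundaries; keys outside a file's boundaries are not covered by its copy.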
TEST_F(RangeDelAggregatorTest, MultipleTruncatedItersInAggregator) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}});
  std::vector<std::pair<InternalKey, InternalKey>> iter_bounds = {
      {InternalKey("a", 4, kTypeValue),
       InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("m", 20, kTypeValue),
       InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}};

  ReadRangeDelAggregator range_del_agg(&bytewise_icmp, 19);
  for (size_t i = 0; i < fragment_lists.size(); i++) {
    const auto& fragment_list = fragment_lists[i];
    const auto& bounds = iter_bounds[i];
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             19 /* snapshot */));
    range_del_agg.AddTombstones(std::move(input_iter), &bounds.first,
                                &bounds.second);
  }

  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 10), false},
                                      {InternalValue("a", 9), false},
                                      {InternalValue("a", 4), true},
                                      {InternalValue("m", 10), false},
                                      {InternalValue("m", 9), true},
                                      {InternalValue("x", 10), false},
                                      {InternalValue("x", 9), false},
                                      {InternalValue("x", 5), true},
                                      {InternalValue("z", 9), false}});

  VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
                                           {"_", "a", true},
                                           {"a", "n", true},
                                           {"l", "x", true},
                                           {"w", "z", true},
                                           {"zzz", "zz", false},
                                           {"zz", "zzz", false}});
}
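
// Same setup as above, but the truncated iterators are added one at a time,
// re-verifying coverage after each AddTombstones() call.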
TEST_F(RangeDelAggregatorTest, MultipleTruncatedItersInAggregatorSameLevel) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}});
  std::vector<std::pair<InternalKey, InternalKey>> iter_bounds = {
      {InternalKey("a", 4, kTypeValue),
       InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("m", 20, kTypeValue),
       InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)},
      {InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}};

  ReadRangeDelAggregator range_del_agg(&bytewise_icmp, 19);

  auto add_iter_to_agg = [&](size_t i) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_lists[i].get(),
                                             bytewise_icmp, 19 /* snapshot */));
    range_del_agg.AddTombstones(std::move(input_iter), &iter_bounds[i].first,
                                &iter_bounds[i].second);
  };

  add_iter_to_agg(0);
  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 10), false},
                                      {InternalValue("a", 9), false},
                                      {InternalValue("a", 4), true}});

  add_iter_to_agg(1);
  VerifyShouldDelete(&range_del_agg, {{InternalValue("m", 10), false},
                                      {InternalValue("m", 9), true}});

  add_iter_to_agg(2);
  VerifyShouldDelete(&range_del_agg, {{InternalValue("x", 10), false},
                                      {InternalValue("x", 9), false},
                                      {InternalValue("x", 5), true},
                                      {InternalValue("z", 9), false}});

  VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
                                           {"_", "a", true},
                                           {"a", "n", true},
                                           {"l", "x", true},
                                           {"w", "z", true},
                                           {"zzz", "zz", false},
                                           {"zz", "zzz", false}});
}
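
// With no snapshots, the compaction iterator emits only the newest tombstone
// in each fragment.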
TEST_F(RangeDelAggregatorTest, CompactionAggregatorNoSnapshots) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots;
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), true},
                                      {InternalValue("b", 19), false},
                                      {InternalValue("b", 9), true},
                                      {InternalValue("d", 9), true},
                                      {InternalValue("e", 7), true},
                                      {InternalValue("g", 7), false},
                                      {InternalValue("h", 24), true},
                                      {InternalValue("i", 24), false},
                                      {InternalValue("ii", 14), true},
                                      {InternalValue("j", 14), false}});

  auto range_del_compaction_iter = range_del_agg.NewIterator();
  VerifyFragmentedRangeDels(range_del_compaction_iter.get(), {{"a", "b", 20},
                                                              {"b", "c", 10},
                                                              {"c", "e", 10},
                                                              {"e", "g", 8},
                                                              {"h", "i", 25},
                                                              {"ii", "j", 15}});
}
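
// With snapshots at 9 and 19, one tombstone per visible snapshot stripe is
// kept for each fragment; the bracketed comments give each lookup's stripe.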
TEST_F(RangeDelAggregatorTest, CompactionAggregatorWithSnapshots) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  VerifyShouldDelete(
      &range_del_agg,
      {
          {InternalValue("a", 19), false},  // [10, 19]
          {InternalValue("a", 9), false},   // [0, 9]
          {InternalValue("b", 9), false},   // [0, 9]
          {InternalValue("d", 9), false},   // [0, 9]
          {InternalValue("d", 7), true},    // [0, 9]
          {InternalValue("e", 7), true},    // [0, 9]
          {InternalValue("g", 7), false},   // [0, 9]
          {InternalValue("h", 24), true},   // [20, kMaxSequenceNumber]
          {InternalValue("i", 24), false},  // [20, kMaxSequenceNumber]
          {InternalValue("ii", 14), true},  // [10, 19]
          {InternalValue("j", 14), false}   // [10, 19]
      });

  auto range_del_compaction_iter = range_del_agg.NewIterator();
  VerifyFragmentedRangeDels(range_del_compaction_iter.get(), {{"a", "b", 20},
                                                              {"a", "b", 10},
                                                              {"b", "c", 10},
                                                              {"c", "e", 10},
                                                              {"c", "e", 8},
                                                              {"e", "g", 8},
                                                              {"h", "i", 25},
                                                              {"ii", "j", 15}});
}
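
// Iteration bounds strictly to the left of every tombstone should produce
// empty compaction iterators; the checks mirror the symmetric right-side
// test below.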
TEST_F(RangeDelAggregatorTest, CompactionAggregatorEmptyIteratorLeft) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("_");
  Slice end("__");
  // Both the exclusive and inclusive end-key variants should come back empty.
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {});
}

TEST_F(RangeDelAggregatorTest, CompactionAggregatorEmptyIteratorRight) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("p");
  Slice end("q");
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {});
}
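
// Bounds [bb, e] select only the fragments overlapping that range; with an
// inclusive end key, the fragment starting at "e" is included as well.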
TEST_F(RangeDelAggregatorTest, CompactionAggregatorBoundedIterator) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("bb");
  Slice end("e");
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(),
                            {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(
      range_del_compaction_iter2.get(),
      {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}, {"e", "g", 8}});
}

TEST_F(RangeDelAggregatorTest,
       CompactionAggregatorBoundedIteratorExtraFragments) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "d", 10}, {"c", "g", 8}},
       {{"b", "c", 20}, {"d", "f", 30}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregator range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("bb");
  Slice end("e");
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {{"a", "b", 10},
                                                               {"b", "c", 20},
                                                               {"b", "c", 10},
                                                               {"c", "d", 10},
                                                               {"c", "d", 8},
                                                               {"d", "f", 30},
                                                               {"d", "f", 8},
                                                               {"f", "g", 8}});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {{"a", "b", 10},
                                                               {"b", "c", 20},
                                                               {"b", "c", 10},
                                                               {"c", "d", 10},
                                                               {"c", "d", 8},
                                                               {"d", "f", 30},
                                                               {"d", "f", 8},
                                                               {"f", "g", 8}});
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}