Test range deletions with more configurations (#4021)

Summary:
Run the basic range deletion tests against the standard set of
configurations. This testing exposed that files with hash indexes and
partitioned indexes were not handling the case where the file contained
only range deletions--i.e., where the index was empty.

Additionally, this change files a TODO noting that range deletions are broken
when allow_mmap_reads = true is set.

/cc ajkr nvanbenschoten

Best viewed with ?w=1: https://github.com/facebook/rocksdb/pull/4021/files?w=1
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4021

Differential Revision: D8811860

Pulled By: ajkr

fbshipit-source-id: 3cc07e6d6210a2a00b932866481b3d5c59775343
This commit is contained in:
Nikhil Benesch 2018-07-11 15:45:21 -07:00 committed by Facebook Github Bot
parent cfee7fb51a
commit 5cd8240b86
4 changed files with 95 additions and 65 deletions

View file

@ -23,37 +23,44 @@ class DBRangeDelTest : public DBTestBase {
} }
}; };
const int kRangeDelSkipConfigs =
// Plain tables do not support range deletions.
DBRangeDelTest::kSkipPlainTable |
// MmapReads disables the iterator pinning that RangeDelAggregator requires.
DBRangeDelTest::kSkipMmapReads;
// PlainTableFactory and NumTableFilesAtLevel() are not supported in // PlainTableFactory and NumTableFilesAtLevel() are not supported in
// ROCKSDB_LITE // ROCKSDB_LITE
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE
TEST_F(DBRangeDelTest, NonBlockBasedTableNotSupported) { TEST_F(DBRangeDelTest, NonBlockBasedTableNotSupported) {
if (!IsMemoryMappedAccessSupported()) { // TODO: figure out why MmapReads trips the iterator pinning assertion in
return; // RangeDelAggregator. Ideally it would be supported; otherwise it should at
} // least be explicitly unsupported.
Options opts = CurrentOptions(); for (auto config : {kPlainTableAllBytesPrefix, /* kWalDirAndMmapReads */}) {
opts.table_factory.reset(new PlainTableFactory()); option_config_ = config;
opts.prefix_extractor.reset(NewNoopTransform()); DestroyAndReopen(CurrentOptions());
opts.allow_mmap_reads = true;
opts.max_sequential_skip_in_iterations = 999999;
Reopen(opts);
ASSERT_TRUE( ASSERT_TRUE(
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1", "dr1") db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1", "dr1")
.IsNotSupported()); .IsNotSupported());
} }
}
TEST_F(DBRangeDelTest, FlushOutputHasOnlyRangeTombstones) { TEST_F(DBRangeDelTest, FlushOutputHasOnlyRangeTombstones) {
do {
DestroyAndReopen(CurrentOptions());
ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1", ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "dr1",
"dr2")); "dr2"));
ASSERT_OK(db_->Flush(FlushOptions())); ASSERT_OK(db_->Flush(FlushOptions()));
ASSERT_EQ(1, NumTableFilesAtLevel(0)); ASSERT_EQ(1, NumTableFilesAtLevel(0));
} while (ChangeOptions(kRangeDelSkipConfigs));
} }
TEST_F(DBRangeDelTest, CompactionOutputHasOnlyRangeTombstone) { TEST_F(DBRangeDelTest, CompactionOutputHasOnlyRangeTombstone) {
do {
Options opts = CurrentOptions(); Options opts = CurrentOptions();
opts.disable_auto_compactions = true; opts.disable_auto_compactions = true;
opts.statistics = CreateDBStatistics(); opts.statistics = CreateDBStatistics();
Reopen(opts); DestroyAndReopen(opts);
// snapshot protects range tombstone from dropping due to becoming obsolete. // snapshot protects range tombstone from dropping due to becoming obsolete.
const Snapshot* snapshot = db_->GetSnapshot(); const Snapshot* snapshot = db_->GetSnapshot();
@ -68,6 +75,11 @@ TEST_F(DBRangeDelTest, CompactionOutputHasOnlyRangeTombstone) {
ASSERT_EQ(1, NumTableFilesAtLevel(1)); ASSERT_EQ(1, NumTableFilesAtLevel(1));
ASSERT_EQ(0, TestGetTickerCount(opts, COMPACTION_RANGE_DEL_DROP_OBSOLETE)); ASSERT_EQ(0, TestGetTickerCount(opts, COMPACTION_RANGE_DEL_DROP_OBSOLETE));
db_->ReleaseSnapshot(snapshot); db_->ReleaseSnapshot(snapshot);
// Skip cuckoo memtables, which do not support snapshots. Skip non-leveled
// compactions as the above assertions about the number of files in a level
// do not hold true.
} while (ChangeOptions(kRangeDelSkipConfigs | kSkipHashCuckoo |
kSkipUniversalCompaction | kSkipFIFOCompaction));
} }
TEST_F(DBRangeDelTest, CompactionOutputFilesExactlyFilled) { TEST_F(DBRangeDelTest, CompactionOutputFilesExactlyFilled) {
@ -590,6 +602,8 @@ TEST_F(DBRangeDelTest, TableEvictedDuringScan) {
} }
TEST_F(DBRangeDelTest, GetCoveredKeyFromMutableMemtable) { TEST_F(DBRangeDelTest, GetCoveredKeyFromMutableMemtable) {
do {
DestroyAndReopen(CurrentOptions());
db_->Put(WriteOptions(), "key", "val"); db_->Put(WriteOptions(), "key", "val");
ASSERT_OK( ASSERT_OK(
db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z")); db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "a", "z"));
@ -597,9 +611,11 @@ TEST_F(DBRangeDelTest, GetCoveredKeyFromMutableMemtable) {
ReadOptions read_opts; ReadOptions read_opts;
std::string value; std::string value;
ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound()); ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound());
} while (ChangeOptions(kRangeDelSkipConfigs));
} }
TEST_F(DBRangeDelTest, GetCoveredKeyFromImmutableMemtable) { TEST_F(DBRangeDelTest, GetCoveredKeyFromImmutableMemtable) {
do {
Options opts = CurrentOptions(); Options opts = CurrentOptions();
opts.max_write_buffer_number = 3; opts.max_write_buffer_number = 3;
opts.min_write_buffer_number_to_merge = 2; opts.min_write_buffer_number_to_merge = 2;
@ -608,7 +624,7 @@ TEST_F(DBRangeDelTest, GetCoveredKeyFromImmutableMemtable) {
// prevented by the above options) upon inserting an element that would // prevented by the above options) upon inserting an element that would
// overflow the memtable. // overflow the memtable.
opts.memtable_factory.reset(new SpecialSkipListFactory(1)); opts.memtable_factory.reset(new SpecialSkipListFactory(1));
Reopen(opts); DestroyAndReopen(opts);
db_->Put(WriteOptions(), "key", "val"); db_->Put(WriteOptions(), "key", "val");
ASSERT_OK( ASSERT_OK(
@ -618,9 +634,12 @@ TEST_F(DBRangeDelTest, GetCoveredKeyFromImmutableMemtable) {
ReadOptions read_opts; ReadOptions read_opts;
std::string value; std::string value;
ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound()); ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound());
} while (ChangeOptions(kRangeDelSkipConfigs));
} }
TEST_F(DBRangeDelTest, GetCoveredKeyFromSst) { TEST_F(DBRangeDelTest, GetCoveredKeyFromSst) {
do {
DestroyAndReopen(CurrentOptions());
db_->Put(WriteOptions(), "key", "val"); db_->Put(WriteOptions(), "key", "val");
// snapshot prevents key from being deleted during flush // snapshot prevents key from being deleted during flush
const Snapshot* snapshot = db_->GetSnapshot(); const Snapshot* snapshot = db_->GetSnapshot();
@ -632,6 +651,8 @@ TEST_F(DBRangeDelTest, GetCoveredKeyFromSst) {
std::string value; std::string value;
ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound()); ASSERT_TRUE(db_->Get(read_opts, "key", &value).IsNotFound());
db_->ReleaseSnapshot(snapshot); db_->ReleaseSnapshot(snapshot);
// Cuckoo memtables do not support snapshots.
} while (ChangeOptions(kRangeDelSkipConfigs | kSkipHashCuckoo));
} }
TEST_F(DBRangeDelTest, GetCoveredMergeOperandFromMemtable) { TEST_F(DBRangeDelTest, GetCoveredMergeOperandFromMemtable) {

View file

@ -268,6 +268,10 @@ class PartitionIndexReader : public IndexReader, public Cleanable {
// Index partitions are assumed to be consecutive. Prefetch them all. // Index partitions are assumed to be consecutive. Prefetch them all.
// Read the first block offset // Read the first block offset
biter.SeekToFirst(); biter.SeekToFirst();
if (!biter.Valid()) {
// Empty index.
return;
}
Slice input = biter.value(); Slice input = biter.value();
Status s = handle.DecodeFrom(&input); Status s = handle.DecodeFrom(&input);
assert(s.ok()); assert(s.ok());
@ -280,6 +284,10 @@ class PartitionIndexReader : public IndexReader, public Cleanable {
// Read the last block's offset // Read the last block's offset
biter.SeekToLast(); biter.SeekToLast();
if (!biter.Valid()) {
// Empty index.
return;
}
input = biter.value(); input = biter.value();
s = handle.DecodeFrom(&input); s = handle.DecodeFrom(&input);
assert(s.ok()); assert(s.ok());

View file

@ -143,7 +143,6 @@ void PartitionedIndexBuilder::AddIndexEntry(
Status PartitionedIndexBuilder::Finish( Status PartitionedIndexBuilder::Finish(
IndexBlocks* index_blocks, const BlockHandle& last_partition_block_handle) { IndexBlocks* index_blocks, const BlockHandle& last_partition_block_handle) {
assert(!entries_.empty());
// It must be set to null after last key is added // It must be set to null after last key is added
assert(sub_index_builder_ == nullptr); assert(sub_index_builder_ == nullptr);
if (finishing_indexes == true) { if (finishing_indexes == true) {

View file

@ -261,7 +261,9 @@ class HashIndexBuilder : public IndexBuilder {
virtual Status Finish( virtual Status Finish(
IndexBlocks* index_blocks, IndexBlocks* index_blocks,
const BlockHandle& last_partition_block_handle) override { const BlockHandle& last_partition_block_handle) override {
if (pending_block_num_ != 0) {
FlushPendingPrefix(); FlushPendingPrefix();
}
primary_index_builder_.Finish(index_blocks, last_partition_block_handle); primary_index_builder_.Finish(index_blocks, last_partition_block_handle);
index_blocks->meta_blocks.insert( index_blocks->meta_blocks.insert(
{kHashIndexPrefixesBlock.c_str(), prefix_block_}); {kHashIndexPrefixesBlock.c_str(), prefix_block_});