mirror of https://github.com/facebook/rocksdb.git

An initial implementation of kCompactionStopStyleSimilarSize for universal compaction

parent b1194f4903
commit 5e3aeb5f8e
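In brief: universal compaction picks a run of consecutive level-0 files to merge, and stop_style controls where that run ends. The default kCompactionStopStyleTotalSize keeps extending the run while the accumulated size (inflated by size_ratio percent) still covers the next file; the new kCompactionStopStyleSimilarSize stops as soon as the next file's size strays more than size_ratio percent from the last file picked, in either direction. Opting in looks roughly like this (a sketch, not code from this commit; the options header path varies across RocksDB vintages):

// Sketch only: header path and namespace per modern RocksDB trees.
#include "rocksdb/options.h"

rocksdb::Options MakeSimilarSizeUniversalOptions() {
  rocksdb::Options options;
  options.compaction_style = rocksdb::kCompactionStyleUniversal;
  // Files within 10% of each other count as "similar size".
  options.compaction_options_universal.size_ratio = 10;
  options.compaction_options_universal.stop_style =
      rocksdb::kCompactionStopStyleSimilarSize;
  return options;
}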
@@ -683,14 +683,32 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
       if (f->being_compacted) {
         break;
       }
-      // pick files if the total candidate file size (increased by the
+      // Pick files if the total/last candidate file size (increased by the
       // specified ratio) is still larger than the next candidate file.
+      // candidate_size is the total size of files picked so far with the
+      // default kCompactionStopStyleTotalSize; with
+      // kCompactionStopStyleSimilarSize, it's simply the size of the last
+      // picked file.
       uint64_t sz = (candidate_size * (100L + ratio)) / 100;
       if (sz < f->file_size) {
         break;
       }
+      if (options_->compaction_options_universal.stop_style == kCompactionStopStyleSimilarSize) {
+        // Similar-size stopping rule: also check the last picked file isn't
+        // far larger than the next candidate file.
+        sz = (f->file_size * (100L + ratio)) / 100;
+        if (sz < candidate_size) {
+          // If the small file we've encountered begins a run of similar-size
+          // files, we'll pick them up on a future iteration of the outer
+          // loop. If it's some lonely straggler, it'll eventually get picked
+          // by the last-resort read amp strategy which disregards size ratios.
+          break;
+        }
+        candidate_size = f->file_size;
+      } else {  // default kCompactionStopStyleTotalSize
+        candidate_size += f->file_size;
+      }
       candidate_count++;
-      candidate_size += f->file_size;
     }

     // Found a series of consecutive files that need compaction.
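To make the two stopping rules concrete, here is a minimal standalone sketch, not code from this commit: PickRun, the StopStyle enum names, and the newest-to-oldest scan order are illustrative assumptions, and the being_compacted and minimum-merge-width checks of the real picker are omitted. It counts how many consecutive files a single scan would pick under each style.

// Minimal sketch of the two stopping rules (assumed helper, not RocksDB code).
#include <cstdint>
#include <cstdio>
#include <vector>

enum StopStyle { kTotalSize, kSimilarSize };

// How many consecutive files, starting at files[0] (newest first), one scan
// would pick before the stopping rule fires.
size_t PickRun(const std::vector<uint64_t>& files, unsigned ratio,
               StopStyle style) {
  if (files.empty()) return 0;
  uint64_t candidate_size = files[0];
  size_t count = 1;
  for (size_t i = 1; i < files.size(); i++) {
    uint64_t next = files[i];
    // Common check: stop if the next file dwarfs what we track so far.
    if (candidate_size * (100 + ratio) / 100 < next) break;
    if (style == kSimilarSize) {
      // Extra check: also stop if the next file is far smaller than the
      // last picked file.
      if (next * (100 + ratio) / 100 < candidate_size) break;
      candidate_size = next;   // track only the last picked file
    } else {
      candidate_size += next;  // track the running total
    }
    count++;
  }
  return count;
}

int main() {
  // File sizes in KB, newest first, with size_ratio = 10 (i.e. 10%).
  const std::vector<uint64_t> files = {100, 100, 40, 400};
  std::printf("total-size stops after %zu files\n",
              PickRun(files, 10, kTotalSize));    // 3: 100+100 covers the 40
  std::printf("similar-size stops after %zu files\n",
              PickRun(files, 10, kSimilarSize));  // 2: 40 is too small vs 100
  return 0;
}

Under the total-size style, a run of small files can swallow a medium file whose size is already covered by the accumulated total; the similar-size style refuses it and leaves the 40 to accrete neighbors of its own size later, which is exactly the behavior the new comment block describes.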

@@ -2043,6 +2043,98 @@ TEST(DBTest, UniversalCompactionOptions) {
 }
 }

+TEST(DBTest, UniversalCompactionStopStyleSimilarSize) {
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.write_buffer_size = 100 << 10;  // 100KB
+  // trigger compaction if there are >= 4 files
+  options.level0_file_num_compaction_trigger = 4;
+  options.compaction_options_universal.size_ratio = 10;
+  options.compaction_options_universal.stop_style = kCompactionStopStyleSimilarSize;
+  Reopen(&options);
+
+  Random rnd(301);
+  int key_idx = 0;
+
+  // Stage 1:
+  // Generate a set of files at level 0, but don't trigger level-0
+  // compaction.
+  for (int num = 0;
+       num < options.level0_file_num_compaction_trigger - 1;
+       num++) {
+    // Write 120KB (12 values, each 10K)
+    for (int i = 0; i < 12; i++) {
+      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
+  }
+
+  // Generate one more file at level-0, which should trigger level-0
+  // compaction.
+  for (int i = 0; i < 12; i++) {
+    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+    key_idx++;
+  }
+  dbfull()->TEST_WaitForCompact();
+  // Suppose each file flushed from the mem table has size 1. Now we compact
+  // level0_file_num_compaction_trigger = 4 files and should have a big
+  // file of size 4.
+  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
+  for (int i = 1; i < options.num_levels; i++) {
+    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
+  }
+
+  // Stage 2:
+  // Now we have one file at level 0, with size 4. We also have some data in
+  // the mem table. Let's continue generating new files at level 0, but don't
+  // trigger level-0 compaction.
+  // First, clean up the memtable before inserting new data. This will
+  // generate a level-0 file, with size around 0.4 (according to the
+  // previously written data amount).
+  dbfull()->Flush(FlushOptions());
+  for (int num = 0;
+       num < options.level0_file_num_compaction_trigger - 3;
+       num++) {
+    // Write 120KB (12 values, each 10K)
+    for (int i = 0; i < 12; i++) {
+      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
+  }
+
+  // Generate one more file at level-0, which should trigger level-0
+  // compaction.
+  for (int i = 0; i < 12; i++) {
+    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+    key_idx++;
+  }
+  dbfull()->TEST_WaitForCompact();
+  // Before compaction, we have 4 files at level 0, with sizes 4, 0.4, 1, 1.
+  // After compaction, we should have 3 files, with sizes 4, 0.4, 2.
+  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
+  for (int i = 1; i < options.num_levels; i++) {
+    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
+  }
+
+  // Stage 3:
+  // Now we have 3 files at level 0, with sizes 4, 0.4, 2. Generate one
+  // more file at level-0, which should trigger level-0 compaction.
+  for (int i = 0; i < 12; i++) {
+    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+    key_idx++;
+  }
+  dbfull()->TEST_WaitForCompact();
+  // Level-0 compaction is triggered, but no file will be picked up.
+  ASSERT_EQ(NumTableFilesAtLevel(0), 4);
+  for (int i = 1; i < options.num_levels; i++) {
+    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
+  }
+}
+
 #if defined(SNAPPY) && defined(ZLIB) && defined(BZIP2)
 TEST(DBTest, CompressedCache) {
   int num_iter = 80;
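The expected file counts in stages 2 and 3 follow directly from the similar-size rule. The small self-contained check below is a sketch using an assumed name (SimilarSizeRun), not anything from the test: it replays the level-0 file sizes newest-first with the test's 10% size ratio. Because the similar-size style compares each next file only against the last file picked, consecutive pairwise comparisons are sufficient.

// Hand-check of the test's expected pick counts (illustrative sketch only).
#include <cstdint>
#include <cstdio>
#include <vector>

// Length of the run the similar-size rule picks starting at files[0]
// (newest first): stop when adjacent files differ by more than `ratio` percent.
size_t SimilarSizeRun(const std::vector<uint64_t>& files, unsigned ratio) {
  size_t count = files.empty() ? 0 : 1;
  for (size_t i = 1; i < files.size(); i++) {
    const uint64_t last = files[i - 1], next = files[i];
    if (last * (100 + ratio) / 100 < next ||  // next file far larger
        next * (100 + ratio) / 100 < last) {  // next file far smaller
      break;
    }
    count++;
  }
  return count;
}

int main() {
  // Sizes scaled by 10 to stay integral (so 10 == "size 1", 4 == "size 0.4").
  // Stage 2, newest first: 1, 1, 0.4, 4 -> a run of 2, merged into size 2,
  // leaving files 4, 0.4, 2 as the test asserts.
  std::printf("stage 2: picks %zu files\n", SimilarSizeRun({10, 10, 4, 40}, 10));
  // Stage 3, newest first: 1, 2, 0.4, 4 -> no adjacent pair is within 10%,
  // so every run has length 1 and the triggered compaction picks nothing.
  std::printf("stage 3: picks %zu files\n", SimilarSizeRun({10, 20, 4, 40}, 10));
  return 0;
}

A run needs at least two files before the picker will schedule it (min_merge_width defaults to 2), which is why stage 3 leaves all four level-0 files in place even though the compaction trigger fired.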