Arena usage to be calculated using malloc_usable_size()

Summary: malloc_usable_size() gives a better estimate of actual memory usage than the requested allocation size. It is already used to calculate block cache memory usage; use it in the arena too.

Test Plan: Run all unit tests

Reviewers: anthony, kradhakrishnan, rven, IslamAbdelRahman, yhchiang

Reviewed By: yhchiang

Subscribers: leveldb, dhruba

Differential Revision: https://reviews.facebook.net/D43317
sdong 2015-08-26 14:19:31 -07:00
parent effd9dd1e1
commit 3d78eb66bb
9 changed files with 145 additions and 119 deletions
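
As background for the summary: on allocators that expose it (glibc's malloc, jemalloc), malloc_usable_size() reports how many bytes were actually reserved for an allocation, which can exceed the requested size because allocators round requests up to internal size classes. A minimal stand-alone sketch of the difference (not part of this commit; exact numbers are allocator-dependent):

#include <malloc.h>  // malloc_usable_size() on glibc; jemalloc exposes it too
#include <cstdio>
#include <cstdlib>

int main() {
  // Request 3001 bytes; the allocator may hand back a block rounded up to
  // its next size class, so the usable size can be larger than requested.
  void* p = malloc(3001);
  if (p == nullptr) return 1;
  // Summing usable sizes tracks real memory consumption more closely than
  // summing requested sizes, which is what this commit switches arenas to.
  printf("requested=3001 usable=%zu\n", malloc_usable_size(p));
  free(p);
  return 0;
}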

db/column_family_test.cc

@@ -625,6 +625,7 @@ TEST_F(ColumnFamilyTest, FlushTest) {
// Makes sure that obsolete log files get deleted
TEST_F(ColumnFamilyTest, LogDeletionTest) {
db_options_.max_total_wal_size = std::numeric_limits<uint64_t>::max();
+column_family_options_.arena_block_size = 4 * 1024;
column_family_options_.write_buffer_size = 100000; // 100KB
Open();
CreateColumnFamilies({"one", "two", "three", "four"});
@@ -702,18 +703,22 @@ TEST_F(ColumnFamilyTest, DifferentWriteBufferSizes) {
// "two" -> 1MB memtable, start flushing with three immutable memtables
// "three" -> 90KB memtable, start flushing with four immutable memtables
default_cf.write_buffer_size = 100000;
+default_cf.arena_block_size = 4 * 4096;
default_cf.max_write_buffer_number = 10;
default_cf.min_write_buffer_number_to_merge = 1;
default_cf.max_write_buffer_number_to_maintain = 0;
one.write_buffer_size = 200000;
+one.arena_block_size = 4 * 4096;
one.max_write_buffer_number = 10;
one.min_write_buffer_number_to_merge = 2;
one.max_write_buffer_number_to_maintain = 1;
two.write_buffer_size = 1000000;
+two.arena_block_size = 4 * 4096;
two.max_write_buffer_number = 10;
two.min_write_buffer_number_to_merge = 3;
two.max_write_buffer_number_to_maintain = 2;
-three.write_buffer_size = 90000;
+three.write_buffer_size = 4096 * 22 + 2048;
+three.arena_block_size = 4096;
three.max_write_buffer_number = 10;
three.min_write_buffer_number_to_merge = 4;
three.max_write_buffer_number_to_maintain = -1;
@@ -737,15 +742,15 @@ TEST_F(ColumnFamilyTest, DifferentWriteBufferSizes) {
env_->SleepForMicroseconds(micros_wait_for_flush);
AssertNumberOfImmutableMemtables({0, 1, 2, 0});
AssertCountLiveLogFiles(4);
-PutRandomData(3, 90, 1000);
+PutRandomData(3, 91, 990);
env_->SleepForMicroseconds(micros_wait_for_flush);
AssertNumberOfImmutableMemtables({0, 1, 2, 1});
AssertCountLiveLogFiles(5);
-PutRandomData(3, 90, 1000);
+PutRandomData(3, 90, 990);
env_->SleepForMicroseconds(micros_wait_for_flush);
AssertNumberOfImmutableMemtables({0, 1, 2, 2});
AssertCountLiveLogFiles(6);
-PutRandomData(3, 90, 1000);
+PutRandomData(3, 90, 990);
env_->SleepForMicroseconds(micros_wait_for_flush);
AssertNumberOfImmutableMemtables({0, 1, 2, 3});
AssertCountLiveLogFiles(7);
@@ -757,11 +762,11 @@ TEST_F(ColumnFamilyTest, DifferentWriteBufferSizes) {
WaitForFlush(2);
AssertNumberOfImmutableMemtables({0, 1, 0, 3});
AssertCountLiveLogFiles(9);
-PutRandomData(3, 90, 1000);
+PutRandomData(3, 90, 990);
WaitForFlush(3);
AssertNumberOfImmutableMemtables({0, 1, 0, 0});
AssertCountLiveLogFiles(10);
-PutRandomData(3, 90, 1000);
+PutRandomData(3, 90, 990);
env_->SleepForMicroseconds(micros_wait_for_flush);
AssertNumberOfImmutableMemtables({0, 1, 0, 1});
AssertCountLiveLogFiles(11);
@@ -769,9 +774,9 @@ TEST_F(ColumnFamilyTest, DifferentWriteBufferSizes) {
WaitForFlush(1);
AssertNumberOfImmutableMemtables({0, 0, 0, 1});
AssertCountLiveLogFiles(5);
-PutRandomData(3, 240, 1000);
+PutRandomData(3, 90 * 3, 990);
WaitForFlush(3);
-PutRandomData(3, 300, 1000);
+PutRandomData(3, 90 * 4, 990);
WaitForFlush(3);
AssertNumberOfImmutableMemtables({0, 0, 0, 0});
AssertCountLiveLogFiles(12);
@@ -779,7 +784,7 @@ TEST_F(ColumnFamilyTest, DifferentWriteBufferSizes) {
WaitForFlush(0);
AssertNumberOfImmutableMemtables({0, 0, 0, 0});
AssertCountLiveLogFiles(12);
-PutRandomData(2, 3*100, 10000);
+PutRandomData(2, 3 * 1000, 1000);
WaitForFlush(2);
AssertNumberOfImmutableMemtables({0, 0, 0, 0});
AssertCountLiveLogFiles(12);
@@ -864,7 +869,7 @@ TEST_F(ColumnFamilyTest, DifferentCompactionStyles) {
one.num_levels = 1;
// trigger compaction if there are >= 4 files
one.level0_file_num_compaction_trigger = 4;
-one.write_buffer_size = 100000;
+one.write_buffer_size = 120000;
two.compaction_style = kCompactionStyleLevel;
two.num_levels = 4;
@@ -875,23 +880,27 @@ TEST_F(ColumnFamilyTest, DifferentCompactionStyles) {
// SETUP column family "one" -- universal style
for (int i = 0; i < one.level0_file_num_compaction_trigger - 1; ++i) {
-PutRandomData(1, 11, 10000);
+PutRandomData(1, 10, 12000);
+PutRandomData(1, 1, 10);
WaitForFlush(1);
AssertFilesPerLevel(ToString(i + 1), 1);
}
// SETUP column family "two" -- level style with 4 levels
for (int i = 0; i < two.level0_file_num_compaction_trigger - 1; ++i) {
-PutRandomData(2, 15, 10000);
+PutRandomData(2, 10, 12000);
+PutRandomData(2, 1, 10);
WaitForFlush(2);
AssertFilesPerLevel(ToString(i + 1), 2);
}
// TRIGGER compaction "one"
-PutRandomData(1, 12, 10000);
+PutRandomData(1, 10, 12000);
+PutRandomData(1, 1, 10);
// TRIGGER compaction "two"
-PutRandomData(2, 10, 10000);
+PutRandomData(2, 10, 12000);
+PutRandomData(2, 1, 10);
// WAIT for compactions
WaitForCompaction();
@@ -1045,6 +1054,7 @@ TEST_F(ColumnFamilyTest, FlushStaleColumnFamilies) {
CreateColumnFamilies({"one", "two"});
ColumnFamilyOptions default_cf, one, two;
default_cf.write_buffer_size = 100000; // small write buffer size
+default_cf.arena_block_size = 4096;
default_cf.disable_auto_compactions = true;
one.disable_auto_compactions = true;
two.disable_auto_compactions = true;

db/db_compaction_test.cc

@@ -505,7 +505,8 @@ TEST_F(DBCompactionTest, DisableStatsUpdateReopen) {
TEST_P(DBCompactionTestWithParam, CompactionTrigger) {
Options options;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 110 << 10; // 110KB
+options.arena_block_size = 4 << 10;
options.num_levels = 3;
options.level0_file_num_compaction_trigger = 3;
options.max_subcompactions = max_subcompactions_;
@@ -517,9 +518,9 @@ TEST_P(DBCompactionTestWithParam, CompactionTrigger) {
for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
num++) {
std::vector<std::string> values;
-// Write 120KB (12 values, each 10K)
-for (int i = 0; i < 12; i++) {
-values.push_back(RandomString(&rnd, 10000));
+// Write 100KB (100 values, each 1K)
+for (int i = 0; i < 100; i++) {
+values.push_back(RandomString(&rnd, 990));
ASSERT_OK(Put(1, Key(i), values[i]));
}
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
@@ -528,8 +529,8 @@ TEST_P(DBCompactionTestWithParam, CompactionTrigger) {
// generate one more file in level-0, and should trigger level-0 compaction
std::vector<std::string> values;
-for (int i = 0; i < 12; i++) {
-values.push_back(RandomString(&rnd, 10000));
+for (int i = 0; i < 100; i++) {
+values.push_back(RandomString(&rnd, 990));
ASSERT_OK(Put(1, Key(i), values[i]));
}
dbfull()->TEST_WaitForCompact();
@@ -892,7 +893,8 @@ TEST_P(DBCompactionTestWithParam, LevelCompactionThirdPath) {
options.db_paths.emplace_back(dbname_ + "_2", 4 * 1024 * 1024);
options.db_paths.emplace_back(dbname_ + "_3", 1024 * 1024 * 1024);
options.compaction_style = kCompactionStyleLevel;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 110 << 10; // 110KB
+options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 2;
options.num_levels = 4;
options.max_bytes_for_level_base = 400 * 1024;
@@ -986,7 +988,7 @@ TEST_P(DBCompactionTestWithParam, LevelCompactionThirdPath) {
for (int i = 0; i < key_idx; i++) {
auto v = Get(Key(i));
ASSERT_NE(v, "NOT_FOUND");
-ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
+ASSERT_TRUE(v.size() == 1 || v.size() == 990);
}
Reopen(options);
@@ -994,7 +996,7 @@ TEST_P(DBCompactionTestWithParam, LevelCompactionThirdPath) {
for (int i = 0; i < key_idx; i++) {
auto v = Get(Key(i));
ASSERT_NE(v, "NOT_FOUND");
-ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
+ASSERT_TRUE(v.size() == 1 || v.size() == 990);
}
Destroy(options);
@@ -1006,7 +1008,8 @@ TEST_P(DBCompactionTestWithParam, LevelCompactionPathUse) {
options.db_paths.emplace_back(dbname_ + "_2", 4 * 1024 * 1024);
options.db_paths.emplace_back(dbname_ + "_3", 1024 * 1024 * 1024);
options.compaction_style = kCompactionStyleLevel;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 110 << 10; // 110KB
+options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 2;
options.num_levels = 4;
options.max_bytes_for_level_base = 400 * 1024;
@@ -1101,7 +1104,7 @@ TEST_P(DBCompactionTestWithParam, LevelCompactionPathUse) {
for (int i = 0; i < key_idx; i++) {
auto v = Get(Key(i));
ASSERT_NE(v, "NOT_FOUND");
-ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
+ASSERT_TRUE(v.size() == 1 || v.size() == 990);
}
Reopen(options);
@@ -1109,7 +1112,7 @@ TEST_P(DBCompactionTestWithParam, LevelCompactionPathUse) {
for (int i = 0; i < key_idx; i++) {
auto v = Get(Key(i));
ASSERT_NE(v, "NOT_FOUND");
-ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
+ASSERT_TRUE(v.size() == 1 || v.size() == 990);
}
Destroy(options);
@@ -1122,7 +1125,8 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) {
// Stage 1: generate a db with level compaction
Options options;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 110 << 10; // 110KB
+options.arena_block_size = 4 << 10;
options.num_levels = 4;
options.level0_file_num_compaction_trigger = 3;
options.max_bytes_for_level_base = 500 << 10; // 500KB
@@ -1180,7 +1184,8 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) {
options = CurrentOptions();
options.compaction_style = kCompactionStyleUniversal;
options.num_levels = 4;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 110 << 10; // 110KB
+options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 3;
options = CurrentOptions(options);
ReopenWithColumnFamilies({"default", "pikachu"}, options);
@@ -1634,7 +1639,8 @@ TEST_P(DBCompactionTestWithParam, CompressLevelCompaction) {
}
Options options = CurrentOptions();
options.compaction_style = kCompactionStyleLevel;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 110 << 10; // 110KB
+options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 2;
options.num_levels = 4;
options.max_bytes_for_level_base = 400 * 1024;
@@ -1724,7 +1730,7 @@ TEST_P(DBCompactionTestWithParam, CompressLevelCompaction) {
for (int i = 0; i < key_idx; i++) {
auto v = Get(Key(i));
ASSERT_NE(v, "NOT_FOUND");
-ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
+ASSERT_TRUE(v.size() == 1 || v.size() == 990);
}
Reopen(options);
@@ -1732,7 +1738,7 @@ TEST_P(DBCompactionTestWithParam, CompressLevelCompaction) {
for (int i = 0; i < key_idx; i++) {
auto v = Get(Key(i));
ASSERT_NE(v, "NOT_FOUND");
-ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
+ASSERT_TRUE(v.size() == 1 || v.size() == 990);
}
Destroy(options);
@@ -1744,6 +1750,7 @@ TEST_P(DBCompactionTestWithParam, SuggestCompactRangeNoTwoLevel0Compactions) {
Options options = CurrentOptions();
options.compaction_style = kCompactionStyleLevel;
options.write_buffer_size = 110 << 10;
+options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 4;
options.num_levels = 4;
options.compression = kNoCompression;

db/db_test.cc

@@ -2602,7 +2602,7 @@ TEST_F(DBTest, FlushSchedule) {
options.min_write_buffer_number_to_merge = 1;
options.max_write_buffer_number_to_maintain = 1;
options.max_write_buffer_number = 2;
-options.write_buffer_size = 100 * 1000;
+options.write_buffer_size = 120 * 1024;
CreateAndReopenWithCF({"pikachu"}, options);
std::vector<std::thread> threads;
@@ -2614,7 +2614,7 @@ TEST_F(DBTest, FlushSchedule) {
Random rnd(a);
WriteOptions wo;
// this should fill up 2 memtables
-for (int k = 0; k < 5000; ++k) {
+for (int k = 0; k < 6144; ++k) {
ASSERT_OK(db_->Put(wo, handles_[a & 1], RandomString(&rnd, 13), ""));
}
};
@@ -2975,6 +2975,7 @@ bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
int lev, int strategy) {
fprintf(stderr, "Test with compression options : window_bits = %d, level = %d, strategy = %d}\n", wbits, lev, strategy);
options.write_buffer_size = 100<<10; //100KB
+options.arena_block_size = 4096;
options.num_levels = 3;
options.level0_file_num_compaction_trigger = 3;
options.create_if_missing = true;
@@ -3691,7 +3692,8 @@ TEST_F(DBTest, CustomComparator) {
new_options = CurrentOptions();
new_options.create_if_missing = true;
new_options.comparator = &cmp;
-new_options.write_buffer_size = 1000; // Compact more often
+new_options.write_buffer_size = 4096; // Compact more often
+new_options.arena_block_size = 4096;
new_options = CurrentOptions(new_options);
DestroyAndReopen(new_options);
CreateAndReopenWithCF({"pikachu"}, new_options);
@@ -3912,7 +3914,8 @@ TEST_F(DBTest, NoSpaceCompactRange) {
TEST_F(DBTest, NonWritableFileSystem) {
do {
Options options = CurrentOptions();
-options.write_buffer_size = 1000;
+options.write_buffer_size = 4096;
+options.arena_block_size = 4096;
options.env = env_;
Reopen(options);
ASSERT_OK(Put("foo", "v1"));
@@ -4531,7 +4534,8 @@ TEST_F(DBTest, RecoverCheckFileAmountWithSmallWriteBuffer) {
ASSERT_OK(Put(3, Key(10), DummyString(1)));
ASSERT_OK(Put(3, Key(10), DummyString(1)));
-options.write_buffer_size = 10;
+options.write_buffer_size = 4096;
+options.arena_block_size = 4096;
ReopenWithColumnFamilies({"default", "pikachu", "dobrynia", "nikitich"},
options);
{
@@ -4558,6 +4562,7 @@ TEST_F(DBTest, RecoverCheckFileAmountWithSmallWriteBuffer) {
TEST_F(DBTest, RecoverCheckFileAmount) {
Options options = CurrentOptions();
options.write_buffer_size = 100000;
+options.arena_block_size = 4 * 1024;
CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options);
ASSERT_OK(Put(0, Key(1), DummyString(1)));
@@ -5956,6 +5961,7 @@ TEST_P(DBTestWithParam, FIFOCompactionTest) {
Options options;
options.compaction_style = kCompactionStyleFIFO;
options.write_buffer_size = 100 << 10; // 100KB
+options.arena_block_size = 4096;
options.compaction_options_fifo.max_table_files_size = 500 << 10; // 500KB
options.compression = kNoCompression;
options.create_if_missing = true;
@@ -5968,8 +5974,8 @@ TEST_P(DBTestWithParam, FIFOCompactionTest) {
Random rnd(301);
for (int i = 0; i < 6; ++i) {
-for (int j = 0; j < 100; ++j) {
-ASSERT_OK(Put(ToString(i * 100 + j), RandomString(&rnd, 1024)));
+for (int j = 0; j < 110; ++j) {
+ASSERT_OK(Put(ToString(i * 100 + j), RandomString(&rnd, 980)));
}
// flush should happen here
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
@@ -6344,6 +6350,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
options.compression = kNoCompression;
options.max_background_compactions = 1;
options.write_buffer_size = k64KB;
+options.arena_block_size = 16 * 1024;
options.max_write_buffer_number = 2;
// Don't trigger compact/slowdown/stop
options.level0_file_num_compaction_trigger = 1024;
@@ -6372,7 +6379,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
gen_l0_kb(64);
ASSERT_EQ(NumTableFilesAtLevel(0), 1);
ASSERT_LT(SizeAtLevel(0), k64KB + k5KB);
-ASSERT_GT(SizeAtLevel(0), k64KB - k5KB);
+ASSERT_GT(SizeAtLevel(0), k64KB - k5KB * 2);
// Clean up L0
dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
@@ -6389,7 +6396,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
gen_l0_kb(256);
ASSERT_EQ(NumTableFilesAtLevel(0), 2); // (A)
ASSERT_LT(SizeAtLevel(0), k128KB + k64KB + 2 * k5KB);
-ASSERT_GT(SizeAtLevel(0), k128KB + k64KB - 2 * k5KB);
+ASSERT_GT(SizeAtLevel(0), k128KB + k64KB - 4 * k5KB);
// Test max_write_buffer_number
// Block compaction thread, which will also block the flushes because
@@ -7150,6 +7157,7 @@ TEST_P(DBTestWithParam, DynamicCompactionOptions) {
options.compression = kNoCompression;
options.soft_rate_limit = 1.1;
options.write_buffer_size = k64KB;
+options.arena_block_size = 4 * k4KB;
options.max_write_buffer_number = 2;
// Compaction related options
options.level0_file_num_compaction_trigger = 3;
@@ -7773,7 +7781,8 @@ TEST_F(DBTest, DeleteObsoleteFilesPendingOutputs) {
TEST_F(DBTest, CloseSpeedup) {
Options options = CurrentOptions();
options.compaction_style = kCompactionStyleLevel;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 110 << 10; // 110KB
+options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 2;
options.num_levels = 4;
options.max_bytes_for_level_base = 400 * 1024;
@@ -8101,7 +8110,8 @@ TEST_F(DBTest, SuggestCompactRangeTest) {
options.compaction_style = kCompactionStyleLevel;
options.compaction_filter_factory.reset(
new CompactionFilterFactoryGetContext());
-options.write_buffer_size = 110 << 10;
+options.write_buffer_size = 100 << 10;
+options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 4;
options.num_levels = 4;
options.compression = kNoCompression;

db/db_universal_compaction_test.cc

@@ -120,7 +120,8 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
Options options;
options.compaction_style = kCompactionStyleUniversal;
options.num_levels = num_levels_;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 105 << 10; // 105KB
+options.arena_block_size = 4 << 10;
options.target_file_size_base = 32 << 10; // 32KB
// trigger compaction if there are >= 4 files
options.level0_file_num_compaction_trigger = 4;
@@ -151,22 +152,13 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
// compaction.
for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
num++) {
-// Write 110KB (11 values, each 10K)
-for (int i = 0; i < 12; i++) {
-ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
-key_idx++;
-}
-dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
-ASSERT_EQ(NumSortedRuns(1), num + 1);
+// Write 100KB
+GenerateNewFile(1, &rnd, &key_idx);
}
// Generate one more file at level-0, which should trigger level-0
// compaction.
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
-key_idx++;
-}
-dbfull()->TEST_WaitForCompact();
+GenerateNewFile(1, &rnd, &key_idx);
// Suppose each file flushed from mem table has size 1. Now we compact
// (level0_file_num_compaction_trigger+1)=4 files and should have a big
// file of size 4.
@@ -183,22 +175,13 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
ASSERT_OK(Flush(1));
for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
num++) {
-// Write 110KB (11 values, each 10K)
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
-key_idx++;
-}
-dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+GenerateNewFile(1, &rnd, &key_idx);
ASSERT_EQ(NumSortedRuns(1), num + 3);
}
// Generate one more file at level-0, which should trigger level-0
// compaction.
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
-key_idx++;
-}
-dbfull()->TEST_WaitForCompact();
+GenerateNewFile(1, &rnd, &key_idx);
// Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
// After compaction, we should have 2 files, with size 4, 2.4.
ASSERT_EQ(NumSortedRuns(1), 2);
@@ -208,22 +191,13 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
// generating new files at level 0.
for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
num++) {
-// Write 110KB (11 values, each 10K)
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
-key_idx++;
-}
-dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+GenerateNewFile(1, &rnd, &key_idx);
ASSERT_EQ(NumSortedRuns(1), num + 3);
}
// Generate one more file at level-0, which should trigger level-0
// compaction.
-for (int i = 0; i < 12; i++) {
-ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
-key_idx++;
-}
-dbfull()->TEST_WaitForCompact();
+GenerateNewFile(1, &rnd, &key_idx);
// Before compaction, we have 4 files at level 0, with size 4, 2.4, 1, 1.
// After compaction, we should have 3 files, with size 4, 2.4, 2.
ASSERT_EQ(NumSortedRuns(1), 3);
@@ -231,10 +205,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
// Stage 4:
// Now we have 3 files at level 0, with size 4, 2.4, 2. Let's generate a
// new file of size 1.
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
-key_idx++;
-}
+GenerateNewFile(1, &rnd, &key_idx);
dbfull()->TEST_WaitForCompact();
// Level-0 compaction is triggered, but no file will be picked up.
ASSERT_EQ(NumSortedRuns(1), 4);
@@ -243,10 +214,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
// Now we have 4 files at level 0, with size 4, 2.4, 2, 1. Let's generate
// a new file of size 1.
filter->expect_full_compaction_.store(true);
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
-key_idx++;
-}
+GenerateNewFile(1, &rnd, &key_idx);
dbfull()->TEST_WaitForCompact();
// All files at level 0 will be compacted into a single one.
ASSERT_EQ(NumSortedRuns(1), 1);
@@ -569,7 +537,8 @@ INSTANTIATE_TEST_CASE_P(DBTestUniversalCompactionParallel,
TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) {
Options options;
options.compaction_style = kCompactionStyleUniversal;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 105 << 10; // 105KB
+options.arena_block_size = 4 << 10; // 4KB
options.target_file_size_base = 32 << 10; // 32KB
options.level0_file_num_compaction_trigger = 4;
options.num_levels = num_levels_;
@@ -582,9 +551,9 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) {
int key_idx = 0;
for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
-// Write 110KB (11 values, each 10K)
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000)));
+// Write 100KB (100 values, each 1K)
+for (int i = 0; i < 100; i++) {
+ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 990)));
key_idx++;
}
dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
@@ -601,7 +570,8 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
Options options = CurrentOptions();
options.compaction_style = kCompactionStyleUniversal;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 105 << 10; // 105KB
+options.arena_block_size = 4 << 10; // 4KB
options.target_file_size_base = 32 << 10; // 32KB
// trigger compaction if there are >= 4 files
options.level0_file_num_compaction_trigger = 4;
@@ -619,9 +589,9 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
// compaction.
for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
num++) {
-// Write 110KB (11 values, each 10K)
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+// Write 100KB (100 values, each 1K)
+for (int i = 0; i < 100; i++) {
+ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
key_idx++;
}
dbfull()->TEST_WaitForFlushMemTable();
@@ -630,8 +600,8 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
// Generate one more file at level-0, which should trigger level-0
// compaction.
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+for (int i = 0; i < 100; i++) {
+ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
key_idx++;
}
dbfull()->TEST_WaitForCompact();
@@ -651,8 +621,8 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
num++) {
// Write 110KB (11 values, each 10K)
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+for (int i = 0; i < 100; i++) {
+ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
key_idx++;
}
dbfull()->TEST_WaitForFlushMemTable();
@@ -661,8 +631,8 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
// Generate one more file at level-0, which should trigger level-0
// compaction.
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+for (int i = 0; i < 100; i++) {
+ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
key_idx++;
}
dbfull()->TEST_WaitForCompact();
@@ -672,8 +642,8 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
// Stage 3:
// Now we have 3 files at level 0, with size 4, 0.4, 2. Generate one
// more file at level-0, which should trigger level-0 compaction.
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+for (int i = 0; i < 100; i++) {
+ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
key_idx++;
}
dbfull()->TEST_WaitForCompact();
@@ -882,7 +852,8 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionFourPaths) {
options.db_paths.emplace_back(dbname_ + "_3", 500 * 1024);
options.db_paths.emplace_back(dbname_ + "_4", 1024 * 1024 * 1024);
options.compaction_style = kCompactionStyleUniversal;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 110 << 10; // 110KB
+options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 2;
options.num_levels = 1;
options = CurrentOptions(options);
@@ -960,7 +931,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionFourPaths) {
for (int i = 0; i < key_idx; i++) {
auto v = Get(Key(i));
ASSERT_NE(v, "NOT_FOUND");
-ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
+ASSERT_TRUE(v.size() == 1 || v.size() == 990);
}
Reopen(options);
@@ -968,7 +939,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionFourPaths) {
for (int i = 0; i < key_idx; i++) {
auto v = Get(Key(i));
ASSERT_NE(v, "NOT_FOUND");
-ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
+ASSERT_TRUE(v.size() == 1 || v.size() == 990);
}
Destroy(options);
@@ -1076,7 +1047,9 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) {
options.db_paths.emplace_back(dbname_, 500 * 1024);
options.db_paths.emplace_back(dbname_ + "_2", 1024 * 1024 * 1024);
options.compaction_style = kCompactionStyleUniversal;
-options.write_buffer_size = 100 << 10; // 100KB
+options.write_buffer_size = 110 << 10; // 110KB
+options.arena_block_size = 4 * 1024;
+options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 2;
options.num_levels = 1;
options = CurrentOptions(options);
@@ -1151,7 +1124,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) {
for (int i = 0; i < key_idx; i++) {
auto v = Get(Key(i));
ASSERT_NE(v, "NOT_FOUND");
-ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
+ASSERT_TRUE(v.size() == 1 || v.size() == 990);
}
Reopen(options);
@@ -1159,7 +1132,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) {
for (int i = 0; i < key_idx; i++) {
auto v = Get(Key(i));
ASSERT_NE(v, "NOT_FOUND");
-ASSERT_TRUE(v.size() == 1 || v.size() == 10000);
+ASSERT_TRUE(v.size() == 1 || v.size() == 990);
}
Destroy(options);

db/plain_table_db_test.cc

@@ -1012,7 +1012,7 @@ static std::string RandomString(Random* rnd, int len) {
TEST_F(PlainTableDBTest, CompactionTrigger) {
Options options = CurrentOptions();
-options.write_buffer_size = 100 << 10; //100KB
+options.write_buffer_size = 120 << 10; // 120KB
options.num_levels = 3;
options.level0_file_num_compaction_trigger = 3;
Reopen(&options);
@@ -1022,11 +1022,12 @@ TEST_F(PlainTableDBTest, CompactionTrigger) {
for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
num++) {
std::vector<std::string> values;
-// Write 120KB (12 values, each 10K)
-for (int i = 0; i < 12; i++) {
-values.push_back(RandomString(&rnd, 10000));
+// Write 120KB (10 values, each 12K)
+for (int i = 0; i < 10; i++) {
+values.push_back(RandomString(&rnd, 12000));
ASSERT_OK(Put(Key(i), values[i]));
}
+ASSERT_OK(Put(Key(999), ""));
dbfull()->TEST_WaitForFlushMemTable();
ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
}
@@ -1037,6 +1038,7 @@ TEST_F(PlainTableDBTest, CompactionTrigger) {
values.push_back(RandomString(&rnd, 10000));
ASSERT_OK(Put(Key(i), values[i]));
}
+ASSERT_OK(Put(Key(999), ""));
dbfull()->TEST_WaitForCompact();
ASSERT_EQ(NumTableFilesAtLevel(0), 0);

util/arena.cc

@@ -8,6 +8,9 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "util/arena.h"
+#ifdef ROCKSDB_MALLOC_USABLE_SIZE
+#include <malloc.h>
+#endif
#ifndef OS_WIN
#include <sys/mman.h>
#endif
@@ -165,7 +168,12 @@ char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
char* Arena::AllocateNewBlock(size_t block_bytes) {
char* block = new char[block_bytes];
+#ifdef ROCKSDB_MALLOC_USABLE_SIZE
+blocks_memory_ += malloc_usable_size(block);
+#else
blocks_memory_ += block_bytes;
+#endif // ROCKSDB_MALLOC_USABLE_SIZE
blocks_.push_back(block);
return block;
}
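
The hunk above is the heart of the change: when ROCKSDB_MALLOC_USABLE_SIZE is defined, each new arena block is charged at the size the allocator actually reserved rather than the size that was requested. A self-contained sketch of the same guarded-accounting pattern (BlockTracker and its members are illustrative names, not RocksDB APIs):

#include <cstddef>
#include <vector>
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
#include <malloc.h>
#endif

class BlockTracker {
 public:
  char* AllocateBlock(size_t block_bytes) {
    char* block = new char[block_bytes];
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    // new[] is malloc-backed on the platforms this macro targets, so the
    // allocator can report how much it really reserved (>= block_bytes).
    memory_ += malloc_usable_size(block);
#else
    // Without allocator support, fall back to the requested size.
    memory_ += block_bytes;
#endif
    blocks_.push_back(block);
    return block;
  }
  size_t ApproximateMemoryUsage() const { return memory_; }
  ~BlockTracker() {
    for (char* b : blocks_) delete[] b;
  }

 private:
  size_t memory_ = 0;
  std::vector<char*> blocks_;
};

This also appears to be why the tests above churn: once blocks are charged at usable size, memtables reach write_buffer_size slightly sooner, so the tests pin arena_block_size to allocator-friendly 4KB multiples and loosen their size assertions (e.g. k64KB - k5KB * 2).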

util/arena_test.cc

@@ -24,14 +24,14 @@ namespace {
void MemoryAllocatedBytesTest(size_t huge_page_size) {
const int N = 17;
size_t req_sz; // requested size
-size_t bsz = 8192; // block size
+size_t bsz = 32 * 1024; // block size
size_t expected_memory_allocated;
Arena arena(bsz, huge_page_size);
// requested size > quarter of a block:
// allocate requested size separately
-req_sz = 3001;
+req_sz = 12 * 1024;
for (int i = 0; i < N; i++) {
arena.Allocate(req_sz);
}
@@ -60,7 +60,7 @@ void MemoryAllocatedBytesTest(size_t huge_page_size) {
// requested size > quarter of a block:
// allocate requested size separately
-req_sz = 99999999;
+req_sz = 999 * 4096;
for (int i = 0; i < N; i++) {
arena.Allocate(req_sz);
}
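
The "requested size > quarter of a block" comments exercised by this test describe the arena's placement policy: oversized requests get their own dedicated allocation so the tail of the current block is not wasted. A simplified, self-contained sketch of that policy (MiniArena is an illustrative stand-in, not the real rocksdb::Arena in util/arena.cc):

#include <cstddef>
#include <vector>

class MiniArena {
 public:
  explicit MiniArena(size_t block_size) : block_size_(block_size) {}
  ~MiniArena() {
    for (char* b : blocks_) delete[] b;
  }

  char* Allocate(size_t bytes) {
    if (bytes > block_size_ / 4) {
      // Large request: dedicated block of exactly `bytes`, leaving the
      // remainder of the current block available for small requests.
      return NewBlock(bytes);
    }
    if (bytes > remaining_) {
      // Small request that does not fit: open a fresh full-size block and
      // waste the old tail (by construction, less than a quarter block).
      cur_ = NewBlock(block_size_);
      remaining_ = block_size_;
    }
    char* result = cur_ + (block_size_ - remaining_);
    remaining_ -= bytes;
    return result;
  }

 private:
  char* NewBlock(size_t n) {
    char* b = new char[n];
    blocks_.push_back(b);
    return b;
  }

  size_t block_size_;
  size_t remaining_ = 0;
  char* cur_ = nullptr;
  std::vector<char*> blocks_;
};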

util/db_test_util.cc

@@ -182,6 +182,7 @@ bool DBTestBase::ChangeFilterOptions() {
Options DBTestBase::CurrentOptions(
const anon::OptionsOverride& options_override) {
Options options;
+options.write_buffer_size = 4090 * 4096;
return CurrentOptions(options, options_override);
}
@@ -776,10 +777,23 @@ int DBTestBase::GetSstFileCount(std::string path) {
return sst_count;
}
+// this will generate non-overlapping files since it keeps increasing key_idx
+void DBTestBase::GenerateNewFile(int cf, Random* rnd, int* key_idx,
+bool nowait) {
+for (int i = 0; i < 100; i++) {
+ASSERT_OK(Put(cf, Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
+(*key_idx)++;
+}
+if (!nowait) {
+dbfull()->TEST_WaitForFlushMemTable();
+dbfull()->TEST_WaitForCompact();
+}
+}
// this will generate non-overlapping files since it keeps increasing key_idx
void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) {
-for (int i = 0; i < 11; i++) {
-ASSERT_OK(Put(Key(*key_idx), RandomString(rnd, (i == 10) ? 1 : 10000)));
+for (int i = 0; i < 100; i++) {
+ASSERT_OK(Put(Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
(*key_idx)++;
}
if (!nowait) {
@@ -789,10 +803,10 @@ void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) {
}
void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) {
-for (int i = 0; i < 100; i++) {
-ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 1000)));
+for (int i = 0; i < 51; i++) {
+ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 2000)));
}
-ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 1)));
+ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 200)));
if (!nowait) {
dbfull()->TEST_WaitForFlushMemTable();
dbfull()->TEST_WaitForCompact();

util/db_test_util.h

@@ -612,6 +612,8 @@ class DBTestBase : public testing::Test {
// this will generate non-overlapping files since it keeps increasing key_idx
void GenerateNewFile(Random* rnd, int* key_idx, bool nowait = false);
+void GenerateNewFile(int fd, Random* rnd, int* key_idx, bool nowait = false);
void GenerateNewRandomFile(Random* rnd, bool nowait = false);
std::string IterStatus(Iterator* iter);