diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index ecd5b5344f..f835edc68b 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -230,7 +230,7 @@ TEST(CorruptionTest, NewFileErrorDuringWrite) {
 TEST(CorruptionTest, TableFile) {
   Build(100);
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-  dbi->TEST_CompactMemTable();
+  dbi->TEST_FlushMemTable();
   dbi->TEST_CompactRange(0, nullptr, nullptr);
   dbi->TEST_CompactRange(1, nullptr, nullptr);

@@ -241,7 +241,7 @@ TEST(CorruptionTest, TableFileIndexData) {
   Build(10000);  // Enough to build multiple Tables
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-  dbi->TEST_CompactMemTable();
+  dbi->TEST_FlushMemTable();

   Corrupt(kTableFile, -2000, 500);
   Reopen();
@@ -279,7 +279,7 @@ TEST(CorruptionTest, SequenceNumberRecovery) {
 TEST(CorruptionTest, CorruptedDescriptor) {
   ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello"));
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-  dbi->TEST_CompactMemTable();
+  dbi->TEST_FlushMemTable();
   dbi->TEST_CompactRange(0, nullptr, nullptr);

   Corrupt(kDescriptorFile, 0, 1000);
@@ -296,7 +296,7 @@ TEST(CorruptionTest, CorruptedDescriptor) {
 TEST(CorruptionTest, CompactionInputError) {
   Build(10);
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-  dbi->TEST_CompactMemTable();
+  dbi->TEST_FlushMemTable();
   const int last = dbi->MaxMemCompactionLevel();
   ASSERT_EQ(1, Property("rocksdb.num-files-at-level" + NumberToString(last)));
@@ -319,11 +319,11 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) {
   for (int level = 1; level < dbi->NumberLevels(); level++) {
     dbi->Put(WriteOptions(), "", "begin");
     dbi->Put(WriteOptions(), "~", "end");
-    dbi->TEST_CompactMemTable();
+    dbi->TEST_FlushMemTable();
   }

   Build(10);
-  dbi->TEST_CompactMemTable();
+  dbi->TEST_FlushMemTable();
   ASSERT_EQ(1, Property("rocksdb.num-files-at-level0"));

   Corrupt(kTableFile, 100, 1);
@@ -341,7 +341,7 @@ TEST(CorruptionTest, CompactionInputErrorParanoid) {
 TEST(CorruptionTest, UnrelatedKeys) {
   Build(10);
   DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-  dbi->TEST_CompactMemTable();
+  dbi->TEST_FlushMemTable();
   Corrupt(kTableFile, 100, 1);

   std::string tmp1, tmp2;
@@ -349,7 +349,7 @@ TEST(CorruptionTest, UnrelatedKeys) {
   std::string v;
   ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
   ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
-  dbi->TEST_CompactMemTable();
+  dbi->TEST_FlushMemTable();
   ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v));
   ASSERT_EQ(Value(1000, &tmp2).ToString(), v);
 }
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 57d5916b77..cfb523364b 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -924,13 +924,13 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
   return s;
 }

-Status DBImpl::CompactMemTable(bool* madeProgress) {
+Status DBImpl::FlushMemTableToOutputFile(bool* madeProgress) {
   mutex_.AssertHeld();
   assert(imm_.size() != 0);

   if (!imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
-    Log(options_.info_log, "Memcompaction already in progress");
-    Status s = Status::IOError("Memcompaction already in progress");
+    Log(options_.info_log, "FlushMemTableToOutputFile already in progress");
+    Status s = Status::IOError("FlushMemTableToOutputFile already in progress");
     return s;
   }
@@ -998,7 +998,7 @@ void DBImpl::CompactRange(const Slice* begin, const Slice* end,
       }
     }
   }
-  TEST_CompactMemTable();  // TODO(sanjay): Skip if memtable does not overlap
+  TEST_FlushMemTable();  // TODO(sanjay): Skip if memtable does not overlap
   for (int level = 0; level < max_level_with_files; level++) {
     TEST_CompactRange(level, begin, end);
   }
@@ -1345,7 +1345,7 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin, const Slice* end) {
   if (bg_compaction_scheduled_ == LargeNumber) {
     bg_compaction_scheduled_ = newvalue;
   }
-  MaybeScheduleCompaction();
+  MaybeScheduleFlushOrCompaction();
   while (manual_compaction_ == &manual) {
     bg_cv_.Wait();
   }
@@ -1366,12 +1366,12 @@ Status DBImpl::FlushMemTable(const FlushOptions& options) {
   Status s = Write(WriteOptions(), nullptr);
   if (s.ok() && options.wait) {
     // Wait until the compaction completes
-    s = WaitForCompactMemTable();
+    s = WaitForFlushMemTable();
   }
   return s;
 }

-Status DBImpl::WaitForCompactMemTable() {
+Status DBImpl::WaitForFlushMemTable() {
   Status s;
   // Wait until the compaction completes
   MutexLock l(&mutex_);
@@ -1384,16 +1384,21 @@ Status DBImpl::WaitForCompactMemTable() {
   return s;
 }

-Status DBImpl::TEST_CompactMemTable() {
+Status DBImpl::TEST_FlushMemTable() {
   return FlushMemTable(FlushOptions());
 }

-Status DBImpl::TEST_WaitForCompactMemTable() {
-  return WaitForCompactMemTable();
+Status DBImpl::TEST_WaitForFlushMemTable() {
+  return WaitForFlushMemTable();
 }

 Status DBImpl::TEST_WaitForCompact() {
   // Wait until the compaction completes
+
+  // TODO: a bug here. This function does not necessarily wait for a
+  // compaction to finish; it waits for any scheduled compaction OR
+  // flush to finish.
+
   MutexLock l(&mutex_);
   while ((bg_compaction_scheduled_ || bg_flush_scheduled_) &&
          bg_error_.ok()) {
@@ -1402,7 +1407,7 @@ Status DBImpl::TEST_WaitForCompact() {
   return bg_error_;
 }

-void DBImpl::MaybeScheduleCompaction() {
+void DBImpl::MaybeScheduleFlushOrCompaction() {
   mutex_.AssertHeld();
   if (bg_work_gate_closed_) {
     // gate closed for backgrond work
@@ -1442,9 +1447,9 @@ Status DBImpl::BackgroundFlush() {
   while (stat.ok() &&
          imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
     Log(options_.info_log,
-        "BackgroundCallFlush doing CompactMemTable, flush slots available %d",
+        "BackgroundCallFlush doing FlushMemTableToOutputFile, flush slots available %d",
         options_.max_background_flushes - bg_flush_scheduled_);
-    stat = CompactMemTable();
+    stat = FlushMemTableToOutputFile();
   }
   return stat;
 }
@@ -1521,7 +1526,7 @@ void DBImpl::BackgroundCallCompaction() {
   // So reschedule another compaction if we made progress in the
   // last compaction.
   if (madeProgress) {
-    MaybeScheduleCompaction();
+    MaybeScheduleFlushOrCompaction();
   }

   bg_cv_.SignalAll();
@@ -1535,9 +1540,10 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
   // TODO: remove memtable flush from formal compaction
   while (imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
     Log(options_.info_log,
-        "BackgroundCompaction doing CompactMemTable, compaction slots available %d",
+        "BackgroundCompaction doing FlushMemTableToOutputFile, compaction slots "
+        "available %d",
         options_.max_background_compactions - bg_compaction_scheduled_);
-    Status stat = CompactMemTable(madeProgress);
+    Status stat = FlushMemTableToOutputFile(madeProgress);
     if (!stat.ok()) {
       return stat;
     }
@@ -1590,7 +1596,7 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
     versions_->ReleaseCompactionFiles(c.get(), status);
     *madeProgress = true;
   } else {
-    MaybeScheduleCompaction();  // do more compaction work in parallel.
+    MaybeScheduleFlushOrCompaction();  // do more compaction work in parallel.
     CompactionState* compact = new CompactionState(c.get());
     status = DoCompactionWork(compact);
     CleanupCompaction(compact);
@@ -1914,7 +1920,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
       const uint64_t imm_start = env_->NowMicros();
       mutex_.Lock();
       if (imm_.IsFlushPending(options_.min_write_buffer_number_to_merge)) {
-        CompactMemTable();
+        FlushMemTableToOutputFile();
         bg_cv_.SignalAll();  // Wakeup MakeRoomForWrite() if necessary
       }
       mutex_.Unlock();
@@ -2356,7 +2362,7 @@ Status DBImpl::GetImpl(const ReadOptions& options,

   if (!options_.disable_seek_compaction && have_stat_update &&
       current->UpdateStats(stats)) {
-    MaybeScheduleCompaction();
+    MaybeScheduleFlushOrCompaction();
   }
   mem->Unref();
   imm.UnrefAll();
@@ -2434,7 +2440,7 @@ std::vector<Status> DBImpl::MultiGet(const ReadOptions& options,
   mutex_.Lock();
   if (!options_.disable_seek_compaction && have_stat_update &&
       current->UpdateStats(stats)) {
-    MaybeScheduleCompaction();
+    MaybeScheduleFlushOrCompaction();
   }
   mem->Unref();
   imm.UnrefAll();
@@ -2853,7 +2859,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       mem_->Ref();
       mem_->SetLogNumber(logfile_number_);
       force = false;  // Do not force another compaction if have room
-      MaybeScheduleCompaction();
+      MaybeScheduleFlushOrCompaction();
     }
   }
   return s;
@@ -3229,7 +3235,7 @@ Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
   if (s.ok()) {
     impl->mem_->SetLogNumber(impl->logfile_number_);
     impl->DeleteObsoleteFiles();
-    impl->MaybeScheduleCompaction();
+    impl->MaybeScheduleFlushOrCompaction();
     impl->MaybeScheduleLogDBDeployStats();
   }
 }
diff --git a/db/db_impl.h b/db/db_impl.h
index efc59d2533..ef2ef59449 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -82,11 +82,11 @@ class DBImpl : public DB {
   // Compact any files in the named level that overlap [*begin, *end]
   void TEST_CompactRange(int level, const Slice* begin, const Slice* end);

-  // Force current memtable contents to be compacted.
-  Status TEST_CompactMemTable();
+  // Force current memtable contents to be flushed.
+  Status TEST_FlushMemTable();

   // Wait for memtable compaction
-  Status TEST_WaitForCompactMemTable();
+  Status TEST_WaitForFlushMemTable();

   // Wait for any compaction
   Status TEST_WaitForCompact();
@@ -148,9 +148,9 @@ class DBImpl : public DB {
   // Delete any unneeded files and stale in-memory entries.
   void DeleteObsoleteFiles();

-  // Compact the in-memory write buffer to disk.  Switches to a new
+  // Flush the in-memory write buffer to storage.  Switches to a new
   // log-file/memtable and writes a new descriptor iff successful.
-  Status CompactMemTable(bool* madeProgress = nullptr);
+  Status FlushMemTableToOutputFile(bool* madeProgress = nullptr);

   Status RecoverLogFile(uint64_t log_number,
                         VersionEdit* edit,
@@ -173,14 +173,14 @@ class DBImpl : public DB {
   // Force current memtable contents to be flushed.
   Status FlushMemTable(const FlushOptions& options);

-  // Wait for memtable compaction
-  Status WaitForCompactMemTable();
+  // Wait for the memtable flush to finish
+  Status WaitForFlushMemTable();

   void MaybeScheduleLogDBDeployStats();
   static void BGLogDBDeployStats(void* db);
   void LogDBDeployStats();
-  void MaybeScheduleCompaction();
+  void MaybeScheduleFlushOrCompaction();
   static void BGWorkCompaction(void* db);
   static void BGWorkFlush(void* db);
   void BackgroundCallCompaction();
diff --git a/db/db_test.cc b/db/db_test.cc
index d0dc261469..b59de240b2 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -570,7 +570,7 @@ class DBTest {
     for (int i = 0; i < n; i++) {
       Put(small, "begin");
       Put(large, "end");
-      dbfull()->TEST_CompactMemTable();
+      dbfull()->TEST_FlushMemTable();
     }
   }
@@ -739,7 +739,7 @@ TEST(DBTest, GetFromImmutableLayer) {
 TEST(DBTest, GetFromVersions) {
   do {
     ASSERT_OK(Put("foo", "v1"));
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();
     ASSERT_EQ("v1", Get("foo"));
   } while (ChangeOptions());
 }
@@ -754,7 +754,7 @@ TEST(DBTest, GetSnapshot) {
       ASSERT_OK(Put(key, "v2"));
       ASSERT_EQ("v2", Get(key));
       ASSERT_EQ("v1", Get(key, s1));
-      dbfull()->TEST_CompactMemTable();
+      dbfull()->TEST_FlushMemTable();
       ASSERT_EQ("v2", Get(key));
       ASSERT_EQ("v1", Get(key, s1));
       db_->ReleaseSnapshot(s1);
@@ -770,9 +770,9 @@ TEST(DBTest, GetLevel0Ordering) {
     // one has a smaller "smallest" key.
     ASSERT_OK(Put("bar", "b"));
     ASSERT_OK(Put("foo", "v1"));
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();
     ASSERT_OK(Put("foo", "v2"));
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();
     ASSERT_EQ("v2", Get("foo"));
   } while (ChangeOptions());
 }
@@ -784,7 +784,7 @@ TEST(DBTest, GetOrderedByLevels) {
     ASSERT_EQ("v1", Get("foo"));
     ASSERT_OK(Put("foo", "v2"));
     ASSERT_EQ("v2", Get("foo"));
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();
     ASSERT_EQ("v2", Get("foo"));
   } while (ChangeOptions());
 }
@@ -822,7 +822,7 @@ TEST(DBTest, GetEncountersEmptyLevel) {
       compaction_count++;
       Put("a", "begin");
       Put("z", "end");
-      dbfull()->TEST_CompactMemTable();
+      dbfull()->TEST_FlushMemTable();
     }

     // Step 2: clear level 1 if necessary.
@@ -1668,7 +1668,7 @@ TEST(DBTest, CompactionTrigger) {
       values.push_back(RandomString(&rnd, 10000));
       ASSERT_OK(Put(Key(i), values[i]));
     }
-    dbfull()->TEST_WaitForCompactMemTable();
+    dbfull()->TEST_WaitForFlushMemTable();
     ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
   }
@@ -1706,7 +1706,7 @@ TEST(DBTest, UniversalCompactionTrigger) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
-    dbfull()->TEST_WaitForCompactMemTable();
+    dbfull()->TEST_WaitForFlushMemTable();
     ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
   }
@@ -1741,7 +1741,7 @@ TEST(DBTest, UniversalCompactionTrigger) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
-    dbfull()->TEST_WaitForCompactMemTable();
+    dbfull()->TEST_WaitForFlushMemTable();
     ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
   }
@@ -1770,7 +1770,7 @@ TEST(DBTest, UniversalCompactionTrigger) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
-    dbfull()->TEST_WaitForCompactMemTable();
+    dbfull()->TEST_WaitForFlushMemTable();
     ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
   }
@@ -1840,7 +1840,7 @@ TEST(DBTest, UniversalCompactionSizeAmplification) {
     ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
     key_idx++;
   }
-  dbfull()->TEST_WaitForCompactMemTable();
+  dbfull()->TEST_WaitForFlushMemTable();
   ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
   }
   ASSERT_EQ(NumTableFilesAtLevel(0), 2);
@@ -1873,7 +1873,7 @@ TEST(DBTest, UniversalCompactionOptions) {
       ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
       key_idx++;
     }
-    dbfull()->TEST_WaitForCompactMemTable();
+    dbfull()->TEST_WaitForFlushMemTable();

     if (num < options.level0_file_num_compaction_trigger - 1) {
       ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
@@ -1994,7 +1994,7 @@ void MinLevelHelper(DBTest* self, Options& options) {
       values.push_back(RandomString(&rnd, 10000));
       ASSERT_OK(self->Put(Key(i), values[i]));
     }
-    self->dbfull()->TEST_WaitForCompactMemTable();
+    self->dbfull()->TEST_WaitForFlushMemTable();
     ASSERT_EQ(self->NumTableFilesAtLevel(0), num + 1);
   }
@@ -2219,7 +2219,7 @@ TEST(DBTest, CompactionFilter) {
       snprintf(key, sizeof(key), "B%010d", i);
       Put(key, value);
     }
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();

     // Push all files to the highest level L2. Verify that
     // the compaction is each level invokes the filter for
@@ -2267,7 +2267,7 @@ TEST(DBTest, CompactionFilter) {
      snprintf(key, sizeof(key), "B%010d", i);
      Put(key, value);
    }
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();

     // push all files to the highest level L2. This
     // means that all keys should pass at least once
@@ -2294,7 +2294,7 @@ TEST(DBTest, CompactionFilter) {
      snprintf(key, sizeof(key), "B%010d", i);
      Put(key, value);
    }
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();
     ASSERT_NE(NumTableFilesAtLevel(0), 0);
     ASSERT_EQ(NumTableFilesAtLevel(1), 0);
     ASSERT_EQ(NumTableFilesAtLevel(2), 0);
@@ -2365,7 +2365,7 @@ TEST(DBTest, CompactionFilterWithValueChange) {
     }

     // push all files to lower levels
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();
     dbfull()->TEST_CompactRange(0, nullptr, nullptr);
     dbfull()->TEST_CompactRange(1, nullptr, nullptr);
@@ -2378,7 +2378,7 @@ TEST(DBTest, CompactionFilterWithValueChange) {
     // push all files to lower levels. This should
     // invoke the compaction filter for all 100000 keys.
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();
     dbfull()->TEST_CompactRange(0, nullptr, nullptr);
     dbfull()->TEST_CompactRange(1, nullptr, nullptr);
@@ -2416,14 +2416,14 @@ TEST(DBTest, SparseMerge) {
       Put(key, value);
     }
     Put("C", "vc");
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();
     dbfull()->TEST_CompactRange(0, nullptr, nullptr);

     // Make sparse update
     Put("A", "va2");
     Put("B100", "bvalue2");
     Put("C", "vc2");
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();

     // Compactions should not cause us to create a situation where
     // a file overlaps too much data at the next level.
@@ -2599,7 +2599,7 @@ TEST(DBTest, HiddenValuesAreRemoved) {
     Put("foo", "tiny");
     Put("pastfoo2", "v2");  // Advance sequence number one more

-    ASSERT_OK(dbfull()->TEST_CompactMemTable());
+    ASSERT_OK(dbfull()->TEST_FlushMemTable());
     ASSERT_GT(NumTableFilesAtLevel(0), 0);

     ASSERT_EQ(big, Get("foo", snapshot));
@@ -2634,7 +2634,7 @@ TEST(DBTest, CompactBetweenSnapshots) {

     // All entries (including duplicates) exist
     // before any compaction is triggered.
-    ASSERT_OK(dbfull()->TEST_CompactMemTable());
+    ASSERT_OK(dbfull()->TEST_FlushMemTable());
     ASSERT_EQ("sixth", Get("foo"));
     ASSERT_EQ("fourth", Get("foo", snapshot2));
     ASSERT_EQ("first", Get("foo", snapshot1));
@@ -2673,21 +2673,21 @@ TEST(DBTest, DeletionMarkers1) {
   Put("foo", "v1");
-  ASSERT_OK(dbfull()->TEST_CompactMemTable());
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());
   const int last = dbfull()->MaxMemCompactionLevel();
   ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level

   // Place a table at level last-1 to prevent merging with preceding mutation
   Put("a", "begin");
   Put("z", "end");
-  dbfull()->TEST_CompactMemTable();
+  dbfull()->TEST_FlushMemTable();
   ASSERT_EQ(NumTableFilesAtLevel(last), 1);
   ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);

   Delete("foo");
   Put("foo", "v2");
   ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
-  ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());  // Moves to level last-2
   if (CurrentOptions().purge_redundant_kvs_while_flush) {
     ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
   } else {
@@ -2706,20 +2706,20 @@ TEST(DBTest, DeletionMarkers2) {
   Put("foo", "v1");
-  ASSERT_OK(dbfull()->TEST_CompactMemTable());
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());
   const int last = dbfull()->MaxMemCompactionLevel();
   ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level

   // Place a table at level last-1 to prevent merging with preceding mutation
   Put("a", "begin");
   Put("z", "end");
-  dbfull()->TEST_CompactMemTable();
+  dbfull()->TEST_FlushMemTable();
   ASSERT_EQ(NumTableFilesAtLevel(last), 1);
   ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);

   Delete("foo");
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
-  ASSERT_OK(dbfull()->TEST_CompactMemTable());  // Moves to level last-2
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
   dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
   // DEL kept: "last" file overlaps
@@ -2738,10 +2738,10 @@ TEST(DBTest, OverlapInLevel0) {
     // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0.
ASSERT_OK(Put("100", "v100")); ASSERT_OK(Put("999", "v999")); - dbfull()->TEST_CompactMemTable(); + dbfull()->TEST_FlushMemTable(); ASSERT_OK(Delete("100")); ASSERT_OK(Delete("999")); - dbfull()->TEST_CompactMemTable(); + dbfull()->TEST_FlushMemTable(); ASSERT_EQ("0,1,1", FilesPerLevel()); // Make files spanning the following ranges in level-0: @@ -2750,11 +2750,11 @@ TEST(DBTest, OverlapInLevel0) { // Note that files are sorted by smallest key. ASSERT_OK(Put("300", "v300")); ASSERT_OK(Put("500", "v500")); - dbfull()->TEST_CompactMemTable(); + dbfull()->TEST_FlushMemTable(); ASSERT_OK(Put("200", "v200")); ASSERT_OK(Put("600", "v600")); ASSERT_OK(Put("900", "v900")); - dbfull()->TEST_CompactMemTable(); + dbfull()->TEST_FlushMemTable(); ASSERT_EQ("2,1,1", FilesPerLevel()); // Compact away the placeholder files we created initially @@ -2766,7 +2766,7 @@ TEST(DBTest, OverlapInLevel0) { // not detect the overlap with level-0 files and would incorrectly place // the deletion in a deeper level. ASSERT_OK(Delete("600")); - dbfull()->TEST_CompactMemTable(); + dbfull()->TEST_FlushMemTable(); ASSERT_EQ("3", FilesPerLevel()); ASSERT_EQ("NOT_FOUND", Get("600")); } while (ChangeOptions(kSkipUniversalCompaction)); @@ -3104,7 +3104,7 @@ TEST(DBTest, ManifestWriteError) { ASSERT_EQ("bar", Get("foo")); // Memtable compaction (will succeed) - dbfull()->TEST_CompactMemTable(); + dbfull()->TEST_FlushMemTable(); ASSERT_EQ("bar", Get("foo")); const int last = dbfull()->MaxMemCompactionLevel(); ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level @@ -3152,7 +3152,7 @@ TEST(DBTest, BloomFilter) { for (int i = 0; i < N; i += 100) { ASSERT_OK(Put(Key(i), Key(i))); } - dbfull()->TEST_CompactMemTable(); + dbfull()->TEST_FlushMemTable(); // Prevent auto compactions triggered by seeks env_->delay_sstable_sync_.Release_Store(env_); @@ -3322,13 +3322,13 @@ TEST(DBTest, CompactOnFlush) { Reopen(&options); Put("foo", "v1"); - ASSERT_OK(dbfull()->TEST_CompactMemTable()); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); ASSERT_EQ(AllEntriesFor("foo"), "[ v1 ]"); // Write two new keys Put("a", "begin"); Put("z", "end"); - dbfull()->TEST_CompactMemTable(); + dbfull()->TEST_FlushMemTable(); // Case1: Delete followed by a put Delete("foo"); @@ -3337,7 +3337,7 @@ TEST(DBTest, CompactOnFlush) { // After the current memtable is flushed, the DEL should // have been removed - ASSERT_OK(dbfull()->TEST_CompactMemTable()); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]"); dbfull()->CompactRange(nullptr, nullptr); @@ -3347,7 +3347,7 @@ TEST(DBTest, CompactOnFlush) { Delete("foo"); Delete("foo"); ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, DEL, v2 ]"); - ASSERT_OK(dbfull()->TEST_CompactMemTable()); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v2 ]"); dbfull()->CompactRange(nullptr, nullptr); ASSERT_EQ(AllEntriesFor("foo"), "[ ]"); @@ -3356,7 +3356,7 @@ TEST(DBTest, CompactOnFlush) { Put("foo", "v3"); Delete("foo"); ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v3 ]"); - ASSERT_OK(dbfull()->TEST_CompactMemTable()); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); ASSERT_EQ(AllEntriesFor("foo"), "[ DEL ]"); dbfull()->CompactRange(nullptr, nullptr); ASSERT_EQ(AllEntriesFor("foo"), "[ ]"); @@ -3365,7 +3365,7 @@ TEST(DBTest, CompactOnFlush) { Put("foo", "v4"); Put("foo", "v5"); ASSERT_EQ(AllEntriesFor("foo"), "[ v5, v4 ]"); - ASSERT_OK(dbfull()->TEST_CompactMemTable()); + ASSERT_OK(dbfull()->TEST_FlushMemTable()); ASSERT_EQ(AllEntriesFor("foo"), "[ v5 ]"); 
     dbfull()->CompactRange(nullptr, nullptr);
     ASSERT_EQ(AllEntriesFor("foo"), "[ v5 ]");
@@ -3380,7 +3380,7 @@ TEST(DBTest, CompactOnFlush) {
     Put("foo", "v6");
     const Snapshot* snapshot = db_->GetSnapshot();
     Put("foo", "v7");
-    ASSERT_OK(dbfull()->TEST_CompactMemTable());
+    ASSERT_OK(dbfull()->TEST_FlushMemTable());
     ASSERT_EQ(AllEntriesFor("foo"), "[ v7, v6 ]");
     db_->ReleaseSnapshot(snapshot);
@@ -3394,7 +3394,7 @@ TEST(DBTest, CompactOnFlush) {
     const Snapshot* snapshot1 = db_->GetSnapshot();
     Put("foo", "v8");
     Put("foo", "v9");
-    ASSERT_OK(dbfull()->TEST_CompactMemTable());
+    ASSERT_OK(dbfull()->TEST_FlushMemTable());
     ASSERT_EQ(AllEntriesFor("foo"), "[ v9 ]");
     db_->ReleaseSnapshot(snapshot1);
   } while (ChangeCompactOptions());
@@ -3662,7 +3662,7 @@ TEST(DBTest, ReadCompaction) {
     }

     // clear level 0 and 1 if necessary.
-    dbfull()->TEST_CompactMemTable();
+    dbfull()->TEST_FlushMemTable();
     dbfull()->TEST_CompactRange(0, nullptr, nullptr);
     dbfull()->TEST_CompactRange(1, nullptr, nullptr);
     ASSERT_EQ(NumTableFilesAtLevel(0), 0);
@@ -4186,7 +4186,7 @@ void PrefixScanInit(DBTest *dbtest) {
   snprintf(buf, sizeof(buf), "%02d______:end", 10);
   keystr = std::string(buf);
   ASSERT_OK(dbtest->Put(keystr, keystr));
-  dbtest->dbfull()->TEST_CompactMemTable();
+  dbtest->dbfull()->TEST_FlushMemTable();
   dbtest->dbfull()->CompactRange(nullptr, nullptr);  // move to level 1

   // GROUP 1
@@ -4197,7 +4197,7 @@ void PrefixScanInit(DBTest *dbtest) {
     snprintf(buf, sizeof(buf), "%02d______:end", i+1);
     keystr = std::string(buf);
     ASSERT_OK(dbtest->Put(keystr, keystr));
-    dbtest->dbfull()->TEST_CompactMemTable();
+    dbtest->dbfull()->TEST_FlushMemTable();
   }

   // GROUP 2
@@ -4210,7 +4210,7 @@ void PrefixScanInit(DBTest *dbtest) {
              small_range_sstfiles+i+1);
     keystr = std::string(buf);
     ASSERT_OK(dbtest->Put(keystr, keystr));
-    dbtest->dbfull()->TEST_CompactMemTable();
+    dbtest->dbfull()->TEST_FlushMemTable();
   }
 }
diff --git a/db/deletefile_test.cc b/db/deletefile_test.cc
index 3c4c459b46..78a55febb8 100644
--- a/db/deletefile_test.cc
+++ b/db/deletefile_test.cc
@@ -92,12 +92,12 @@ class DeleteFileTest {
   void CreateTwoLevels() {
     AddKeys(50000, 10000);
     DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
-    ASSERT_OK(dbi->TEST_CompactMemTable());
-    ASSERT_OK(dbi->TEST_WaitForCompactMemTable());
+    ASSERT_OK(dbi->TEST_FlushMemTable());
+    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());

     AddKeys(50000, 10000);
-    ASSERT_OK(dbi->TEST_CompactMemTable());
-    ASSERT_OK(dbi->TEST_WaitForCompactMemTable());
+    ASSERT_OK(dbi->TEST_FlushMemTable());
+    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
   }
 };
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index 7cdc2bbeba..19fc8ff2a8 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -215,7 +215,7 @@ TEST(MemEnvTest, DBTest) {
   delete iterator;

   DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
-  ASSERT_OK(dbi->TEST_CompactMemTable());
+  ASSERT_OK(dbi->TEST_FlushMemTable());

   for (size_t i = 0; i < 3; ++i) {
     std::string res;
diff --git a/tools/reduce_levels_test.cc b/tools/reduce_levels_test.cc
index 154e7929c3..0b504c3ca5 100644
--- a/tools/reduce_levels_test.cc
+++ b/tools/reduce_levels_test.cc
@@ -45,7 +45,7 @@ public:
       return Status::InvalidArgument("DB not opened.");
     }
     DBImpl* db_impl = reinterpret_cast<DBImpl*>(db_);
-    return db_impl->TEST_CompactMemTable();
+    return db_impl->TEST_FlushMemTable();
   }

   void CloseDB() {