// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <cstdlib>
#include <map>
#include <string>
#include <vector>

#include "db/db_impl/db_impl.h"
#include "db/db_test_util.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "file/filename.h"
#include "port/stack_trace.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/transaction_log.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

class DeleteFileTest : public DBTestBase {
 public:
  const int numlevels_;
  const std::string wal_dir_;

  DeleteFileTest()
      : DBTestBase("deletefile_test", /*env_do_fsync=*/true),
        numlevels_(7),
        wal_dir_(dbname_ + "/wal_files") {}
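
  // Configures options so obsolete files are purged eagerly and nothing
  // happens automatically: the write buffer, file size, and level size are
  // made large enough that the tests control flushes and compactions
  // explicitly, and WALs are archived so the log-file deletion paths can be
  // exercised.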
  void SetOptions(Options* options) {
    ASSERT_NE(options, nullptr);
    options->delete_obsolete_files_period_micros = 0;  // always do full purge
    options->enable_thread_tracking = true;
    options->write_buffer_size = 1024 * 1024 * 1000;
    options->target_file_size_base = 1024 * 1024 * 1000;
    options->max_bytes_for_level_base = 1024 * 1024 * 1000;
    options->WAL_ttl_seconds = 300;     // Used to test log files
    options->WAL_size_limit_MB = 1024;  // Used to test log files
    options->wal_dir = wal_dir_;
  }
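
  // Writes numkeys sequentially numbered keys starting at startkey; each
  // key's value is the key itself.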
  void AddKeys(int numkeys, int startkey = 0) {
    WriteOptions options;
    options.sync = false;
    ReadOptions roptions;
    for (int i = startkey; i < (numkeys + startkey); i++) {
      std::string temp = std::to_string(i);
      Slice key(temp);
      Slice value(temp);
      ASSERT_OK(db_->Put(options, key, value));
    }
  }
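
  // Estimates the number of keys covered by the given live files from their
  // smallest/largest keys, optionally accumulating a per-level count, and
  // logs each file's key range to stderr.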
  int numKeysInLevels(std::vector<LiveFileMetaData>& metadata,
                      std::vector<int>* keysperlevel = nullptr) {
    if (keysperlevel != nullptr) {
      keysperlevel->resize(numlevels_);
    }

    int numKeys = 0;
    for (size_t i = 0; i < metadata.size(); i++) {
      int startkey = atoi(metadata[i].smallestkey.c_str());
      int endkey = atoi(metadata[i].largestkey.c_str());
      int numkeysinfile = (endkey - startkey + 1);
      numKeys += numkeysinfile;
      if (keysperlevel != nullptr) {
        (*keysperlevel)[(int)metadata[i].level] += numkeysinfile;
      }
      fprintf(stderr, "level %d name %s smallest %s largest %s\n",
              metadata[i].level, metadata[i].name.c_str(),
              metadata[i].smallestkey.c_str(), metadata[i].largestkey.c_str());
    }
    return numKeys;
  }
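
  // Writes the same 50K keys twice, flushing and compacting in between, so
  // that the DB ends up with one SST file on level 1 and one on level 2.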
  void CreateTwoLevels() {
    AddKeys(50000, 10000);
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    for (int i = 0; i < 2; ++i) {
      ASSERT_OK(dbfull()->TEST_CompactRange(i, nullptr, nullptr));
    }

    AddKeys(50000, 10000);
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
  }
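
  // Counts the WAL, SST, and MANIFEST files in dir and checks each count
  // against the corresponding required_* argument; a negative required
  // count means "do not check".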
  void CheckFileTypeCounts(const std::string& dir, int required_log,
                           int required_sst, int required_manifest) {
    std::vector<std::string> filenames;
    ASSERT_OK(env_->GetChildren(dir, &filenames));

    int log_cnt = 0, sst_cnt = 0, manifest_cnt = 0;
    for (const auto& file : filenames) {
      uint64_t number;
      FileType type;
      if (ParseFileName(file, &number, &type)) {
        log_cnt += (type == kWalFile);
        sst_cnt += (type == kTableFile);
        manifest_cnt += (type == kDescriptorFile);
      }
    }
    if (required_log >= 0) {
      ASSERT_EQ(required_log, log_cnt);
    }
    if (required_sst >= 0) {
      ASSERT_EQ(required_sst, sst_cnt);
    }
    if (required_manifest >= 0) {
      ASSERT_EQ(required_manifest, manifest_cnt);
    }
  }
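
  // Sleeps for ~2 seconds on the thread it is scheduled on; used by the
  // background-purge tests to keep the HIGH-priority pool busy so that jobs
  // queued behind it stay pending.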
  static void DoSleep(void* arg) {
    auto test = static_cast<DeleteFileTest*>(arg);
    test->env_->SleepForMicroseconds(2 * 1000 * 1000);
  }

  // An empty job used to verify that all previously queued jobs have been
  // processed.
  static void GuardFinish(void* /*arg*/) {
    TEST_SYNC_POINT("DeleteFileTest::GuardFinish");
  }
};

TEST_F(DeleteFileTest, AddKeysAndQueryLevels) {
  Options options = CurrentOptions();
  SetOptions(&options);
  Destroy(options);
  options.create_if_missing = true;
  Reopen(options);

  CreateTwoLevels();
  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);

  std::string level1file;
  int level1keycount = 0;
  std::string level2file;
  int level2keycount = 0;
  int level1index = 0;
  int level2index = 1;

  ASSERT_EQ((int)metadata.size(), 2);
  if (metadata[0].level == 2) {
    level1index = 1;
    level2index = 0;
  }

  level1file = metadata[level1index].name;
  int startkey = atoi(metadata[level1index].smallestkey.c_str());
  int endkey = atoi(metadata[level1index].largestkey.c_str());
  level1keycount = (endkey - startkey + 1);
  level2file = metadata[level2index].name;
  startkey = atoi(metadata[level2index].smallestkey.c_str());
  endkey = atoi(metadata[level2index].largestkey.c_str());
  level2keycount = (endkey - startkey + 1);

  // Controlled setup. Levels 1 and 2 should both cover 50K keys.
  // This is a little fragile as it depends on the current
  // compaction heuristics.
  ASSERT_EQ(level1keycount, 50000);
  ASSERT_EQ(level2keycount, 50000);

  Status status = db_->DeleteFile("0.sst");
  ASSERT_TRUE(status.IsInvalidArgument());

  // Intermediate level files cannot be deleted.
  status = db_->DeleteFile(level1file);
  ASSERT_TRUE(status.IsInvalidArgument());

  // Lowest level file deletion should succeed.
  status = db_->DeleteFile(level2file);
  ASSERT_OK(status);
}

TEST_F(DeleteFileTest, PurgeObsoleteFilesTest) {
  Options options = CurrentOptions();
  SetOptions(&options);
  Destroy(options);
  options.create_if_missing = true;
  Reopen(options);

  CreateTwoLevels();
  // there should be only one (empty) log file because CreateTwoLevels()
  // flushes the memtables to disk
  CheckFileTypeCounts(wal_dir_, 1, 0, 0);
  // 2 ssts, 1 manifest
  CheckFileTypeCounts(dbname_, 0, 2, 1);
  std::string first("0"), last("999999");
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 2;
  Slice first_slice(first), last_slice(last);
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  // 1 sst after compaction
  CheckFileTypeCounts(dbname_, 0, 1, 1);

  // this time, we keep an iterator alive
  Reopen(options);
  Iterator* itr = nullptr;
  CreateTwoLevels();
  itr = db_->NewIterator(ReadOptions());
  ASSERT_OK(itr->status());
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  ASSERT_OK(itr->status());
  // 3 sst after compaction with live iterator
  CheckFileTypeCounts(dbname_, 0, 3, 1);
  delete itr;
  // 1 sst after iterator deletion
  CheckFileTypeCounts(dbname_, 0, 1, 1);
}

TEST_F(DeleteFileTest, WaitForCompactWithWaitForPurgeOptionTest) {
  Options options = CurrentOptions();
  SetOptions(&options);
  Destroy(options);
  options.create_if_missing = true;
  Reopen(options);

  std::string first("0"), last("999999");
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 2;
  Slice first_slice(first), last_slice(last);

  CreateTwoLevels();
  Iterator* itr = nullptr;
  ReadOptions read_options;
  read_options.background_purge_on_iterator_cleanup = true;
  itr = db_->NewIterator(read_options);
  ASSERT_OK(itr->status());
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  SyncPoint::GetInstance()->LoadDependency(
      {{"DBImpl::BGWorkPurge:start", "DeleteFileTest::WaitForPurgeTest"},
       {"DBImpl::WaitForCompact:InsideLoop",
        "DBImpl::BackgroundCallPurge:beforeMutexLock"}});
  SyncPoint::GetInstance()->EnableProcessing();

  delete itr;

  TEST_SYNC_POINT("DeleteFileTest::WaitForPurgeTest");
  // At this point the purge has started, but it cannot finish because of the
  // sync point dependency, so no files have been purged yet
  CheckFileTypeCounts(dbname_, 0, 3, 1);

  // The sync point in WaitForCompact should unblock the purge
  WaitForCompactOptions wait_for_compact_options;
  wait_for_compact_options.wait_for_purge = true;
  Status s = dbfull()->WaitForCompact(wait_for_compact_options);
  ASSERT_OK(s);

  // Now files should be purged
  CheckFileTypeCounts(dbname_, 0, 1, 1);
}

TEST_F(DeleteFileTest, BackgroundPurgeIteratorTest) {
  Options options = CurrentOptions();
  SetOptions(&options);
  Destroy(options);
  options.create_if_missing = true;
  Reopen(options);

  std::string first("0"), last("999999");
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 2;
  Slice first_slice(first), last_slice(last);

  // We keep an iterator alive
  Iterator* itr = nullptr;
  CreateTwoLevels();
  ReadOptions read_options;
  read_options.background_purge_on_iterator_cleanup = true;
  itr = db_->NewIterator(read_options);
  ASSERT_OK(itr->status());
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  // 3 sst after compaction with live iterator
  CheckFileTypeCounts(dbname_, 0, 3, 1);
  test::SleepingBackgroundTask sleeping_task_before;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                 &sleeping_task_before, Env::Priority::HIGH);
  delete itr;
  test::SleepingBackgroundTask sleeping_task_after;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                 &sleeping_task_after, Env::Priority::HIGH);

  // Make sure no purges are executed in the foreground
  CheckFileTypeCounts(dbname_, 0, 3, 1);
  sleeping_task_before.WakeUp();
  sleeping_task_before.WaitUntilDone();

  // Make sure all background purges are executed
  sleeping_task_after.WakeUp();
  sleeping_task_after.WaitUntilDone();
  // 1 sst after iterator deletion
  CheckFileTypeCounts(dbname_, 0, 1, 1);
}

TEST_F(DeleteFileTest, PurgeDuringOpen) {
  Options options = CurrentOptions();
  CheckFileTypeCounts(dbname_, -1, 0, -1);
  Close();
  std::unique_ptr<WritableFile> file;
  ASSERT_OK(options.env->NewWritableFile(dbname_ + "/000002.sst", &file,
                                         EnvOptions()));
  ASSERT_OK(file->Close());
  CheckFileTypeCounts(dbname_, -1, 1, -1);
  options.avoid_unnecessary_blocking_io = false;
  options.create_if_missing = false;
  Reopen(options);
  CheckFileTypeCounts(dbname_, -1, 0, -1);
  Close();

  // test background purge
  options.avoid_unnecessary_blocking_io = true;
  options.create_if_missing = false;
  ASSERT_OK(options.env->NewWritableFile(dbname_ + "/000002.sst", &file,
                                         EnvOptions()));
  ASSERT_OK(file->Close());
  CheckFileTypeCounts(dbname_, -1, 1, -1);
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();
  SyncPoint::GetInstance()->LoadDependency(
      {{"DeleteFileTest::PurgeDuringOpen:1", "DBImpl::BGWorkPurge:start"}});
  SyncPoint::GetInstance()->EnableProcessing();
  Reopen(options);
  // the obsolete file is not deleted until the background purge job has run
  CheckFileTypeCounts(dbname_, -1, 1, -1);
  TEST_SYNC_POINT("DeleteFileTest::PurgeDuringOpen:1");
  ASSERT_OK(dbfull()->TEST_WaitForPurge());
  CheckFileTypeCounts(dbname_, -1, 0, -1);
}

TEST_F(DeleteFileTest, BackgroundPurgeCFDropTest) {
  Options options = CurrentOptions();
  SetOptions(&options);
  Destroy(options);
  options.create_if_missing = true;
  Reopen(options);

  auto do_test = [&](bool bg_purge) {
    ColumnFamilyOptions co;
    co.max_write_buffer_size_to_maintain =
        static_cast<int64_t>(co.write_buffer_size);
    WriteOptions wo;
    FlushOptions fo;
    ColumnFamilyHandle* cfh = nullptr;

    ASSERT_OK(db_->CreateColumnFamily(co, "dropme", &cfh));

    ASSERT_OK(db_->Put(wo, cfh, "pika", "chu"));
    ASSERT_OK(db_->Flush(fo, cfh));
    // Expect 1 sst file.
    CheckFileTypeCounts(dbname_, 0, 1, 1);

    ASSERT_OK(db_->DropColumnFamily(cfh));
    // Still 1 file, it won't be deleted while ColumnFamilyHandle is alive.
    CheckFileTypeCounts(dbname_, 0, 1, 1);

    delete cfh;
    test::SleepingBackgroundTask sleeping_task_after;
    env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                   &sleeping_task_after, Env::Priority::HIGH);
    // If background purge is enabled, the file should still be there.
    CheckFileTypeCounts(dbname_, 0, bg_purge ? 1 : 0, 1);
    TEST_SYNC_POINT("DeleteFileTest::BackgroundPurgeCFDropTest:1");

    // Execute background purges.
    sleeping_task_after.WakeUp();
    sleeping_task_after.WaitUntilDone();
    // The file should have been deleted.
    CheckFileTypeCounts(dbname_, 0, 0, 1);
  };

  {
    SCOPED_TRACE("avoid_unnecessary_blocking_io = false");
    do_test(false);
  }

  options.avoid_unnecessary_blocking_io = true;
  options.create_if_missing = false;
  Reopen(options);
  ASSERT_OK(dbfull()->TEST_WaitForPurge());

  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();
  SyncPoint::GetInstance()->LoadDependency(
      {{"DeleteFileTest::BackgroundPurgeCFDropTest:1",
        "DBImpl::BGWorkPurge:start"}});
  SyncPoint::GetInstance()->EnableProcessing();

  {
    SCOPED_TRACE("avoid_unnecessary_blocking_io = true");
    do_test(true);
  }
}

// This test reproduces a bug where an invalid ReadOptions was read in the
// iterator cleanup function
TEST_F(DeleteFileTest, BackgroundPurgeCopyOptions) {
  Options options = CurrentOptions();
  SetOptions(&options);
  Destroy(options);
  options.create_if_missing = true;
  Reopen(options);

  std::string first("0"), last("999999");
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 2;
  Slice first_slice(first), last_slice(last);

  // We keep an iterator alive
  Iterator* itr = nullptr;
  CreateTwoLevels();
  {
    ReadOptions read_options;
    read_options.background_purge_on_iterator_cleanup = true;
    itr = db_->NewIterator(read_options);
    ASSERT_OK(itr->status());
    // ReadOptions is destroyed here, but the iterator cleanup function should
    // not be affected
  }

  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  // 3 sst after compaction with live iterator
  CheckFileTypeCounts(dbname_, 0, 3, 1);
  delete itr;

  test::SleepingBackgroundTask sleeping_task_after;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                 &sleeping_task_after, Env::Priority::HIGH);

  // Make sure all background purges are executed
  sleeping_task_after.WakeUp();
  sleeping_task_after.WaitUntilDone();
  // 1 sst after iterator deletion
  CheckFileTypeCounts(dbname_, 0, 1, 1);
}

TEST_F(DeleteFileTest, BackgroundPurgeTestMultipleJobs) {
  Options options = CurrentOptions();
  SetOptions(&options);
  Destroy(options);
  options.create_if_missing = true;
  Reopen(options);

  std::string first("0"), last("999999");
  CompactRangeOptions compact_options;
  compact_options.change_level = true;
  compact_options.target_level = 2;
  Slice first_slice(first), last_slice(last);

  // We keep an iterator alive
  CreateTwoLevels();
  ReadOptions read_options;
  read_options.background_purge_on_iterator_cleanup = true;
  Iterator* itr1 = db_->NewIterator(read_options);
  ASSERT_OK(itr1->status());
  CreateTwoLevels();
  Iterator* itr2 = db_->NewIterator(read_options);
  ASSERT_OK(itr2->status());
  ASSERT_OK(db_->CompactRange(compact_options, &first_slice, &last_slice));
  // 5 sst files after 2 compactions with 2 live iterators
  CheckFileTypeCounts(dbname_, 0, 5, 1);

  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  // ~DBImpl should wait until all BGWorkPurge jobs are finished
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
      {{"DBImpl::~DBImpl:WaitJob", "DBImpl::BGWorkPurge"},
       {"DeleteFileTest::GuardFinish",
        "DeleteFileTest::BackgroundPurgeTestMultipleJobs:DBClose"}});
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  delete itr1;
  env_->Schedule(&DeleteFileTest::DoSleep, this, Env::Priority::HIGH);
  delete itr2;
  env_->Schedule(&DeleteFileTest::GuardFinish, nullptr, Env::Priority::HIGH);
  Close();

  TEST_SYNC_POINT("DeleteFileTest::BackgroundPurgeTestMultipleJobs:DBClose");
  // 1 sst after iterator deletion
  CheckFileTypeCounts(dbname_, 0, 1, 1);
}

TEST_F(DeleteFileTest, DeleteFileWithIterator) {
  Options options = CurrentOptions();
  SetOptions(&options);
  Destroy(options);
  options.create_if_missing = true;
  Reopen(options);

  CreateTwoLevels();
  ReadOptions read_options;
  Iterator* it = db_->NewIterator(read_options);
  ASSERT_OK(it->status());
  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);

  std::string level2file;

  ASSERT_EQ(metadata.size(), static_cast<size_t>(2));
  if (metadata[0].level == 1) {
    level2file = metadata[1].name;
  } else {
    level2file = metadata[0].name;
  }

  Status status = db_->DeleteFile(level2file);
  fprintf(stdout, "Deletion status %s: %s\n", level2file.c_str(),
          status.ToString().c_str());
  ASSERT_OK(status);
  it->SeekToFirst();
  int numKeysIterated = 0;
  while (it->Valid()) {
    numKeysIterated++;
    it->Next();
  }
  ASSERT_EQ(numKeysIterated, 50000);
  delete it;
}

TEST_F(DeleteFileTest, DeleteLogFiles) {
  Options options = CurrentOptions();
  SetOptions(&options);
  Destroy(options);
  options.create_if_missing = true;
  Reopen(options);

  AddKeys(10, 0);
  VectorLogPtr logfiles;
  ASSERT_OK(db_->GetSortedWalFiles(logfiles));
  ASSERT_GT(logfiles.size(), 0UL);
  // Take the last log file, which is expected to be alive, and try to delete
  // it. This should not succeed because live logs are not allowed to be
  // deleted.
  std::unique_ptr<LogFile> alive_log = std::move(logfiles.back());
  ASSERT_EQ(alive_log->Type(), kAliveLogFile);
  ASSERT_OK(env_->FileExists(wal_dir_ + "/" + alive_log->PathName()));
  fprintf(stdout, "Deleting alive log file %s\n",
          alive_log->PathName().c_str());
  ASSERT_NOK(db_->DeleteFile(alive_log->PathName()));
  ASSERT_OK(env_->FileExists(wal_dir_ + "/" + alive_log->PathName()));
  logfiles.clear();

  // Call Flush to bring about a new working log file and add more keys.
  // Call Flush again to flush out the memtable and move the alive log to the
  // archived log, then try to delete the archived log file.
  FlushOptions fopts;
  ASSERT_OK(db_->Flush(fopts));
  AddKeys(10, 0);
  ASSERT_OK(db_->Flush(fopts));
  ASSERT_OK(db_->GetSortedWalFiles(logfiles));
  ASSERT_GT(logfiles.size(), 0UL);
  std::unique_ptr<LogFile> archived_log = std::move(logfiles.front());
  ASSERT_EQ(archived_log->Type(), kArchivedLogFile);
  ASSERT_OK(env_->FileExists(wal_dir_ + "/" + archived_log->PathName()));
  fprintf(stdout, "Deleting archived log file %s\n",
          archived_log->PathName().c_str());
  ASSERT_OK(db_->DeleteFile(archived_log->PathName()));
  ASSERT_TRUE(
      env_->FileExists(wal_dir_ + "/" + archived_log->PathName()).IsNotFound());
}

TEST_F(DeleteFileTest, DeleteNonDefaultColumnFamily) {
  Options options = CurrentOptions();
  SetOptions(&options);
  Destroy(options);
  options.create_if_missing = true;
  Reopen(options);
  CreateAndReopenWithCF({"new_cf"}, options);

  Random rnd(5);
  for (int i = 0; i < 1000; ++i) {
    ASSERT_OK(db_->Put(WriteOptions(), handles_[1], test::RandomKey(&rnd, 10),
                       test::RandomKey(&rnd, 10)));
  }
  ASSERT_OK(db_->Flush(FlushOptions(), handles_[1]));
  for (int i = 0; i < 1000; ++i) {
    ASSERT_OK(db_->Put(WriteOptions(), handles_[1], test::RandomKey(&rnd, 10),
                       test::RandomKey(&rnd, 10)));
  }
  ASSERT_OK(db_->Flush(FlushOptions(), handles_[1]));

  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);
  ASSERT_EQ(2U, metadata.size());
  ASSERT_EQ("new_cf", metadata[0].column_family_name);
  ASSERT_EQ("new_cf", metadata[1].column_family_name);
  auto old_file = metadata[0].smallest_seqno < metadata[1].smallest_seqno
                      ? metadata[0].name
                      : metadata[1].name;
  auto new_file = metadata[0].smallest_seqno > metadata[1].smallest_seqno
                      ? metadata[0].name
                      : metadata[1].name;
  ASSERT_TRUE(db_->DeleteFile(new_file).IsInvalidArgument());
  ASSERT_OK(db_->DeleteFile(old_file));

  {
    std::unique_ptr<Iterator> itr(db_->NewIterator(ReadOptions(), handles_[1]));
    ASSERT_OK(itr->status());
    int count = 0;
    for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
      ASSERT_OK(itr->status());
      ++count;
    }
    ASSERT_OK(itr->status());
    ASSERT_EQ(count, 1000);
  }

  Close();
  ReopenWithColumnFamilies({kDefaultColumnFamilyName, "new_cf"}, options);

  {
    std::unique_ptr<Iterator> itr(db_->NewIterator(ReadOptions(), handles_[1]));
    int count = 0;
    for (itr->SeekToFirst(); itr->Valid(); itr->Next()) {
      ASSERT_OK(itr->status());
      ++count;
    }
    ASSERT_OK(itr->status());
    ASSERT_EQ(count, 1000);
  }
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  RegisterCustomObjects(argc, argv);
  return RUN_ALL_TESTS();
}