Ignore value of BackupableDBOptions::max_valid_backups_to_open when B… (#6072)
Summary:
This change ignores the value of BackupableDBOptions::max_valid_backups_to_open when a BackupEngine is not read-only.

Issue: https://github.com/facebook/rocksdb/issues/4997

Note on tests: I had to remove the WriteOnlyEngine test case from BackupableDBTest because it was not consistent with the new semantics of BackupableDBOptions::max_valid_backups_to_open. Maybe we should think about adding a new interface for append-only BackupEngines. On the other hand, I changed the LimitBackupsOpened test case to use a read-only BackupEngine, and I added a new test case specific to this change.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/6072

Reviewed By: pdillinger

Differential Revision: D18687364

Pulled By: sebastianopeluso

fbshipit-source-id: 77bc1f927d623964d59137a93de123bbd719da4e
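For context, here is a minimal sketch of the resulting caller-side behavior. The backup path and setup are hypothetical and error handling is trimmed; BackupEngine::Open, BackupEngineReadOnly::Open, and BackupableDBOptions::max_valid_backups_to_open are the public API of this era.

#include "rocksdb/env.h"
#include "rocksdb/utilities/backupable_db.h"

int main() {
  rocksdb::BackupableDBOptions backup_opts("/tmp/rocksdb_backup");  // hypothetical path
  backup_opts.max_valid_backups_to_open = 2;  // non-default value

  // Read/write engine: after this change the limit above is ignored (a
  // warning is logged and the value is reset to the default), so the engine
  // sees every backup and can refcount shared files during deletion.
  rocksdb::BackupEngine* engine = nullptr;
  rocksdb::Status s =
      rocksdb::BackupEngine::Open(rocksdb::Env::Default(), backup_opts, &engine);
  if (s.ok()) {
    delete engine;
  }

  // Read-only engine: the limit is still honored, e.g. to cheaply open only
  // the two most recent valid backups for inspection or restore.
  rocksdb::BackupEngineReadOnly* ro_engine = nullptr;
  s = rocksdb::BackupEngineReadOnly::Open(rocksdb::Env::Default(), backup_opts,
                                          &ro_engine);
  if (s.ok()) {
    delete ro_engine;
  }
  return 0;
}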
This commit is contained in:
parent 0bc87442ae
commit fcd7e03832
@@ -7,6 +7,7 @@
 * With FIFO compaction style, options.periodic_compaction_seconds will have the same meaning as options.ttl. Whichever is stricter will be used. With the default options.periodic_compaction_seconds value and options.ttl's default of 0, RocksDB will use a default of 30 days.
 * Added an API GetCreationTimeOfOldestFile(uint64_t* creation_time) to get the file_creation_time of the oldest SST file in the DB.
 * An unlikely usage of FilterPolicy is no longer supported. Calling GetFilterBitsBuilder() on the FilterPolicy returned by NewBloomFilterPolicy will now cause an assertion violation in debug builds, because RocksDB has internally migrated to a more elaborate interface that is expected to evolve further. Custom implementations of FilterPolicy should work as before, except those wrapping the return of NewBloomFilterPolicy, which will require a new override of a protected function in FilterPolicy.
+* The option BackupableDBOptions::max_valid_backups_to_open is now only used when opening BackupEngineReadOnly. When opening a read/write BackupEngine, anything but the default value logs a warning and is treated as the default. This change ensures that backup deletion has proper accounting of shared files to ensure they are deleted when no longer referenced by a backup.
 
 ### New Features
 * Universal compaction to support options.periodic_compaction_seconds. A full compaction will be triggered if any file is over the threshold.
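As an aside, the GetCreationTimeOfOldestFile() API mentioned in the changelog above can be exercised as in the following sketch; the DB path is hypothetical and error handling is minimal.

#include <cinttypes>
#include <cstdio>

#include "rocksdb/db.h"

int main() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options opts;
  opts.create_if_missing = true;
  rocksdb::Status s = rocksdb::DB::Open(opts, "/tmp/example_db", &db);
  if (!s.ok()) {
    return 1;
  }
  uint64_t creation_time = 0;
  // Fills in the file_creation_time of the oldest SST file in the DB.
  s = db->GetCreationTimeOfOldestFile(&creation_time);
  if (s.ok()) {
    std::printf("oldest SST file created at %" PRIu64 "\n", creation_time);
  }
  delete db;
  return 0;
}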
@@ -568,6 +568,14 @@ Status BackupEngineImpl::Initialize() {
   // we might need to clean up from previous crash or I/O errors
   might_need_garbage_collect_ = true;
 
+  if (options_.max_valid_backups_to_open != port::kMaxInt32) {
+    options_.max_valid_backups_to_open = port::kMaxInt32;
+    ROCKS_LOG_WARN(
+        options_.info_log,
+        "`max_valid_backups_to_open` is not set to the default value. Ignoring "
+        "its value since BackupEngine is not read-only.");
+  }
+
   // gather the list of directories that we need to create
   std::vector<std::pair<std::string, std::unique_ptr<Directory>*>>
       directories;
@@ -1044,29 +1052,21 @@ Status BackupEngineImpl::DeleteBackupInternal(BackupID backup_id) {
   // After removing meta file, best effort deletion even with errors.
   // (Don't delete other files if we can't delete the meta file right
   // now.)
 
-  if (options_.max_valid_backups_to_open == port::kMaxInt32) {
-    std::vector<std::string> to_delete;
-    for (auto& itr : backuped_file_infos_) {
-      if (itr.second->refs == 0) {
-        Status s = backup_env_->DeleteFile(GetAbsolutePath(itr.first));
-        ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
-                       itr.first.c_str(), s.ToString().c_str());
-        to_delete.push_back(itr.first);
-        if (!s.ok()) {
-          // Trying again later might work
-          might_need_garbage_collect_ = true;
-        }
-      }
-    }
-    for (auto& td : to_delete) {
-      backuped_file_infos_.erase(td);
-    }
-  } else {
-    ROCKS_LOG_WARN(
-        options_.info_log,
-        "DeleteBackup cleanup is limited since `max_valid_backups_to_open` "
-        "constrains how many backups the engine knows about");
-  }
+  std::vector<std::string> to_delete;
+  for (auto& itr : backuped_file_infos_) {
+    if (itr.second->refs == 0) {
+      Status s = backup_env_->DeleteFile(GetAbsolutePath(itr.first));
+      ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s", itr.first.c_str(),
+                     s.ToString().c_str());
+      to_delete.push_back(itr.first);
+      if (!s.ok()) {
+        // Trying again later might work
+        might_need_garbage_collect_ = true;
+      }
+    }
+  }
+  for (auto& td : to_delete) {
+    backuped_file_infos_.erase(td);
+  }
 
   // take care of private dirs -- GarbageCollect() will take care of them
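The dedented block above relies on backuped_file_infos_ covering all backups, which is exactly what forcing the default max_valid_backups_to_open guarantees. A sketch of the user-visible effect (paths and data are hypothetical, status checks trimmed): deleting one of two backups that share SST files removes only the files whose reference count has dropped to zero.

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/utilities/backupable_db.h"

int main() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options opts;
  opts.create_if_missing = true;
  rocksdb::DB::Open(opts, "/tmp/example_db", &db);

  rocksdb::BackupEngine* engine = nullptr;
  rocksdb::BackupEngine::Open(
      rocksdb::Env::Default(),
      rocksdb::BackupableDBOptions("/tmp/example_backup"), &engine);
  engine->CreateNewBackup(db);  // backup 1
  engine->CreateNewBackup(db);  // backup 2 shares unchanged SST files with 1

  // Only files whose refcount drops to zero (unique to backup 1) are removed;
  // shared files still referenced by backup 2 stay on disk.
  engine->DeleteBackup(1 /* backup_id */);

  delete engine;
  delete db;
  return 0;
}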
@@ -1569,64 +1569,54 @@ Status BackupEngineImpl::GarbageCollect() {
   might_need_garbage_collect_ = false;
 
   ROCKS_LOG_INFO(options_.info_log, "Starting garbage collection");
-  if (options_.max_valid_backups_to_open != port::kMaxInt32) {
-    ROCKS_LOG_WARN(
-        options_.info_log,
-        "Garbage collection is limited since `max_valid_backups_to_open` "
-        "constrains how many backups the engine knows about");
-  }
 
-  if (options_.max_valid_backups_to_open == port::kMaxInt32) {
-    // delete obsolete shared files
-    // we cannot do this when BackupEngine has `max_valid_backups_to_open` set
-    // as those engines don't know about all shared files.
-    for (bool with_checksum : {false, true}) {
-      std::vector<std::string> shared_children;
-      {
-        std::string shared_path;
-        if (with_checksum) {
-          shared_path = GetAbsolutePath(GetSharedFileWithChecksumRel());
-        } else {
-          shared_path = GetAbsolutePath(GetSharedFileRel());
-        }
-        auto s = backup_env_->FileExists(shared_path);
-        if (s.ok()) {
-          s = backup_env_->GetChildren(shared_path, &shared_children);
-        } else if (s.IsNotFound()) {
-          s = Status::OK();
-        }
-        if (!s.ok()) {
-          overall_status = s;
-          // Trying again later might work
-          might_need_garbage_collect_ = true;
-        }
-      }
-      for (auto& child : shared_children) {
-        if (child == "." || child == "..") {
-          continue;
-        }
-        std::string rel_fname;
-        if (with_checksum) {
-          rel_fname = GetSharedFileWithChecksumRel(child);
-        } else {
-          rel_fname = GetSharedFileRel(child);
-        }
-        auto child_itr = backuped_file_infos_.find(rel_fname);
-        // if it's not refcounted, delete it
-        if (child_itr == backuped_file_infos_.end() ||
-            child_itr->second->refs == 0) {
-          // this might be a directory, but DeleteFile will just fail in that
-          // case, so we're good
-          Status s = backup_env_->DeleteFile(GetAbsolutePath(rel_fname));
-          ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
-                         rel_fname.c_str(), s.ToString().c_str());
-          backuped_file_infos_.erase(rel_fname);
-          if (!s.ok()) {
-            overall_status = s;
-            // Trying again later might work
-            might_need_garbage_collect_ = true;
-          }
-        }
-      }
-    }
-  }
+  // delete obsolete shared files
+  for (bool with_checksum : {false, true}) {
+    std::vector<std::string> shared_children;
+    {
+      std::string shared_path;
+      if (with_checksum) {
+        shared_path = GetAbsolutePath(GetSharedFileWithChecksumRel());
+      } else {
+        shared_path = GetAbsolutePath(GetSharedFileRel());
+      }
+      auto s = backup_env_->FileExists(shared_path);
+      if (s.ok()) {
+        s = backup_env_->GetChildren(shared_path, &shared_children);
+      } else if (s.IsNotFound()) {
+        s = Status::OK();
+      }
+      if (!s.ok()) {
+        overall_status = s;
+        // Trying again later might work
+        might_need_garbage_collect_ = true;
+      }
+    }
+    for (auto& child : shared_children) {
+      if (child == "." || child == "..") {
+        continue;
+      }
+      std::string rel_fname;
+      if (with_checksum) {
+        rel_fname = GetSharedFileWithChecksumRel(child);
+      } else {
+        rel_fname = GetSharedFileRel(child);
+      }
+      auto child_itr = backuped_file_infos_.find(rel_fname);
+      // if it's not refcounted, delete it
+      if (child_itr == backuped_file_infos_.end() ||
+          child_itr->second->refs == 0) {
+        // this might be a directory, but DeleteFile will just fail in that
+        // case, so we're good
+        Status s = backup_env_->DeleteFile(GetAbsolutePath(rel_fname));
+        ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
+                       rel_fname.c_str(), s.ToString().c_str());
+        backuped_file_infos_.erase(rel_fname);
+        if (!s.ok()) {
+          // Trying again later might work
+          might_need_garbage_collect_ = true;
+        }
+      }
+    }
+  }
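GarbageCollect() is part of the public BackupEngine interface, so the retry path tracked by might_need_garbage_collect_ can also be driven explicitly. A small sketch, assuming an engine opened as in the earlier examples:

#include "rocksdb/utilities/backupable_db.h"

// Best-effort deletions in DeleteBackup()/PurgeOldBackups() may leave garbage
// behind after I/O errors; an explicit GarbageCollect() pass retries removing
// files no longer referenced by any backup.
rocksdb::Status RetryBackupCleanup(rocksdb::BackupEngine* engine) {
  return engine->GarbageCollect();
}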
@@ -1645,8 +1635,7 @@ Status BackupEngineImpl::GarbageCollect() {
     if (child == "." || child == "..") {
       continue;
     }
-    // it's ok to do this when BackupEngine has `max_valid_backups_to_open` set
-    // as the engine always knows all valid backup numbers.
+
     BackupID backup_id = 0;
     bool tmp_dir = child.find(".tmp") != std::string::npos;
     sscanf(child.c_str(), "%u", &backup_id);
@@ -1687,12 +1687,50 @@ TEST_F(BackupableDBTest, LimitBackupsOpened) {
   CloseDBAndBackupEngine();
 
   backupable_options_->max_valid_backups_to_open = 2;
-  OpenDBAndBackupEngine();
+  backupable_options_->destroy_old_data = false;
+  BackupEngineReadOnly* read_only_backup_engine;
+  ASSERT_OK(BackupEngineReadOnly::Open(backup_chroot_env_.get(),
+                                       *backupable_options_,
+                                       &read_only_backup_engine));
 
   std::vector<BackupInfo> backup_infos;
-  backup_engine_->GetBackupInfo(&backup_infos);
+  read_only_backup_engine->GetBackupInfo(&backup_infos);
   ASSERT_EQ(2, backup_infos.size());
   ASSERT_EQ(2, backup_infos[0].backup_id);
   ASSERT_EQ(4, backup_infos[1].backup_id);
+  delete read_only_backup_engine;
 }
 
+TEST_F(BackupableDBTest, IgnoreLimitBackupsOpenedWhenNotReadOnly) {
+  // Verify the specified max_valid_backups_to_open is ignored if the engine
+  // is not read-only.
+  //
+  // Setup:
+  // - backups 1, 2, and 4 are valid
+  // - backup 3 is corrupt
+  // - max_valid_backups_to_open == 2
+  //
+  // Expectation: the engine opens backups 4, 2, and 1 since those are latest
+  // non-corrupt backups, by ignoring max_valid_backups_to_open == 2.
+  const int kNumKeys = 5000;
+  OpenDBAndBackupEngine(true);
+  for (int i = 1; i <= 4; ++i) {
+    FillDB(db_.get(), kNumKeys * i, kNumKeys * (i + 1));
+    ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
+    if (i == 3) {
+      ASSERT_OK(file_manager_->CorruptFile(backupdir_ + "/meta/3", 3));
+    }
+  }
+  CloseDBAndBackupEngine();
+
+  backupable_options_->max_valid_backups_to_open = 2;
+  OpenDBAndBackupEngine();
+  std::vector<BackupInfo> backup_infos;
+  backup_engine_->GetBackupInfo(&backup_infos);
+  ASSERT_EQ(3, backup_infos.size());
+  ASSERT_EQ(1, backup_infos[0].backup_id);
+  ASSERT_EQ(2, backup_infos[1].backup_id);
+  ASSERT_EQ(4, backup_infos[2].backup_id);
+  CloseDBAndBackupEngine();
+  DestroyDB(dbname_, options_);
+}
@@ -1718,33 +1756,6 @@ TEST_F(BackupableDBTest, CreateWhenLatestBackupCorrupted) {
   ASSERT_EQ(2, backup_infos[0].backup_id);
 }
 
-TEST_F(BackupableDBTest, WriteOnlyEngine) {
-  // Verify we can open a backup engine and create new ones even if reading old
-  // backups would fail with IOError. IOError is a more serious condition than
-  // corruption and would cause the engine to fail opening. So the only way to
-  // avoid is by not reading old backups at all, i.e., respecting
-  // `max_valid_backups_to_open == 0`.
-  const int kNumKeys = 5000;
-  OpenDBAndBackupEngine(true /* destroy_old_data */);
-  FillDB(db_.get(), 0 /* from */, kNumKeys);
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-  CloseDBAndBackupEngine();
-
-  backupable_options_->max_valid_backups_to_open = 0;
-  // cause any meta-file reads to fail with IOError during Open
-  test_backup_env_->SetDummySequentialFile(true);
-  test_backup_env_->SetDummySequentialFileFailReads(true);
-  OpenDBAndBackupEngine();
-  test_backup_env_->SetDummySequentialFileFailReads(false);
-  test_backup_env_->SetDummySequentialFile(false);
-
-  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-  std::vector<BackupInfo> backup_infos;
-  backup_engine_->GetBackupInfo(&backup_infos);
-  ASSERT_EQ(1, backup_infos.size());
-  ASSERT_EQ(2, backup_infos[0].backup_id);
-}
-
 TEST_F(BackupableDBTest, WriteOnlyEngineNoSharedFileDeletion) {
   // Verifies a write-only BackupEngine does not delete files belonging to valid
   // backups when GarbageCollect, PurgeOldBackups, or DeleteBackup are called.