2016-02-09 23:12:00 +00:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-15 23:03:42 +00:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2013-10-16 21:59:46 +00:00
|
|
|
//
|
2011-03-18 22:37:00 +00:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
2019-05-30 03:44:08 +00:00
|
|
|
#include "file/filename.h"
|
2012-11-30 01:28:37 +00:00
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
#include <ctype.h>
|
|
|
|
#include <stdio.h>
|
2022-10-25 01:34:52 +00:00
|
|
|
|
|
|
|
#include <cinttypes>
|
2014-07-02 16:54:20 +00:00
|
|
|
#include <vector>
|
2022-10-25 01:34:52 +00:00
|
|
|
|
2019-09-16 17:31:27 +00:00
|
|
|
#include "file/writable_file_writer.h"
|
2013-08-23 15:38:13 +00:00
|
|
|
#include "rocksdb/env.h"
|
2019-05-31 00:39:43 +00:00
|
|
|
#include "test_util/sync_point.h"
|
2015-01-22 19:43:38 +00:00
|
|
|
#include "util/stop_watch.h"
|
2015-11-11 06:58:01 +00:00
|
|
|
#include "util/string_util.h"
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
namespace ROCKSDB_NAMESPACE {
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2021-10-16 17:03:19 +00:00
|
|
|
// Fixed file-name components. The first three have external linkage
// (used by callers outside this file); the rest are file-local.
const std::string kCurrentFileName = "CURRENT";
const std::string kOptionsFileNamePrefix = "OPTIONS-";
const std::string kTempFileNameSuffix = "dbtmp";

// Table-file extensions: RocksDB writes ".sst"; ".ldb" is the LevelDB
// extension, still recognized for compatibility.
static const std::string kRocksDbTFileExt = "sst";
static const std::string kLevelDbTFileExt = "ldb";
// Extension for blob files.
static const std::string kRocksDBBlobFileExt = "blob";
// Subdirectory holding archived WAL files.
static const std::string kArchivalDirName = "archive";
|
2015-10-07 00:46:22 +00:00
|
|
|
|
2012-09-06 00:44:13 +00:00
|
|
|
// Given a path, flatten the path name by replacing all chars not in
// {[0-9,a-z,A-Z,-,_,.]} with _. And append '_LOG\0' at the end.
// Return the number of chars stored in dest not including the trailing '\0'.
//
// `dest` must be at least `len` bytes; `len` must be large enough to hold
// the suffix (including its trailing '\0').
static size_t GetInfoLogPrefix(const std::string& path, char* dest, int len) {
  const char suffix[] = "_LOG";

  // Guard against int->size_t wrap-around in the loop bound below: if `len`
  // were smaller than sizeof(suffix), `len - sizeof(suffix)` would wrap to a
  // huge unsigned value and the loop could overflow `dest`.
  assert(len >= static_cast<int>(sizeof(suffix)));
  const size_t max_write = static_cast<size_t>(len) - sizeof(suffix);

  size_t write_idx = 0;
  size_t i = 0;
  size_t src_len = path.size();

  // Copy allowed chars through; collapse each disallowed char into '_',
  // except a disallowed char at position 0, which is dropped entirely
  // (so a leading '/' does not become a leading '_').
  while (i < src_len && write_idx < max_write) {
    if ((path[i] >= 'a' && path[i] <= 'z') ||
        (path[i] >= '0' && path[i] <= '9') ||
        (path[i] >= 'A' && path[i] <= 'Z') || path[i] == '-' ||
        path[i] == '.' || path[i] == '_') {
      dest[write_idx++] = path[i];
    } else {
      if (i > 0) {
        dest[write_idx++] = '_';
      }
    }
    i++;
  }
  assert(sizeof(suffix) <= static_cast<size_t>(len) - write_idx);
  // "\0" is automatically added by snprintf. Use an explicit "%s" format so
  // the suffix is never interpreted as a format string (-Wformat-security).
  snprintf(dest + write_idx, len - write_idx, "%s", suffix);
  write_idx += sizeof(suffix) - 1;
  return write_idx;
}
|
|
|
|
|
2019-08-01 22:45:19 +00:00
|
|
|
// Build the canonical "<number zero-padded to 6 digits>.<suffix>" file name,
// e.g. "000005.sst". Numbers wider than 6 digits are printed in full.
static std::string MakeFileName(uint64_t number, const char* suffix) {
  char scratch[100];
  const auto num = static_cast<unsigned long long>(number);
  snprintf(scratch, sizeof(scratch), "%06llu.%s", num, suffix);
  return scratch;
}

// Same as above, prefixed with "<name>/".
static std::string MakeFileName(const std::string& name, uint64_t number,
                                const char* suffix) {
  std::string result = name;
  result += "/";
  result += MakeFileName(number, suffix);
  return result;
}
|
|
|
|
|
|
|
|
std::string LogFileName(const std::string& name, uint64_t number) {
|
|
|
|
assert(number > 0);
|
|
|
|
return MakeFileName(name, number, "log");
|
|
|
|
}
|
|
|
|
|
2019-08-01 22:45:19 +00:00
|
|
|
std::string LogFileName(uint64_t number) {
|
|
|
|
assert(number > 0);
|
|
|
|
return MakeFileName(number, "log");
|
|
|
|
}
|
|
|
|
|
2020-05-07 16:29:21 +00:00
|
|
|
std::string BlobFileName(uint64_t number) {
|
|
|
|
assert(number > 0);
|
|
|
|
return MakeFileName(number, kRocksDBBlobFileExt.c_str());
|
|
|
|
}
|
|
|
|
|
2017-04-18 19:00:36 +00:00
|
|
|
std::string BlobFileName(const std::string& blobdirname, uint64_t number) {
|
|
|
|
assert(number > 0);
|
|
|
|
return MakeFileName(blobdirname, number, kRocksDBBlobFileExt.c_str());
|
|
|
|
}
|
|
|
|
|
2018-08-31 18:59:49 +00:00
|
|
|
std::string BlobFileName(const std::string& dbname, const std::string& blob_dir,
|
|
|
|
uint64_t number) {
|
|
|
|
assert(number > 0);
|
|
|
|
return MakeFileName(dbname + "/" + blob_dir, number,
|
|
|
|
kRocksDBBlobFileExt.c_str());
|
|
|
|
}
|
|
|
|
|
2013-10-01 21:46:52 +00:00
|
|
|
std::string ArchivalDirectory(const std::string& dir) {
|
2021-10-16 17:03:19 +00:00
|
|
|
return dir + "/" + kArchivalDirName;
|
2012-12-08 00:30:22 +00:00
|
|
|
}
|
2012-11-30 01:28:37 +00:00
|
|
|
std::string ArchivedLogFileName(const std::string& name, uint64_t number) {
|
|
|
|
assert(number > 0);
|
2021-10-16 17:03:19 +00:00
|
|
|
return MakeFileName(name + "/" + kArchivalDirName, number, "log");
|
2012-11-30 01:28:37 +00:00
|
|
|
}
|
|
|
|
|
2014-07-02 16:54:20 +00:00
|
|
|
std::string MakeTableFileName(const std::string& path, uint64_t number) {
|
2015-10-07 00:46:22 +00:00
|
|
|
return MakeFileName(path, number, kRocksDbTFileExt.c_str());
|
|
|
|
}
|
|
|
|
|
2019-08-01 22:45:19 +00:00
|
|
|
std::string MakeTableFileName(uint64_t number) {
|
|
|
|
return MakeFileName(number, kRocksDbTFileExt.c_str());
|
|
|
|
}
|
|
|
|
|
2015-10-07 00:46:22 +00:00
|
|
|
std::string Rocks2LevelTableFileName(const std::string& fullname) {
|
|
|
|
assert(fullname.size() > kRocksDbTFileExt.size() + 1);
|
|
|
|
if (fullname.size() <= kRocksDbTFileExt.size() + 1) {
|
|
|
|
return "";
|
|
|
|
}
|
|
|
|
return fullname.substr(0, fullname.size() - kRocksDbTFileExt.size()) +
|
|
|
|
kLevelDbTFileExt;
|
2014-07-02 16:54:20 +00:00
|
|
|
}
|
|
|
|
|
CompactFiles, EventListener and GetDatabaseMetaData
Summary:
This diff adds three sets of APIs to RocksDB.
= GetColumnFamilyMetaData =
* This APIs allow users to obtain the current state of a RocksDB instance on one column family.
* See GetColumnFamilyMetaData in include/rocksdb/db.h
= EventListener =
* A virtual class that allows users to implement a set of
call-back functions which will be called when specific
events of a RocksDB instance happens.
* To register EventListener, simply insert an EventListener to ColumnFamilyOptions::listeners
= CompactFiles =
* CompactFiles API inputs a set of file numbers and an output level, and RocksDB
will try to compact those files into the specified level.
= Example =
* Example code can be found in example/compact_files_example.cc, which implements
a simple external compactor using EventListener, GetColumnFamilyMetaData, and
CompactFiles API.
Test Plan:
listener_test
compactor_test
example/compact_files_example
export ROCKSDB_TESTS=CompactFiles
db_test
export ROCKSDB_TESTS=MetaData
db_test
Reviewers: ljin, igor, rven, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D24705
2014-11-07 22:45:18 +00:00
|
|
|
// Extract the file number from a table file name by reading the decimal
// digits immediately preceding the last '.'. Returns 0 when no digits are
// found (including when there is no '.' at all: find_last_of yields npos,
// whose int cast makes the loop body unreachable, matching the original).
uint64_t TableFileNameToNumber(const std::string& name) {
  uint64_t result = 0;
  uint64_t place = 1;
  // Walk backwards from the character before the last '.', accumulating
  // digits least-significant first; stop at the first non-digit.
  for (int idx = static_cast<int>(name.find_last_of('.')) - 1;
       idx >= 0 && name[idx] >= '0' && name[idx] <= '9'; --idx) {
    result += static_cast<uint64_t>(name[idx] - '0') * place;
    place *= 10;
  }
  return result;
}
|
|
|
|
|
2014-07-14 22:34:30 +00:00
|
|
|
std::string TableFileName(const std::vector<DbPath>& db_paths, uint64_t number,
|
|
|
|
uint32_t path_id) {
|
2011-03-18 22:37:00 +00:00
|
|
|
assert(number > 0);
|
2014-07-02 16:54:20 +00:00
|
|
|
std::string path;
|
|
|
|
if (path_id >= db_paths.size()) {
|
2014-07-14 22:34:30 +00:00
|
|
|
path = db_paths.back().path;
|
2014-07-02 16:54:20 +00:00
|
|
|
} else {
|
2014-07-14 22:34:30 +00:00
|
|
|
path = db_paths[path_id].path;
|
2014-07-02 16:54:20 +00:00
|
|
|
}
|
|
|
|
return MakeTableFileName(path, number);
|
|
|
|
}
|
|
|
|
|
2014-08-13 18:57:40 +00:00
|
|
|
// Render a file number for logging/display. Numbers on the primary path
// (id 0) are printed bare; any other path is annotated as
// "<number>(path <id>)". Output is truncated to out_buf_size.
void FormatFileNumber(uint64_t number, uint32_t path_id, char* out_buf,
                      size_t out_buf_size) {
  if (path_id != 0) {
    snprintf(out_buf, out_buf_size, "%" PRIu64 "(path %" PRIu32 ")", number,
             path_id);
  } else {
    snprintf(out_buf, out_buf_size, "%" PRIu64, number);
  }
}
|
|
|
|
|
2021-10-16 17:03:19 +00:00
|
|
|
// Bare manifest name: "MANIFEST-<number zero-padded to 6 digits>".
std::string DescriptorFileName(uint64_t number) {
  assert(number > 0);  // 0 is not a valid file number
  char scratch[100];
  snprintf(scratch, sizeof(scratch), "MANIFEST-%06llu",
           static_cast<unsigned long long>(number));
  return scratch;
}

// Manifest name rooted in the db directory: "<dbname>/MANIFEST-<number>".
std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
  std::string result = dbname;
  result += "/";
  result += DescriptorFileName(number);
  return result;
}
|
|
|
|
|
|
|
|
std::string CurrentFileName(const std::string& dbname) {
|
2021-10-16 17:03:19 +00:00
|
|
|
return dbname + "/" + kCurrentFileName;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2022-10-25 01:34:52 +00:00
|
|
|
// Name of the lock file guarding the db directory: "<dbname>/LOCK".
std::string LockFileName(const std::string& dbname) {
  std::string result = dbname;
  result += "/LOCK";
  return result;
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
|
|
|
std::string TempFileName(const std::string& dbname, uint64_t number) {
|
2015-11-11 06:58:01 +00:00
|
|
|
return MakeFileName(dbname, number, kTempFileNameSuffix.c_str());
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2014-08-14 17:05:16 +00:00
|
|
|
// Compute the info-log file-name prefix. Without a dedicated log dir the
// prefix is simply "LOG"; with one, the db's absolute path is flattened
// (see GetInfoLogPrefix) so logs from different dbs can share the dir.
InfoLogPrefix::InfoLogPrefix(bool has_log_dir,
                             const std::string& db_absolute_path) {
  if (!has_log_dir) {
    const char kInfoLogPrefix[] = "LOG";
    // "\0" is automatically added to the end. Use an explicit "%s" format
    // so the string is never interpreted as a format string
    // (-Wformat-security hygiene).
    snprintf(buf, sizeof(buf), "%s", kInfoLogPrefix);
    prefix = Slice(buf, sizeof(kInfoLogPrefix) - 1);
  } else {
    size_t len =
        GetInfoLogPrefix(NormalizePath(db_absolute_path), buf, sizeof(buf));
    prefix = Slice(buf, len);
  }
}
|
|
|
|
|
2012-09-06 00:44:13 +00:00
|
|
|
std::string InfoLogFileName(const std::string& dbname,
|
2022-10-25 01:34:52 +00:00
|
|
|
const std::string& db_path,
|
|
|
|
const std::string& log_dir) {
|
2015-09-23 19:39:16 +00:00
|
|
|
if (log_dir.empty()) {
|
2012-09-06 00:44:13 +00:00
|
|
|
return dbname + "/LOG";
|
2015-09-23 19:39:16 +00:00
|
|
|
}
|
2012-09-06 00:44:13 +00:00
|
|
|
|
2014-08-14 17:05:16 +00:00
|
|
|
InfoLogPrefix info_log_prefix(true, db_path);
|
|
|
|
return log_dir + "/" + info_log_prefix.buf;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Return the name of the old info log file for "dbname".
|
2012-09-06 00:44:13 +00:00
|
|
|
std::string OldInfoLogFileName(const std::string& dbname, uint64_t ts,
|
2022-10-25 01:34:52 +00:00
|
|
|
const std::string& db_path,
|
|
|
|
const std::string& log_dir) {
|
2012-08-17 23:06:05 +00:00
|
|
|
char buf[50];
|
|
|
|
snprintf(buf, sizeof(buf), "%llu", static_cast<unsigned long long>(ts));
|
2012-09-06 00:44:13 +00:00
|
|
|
|
2015-09-23 19:39:16 +00:00
|
|
|
if (log_dir.empty()) {
|
2012-09-06 00:44:13 +00:00
|
|
|
return dbname + "/LOG.old." + buf;
|
2015-09-23 19:39:16 +00:00
|
|
|
}
|
2012-09-06 00:44:13 +00:00
|
|
|
|
2014-08-14 17:05:16 +00:00
|
|
|
InfoLogPrefix info_log_prefix(true, db_path);
|
|
|
|
return log_dir + "/" + info_log_prefix.buf + ".old." + buf;
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
|
2021-10-16 17:03:19 +00:00
|
|
|
// Bare options file name: "OPTIONS-<number zero-padded to 6 digits>".
std::string OptionsFileName(uint64_t file_num) {
  char num_buf[32];
  snprintf(num_buf, sizeof(num_buf), "%06" PRIu64, file_num);
  return kOptionsFileNamePrefix + num_buf;
}

// Options file name rooted in the db directory: "<dbname>/OPTIONS-<number>".
std::string OptionsFileName(const std::string& dbname, uint64_t file_num) {
  std::string result = dbname;
  result += "/";
  result += OptionsFileName(file_num);
  return result;
}
|
|
|
|
|
|
|
|
std::string TempOptionsFileName(const std::string& dbname, uint64_t file_num) {
|
|
|
|
char buffer[256];
|
|
|
|
snprintf(buffer, sizeof(buffer), "%s%06" PRIu64 ".%s",
|
|
|
|
kOptionsFileNamePrefix.c_str(), file_num,
|
|
|
|
kTempFileNameSuffix.c_str());
|
|
|
|
return dbname + "/" + buffer;
|
|
|
|
}
|
|
|
|
|
2012-12-17 19:26:59 +00:00
|
|
|
// Name of a meta database: "<dbname>/METADB-<number>". Note the number is
// not zero-padded, unlike table/manifest names.
std::string MetaDatabaseName(const std::string& dbname, uint64_t number) {
  return dbname + "/METADB-" + std::to_string(number);
}
|
2011-03-18 22:37:00 +00:00
|
|
|
|
2013-10-18 21:50:54 +00:00
|
|
|
// Name of the db's identity file: "<dbname>/IDENTITY".
std::string IdentityFileName(const std::string& dbname) {
  std::string result = dbname;
  result += "/IDENTITY";
  return result;
}
|
|
|
|
|
2011-03-18 22:37:00 +00:00
|
|
|
// Owned filenames have the form:
|
2013-10-18 21:50:54 +00:00
|
|
|
// dbname/IDENTITY
|
2011-03-18 22:37:00 +00:00
|
|
|
// dbname/CURRENT
|
|
|
|
// dbname/LOCK
|
2014-08-14 17:05:16 +00:00
|
|
|
// dbname/<info_log_name_prefix>
|
|
|
|
// dbname/<info_log_name_prefix>.old.[0-9]+
|
2011-03-18 22:37:00 +00:00
|
|
|
// dbname/MANIFEST-[0-9]+
|
2017-04-18 19:00:36 +00:00
|
|
|
// dbname/[0-9]+.(log|sst|blob)
|
2012-12-17 19:26:59 +00:00
|
|
|
// dbname/METADB-[0-9]+
|
2015-11-11 06:58:01 +00:00
|
|
|
// dbname/OPTIONS-[0-9]+
|
|
|
|
// dbname/OPTIONS-[0-9]+.dbtmp
|
2013-09-01 08:52:32 +00:00
|
|
|
// Disregards / at the beginning
|
2022-10-25 01:34:52 +00:00
|
|
|
bool ParseFileName(const std::string& fname, uint64_t* number, FileType* type,
|
2013-10-24 06:39:23 +00:00
|
|
|
WalFileType* log_type) {
|
2014-08-14 17:05:16 +00:00
|
|
|
return ParseFileName(fname, number, "", type, log_type);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parse a db-owned file name (see the grammar in the comment above the
// 4-argument overload). On success returns true and sets *number and *type;
// for WAL files, *log_type (if non-null) reports alive vs. archived.
// Fixed-name files (IDENTITY/CURRENT/LOCK) report *number == 0.
bool ParseFileName(const std::string& fname, uint64_t* number,
                   const Slice& info_log_name_prefix, FileType* type,
                   WalFileType* log_type) {
  Slice rest(fname);
  // Tolerate (and drop) a single leading '/'.
  if (fname.length() > 1 && fname[0] == '/') {
    rest.remove_prefix(1);
  }
  // Fixed-name files first: these carry no file number.
  if (rest == "IDENTITY") {
    *number = 0;
    *type = kIdentityFile;
  } else if (rest == "CURRENT") {
    *number = 0;
    *type = kCurrentFile;
  } else if (rest == "LOCK") {
    *number = 0;
    *type = kDBLockFile;
  } else if (info_log_name_prefix.size() > 0 &&
             rest.starts_with(info_log_name_prefix)) {
    // Info log: "<prefix>", "<prefix>.old", or "<prefix>.old.<timestamp>".
    rest.remove_prefix(info_log_name_prefix.size());
    if (rest == "" || rest == ".old") {
      *number = 0;
      *type = kInfoLogFile;
    } else if (rest.starts_with(".old.")) {
      uint64_t ts_suffix;
      // sizeof also counts the trailing '\0'.
      rest.remove_prefix(sizeof(".old.") - 1);
      if (!ConsumeDecimalNumber(&rest, &ts_suffix)) {
        return false;
      }
      // For old info logs, *number carries the archival timestamp.
      *number = ts_suffix;
      *type = kInfoLogFile;
    }
    // NOTE(review): any other suffix after the prefix (e.g. "<prefix>.bak")
    // falls through to the final `return true` below WITHOUT setting *type
    // or *number -- looks like long-standing behavior; confirm before
    // relying on the outputs in that case.
  } else if (rest.starts_with("MANIFEST-")) {
    rest.remove_prefix(strlen("MANIFEST-"));
    uint64_t num;
    if (!ConsumeDecimalNumber(&rest, &num)) {
      return false;
    }
    // Reject trailing characters after the number.
    if (!rest.empty()) {
      return false;
    }
    *type = kDescriptorFile;
    *number = num;
  } else if (rest.starts_with("METADB-")) {
    rest.remove_prefix(strlen("METADB-"));
    uint64_t num;
    if (!ConsumeDecimalNumber(&rest, &num)) {
      return false;
    }
    // Reject trailing characters after the number.
    if (!rest.empty()) {
      return false;
    }
    *type = kMetaDatabase;
    *number = num;
  } else if (rest.starts_with(kOptionsFileNamePrefix)) {
    // "OPTIONS-<num>" or its temporary form "OPTIONS-<num>.dbtmp".
    uint64_t ts_suffix;
    bool is_temp_file = false;
    rest.remove_prefix(kOptionsFileNamePrefix.size());
    const std::string kTempFileNameSuffixWithDot =
        std::string(".") + kTempFileNameSuffix;
    if (rest.ends_with(kTempFileNameSuffixWithDot)) {
      rest.remove_suffix(kTempFileNameSuffixWithDot.size());
      is_temp_file = true;
    }
    if (!ConsumeDecimalNumber(&rest, &ts_suffix)) {
      return false;
    }
    *number = ts_suffix;
    *type = is_temp_file ? kTempFile : kOptionsFile;
  } else {
    // Numbered files: "[archive/]<number>.<extension>".
    // Avoid strtoull() to keep filename format independent of the
    // current locale
    bool archive_dir_found = false;
    if (rest.starts_with(kArchivalDirName)) {
      // A bare "archive" (no '/<number>...') is not a file name.
      if (rest.size() <= kArchivalDirName.size()) {
        return false;
      }
      rest.remove_prefix(kArchivalDirName.size() +
                         1);  // Add 1 to remove / also
      if (log_type) {
        *log_type = kArchivedLogFile;
      }
      archive_dir_found = true;
    }
    uint64_t num;
    if (!ConsumeDecimalNumber(&rest, &num)) {
      return false;
    }
    // Require a '.' followed by a non-empty extension.
    if (rest.size() <= 1 || rest[0] != '.') {
      return false;
    }
    rest.remove_prefix(1);

    Slice suffix = rest;
    if (suffix == Slice("log")) {
      *type = kWalFile;
      // Only mark alive when not already marked archived above.
      if (log_type && !archive_dir_found) {
        *log_type = kAliveLogFile;
      }
    } else if (archive_dir_found) {
      return false;  // Archive dir can contain only log files
    } else if (suffix == Slice(kRocksDbTFileExt) ||
               suffix == Slice(kLevelDbTFileExt)) {
      *type = kTableFile;
    } else if (suffix == Slice(kRocksDBBlobFileExt)) {
      *type = kBlobFile;
    } else if (suffix == Slice(kTempFileNameSuffix)) {
      *type = kTempFile;
    } else {
      return false;
    }
    *number = num;
  }
  return true;
}
|
|
|
|
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
Summary:
In the current code base, we use Status to get and store the returned status from the call. Specifically, for IO related functions, the current Status cannot reflect the IO Error details such as error scope, error retryable attribute, and others. With the implementation of https://github.com/facebook/rocksdb/issues/5761, we have the new Wrapper for IO, which returns IOStatus instead of Status. However, the IOStatus is purged at the lower level of write path and transferred to Status.
The first job of this PR is to pass the IOStatus to the write path (flush, WAL write, and Compaction). The second job is to identify the Retryable IO Error as HardError, and set the bg_error_ as HardError. In this case, the DB Instance becomes read only. User is informed of the Status and need to take actions to deal with it (e.g., call db->Resume()).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6487
Test Plan: Added the testing case to error_handler_fs_test. Pass make asan_check
Reviewed By: anand1976
Differential Revision: D20685017
Pulled By: zhichao-cao
fbshipit-source-id: ff85f042896243abcd6ef37877834e26f36b6eb0
2020-03-27 23:03:05 +00:00
|
|
|
IOStatus SetCurrentFile(FileSystem* fs, const std::string& dbname,
|
2020-03-29 02:05:54 +00:00
|
|
|
uint64_t descriptor_number,
|
Sync dir containing CURRENT after RenameFile on CURRENT as much as possible (#10573)
Summary:
**Context:**
Below crash test revealed a bug that directory containing CURRENT file (short for `dir_contains_current_file` below) was not always get synced after a new CURRENT is created and being called with `RenameFile` as part of the creation.
This bug exposes a risk that such un-synced directory containing the updated CURRENT can’t survive a host crash (e.g, power loss) hence get corrupted. This then will be followed by a recovery from a corrupted CURRENT that we don't want.
The root-cause is that a nullptr `FSDirectory* dir_contains_current_file` sometimes gets passed-down to `SetCurrentFile()` hence in those case `dir_contains_current_file->FSDirectory::FsyncWithDirOptions()` will be skipped (which otherwise will internally call`Env/FS::SyncDic()` )
```
./db_stress --acquire_snapshot_one_in=10000 --adaptive_readahead=1 --allow_data_in_errors=True --avoid_unnecessary_blocking_io=0 --backup_max_size=104857600 --backup_one_in=100000 --batch_protection_bytes_per_key=8 --block_size=16384 --bloom_bits=134.8015470676662 --bottommost_compression_type=disable --cache_size=8388608 --checkpoint_one_in=1000000 --checksum_type=kCRC32c --clear_column_family_one_in=0 --compact_files_one_in=1000000 --compact_range_one_in=1000000 --compaction_pri=2 --compaction_ttl=100 --compression_max_dict_buffer_bytes=511 --compression_max_dict_bytes=16384 --compression_type=zstd --compression_use_zstd_dict_trainer=1 --compression_zstd_max_train_bytes=65536 --continuous_verification_interval=0 --data_block_index_type=0 --db=$db --db_write_buffer_size=1048576 --delpercent=5 --delrangepercent=0 --destroy_db_initially=0 --disable_wal=0 --enable_compaction_filter=0 --enable_pipelined_write=1 --expected_values_dir=$exp --fail_if_options_file_error=1 --file_checksum_impl=none --flush_one_in=1000000 --get_current_wal_file_one_in=0 --get_live_files_one_in=1000000 --get_property_one_in=1000000 --get_sorted_wal_files_one_in=0 --index_block_restart_interval=4 --ingest_external_file_one_in=0 --iterpercent=10 --key_len_percent_dist=1,30,69 --level_compaction_dynamic_level_bytes=True --mark_for_compaction_one_file_in=10 --max_background_compactions=20 --max_bytes_for_level_base=10485760 --max_key=10000 --max_key_len=3 --max_manifest_file_size=16384 --max_write_batch_group_size_bytes=64 --max_write_buffer_number=3 --max_write_buffer_size_to_maintain=0 --memtable_prefix_bloom_size_ratio=0.001 --memtable_protection_bytes_per_key=1 --memtable_whole_key_filtering=1 --mmap_read=1 --nooverwritepercent=1 --open_metadata_write_fault_one_in=0 --open_read_fault_one_in=0 --open_write_fault_one_in=0 --ops_per_thread=100000000 --optimize_filters_for_memory=1 --paranoid_file_checks=1 --partition_pinning=2 --pause_background_one_in=1000000 --periodic_compaction_seconds=0 
--prefix_size=5 --prefixpercent=5 --prepopulate_block_cache=1 --progress_reports=0 --read_fault_one_in=1000 --readpercent=45 --recycle_log_file_num=0 --reopen=0 --ribbon_starting_level=999 --secondary_cache_fault_one_in=32 --secondary_cache_uri=compressed_secondary_cache://capacity=8388608 --set_options_one_in=10000 --snapshot_hold_ops=100000 --sst_file_manager_bytes_per_sec=0 --sst_file_manager_bytes_per_truncate=0 --subcompactions=3 --sync_fault_injection=1 --target_file_size_base=2097 --target_file_size_multiplier=2 --test_batches_snapshots=1 --top_level_index_pinning=1 --use_full_merge_v1=1 --use_merge=1 --value_size_mult=32 --verify_checksum=1 --verify_checksum_one_in=1000000 --verify_db_one_in=100000 --verify_sst_unique_id_in_manifest=1 --wal_bytes_per_sync=524288 --write_buffer_size=4194 --writepercent=35
```
```
stderr:
WARNING: prefix_size is non-zero but memtablerep != prefix_hash
db_stress: utilities/fault_injection_fs.cc:748: virtual rocksdb::IOStatus rocksdb::FaultInjectionTestFS::RenameFile(const std::string &, const std::string &, const rocksdb::IOOptions &, rocksdb::IODebugContext *): Assertion `tlist.find(tdn.second) == tlist.end()' failed.`
```
**Summary:**
The PR ensured the non-test path pass down a non-null dir containing CURRENT (which is by current RocksDB assumption just db_dir) by doing the following:
- Renamed `directory_to_fsync` as `dir_contains_current_file` in `SetCurrentFile()` to tighten the association between this directory and CURRENT file
- Changed `SetCurrentFile()` API to require `dir_contains_current_file` being passed-in, instead of making it by default nullptr.
- Because `SetCurrentFile()`'s `dir_contains_current_file` is passed down from `VersionSet::LogAndApply()` then `VersionSet::ProcessManifestWrites()` (i.e, think about this as a chain of 3 functions related to MANIFEST update), these 2 functions also got refactored to require `dir_contains_current_file`
- Updated the non-test-path callers of these 3 functions to obtain and pass in non-nullptr `dir_contains_current_file`, which by current assumption of RocksDB, is the `FSDirectory* db_dir`.
- `db_impl` path will obtain `DBImpl::directories_.getDbDir()` while others with no access to such `directories_` are obtained on the fly by creating such object `FileSystem::NewDirectory(..)` and manage it by unique pointers to ensure short life time.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10573
Test Plan:
- `make check`
- Passed the repro db_stress command
- For future improvement, since we currently don't assert dir containing CURRENT to be non-nullptr due to https://github.com/facebook/rocksdb/pull/10573#pullrequestreview-1087698899, there is still chances that future developers mistakenly pass down nullptr dir containing CURRENT thus resulting skipped sync dir and cause the bug again. Therefore a smarter test (e.g, such as quoted from ajkr "(make) unsynced data loss to be dropping files corresponding to unsynced directory entries") is still needed.
Reviewed By: ajkr
Differential Revision: D39005886
Pulled By: hx235
fbshipit-source-id: 336fb9090d0cfa6ca3dd580db86268007dde7f5a
2022-08-30 00:35:21 +00:00
|
|
|
FSDirectory* dir_contains_current_file) {
|
2011-03-18 22:37:00 +00:00
|
|
|
// Remove leading "dbname/" and add newline to manifest file name
|
|
|
|
std::string manifest = DescriptorFileName(dbname, descriptor_number);
|
|
|
|
Slice contents = manifest;
|
|
|
|
assert(contents.starts_with(dbname + "/"));
|
|
|
|
contents.remove_prefix(dbname.size() + 1);
|
|
|
|
std::string tmp = TempFileName(dbname, descriptor_number);
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
Summary:
In the current code base, we use Status to get and store the returned status from the call. Specifically, for IO related functions, the current Status cannot reflect the IO Error details such as error scope, error retryable attribute, and others. With the implementation of https://github.com/facebook/rocksdb/issues/5761, we have the new Wrapper for IO, which returns IOStatus instead of Status. However, the IOStatus is purged at the lower level of write path and transferred to Status.
The first job of this PR is to pass the IOStatus to the write path (flush, WAL write, and Compaction). The second job is to identify the Retryable IO Error as HardError, and set the bg_error_ as HardError. In this case, the DB Instance becomes read only. User is informed of the Status and need to take actions to deal with it (e.g., call db->Resume()).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6487
Test Plan: Added the testing case to error_handler_fs_test. Pass make asan_check
Reviewed By: anand1976
Differential Revision: D20685017
Pulled By: zhichao-cao
fbshipit-source-id: ff85f042896243abcd6ef37877834e26f36b6eb0
2020-03-27 23:03:05 +00:00
|
|
|
IOStatus s = WriteStringToFile(fs, contents.ToString() + "\n", tmp, true);
|
Handle rename() failure in non-local FS (#8192)
Summary:
In a distributed environment, a file `rename()` operation can succeed on server (remote)
side, but the client can somehow return non-ok status to RocksDB. Possible reasons include
network partition, connection issue, etc. This happens in `rocksdb::SetCurrentFile()`, which
can be called in `LogAndApply() -> ProcessManifestWrites()` if RocksDB tries to switch to a
new MANIFEST. We currently always delete the new MANIFEST if an error occurs.
This is problematic in distributed world. If the server-side successfully updates the CURRENT
file via renaming, then a subsequent `DB::Open()` will try to look for the new MANIFEST and fail.
As a fix, we can track the execution result of IO operations on the new MANIFEST.
- If IO operations on the new MANIFEST fail, then we know the CURRENT must point to the original
MANIFEST. Therefore, it is safe to remove the new MANIFEST.
- If IO operations on the new MANIFEST all succeed, but somehow we end up in the clean up
code block, then we do not know whether CURRENT points to the new or old MANIFEST. (For local
POSIX-compliant FS, it should still point to old MANIFEST, but it does not matter if we keep the
new MANIFEST.) Therefore, we keep the new MANIFEST.
- Any future `LogAndApply()` will switch to a new MANIFEST and update CURRENT.
- If process reopens the db immediately after the failure, then the CURRENT file can point
to either the new MANIFEST or the old one, both of which exist. Therefore, recovery can
succeed and ignore the other.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8192
Test Plan: make check
Reviewed By: zhichao-cao
Differential Revision: D27804648
Pulled By: riversand963
fbshipit-source-id: 9c16f2a5ce41bc6aadf085e48449b19ede8423e4
2021-04-20 01:10:23 +00:00
|
|
|
TEST_SYNC_POINT_CALLBACK("SetCurrentFile:BeforeRename", &s);
|
2011-03-18 22:37:00 +00:00
|
|
|
if (s.ok()) {
|
2021-05-05 22:49:29 +00:00
|
|
|
TEST_KILL_RANDOM_WITH_WEIGHT("SetCurrentFile:0", REDUCE_ODDS2);
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
Summary:
In the current code base, we use Status to get and store the returned status from the call. Specifically, for IO related functions, the current Status cannot reflect the IO Error details such as error scope, error retryable attribute, and others. With the implementation of https://github.com/facebook/rocksdb/issues/5761, we have the new Wrapper for IO, which returns IOStatus instead of Status. However, the IOStatus is purged at the lower level of write path and transferred to Status.
The first job of this PR is to pass the IOStatus to the write path (flush, WAL write, and Compaction). The second job is to identify the Retryable IO Error as HardError, and set the bg_error_ as HardError. In this case, the DB Instance becomes read only. User is informed of the Status and need to take actions to deal with it (e.g., call db->Resume()).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6487
Test Plan: Added the testing case to error_handler_fs_test. Pass make asan_check
Reviewed By: anand1976
Differential Revision: D20685017
Pulled By: zhichao-cao
fbshipit-source-id: ff85f042896243abcd6ef37877834e26f36b6eb0
2020-03-27 23:03:05 +00:00
|
|
|
s = fs->RenameFile(tmp, CurrentFileName(dbname), IOOptions(), nullptr);
|
2021-05-05 22:49:29 +00:00
|
|
|
TEST_KILL_RANDOM_WITH_WEIGHT("SetCurrentFile:1", REDUCE_ODDS2);
|
Handle rename() failure in non-local FS (#8192)
Summary:
In a distributed environment, a file `rename()` operation can succeed on server (remote)
side, but the client can somehow return non-ok status to RocksDB. Possible reasons include
network partition, connection issue, etc. This happens in `rocksdb::SetCurrentFile()`, which
can be called in `LogAndApply() -> ProcessManifestWrites()` if RocksDB tries to switch to a
new MANIFEST. We currently always delete the new MANIFEST if an error occurs.
This is problematic in distributed world. If the server-side successfully updates the CURRENT
file via renaming, then a subsequent `DB::Open()` will try to look for the new MANIFEST and fail.
As a fix, we can track the execution result of IO operations on the new MANIFEST.
- If IO operations on the new MANIFEST fail, then we know the CURRENT must point to the original
MANIFEST. Therefore, it is safe to remove the new MANIFEST.
- If IO operations on the new MANIFEST all succeed, but somehow we end up in the clean up
code block, then we do not know whether CURRENT points to the new or old MANIFEST. (For local
POSIX-compliant FS, it should still point to old MANIFEST, but it does not matter if we keep the
new MANIFEST.) Therefore, we keep the new MANIFEST.
- Any future `LogAndApply()` will switch to a new MANIFEST and update CURRENT.
- If process reopens the db immediately after the failure, then the CURRENT file can point
to either the new MANIFEST or the old one, both of which exist. Therefore, recovery can
succeed and ignore the other.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8192
Test Plan: make check
Reviewed By: zhichao-cao
Differential Revision: D27804648
Pulled By: riversand963
fbshipit-source-id: 9c16f2a5ce41bc6aadf085e48449b19ede8423e4
2021-04-20 01:10:23 +00:00
|
|
|
TEST_SYNC_POINT_CALLBACK("SetCurrentFile:AfterRename", &s);
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
2014-05-06 21:51:33 +00:00
|
|
|
if (s.ok()) {
|
Sync dir containing CURRENT after RenameFile on CURRENT as much as possible (#10573)
Summary:
**Context:**
Below crash test revealed a bug that directory containing CURRENT file (short for `dir_contains_current_file` below) was not always get synced after a new CURRENT is created and being called with `RenameFile` as part of the creation.
This bug exposes a risk that such un-synced directory containing the updated CURRENT can’t survive a host crash (e.g, power loss) hence get corrupted. This then will be followed by a recovery from a corrupted CURRENT that we don't want.
The root-cause is that a nullptr `FSDirectory* dir_contains_current_file` sometimes gets passed-down to `SetCurrentFile()` hence in those case `dir_contains_current_file->FSDirectory::FsyncWithDirOptions()` will be skipped (which otherwise will internally call`Env/FS::SyncDic()` )
```
./db_stress --acquire_snapshot_one_in=10000 --adaptive_readahead=1 --allow_data_in_errors=True --avoid_unnecessary_blocking_io=0 --backup_max_size=104857600 --backup_one_in=100000 --batch_protection_bytes_per_key=8 --block_size=16384 --bloom_bits=134.8015470676662 --bottommost_compression_type=disable --cache_size=8388608 --checkpoint_one_in=1000000 --checksum_type=kCRC32c --clear_column_family_one_in=0 --compact_files_one_in=1000000 --compact_range_one_in=1000000 --compaction_pri=2 --compaction_ttl=100 --compression_max_dict_buffer_bytes=511 --compression_max_dict_bytes=16384 --compression_type=zstd --compression_use_zstd_dict_trainer=1 --compression_zstd_max_train_bytes=65536 --continuous_verification_interval=0 --data_block_index_type=0 --db=$db --db_write_buffer_size=1048576 --delpercent=5 --delrangepercent=0 --destroy_db_initially=0 --disable_wal=0 --enable_compaction_filter=0 --enable_pipelined_write=1 --expected_values_dir=$exp --fail_if_options_file_error=1 --file_checksum_impl=none --flush_one_in=1000000 --get_current_wal_file_one_in=0 --get_live_files_one_in=1000000 --get_property_one_in=1000000 --get_sorted_wal_files_one_in=0 --index_block_restart_interval=4 --ingest_external_file_one_in=0 --iterpercent=10 --key_len_percent_dist=1,30,69 --level_compaction_dynamic_level_bytes=True --mark_for_compaction_one_file_in=10 --max_background_compactions=20 --max_bytes_for_level_base=10485760 --max_key=10000 --max_key_len=3 --max_manifest_file_size=16384 --max_write_batch_group_size_bytes=64 --max_write_buffer_number=3 --max_write_buffer_size_to_maintain=0 --memtable_prefix_bloom_size_ratio=0.001 --memtable_protection_bytes_per_key=1 --memtable_whole_key_filtering=1 --mmap_read=1 --nooverwritepercent=1 --open_metadata_write_fault_one_in=0 --open_read_fault_one_in=0 --open_write_fault_one_in=0 --ops_per_thread=100000000 --optimize_filters_for_memory=1 --paranoid_file_checks=1 --partition_pinning=2 --pause_background_one_in=1000000 --periodic_compaction_seconds=0 
--prefix_size=5 --prefixpercent=5 --prepopulate_block_cache=1 --progress_reports=0 --read_fault_one_in=1000 --readpercent=45 --recycle_log_file_num=0 --reopen=0 --ribbon_starting_level=999 --secondary_cache_fault_one_in=32 --secondary_cache_uri=compressed_secondary_cache://capacity=8388608 --set_options_one_in=10000 --snapshot_hold_ops=100000 --sst_file_manager_bytes_per_sec=0 --sst_file_manager_bytes_per_truncate=0 --subcompactions=3 --sync_fault_injection=1 --target_file_size_base=2097 --target_file_size_multiplier=2 --test_batches_snapshots=1 --top_level_index_pinning=1 --use_full_merge_v1=1 --use_merge=1 --value_size_mult=32 --verify_checksum=1 --verify_checksum_one_in=1000000 --verify_db_one_in=100000 --verify_sst_unique_id_in_manifest=1 --wal_bytes_per_sync=524288 --write_buffer_size=4194 --writepercent=35
```
```
stderr:
WARNING: prefix_size is non-zero but memtablerep != prefix_hash
db_stress: utilities/fault_injection_fs.cc:748: virtual rocksdb::IOStatus rocksdb::FaultInjectionTestFS::RenameFile(const std::string &, const std::string &, const rocksdb::IOOptions &, rocksdb::IODebugContext *): Assertion `tlist.find(tdn.second) == tlist.end()' failed.`
```
**Summary:**
The PR ensured the non-test path pass down a non-null dir containing CURRENT (which is by current RocksDB assumption just db_dir) by doing the following:
- Renamed `directory_to_fsync` as `dir_contains_current_file` in `SetCurrentFile()` to tighten the association between this directory and CURRENT file
- Changed `SetCurrentFile()` API to require `dir_contains_current_file` being passed-in, instead of making it by default nullptr.
- Because `SetCurrentFile()`'s `dir_contains_current_file` is passed down from `VersionSet::LogAndApply()` then `VersionSet::ProcessManifestWrites()` (i.e, think about this as a chain of 3 functions related to MANIFEST update), these 2 functions also got refactored to require `dir_contains_current_file`
- Updated the non-test-path callers of these 3 functions to obtain and pass in non-nullptr `dir_contains_current_file`, which by current assumption of RocksDB, is the `FSDirectory* db_dir`.
- `db_impl` path will obtain `DBImpl::directories_.getDbDir()` while others with no access to such `directories_` are obtained on the fly by creating such object `FileSystem::NewDirectory(..)` and manage it by unique pointers to ensure short life time.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10573
Test Plan:
- `make check`
- Passed the repro db_stress command
- For future improvement, since we currently don't assert dir containing CURRENT to be non-nullptr due to https://github.com/facebook/rocksdb/pull/10573#pullrequestreview-1087698899, there are still chances that future developers mistakenly pass down a nullptr dir containing CURRENT, thus resulting in a skipped dir sync and causing the bug again. Therefore a smarter test (e.g, such as quoted from ajkr "(make) unsynced data loss to be dropping files corresponding to unsynced directory entries") is still needed.
Reviewed By: ajkr
Differential Revision: D39005886
Pulled By: hx235
fbshipit-source-id: 336fb9090d0cfa6ca3dd580db86268007dde7f5a
2022-08-30 00:35:21 +00:00
|
|
|
if (dir_contains_current_file != nullptr) {
|
|
|
|
s = dir_contains_current_file->FsyncWithDirOptions(
|
2021-11-03 19:20:19 +00:00
|
|
|
IOOptions(), nullptr, DirFsyncOptions(CurrentFileName(dbname)));
|
2014-05-06 21:51:33 +00:00
|
|
|
}
|
|
|
|
} else {
|
2021-08-16 15:09:46 +00:00
|
|
|
fs->DeleteFile(tmp, IOOptions(), nullptr)
|
|
|
|
.PermitUncheckedError(); // NOTE: PermitUncheckedError is acceptable
|
|
|
|
// here as we are already handling an error
|
|
|
|
// case, and this is just a best-attempt
|
|
|
|
// effort at some cleanup
|
2011-03-18 22:37:00 +00:00
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2019-09-03 15:50:47 +00:00
|
|
|
Status SetIdentityFile(Env* env, const std::string& dbname,
|
|
|
|
const std::string& db_id) {
|
|
|
|
std::string id;
|
|
|
|
if (db_id.empty()) {
|
|
|
|
id = env->GenerateUniqueId();
|
|
|
|
} else {
|
|
|
|
id = db_id;
|
|
|
|
}
|
2013-10-18 21:50:54 +00:00
|
|
|
assert(!id.empty());
|
2013-10-23 17:59:08 +00:00
|
|
|
// Reserve the filename dbname/000000.dbtmp for the temporary identity file
|
|
|
|
std::string tmp = TempFileName(dbname, 0);
|
2021-11-03 19:20:19 +00:00
|
|
|
std::string identify_file_name = IdentityFileName(dbname);
|
2014-04-10 04:17:14 +00:00
|
|
|
Status s = WriteStringToFile(env, id, tmp, true);
|
2013-10-18 21:50:54 +00:00
|
|
|
if (s.ok()) {
|
2021-11-03 19:20:19 +00:00
|
|
|
s = env->RenameFile(tmp, identify_file_name);
|
|
|
|
}
|
|
|
|
std::unique_ptr<FSDirectory> dir_obj;
|
|
|
|
if (s.ok()) {
|
|
|
|
s = env->GetFileSystem()->NewDirectory(dbname, IOOptions(), &dir_obj,
|
|
|
|
nullptr);
|
|
|
|
}
|
|
|
|
if (s.ok()) {
|
|
|
|
s = dir_obj->FsyncWithDirOptions(IOOptions(), nullptr,
|
|
|
|
DirFsyncOptions(identify_file_name));
|
2013-10-18 21:50:54 +00:00
|
|
|
}
|
2022-06-07 16:49:31 +00:00
|
|
|
|
|
|
|
// The default Close() could return "NotSupported" and we bypass it
|
|
|
|
// if it is not impelmented. Detailed explanations can be found in
|
|
|
|
// db/db_impl/db_impl.h
|
Explicitly closing all directory file descriptors (#10049)
Summary:
Currently, the DB directory file descriptor is left open until the deconstruction process (`DB::Close()` does not close the file descriptor). To verify this, comment out the lines between `db_ = nullptr` and `db_->Close()` (line 512, 513, 514, 515 in ldb_cmd.cc) to leak the ``db_'' object, build `ldb` tool and run
```
strace --trace=open,openat,close ./ldb --db=$TEST_TMPDIR --ignore_unknown_options put K1 V1 --create_if_missing
```
There is one directory file descriptor that is not closed in the strace log.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10049
Test Plan: Add a new unit test DBBasicTest.DBCloseAllDirectoryFDs: Open a database with different WAL directory and three different data directories, and all directory file descriptors should be closed after calling Close(). Explicitly call Close() after a directory file descriptor is not used so that the counter of directory open and close should be equivalent.
Reviewed By: ajkr, hx235
Differential Revision: D36722135
Pulled By: littlepig2013
fbshipit-source-id: 07bdc2abc417c6b30997b9bbef1f79aa757b21ff
2022-06-02 01:03:34 +00:00
|
|
|
if (s.ok()) {
|
2022-06-07 16:49:31 +00:00
|
|
|
Status temp_s = dir_obj->Close(IOOptions(), nullptr);
|
|
|
|
if (!temp_s.ok()) {
|
|
|
|
if (temp_s.IsNotSupported()) {
|
|
|
|
temp_s.PermitUncheckedError();
|
|
|
|
} else {
|
|
|
|
s = temp_s;
|
|
|
|
}
|
|
|
|
}
|
Explicitly closing all directory file descriptors (#10049)
Summary:
Currently, the DB directory file descriptor is left open until the deconstruction process (`DB::Close()` does not close the file descriptor). To verify this, comment out the lines between `db_ = nullptr` and `db_->Close()` (line 512, 513, 514, 515 in ldb_cmd.cc) to leak the ``db_'' object, build `ldb` tool and run
```
strace --trace=open,openat,close ./ldb --db=$TEST_TMPDIR --ignore_unknown_options put K1 V1 --create_if_missing
```
There is one directory file descriptor that is not closed in the strace log.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10049
Test Plan: Add a new unit test DBBasicTest.DBCloseAllDirectoryFDs: Open a database with different WAL directory and three different data directories, and all directory file descriptors should be closed after calling Close(). Explicitly call Close() after a directory file descriptor is not used so that the counter of directory open and close should be equivalent.
Reviewed By: ajkr, hx235
Differential Revision: D36722135
Pulled By: littlepig2013
fbshipit-source-id: 07bdc2abc417c6b30997b9bbef1f79aa757b21ff
2022-06-02 01:03:34 +00:00
|
|
|
}
|
2013-10-18 21:50:54 +00:00
|
|
|
if (!s.ok()) {
|
2020-10-05 21:56:59 +00:00
|
|
|
env->DeleteFile(tmp).PermitUncheckedError();
|
2013-10-18 21:50:54 +00:00
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2021-03-15 11:32:24 +00:00
|
|
|
// Syncs the open MANIFEST file to stable storage.
//
// Uses fsync vs fdatasync according to db_options->use_fsync, and records the
// elapsed time in the MANIFEST_FILE_SYNC_MICROS histogram. The kill point at
// the top lets crash tests simulate a process death right before the sync.
IOStatus SyncManifest(const ImmutableDBOptions* db_options,
                      WritableFileWriter* file) {
  TEST_KILL_RANDOM_WITH_WEIGHT("SyncManifest:0", REDUCE_ODDS2);
  // StopWatch times the Sync() call below via its destructor.
  StopWatch sw(db_options->clock, db_options->stats, MANIFEST_FILE_SYNC_MICROS);
  return file->Sync(db_options->use_fsync);
}
|
|
|
|
|
2021-01-26 06:07:26 +00:00
|
|
|
// Collects the names of all info-log files for a DB.
//
// Sets *parent_dir to the directory that holds the info logs (db_log_dir when
// configured, otherwise dbname) and appends every child of that directory that
// parses as an info-log file to *info_log_list. Returns the GetChildren()
// error on failure, OK otherwise.
Status GetInfoLogFiles(const std::shared_ptr<FileSystem>& fs,
                       const std::string& db_log_dir, const std::string& dbname,
                       std::string* parent_dir,
                       std::vector<std::string>* info_log_list) {
  assert(parent_dir != nullptr);
  assert(info_log_list != nullptr);

  // Info logs live in db_log_dir when one is configured, else in the db dir.
  *parent_dir = db_log_dir.empty() ? dbname : db_log_dir;

  // When logs are redirected to db_log_dir, their names carry a prefix
  // derived from dbname; InfoLogPrefix encapsulates that naming rule.
  InfoLogPrefix info_log_prefix(!db_log_dir.empty(), dbname);

  std::vector<std::string> children;
  Status s = fs->GetChildren(*parent_dir, IOOptions(), &children, nullptr);
  if (!s.ok()) {
    return s;
  }

  uint64_t file_number = 0;
  FileType file_type = kWalFile;
  for (const auto& child : children) {
    if (ParseFileName(child, &file_number, info_log_prefix.prefix,
                      &file_type) &&
        file_type == kInfoLogFile) {
      info_log_list->push_back(child);
    }
  }
  return Status::OK();
}
|
|
|
|
|
2020-03-21 02:17:54 +00:00
|
|
|
std::string NormalizePath(const std::string& path) {
|
|
|
|
std::string dst;
|
2022-03-30 22:55:31 +00:00
|
|
|
|
|
|
|
if (path.length() > 2 && path[0] == kFilePathSeparator &&
|
|
|
|
path[1] == kFilePathSeparator) { // Handle UNC names
|
|
|
|
dst.append(2, kFilePathSeparator);
|
|
|
|
}
|
|
|
|
|
2020-03-21 02:17:54 +00:00
|
|
|
for (auto c : path) {
|
Fix MSVC-related build issues (#7439)
Summary:
This PR addresses some build and functional issues on MSVC targets, as a step towards an eventual goal of having RocksDB build successfully for Windows on ARM64.
Addressed issues include:
- BitsSetToOne and CountTrailingZeroBits do not compile on non-x64 MSVC targets. A fallback implementation of BitsSetToOne when Intel intrinsics are not available is added, based on the C++20 `<bit>` popcount implementation in Microsoft's STL.
- The implementation of FloorLog2 for MSVC targets (including x64) gives incorrect results. The unit test easily detects this, but CircleCI is currently configured to only run a specific set of tests for Windows CMake builds, so this seems to have been unnoticed.
- AsmVolatilePause does not use YieldProcessor on Windows ARM64 targets, even though it is available.
- When CondVar::TimedWait calls Microsoft STL's condition_variable::wait_for, it can potentially trigger a bug (just recently fixed in the upcoming VS 16.8's STL) that deadlocks various tests that wait for a timer to execute, since `Timer::Run` doesn't get a chance to execute before being blocked by the test function acquiring the mutex.
- In c_test, `GetTempDir` assumes a POSIX-style temp path.
- `NormalizePath` did not eliminate consecutive POSIX-style path separators on Windows, resulting in test failures in e.g., wal_manager_test.
- Various other test failures.
In a followup PR I hope to modify CircleCI's config.yml to invoke all RocksDB unit tests in Windows CMake builds with CTest, instead of the current use of `run_ci_db_test.ps1` which requires individual tests to be specified and is missing many of the existing tests.
Notes from peterd: FloorLog2 is not yet used in production code (it's for something in progress). I also added a few more inexpensive platform-dependent tests to Windows CircleCI runs. And included facebook/folly#1461 as requested
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7439
Reviewed By: jay-zhuang
Differential Revision: D24021563
Pulled By: pdillinger
fbshipit-source-id: 0ec2027c0d6a494d8a0fe38d9667fc2f7e29f7e7
2020-10-01 16:21:30 +00:00
|
|
|
if (!dst.empty() && (c == kFilePathSeparator || c == '/') &&
|
|
|
|
(dst.back() == kFilePathSeparator || dst.back() == '/')) {
|
2020-03-21 02:17:54 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
dst.push_back(c);
|
|
|
|
}
|
|
|
|
return dst;
|
|
|
|
}
|
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|