Run clang-format on some files in db/db_impl directory (#10869)
Summary:
Run clang-format on some files in the db/db_impl/ directory:

```
clang-format -i <file>
```

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10869

Test Plan: make check

Reviewed By: ltamasi

Differential Revision: D40685390

Pulled By: riversand963

fbshipit-source-id: 64449ccb21b0d61c5142eb2bcbff828acb45c154
This commit is contained in:
parent 727bad78b8
commit 84563a2701
```diff
@@ -26,16 +26,16 @@ CompactedDBImpl::CompactedDBImpl(const DBOptions& options,
       version_(nullptr),
       user_comparator_(nullptr) {}
 
-CompactedDBImpl::~CompactedDBImpl() {
-}
+CompactedDBImpl::~CompactedDBImpl() {}
 
 size_t CompactedDBImpl::FindFile(const Slice& key) {
   size_t right = files_.num_files - 1;
   auto cmp = [&](const FdWithKeyRange& f, const Slice& k) -> bool {
     return user_comparator_->Compare(ExtractUserKey(f.largest_key), k) < 0;
   };
-  return static_cast<size_t>(std::lower_bound(files_.files,
-                             files_.files + right, key, cmp) - files_.files);
+  return static_cast<size_t>(
+      std::lower_bound(files_.files, files_.files + right, key, cmp) -
+      files_.files);
 }
 
 Status CompactedDBImpl::Get(const ReadOptions& options, ColumnFamilyHandle*,
```
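The `FindFile` reflow above is a standard binary-search idiom: `std::lower_bound` over a raw pointer range with a lambda comparator. A minimal standalone sketch of the same pattern, with a plain struct and `int` keys standing in for `FdWithKeyRange` and the user comparator (these stand-in names are not RocksDB's):

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>

// Stand-in for FdWithKeyRange: each "file" records its largest key.
struct File {
  int largest_key;
};

// Index of the first file whose largest key is >= key. Only the first
// num_files - 1 entries are searched, so a key beyond every bound clamps
// to the index of the last file.
std::size_t FindFile(const File* files, std::size_t num_files, int key) {
  std::size_t right = num_files - 1;
  auto cmp = [](const File& f, int k) -> bool { return f.largest_key < k; };
  return static_cast<std::size_t>(
      std::lower_bound(files, files + right, key, cmp) - files);
}

int main() {
  File files[] = {{10}, {20}, {30}};
  assert(FindFile(files, 3, 15) == 1);  // 15 <= 20, first match is files[1]
  assert(FindFile(files, 3, 99) == 2);  // past every bound: last file
  return 0;
}
```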
```diff
@@ -228,8 +228,8 @@ Status CompactedDBImpl::Init(const Options& options) {
   return Status::NotSupported("no file exists");
 }
 
-Status CompactedDBImpl::Open(const Options& options,
-                             const std::string& dbname, DB** dbptr) {
+Status CompactedDBImpl::Open(const Options& options, const std::string& dbname,
+                             DB** dbptr) {
   *dbptr = nullptr;
 
   if (options.max_open_files != -1) {
```
```diff
@@ -7,6 +7,7 @@
 #ifndef ROCKSDB_LITE
 #include <string>
 #include <vector>
+
 #include "db/db_impl/db_impl.h"
 
 namespace ROCKSDB_NAMESPACE {
```
```diff
@@ -1357,7 +1357,7 @@ Status DBImpl::SetDBOptions(
     file_options_for_compaction_ = fs_->OptimizeForCompactionTableWrite(
         file_options_for_compaction_, immutable_db_options_);
     versions_->ChangeFileOptions(mutable_db_options_);
-    //TODO(xiez): clarify why apply optimize for read to write options
+    // TODO(xiez): clarify why apply optimize for read to write options
     file_options_for_compaction_ = fs_->OptimizeForCompactionTableRead(
         file_options_for_compaction_, immutable_db_options_);
     file_options_for_compaction_.compaction_readahead_size =
```
```diff
@@ -2357,8 +2357,8 @@ std::vector<Status> DBImpl::MultiGet(
     std::string* timestamp = timestamps ? &(*timestamps)[keys_read] : nullptr;
 
     LookupKey lkey(keys[keys_read], consistent_seqnum, read_options.timestamp);
-    auto cfh =
-        static_cast_with_check<ColumnFamilyHandleImpl>(column_family[keys_read]);
+    auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(
+        column_family[keys_read]);
     SequenceNumber max_covering_tombstone_seq = 0;
     auto mgd_iter = multiget_cf_data.find(cfh->cfd()->GetID());
     assert(mgd_iter != multiget_cf_data.end());
```
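`static_cast_with_check` is RocksDB's internal checked-downcast helper. As a rough sketch of how such a helper is typically built (an assumed shape; the exact RocksDB definition and its debug guard may differ), it is a plain `static_cast` that is cross-checked against `dynamic_cast` when assertions are enabled:

```cpp
#include <cassert>

// Sketch of a checked downcast: zero-cost static_cast in release builds,
// verified against dynamic_cast in debug builds (requires RTTI).
template <class Dest, class Src>
inline Dest* static_cast_with_check(Src* x) {
  Dest* ret = static_cast<Dest*>(x);
  assert(ret == dynamic_cast<Dest*>(x));  // no-op under NDEBUG
  return ret;
}

// Simplified stand-ins for the handle types used in the hunk above.
struct ColumnFamilyHandle {
  virtual ~ColumnFamilyHandle() = default;
};
struct ColumnFamilyHandleImpl : ColumnFamilyHandle {};

int main() {
  ColumnFamilyHandleImpl impl;
  ColumnFamilyHandle* base = &impl;
  // Safe: base really points at a ColumnFamilyHandleImpl.
  auto* cfh = static_cast_with_check<ColumnFamilyHandleImpl>(base);
  assert(cfh == &impl);
  return 0;
}
```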
```diff
@@ -3983,8 +3983,7 @@ SuperVersion* DBImpl::GetAndRefSuperVersion(uint32_t column_family_id) {
 void DBImpl::CleanupSuperVersion(SuperVersion* sv) {
   // Release SuperVersion
   if (sv->Unref()) {
-    bool defer_purge =
-        immutable_db_options().avoid_unnecessary_blocking_io;
+    bool defer_purge = immutable_db_options().avoid_unnecessary_blocking_io;
     {
       InstrumentedMutexLock l(&mutex_);
       sv->Cleanup();
```
```diff
@@ -5668,8 +5667,7 @@ Status DBImpl::VerifyChecksumInternal(const ReadOptions& read_options,
     }
   }
 
-  bool defer_purge =
-      immutable_db_options().avoid_unnecessary_blocking_io;
+  bool defer_purge = immutable_db_options().avoid_unnecessary_blocking_io;
   {
     InstrumentedMutexLock l(&mutex_);
     for (auto sv : sv_list) {
```
```diff
@@ -270,7 +270,8 @@ Status DBImpl::ValidateOptions(const DBOptions& db_options) {
   if (db_options.unordered_write &&
       !db_options.allow_concurrent_memtable_write) {
     return Status::InvalidArgument(
-        "unordered_write is incompatible with !allow_concurrent_memtable_write");
+        "unordered_write is incompatible with "
+        "!allow_concurrent_memtable_write");
   }
 
   if (db_options.unordered_write && db_options.enable_pipelined_write) {
```
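The message split above works because C++ concatenates adjacent string literals during translation, so the two-line form produces exactly the same runtime string; the reformat only brings the line under the column limit. A tiny illustration:

```cpp
#include <cstring>

int main() {
  // Adjacent string literals are merged at compile time, so splitting a
  // long message across lines does not change the resulting string.
  const char* split =
      "unordered_write is incompatible with "
      "!allow_concurrent_memtable_write";
  const char* whole =
      "unordered_write is incompatible with !allow_concurrent_memtable_write";
  return std::strcmp(split, whole) == 0 ? 0 : 1;  // exits 0: identical
}
```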
```diff
@@ -1058,9 +1059,8 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& wal_numbers,
     std::unique_ptr<SequentialFileReader> file_reader;
     {
       std::unique_ptr<FSSequentialFile> file;
-      status = fs_->NewSequentialFile(fname,
-                                      fs_->OptimizeForLogRead(file_options_),
-                                      &file, nullptr);
+      status = fs_->NewSequentialFile(
+          fname, fs_->OptimizeForLogRead(file_options_), &file, nullptr);
       if (!status.ok()) {
         MaybeIgnoreError(&status);
         if (!status.ok()) {
```
```diff
@@ -9,6 +9,7 @@
 
 #include <string>
 #include <vector>
+
 #include "db/db_impl/db_impl.h"
 
 namespace ROCKSDB_NAMESPACE {
```
```diff
@@ -157,8 +157,7 @@ Status DBImplSecondary::MaybeInitLogReader(
     {
       std::unique_ptr<FSSequentialFile> file;
       Status status = fs_->NewSequentialFile(
-          fname, fs_->OptimizeForLogRead(file_options_), &file,
-          nullptr);
+          fname, fs_->OptimizeForLogRead(file_options_), &file, nullptr);
       if (!status.ok()) {
         *log_reader = nullptr;
         return status;
```
```diff
@@ -200,7 +199,7 @@ Status DBImplSecondary::RecoverLogFiles(
       assert(reader != nullptr);
     }
     for (auto log_number : log_numbers) {
       auto it = log_readers_.find(log_number);
       assert(it != log_readers_.end());
       log::FragmentBufferedReader* reader = it->second->reader_;
       Status* wal_read_status = it->second->status_;
```
```diff
@@ -47,6 +47,7 @@ class LogReaderContainer {
     delete reporter_;
     delete status_;
   }
+
  private:
  struct LogReporter : public log::Reader::Reporter {
     Env* env;
```
```diff
@@ -247,7 +248,6 @@ class DBImplSecondary : public DBImpl {
   // method can take long time due to all the I/O and CPU costs.
   Status TryCatchUpWithPrimary() override;
 
-
   // Try to find log reader using log_number from log_readers_ map, initialize
   // if it doesn't exist
   Status MaybeInitLogReader(uint64_t log_number,
```