Run clang-format on some files in db/db_impl directory (#10869)

Summary:
Run clang-format on some files in db/db_impl/ directory

```
clang-format -i <file>
```
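
For reference, one way to apply the same formatting to every C++ file under db/db_impl/ in a single pass is a find/xargs sweep (a sketch, not necessarily the exact invocation used for this change; it assumes clang-format is on PATH and picks up the repository's .clang-format):

```
# Format all .cc/.h files under db/db_impl/ in place.
find db/db_impl \( -name '*.cc' -o -name '*.h' \) -print0 | xargs -0 clang-format -i
```

The resulting diff should be formatting-only (line breaks, indentation, include spacing), which is what the test plan below exercises.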

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10869

Test Plan: make check

Reviewed By: ltamasi

Differential Revision: D40685390

Pulled By: riversand963

fbshipit-source-id: 64449ccb21b0d61c5142eb2bcbff828acb45c154
Author: Yanqin Jin (2022-10-25 13:49:09 -07:00), committed by Facebook GitHub Bot
Parent: 727bad78b8
Commit: 84563a2701
7 changed files with 20 additions and 21 deletions


@@ -26,16 +26,16 @@ CompactedDBImpl::CompactedDBImpl(const DBOptions& options,
       version_(nullptr),
       user_comparator_(nullptr) {}
 
-CompactedDBImpl::~CompactedDBImpl() {
-}
+CompactedDBImpl::~CompactedDBImpl() {}
 
 size_t CompactedDBImpl::FindFile(const Slice& key) {
   size_t right = files_.num_files - 1;
   auto cmp = [&](const FdWithKeyRange& f, const Slice& k) -> bool {
     return user_comparator_->Compare(ExtractUserKey(f.largest_key), k) < 0;
   };
-  return static_cast<size_t>(std::lower_bound(files_.files,
-      files_.files + right, key, cmp) - files_.files);
+  return static_cast<size_t>(
+      std::lower_bound(files_.files, files_.files + right, key, cmp) -
+      files_.files);
 }
 
 Status CompactedDBImpl::Get(const ReadOptions& options, ColumnFamilyHandle*,
@@ -228,8 +228,8 @@ Status CompactedDBImpl::Init(const Options& options) {
   return Status::NotSupported("no file exists");
 }
 
-Status CompactedDBImpl::Open(const Options& options,
-                             const std::string& dbname, DB** dbptr) {
+Status CompactedDBImpl::Open(const Options& options, const std::string& dbname,
+                             DB** dbptr) {
   *dbptr = nullptr;
   if (options.max_open_files != -1) {


@@ -7,6 +7,7 @@
 #ifndef ROCKSDB_LITE
 #include <string>
 #include <vector>
+
 #include "db/db_impl/db_impl.h"
 
 namespace ROCKSDB_NAMESPACE {


@@ -1357,7 +1357,7 @@ Status DBImpl::SetDBOptions(
       file_options_for_compaction_ = fs_->OptimizeForCompactionTableWrite(
           file_options_for_compaction_, immutable_db_options_);
       versions_->ChangeFileOptions(mutable_db_options_);
-      //TODO(xiez): clarify why apply optimize for read to write options
+      // TODO(xiez): clarify why apply optimize for read to write options
       file_options_for_compaction_ = fs_->OptimizeForCompactionTableRead(
           file_options_for_compaction_, immutable_db_options_);
       file_options_for_compaction_.compaction_readahead_size =
@@ -2357,8 +2357,8 @@ std::vector<Status> DBImpl::MultiGet(
     std::string* timestamp = timestamps ? &(*timestamps)[keys_read] : nullptr;
     LookupKey lkey(keys[keys_read], consistent_seqnum, read_options.timestamp);
-    auto cfh =
-        static_cast_with_check<ColumnFamilyHandleImpl>(column_family[keys_read]);
+    auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(
+        column_family[keys_read]);
     SequenceNumber max_covering_tombstone_seq = 0;
     auto mgd_iter = multiget_cf_data.find(cfh->cfd()->GetID());
     assert(mgd_iter != multiget_cf_data.end());
@@ -3983,8 +3983,7 @@ SuperVersion* DBImpl::GetAndRefSuperVersion(uint32_t column_family_id) {
 void DBImpl::CleanupSuperVersion(SuperVersion* sv) {
   // Release SuperVersion
   if (sv->Unref()) {
-    bool defer_purge =
-        immutable_db_options().avoid_unnecessary_blocking_io;
+    bool defer_purge = immutable_db_options().avoid_unnecessary_blocking_io;
     {
       InstrumentedMutexLock l(&mutex_);
       sv->Cleanup();
@@ -5668,8 +5667,7 @@ Status DBImpl::VerifyChecksumInternal(const ReadOptions& read_options,
     }
   }
-  bool defer_purge =
-      immutable_db_options().avoid_unnecessary_blocking_io;
+  bool defer_purge = immutable_db_options().avoid_unnecessary_blocking_io;
   {
     InstrumentedMutexLock l(&mutex_);
     for (auto sv : sv_list) {


@@ -270,7 +270,8 @@ Status DBImpl::ValidateOptions(const DBOptions& db_options) {
   if (db_options.unordered_write &&
       !db_options.allow_concurrent_memtable_write) {
     return Status::InvalidArgument(
-        "unordered_write is incompatible with !allow_concurrent_memtable_write");
+        "unordered_write is incompatible with "
+        "!allow_concurrent_memtable_write");
   }
 
   if (db_options.unordered_write && db_options.enable_pipelined_write) {
@@ -1058,9 +1059,8 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& wal_numbers,
     std::unique_ptr<SequentialFileReader> file_reader;
     {
       std::unique_ptr<FSSequentialFile> file;
-      status = fs_->NewSequentialFile(fname,
-                                      fs_->OptimizeForLogRead(file_options_),
-                                      &file, nullptr);
+      status = fs_->NewSequentialFile(
+          fname, fs_->OptimizeForLogRead(file_options_), &file, nullptr);
       if (!status.ok()) {
         MaybeIgnoreError(&status);
         if (!status.ok()) {


@@ -9,6 +9,7 @@
 #include <string>
 #include <vector>
+
 #include "db/db_impl/db_impl.h"
 
 namespace ROCKSDB_NAMESPACE {


@@ -157,8 +157,7 @@ Status DBImplSecondary::MaybeInitLogReader(
     {
       std::unique_ptr<FSSequentialFile> file;
       Status status = fs_->NewSequentialFile(
-          fname, fs_->OptimizeForLogRead(file_options_), &file,
-          nullptr);
+          fname, fs_->OptimizeForLogRead(file_options_), &file, nullptr);
       if (!status.ok()) {
         *log_reader = nullptr;
         return status;
@@ -200,7 +199,7 @@ Status DBImplSecondary::RecoverLogFiles(
     assert(reader != nullptr);
   }
   for (auto log_number : log_numbers) {
-     auto it = log_readers_.find(log_number);
+    auto it = log_readers_.find(log_number);
     assert(it != log_readers_.end());
     log::FragmentBufferedReader* reader = it->second->reader_;
     Status* wal_read_status = it->second->status_;


@@ -47,6 +47,7 @@ class LogReaderContainer {
     delete reporter_;
     delete status_;
   }
+
  private:
   struct LogReporter : public log::Reader::Reporter {
     Env* env;
@@ -247,7 +248,6 @@ class DBImplSecondary : public DBImpl {
   // method can take long time due to all the I/O and CPU costs.
   Status TryCatchUpWithPrimary() override;
 
-
   // Try to find log reader using log_number from log_readers_ map, initialize
   // if it doesn't exist
   Status MaybeInitLogReader(uint64_t log_number,