mirror of https://github.com/facebook/rocksdb.git
internal_repo_rocksdb (-8794174668376270091) (#12114)
Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/12114
Reviewed By: jowlyzhang
Differential Revision: D51745613
Pulled By: ajkr
fbshipit-source-id: 27ca4bda275cab057d3a3ec99f0f92cdb9be5177
parent 7eca51dfc3
commit be3bc36811
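The changes below are mechanical C++ cleanups applied across the tree: empty constructors and destructors become "= default", single-character string literals passed to find()/find_last_of() become character literals, push_back(T(...)) becomes emplace_back(...), range-for loops bind elements by const reference, C headers such as <assert.h> and <stdio.h> are replaced by their <cassert>/<cstdio> counterparts, and single-statement if bodies gain braces. A minimal before/after sketch of the recurring patterns; the Widget type is a hypothetical stand-in, not RocksDB code:

#include <string>
#include <utility>
#include <vector>

struct Widget {
  Widget() = default;   // was: Widget() {}
  ~Widget() = default;  // was: ~Widget() {}

  // was: for (auto kv : pairs_), which copies each pair on every iteration
  size_t CountAssignments() const {
    size_t n = 0;
    for (const auto& kv : pairs_) {
      // was: kv.second.find("=") -- the char overload searches for one character directly
      if (kv.second.find('=') != std::string::npos) {
        n++;  // single-statement bodies are now wrapped in braces
      }
    }
    return n;
  }

  void Add(std::string k, std::string v) {
    // was: pairs_.push_back(std::make_pair(k, v));
    pairs_.emplace_back(std::move(k), std::move(v));
  }

  std::vector<std::pair<std::string, std::string>> pairs_;
};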
@@ -574,8 +574,8 @@ TEST_F(AutoRollLoggerTest, Close) {
 static std::vector<std::string> GetOldFileNames(const std::string& path) {
   std::vector<std::string> ret;

-  const std::string dirname = path.substr(/*start=*/0, path.find_last_of("/"));
-  const std::string fname = path.substr(path.find_last_of("/") + 1);
+  const std::string dirname = path.substr(/*start=*/0, path.find_last_of('/'));
+  const std::string fname = path.substr(path.find_last_of('/') + 1);

   std::vector<std::string> children;
   EXPECT_OK(Env::Default()->GetChildren(dirname, &children));
@@ -138,7 +138,7 @@ TEST_F(EnvLoggerTest, ConcurrentLogging) {
   const int kNumThreads = 5;
   // Create threads.
   for (int ii = 0; ii < kNumThreads; ++ii) {
-    threads.push_back(port::Thread(cb));
+    threads.emplace_back(cb);
   }

   // Wait for them to complete.
@@ -170,7 +170,7 @@ static void SimpleTest(size_t huge_page_size) {
       r[b] = i % 256;
     }
     bytes += s;
-    allocated.push_back(std::make_pair(s, r));
+    allocated.emplace_back(s, r);
     ASSERT_GE(arena.ApproximateMemoryUsage(), bytes);
     if (i > N / 10) {
       ASSERT_LE(arena.ApproximateMemoryUsage(), bytes * 1.10);
@@ -7,7 +7,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

-#include <assert.h>
+#include <cassert>

 #include "memory/allocator.h"
 #include "memory/arena.h"
@@ -81,7 +81,7 @@ struct Node {
   void NoBarrier_SetNext(Node* x) { next_.store(x, std::memory_order_relaxed); }

   // Needed for placement new below which is fine
-  Node() {}
+  Node() = default;

  private:
   std::atomic<Node*> next_;

@@ -265,7 +265,7 @@ class HashLinkListRep : public MemTableRep {
     explicit FullListIterator(MemtableSkipList* list, Allocator* allocator)
         : iter_(list), full_list_(list), allocator_(allocator) {}

-    ~FullListIterator() override {}
+    ~FullListIterator() override = default;

     // Returns true iff the iterator is positioned at a valid node.
     bool Valid() const override { return iter_.Valid(); }

@@ -332,7 +332,7 @@ class HashLinkListRep : public MemTableRep {
           head_(head),
           node_(nullptr) {}

-    ~LinkListIterator() override {}
+    ~LinkListIterator() override = default;

     // Returns true iff the iterator is positioned at a valid node.
     bool Valid() const override { return node_ != nullptr; }

@@ -482,7 +482,7 @@ class HashLinkListRep : public MemTableRep {
   // This is used when there wasn't a bucket. It is cheaper than
   // instantiating an empty bucket over which to iterate.
    public:
-    EmptyIterator() {}
+    EmptyIterator() = default;
     bool Valid() const override { return false; }
     const char* key() const override {
       assert(false);

@@ -526,7 +526,7 @@ HashLinkListRep::HashLinkListRep(
   }
 }

-HashLinkListRep::~HashLinkListRep() {}
+HashLinkListRep::~HashLinkListRep() = default;

 KeyHandle HashLinkListRep::Allocate(const size_t len, char** buf) {
   char* mem = allocator_->AllocateAligned(sizeof(Node) + len);
@@ -208,7 +208,7 @@ class HashSkipListRep : public MemTableRep {
   // This is used when there wasn't a bucket. It is cheaper than
   // instantiating an empty bucket over which to iterate.
    public:
-    EmptyIterator() {}
+    EmptyIterator() = default;
     bool Valid() const override { return false; }
     const char* key() const override {
       assert(false);

@@ -248,7 +248,7 @@ HashSkipListRep::HashSkipListRep(const MemTableRep::KeyComparator& compare,
   }
 }

-HashSkipListRep::~HashSkipListRep() {}
+HashSkipListRep::~HashSkipListRep() = default;

 HashSkipListRep::Bucket* HashSkipListRep::GetInitializedBucket(
     const Slice& transformed) {
@@ -161,7 +161,7 @@ class SkipListRep : public MemTableRep {
     }
   }

-  ~SkipListRep() override {}
+  ~SkipListRep() override = default;

   // Iteration over the contents of a skip list
   class Iterator : public MemTableRep::Iterator {

@@ -174,7 +174,7 @@ class SkipListRep : public MemTableRep {
         const InlineSkipList<const MemTableRep::KeyComparator&>* list)
         : iter_(list) {}

-    ~Iterator() override {}
+    ~Iterator() override = default;

     // Returns true iff the iterator is positioned at a valid node.
     bool Valid() const override { return iter_.Valid(); }

@@ -232,7 +232,7 @@ class SkipListRep : public MemTableRep {
     explicit LookaheadIterator(const SkipListRep& rep)
         : rep_(rep), iter_(&rep_.skip_list_), prev_(iter_) {}

-    ~LookaheadIterator() override {}
+    ~LookaheadIterator() override = default;

     bool Valid() const override { return iter_.Valid(); }

@@ -40,7 +40,7 @@ class VectorRep : public MemTableRep {
   void Get(const LookupKey& k, void* callback_args,
            bool (*callback_func)(void* arg, const char* entry)) override;

-  ~VectorRep() override {}
+  ~VectorRep() override = default;

   class Iterator : public MemTableRep::Iterator {
     class VectorRep* vrep_;

@@ -59,7 +59,7 @@ class VectorRep : public MemTableRep {
     // Initialize an iterator over the specified collection.
     // The returned iterator is not valid.
     // explicit Iterator(const MemTableRep* collection);
-    ~Iterator() override{};
+    ~Iterator() override = default;

     // Returns true iff the iterator is positioned at a valid node.
     bool Valid() const override;
@@ -543,7 +543,9 @@ BENCHMARK(ManualFlush)->Iterations(1)->Apply(ManualFlushArguments);
 static Slice CompressibleString(Random* rnd, double compressed_fraction,
                                 int len, std::string* dst) {
   int raw = static_cast<int>(len * compressed_fraction);
-  if (raw < 1) raw = 1;
+  if (raw < 1) {
+    raw = 1;
+  }
   std::string raw_data = rnd->RandomBinaryString(raw);

   // Duplicate the random data until we have filled "len" bytes
@@ -9,12 +9,11 @@

 #include "monitoring/histogram.h"

-#include <stdio.h>
-
 #include <algorithm>
 #include <cassert>
 #include <cinttypes>
 #include <cmath>
+#include <cstdio>

 #include "port/port.h"
 #include "util/cast_util.h"

@@ -45,11 +44,12 @@ HistogramBucketMapper::HistogramBucketMapper() {
 size_t HistogramBucketMapper::IndexForValue(const uint64_t value) const {
   auto beg = bucketValues_.begin();
   auto end = bucketValues_.end();
-  if (value >= maxBucketValue_)
+  if (value >= maxBucketValue_) {
     return end - beg - 1;  // bucketValues_.size() - 1
-  else
+  } else {
     return std::lower_bound(beg, end, value) - beg;
+  }
 }

 namespace {
 const HistogramBucketMapper bucketMapper;

@@ -147,8 +147,12 @@ double HistogramStat::Percentile(double p) const {
       double r = left_point + (right_point - left_point) * pos;
       uint64_t cur_min = min();
       uint64_t cur_max = max();
-      if (r < cur_min) r = static_cast<double>(cur_min);
-      if (r > cur_max) r = static_cast<double>(cur_max);
+      if (r < cur_min) {
+        r = static_cast<double>(cur_min);
+      }
+      if (r > cur_max) {
+        r = static_cast<double>(cur_max);
+      }
       return r;
     }
   }

@@ -158,7 +162,9 @@ double HistogramStat::Percentile(double p) const {
 double HistogramStat::Average() const {
   uint64_t cur_num = num();
   uint64_t cur_sum = sum();
-  if (cur_num == 0) return 0;
+  if (cur_num == 0) {
+    return 0;
+  }
   return static_cast<double>(cur_sum) / static_cast<double>(cur_num);
 }

@@ -193,12 +199,16 @@ std::string HistogramStat::ToString() const {
            Percentile(99.99));
   r.append(buf);
   r.append("------------------------------------------------------\n");
-  if (cur_num == 0) return r;  // all buckets are empty
+  if (cur_num == 0) {
+    return r;  // all buckets are empty
+  }
   const double mult = 100.0 / cur_num;
   uint64_t cumulative_sum = 0;
   for (unsigned int b = 0; b < num_buckets_; b++) {
     uint64_t bucket_value = bucket_at(b);
-    if (bucket_value <= 0.0) continue;
+    if (bucket_value <= 0.0) {
+      continue;
+    }
     cumulative_sum += bucket_value;
     snprintf(buf, sizeof(buf),
              "%c %7" PRIu64 ", %7" PRIu64 " ] %8" PRIu64 " %7.3f%% %7.3f%% ",
@@ -34,7 +34,7 @@ HistogramWindowingImpl::HistogramWindowingImpl(uint64_t num_windows,
   Clear();
 }

-HistogramWindowingImpl::~HistogramWindowingImpl() {}
+HistogramWindowingImpl::~HistogramWindowingImpl() = default;

 void HistogramWindowingImpl::Clear() {
   std::lock_guard<std::mutex> lock(mutex_);

@@ -159,7 +159,9 @@ void HistogramWindowingImpl::SwapHistoryBucket() {
     for (unsigned int i = 0; i < num_windows_; i++) {
       if (i != next_window) {
         uint64_t m = window_stats_[i].min();
-        if (m < new_min) new_min = m;
+        if (m < new_min) {
+          new_min = m;
+        }
       }
     }
     stats_.min_.store(new_min, std::memory_order_relaxed);

@@ -170,7 +172,9 @@ void HistogramWindowingImpl::SwapHistoryBucket() {
     for (unsigned int i = 0; i < num_windows_; i++) {
       if (i != next_window) {
         uint64_t m = window_stats_[i].max();
-        if (m > new_max) new_max = m;
+        if (m > new_max) {
+          new_max = m;
+        }
       }
     }
     stats_.max_.store(new_max, std::memory_order_relaxed);
@@ -12,7 +12,7 @@

 namespace ROCKSDB_NAMESPACE {

-InMemoryStatsHistoryIterator::~InMemoryStatsHistoryIterator() {}
+InMemoryStatsHistoryIterator::~InMemoryStatsHistoryIterator() = default;

 bool InMemoryStatsHistoryIterator::Valid() const { return valid_; }

@@ -4,7 +4,7 @@
 // (found in the LICENSE.Apache file in the root directory).
 //

-#include <assert.h>
+#include <cassert>

 #include "monitoring/perf_level_imp.h"

@@ -74,7 +74,7 @@ void OptimizeForPersistentStats(ColumnFamilyOptions* cfo) {
   cfo->compression = kNoCompression;
 }

-PersistentStatsHistoryIterator::~PersistentStatsHistoryIterator() {}
+PersistentStatsHistoryIterator::~PersistentStatsHistoryIterator() = default;

 bool PersistentStatsHistoryIterator::Valid() const { return valid_; }

@@ -96,7 +96,7 @@ std::pair<uint64_t, std::string> parseKey(const Slice& key,
                                           uint64_t start_time) {
   std::pair<uint64_t, std::string> result;
   std::string key_str = key.ToString();
-  std::string::size_type pos = key_str.find("#");
+  std::string::size_type pos = key_str.find('#');
   // TODO(Zhongyi): add counters to track parse failures?
   if (pos == std::string::npos) {
     result.first = std::numeric_limits<uint64_t>::max();
@@ -379,7 +379,7 @@ StatisticsImpl::StatisticsImpl(std::shared_ptr<Statistics> stats)
   RegisterOptions("StatisticsOptions", &stats_, &stats_type_info);
 }

-StatisticsImpl::~StatisticsImpl() {}
+StatisticsImpl::~StatisticsImpl() = default;

 uint64_t StatisticsImpl::getTickerCount(uint32_t tickerType) const {
   MutexLock lock(&aggregate_lock_);

@@ -538,7 +538,9 @@ std::string StatisticsImpl::ToString() const {
 bool StatisticsImpl::getTickerMap(
     std::map<std::string, uint64_t>* stats_map) const {
   assert(stats_map);
-  if (!stats_map) return false;
+  if (!stats_map) {
+    return false;
+  }
   stats_map->clear();
   MutexLock lock(&aggregate_lock_);
   for (const auto& t : TickersNameMap) {
@@ -67,7 +67,7 @@ const std::string ThreadStatus::MicrosToString(uint64_t micros) {

 const std::string& ThreadStatus::GetOperationPropertyName(
     ThreadStatus::OperationType op_type, int i) {
-  static const std::string empty_str = "";
+  static const std::string empty_str;
   switch (op_type) {
     case ThreadStatus::OP_COMPACTION:
       if (i >= NUM_COMPACTION_PROPERTIES) {
@@ -417,7 +417,7 @@ static std::unordered_map<std::string, OptionTypeInfo>
          // value, say, like "23", which would be assigned to
          // max_table_files_size.
          if (name == "compaction_options_fifo" &&
-             value.find("=") == std::string::npos) {
+             value.find('=') == std::string::npos) {
            // Old format. Parse just a single uint64_t value.
            auto options = static_cast<CompactionOptionsFIFO*>(addr);
            options->max_table_files_size = ParseUint64(value);

@@ -529,7 +529,7 @@ static std::unordered_map<std::string, OptionTypeInfo>
          // This is to handle backward compatibility, where
          // compression_options was a ":" separated list.
          if (name == kOptNameCompOpts &&
-             value.find("=") == std::string::npos) {
+             value.find('=') == std::string::npos) {
            auto* compression = static_cast<CompressionOptions*>(addr);
            return ParseCompressionOptions(value, name, *compression);
          } else {

@@ -549,7 +549,7 @@ static std::unordered_map<std::string, OptionTypeInfo>
          // This is to handle backward compatibility, where
          // compression_options was a ":" separated list.
          if (name == kOptNameBMCompOpts &&
-             value.find("=") == std::string::npos) {
+             value.find('=') == std::string::npos) {
            auto* compression = static_cast<CompressionOptions*>(addr);
            return ParseCompressionOptions(value, name, *compression);
          } else {

@@ -627,7 +627,7 @@ static std::unordered_map<std::string, OptionTypeInfo>
     {offsetof(struct ImmutableCFOptions,
               max_write_buffer_number_to_maintain),
      OptionType::kInt, OptionVerificationType::kNormal,
-     OptionTypeFlags::kNone, 0}},
+     OptionTypeFlags::kNone, nullptr}},
    {"max_write_buffer_size_to_maintain",
     {offsetof(struct ImmutableCFOptions,
               max_write_buffer_size_to_maintain),

@@ -636,7 +636,7 @@ static std::unordered_map<std::string, OptionTypeInfo>
    {"min_write_buffer_number_to_merge",
     {offsetof(struct ImmutableCFOptions, min_write_buffer_number_to_merge),
      OptionType::kInt, OptionVerificationType::kNormal,
-     OptionTypeFlags::kNone, 0}},
+     OptionTypeFlags::kNone, nullptr}},
    {"num_levels",
     {offsetof(struct ImmutableCFOptions, num_levels), OptionType::kInt,
     OptionVerificationType::kNormal, OptionTypeFlags::kNone}},
@@ -37,9 +37,9 @@ Status Configurable::PrepareOptions(const ConfigOptions& opts) {
   // We ignore the invoke_prepare_options here intentionally,
   // as if you are here, you must have called PrepareOptions explicitly.
   Status status = Status::OK();
-  for (auto opt_iter : options_) {
+  for (const auto& opt_iter : options_) {
     if (opt_iter.type_map != nullptr) {
-      for (auto map_iter : *(opt_iter.type_map)) {
+      for (const auto& map_iter : *(opt_iter.type_map)) {
         auto& opt_info = map_iter.second;
         if (opt_info.ShouldPrepare()) {
           status = opt_info.Prepare(opts, map_iter.first, opt_iter.opt_ptr);

@@ -56,9 +56,9 @@ Status Configurable::PrepareOptions(const ConfigOptions& opts) {
 Status Configurable::ValidateOptions(const DBOptions& db_opts,
                                      const ColumnFamilyOptions& cf_opts) const {
   Status status;
-  for (auto opt_iter : options_) {
+  for (const auto& opt_iter : options_) {
     if (opt_iter.type_map != nullptr) {
-      for (auto map_iter : *(opt_iter.type_map)) {
+      for (const auto& map_iter : *(opt_iter.type_map)) {
         auto& opt_info = map_iter.second;
         if (opt_info.ShouldValidate()) {
           status = opt_info.Validate(db_opts, cf_opts, map_iter.first,

@@ -80,7 +80,7 @@ Status Configurable::ValidateOptions(const DBOptions& db_opts,
 /*********************************************************************************/

 const void* Configurable::GetOptionsPtr(const std::string& name) const {
-  for (auto o : options_) {
+  for (const auto& o : options_) {
     if (o.name == name) {
       return o.opt_ptr;
     }

@@ -95,7 +95,7 @@ std::string Configurable::GetOptionName(const std::string& opt_name) const {
 const OptionTypeInfo* ConfigurableHelper::FindOption(
     const std::vector<Configurable::RegisteredOptions>& options,
     const std::string& short_name, std::string* opt_name, void** opt_ptr) {
-  for (auto iter : options) {
+  for (const auto& iter : options) {
     if (iter.type_map != nullptr) {
       const auto opt_info =
           OptionTypeInfo::Find(short_name, *(iter.type_map), opt_name);

@@ -318,21 +318,29 @@ Status ConfigurableHelper::ConfigureSomeOptions(
   }  // End while found one or options remain

   // Now that we have been through the list, remove any unsupported
-  for (auto u : unsupported) {
+  for (const auto& u : unsupported) {
     auto it = options->find(u);
     if (it != options->end()) {
       options->erase(it);
     }
   }
   if (config_options.ignore_unknown_options) {
-    if (!result.ok()) result.PermitUncheckedError();
-    if (!notsup.ok()) notsup.PermitUncheckedError();
+    if (!result.ok()) {
+      result.PermitUncheckedError();
+    }
+    if (!notsup.ok()) {
+      notsup.PermitUncheckedError();
+    }
     return Status::OK();
   } else if (!result.ok()) {
-    if (!notsup.ok()) notsup.PermitUncheckedError();
+    if (!notsup.ok()) {
+      notsup.PermitUncheckedError();
+    }
     return result;
   } else if (config_options.ignore_unsupported_options) {
-    if (!notsup.ok()) notsup.PermitUncheckedError();
+    if (!notsup.ok()) {
+      notsup.PermitUncheckedError();
+    }
     return Status::OK();
   } else {
     return notsup;

@@ -374,7 +382,7 @@ Status ConfigurableHelper::ConfigureCustomizableOption(
     return Status::OK();
   } else if (custom == nullptr || !StartsWith(name, custom->GetId() + ".")) {
     return configurable.ParseOption(copy, opt_info, name, value, opt_ptr);
-  } else if (value.find("=") != std::string::npos) {
+  } else if (value.find('=') != std::string::npos) {
     return custom->ConfigureFromString(copy, value);
   } else {
     return custom->ConfigureOption(copy, name, value);
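Several of the loops above change from "for (auto x : c)" to "for (const auto& x : c)". When the element type owns heap memory (strings, pairs of strings, map entries), iterating by value copies every element; binding by const reference keeps the behavior of these read-only loops while skipping the copies. A small illustrative sketch with hypothetical data, not RocksDB code:

#include <string>
#include <vector>

// Counts how many option strings contain an '=' separator.
size_t CountWithSeparator(const std::vector<std::string>& opts) {
  size_t n = 0;
  // "for (auto s : opts)" would copy each string; a const reference
  // reads the same data without copying it.
  for (const auto& s : opts) {
    if (s.find('=') != std::string::npos) {
      n++;
    }
  }
  return n;
}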
@@ -436,7 +436,7 @@ TEST_F(ConfigurableTest, AliasOptionsTest) {
         OptionVerificationType::kNormal, OptionTypeFlags::kNone}},
       {"alias",
        {offsetof(struct TestOptions, b), OptionType::kBoolean,
-        OptionVerificationType::kAlias, OptionTypeFlags::kNone, 0}}};
+        OptionVerificationType::kAlias, OptionTypeFlags::kNone, nullptr}}};
   std::unique_ptr<Configurable> orig;
   orig.reset(SimpleConfigurable::Create("simple", TestConfigMode::kDefaultMode,
                                         &alias_option_info));

@@ -758,7 +758,7 @@ void ConfigurableParamTest::TestConfigureOptions(
   ASSERT_OK(base->GetOptionNames(config_options, &names));
   std::unordered_map<std::string, std::string> unused;
   bool found_one = false;
-  for (auto name : names) {
+  for (const auto& name : names) {
     std::string value;
     Status s = base->GetOption(config_options, name, &value);
     if (s.ok()) {
@@ -33,8 +33,8 @@ struct TestOptions {
   bool b = false;
   bool d = true;
   TestEnum e = TestEnum::kTestA;
-  std::string s = "";
-  std::string u = "";
+  std::string s;
+  std::string u;
 };

 static std::unordered_map<std::string, OptionTypeInfo> simple_option_info = {
@@ -1265,7 +1265,7 @@ class TestStatistics : public StatisticsImpl {

 class TestFlushBlockPolicyFactory : public FlushBlockPolicyFactory {
  public:
-  TestFlushBlockPolicyFactory() {}
+  TestFlushBlockPolicyFactory() = default;

   static const char* kClassName() { return "TestFlushBlockPolicyFactory"; }
   const char* Name() const override { return kClassName(); }
@@ -995,8 +995,7 @@ MutableDBOptions::MutableDBOptions()
       wal_bytes_per_sync(0),
       strict_bytes_per_sync(false),
       compaction_readahead_size(0),
-      max_background_flushes(-1),
-      daily_offpeak_time_utc("") {}
+      max_background_flushes(-1) {}

 MutableDBOptions::MutableDBOptions(const DBOptions& options)
     : max_background_jobs(options.max_background_jobs),
@@ -127,7 +127,7 @@ ColumnFamilyOptions::ColumnFamilyOptions()
 ColumnFamilyOptions::ColumnFamilyOptions(const Options& options)
     : ColumnFamilyOptions(*static_cast<const ColumnFamilyOptions*>(&options)) {}

-DBOptions::DBOptions() {}
+DBOptions::DBOptions() = default;
 DBOptions::DBOptions(const Options& options)
     : DBOptions(*static_cast<const DBOptions*>(&options)) {}

@@ -46,7 +46,9 @@ Status ValidateOptions(const DBOptions& db_opts,
   auto db_cfg = DBOptionsAsConfigurable(db_opts);
   auto cf_cfg = CFOptionsAsConfigurable(cf_opts);
   s = db_cfg->ValidateOptions(db_opts, cf_opts);
-  if (s.ok()) s = cf_cfg->ValidateOptions(db_opts, cf_opts);
+  if (s.ok()) {
+    s = cf_cfg->ValidateOptions(db_opts, cf_opts);
+  }
   return s;
 }

@@ -912,7 +914,7 @@ Status OptionTypeInfo::Parse(const ConfigOptions& config_options,
       ConfigOptions copy = config_options;
       copy.ignore_unknown_options = false;
       copy.invoke_prepare_options = false;
-      if (opt_value.find("=") != std::string::npos) {
+      if (opt_value.find('=') != std::string::npos) {
        return config->ConfigureFromString(copy, opt_value);
       } else {
        return config->ConfigureOption(copy, opt_name, opt_value);

@@ -1047,7 +1049,7 @@ Status OptionTypeInfo::Serialize(const ConfigOptions& config_options,
       }
       std::string value = custom->ToString(embedded);
       if (!embedded.mutable_options_only ||
-          value.find("=") != std::string::npos) {
+          value.find('=') != std::string::npos) {
         *opt_value = value;
       } else {
         *opt_value = "";

@@ -1423,7 +1425,7 @@ const OptionTypeInfo* OptionTypeInfo::Find(
     *elem_name = opt_name;   // Return the name
     return &(iter->second);  // Return the contents of the iterator
   } else {
-    auto idx = opt_name.find(".");  // Look for a separator
+    auto idx = opt_name.find('.');  // Look for a separator
     if (idx > 0 && idx != std::string::npos) {  // We found a separator
       auto siter =
           opt_map.find(opt_name.substr(0, idx));  // Look for the short name
@@ -179,8 +179,8 @@ Status RocksDBOptionsParser::ParseSection(OptionSection* section,
   *section = kOptionSectionUnknown;
   // A section is of the form [<SectionName> "<SectionArg>"], where
   // "<SectionArg>" is optional.
-  size_t arg_start_pos = line.find("\"");
-  size_t arg_end_pos = line.rfind("\"");
+  size_t arg_start_pos = line.find('\"');
+  size_t arg_end_pos = line.rfind('\"');
   // The following if-then check tries to identify whether the input
   // section has the optional section argument.
   if (arg_start_pos != std::string::npos && arg_start_pos != arg_end_pos) {

@@ -224,7 +224,7 @@ Status RocksDBOptionsParser::ParseStatement(std::string* name,
                                             std::string* value,
                                             const std::string& line,
                                             const int line_num) {
-  size_t eq_pos = line.find("=");
+  size_t eq_pos = line.find('=');
   if (eq_pos == std::string::npos) {
     return InvalidArgument(line_num, "A valid statement must have a '='.");
   }
@@ -49,7 +49,7 @@ class OptionsTest : public testing::Test {};

 class UnregisteredTableFactory : public TableFactory {
  public:
-  UnregisteredTableFactory() {}
+  UnregisteredTableFactory() = default;
   const char* Name() const override { return "Unregistered"; }
   using TableFactory::NewTableReader;
   Status NewTableReader(const ReadOptions&, const TableReaderOptions&,

@@ -1888,7 +1888,7 @@ TEST_F(OptionsTest, StringToMapRandomTest) {
      "a={aa={};tt={xxx={}}};c=defff;d={{}yxx{}3{xx}}",
      "abc={{}{}{}{{{}}}{{}{}{}{}{}{}{}"};

-  for (std::string base : bases) {
+  for (const std::string& base : bases) {
     for (int rand_seed = 301; rand_seed < 401; rand_seed++) {
       Random rnd(rand_seed);
       for (int attempt = 0; attempt < 10; attempt++) {

@@ -1909,7 +1909,7 @@ TEST_F(OptionsTest, StringToMapRandomTest) {
   for (int rand_seed = 301; rand_seed < 1301; rand_seed++) {
     Random rnd(rand_seed);
     int len = rnd.Uniform(30);
-    std::string str = "";
+    std::string str;
     for (int attempt = 0; attempt < len; attempt++) {
       // Add a random character
       size_t pos = static_cast<size_t>(

@@ -3554,7 +3554,7 @@ TEST_F(OptionsParserTest, ParseVersion) {
      "3..2",
      ".", ".1.2",             // must have at least one digit before each dot
      "1.2.", "1.", "2.34."};  // must have at least one digit after each dot
-  for (auto iv : invalid_versions) {
+  for (const auto& iv : invalid_versions) {
     snprintf(buffer, kLength - 1, file_template.c_str(), iv.c_str());

     parser.Reset();

@@ -3564,7 +3564,7 @@ TEST_F(OptionsParserTest, ParseVersion) {

   const std::vector<std::string> valid_versions = {
       "1.232", "100", "3.12", "1", "12.3 ", " 1.25 "};
-  for (auto vv : valid_versions) {
+  for (const auto& vv : valid_versions) {
     snprintf(buffer, kLength - 1, file_template.c_str(), vv.c_str());
     parser.Reset();
     ASSERT_OK(fs_->WriteToNewFile(vv, buffer));

@@ -4643,42 +4643,42 @@ TEST_F(OptionTypeInfoTest, TestCustomEnum) {

 TEST_F(OptionTypeInfoTest, TestBuiltinEnum) {
   ConfigOptions config_options;
-  for (auto iter : OptionsHelper::compaction_style_string_map) {
+  for (const auto& iter : OptionsHelper::compaction_style_string_map) {
     CompactionStyle e1, e2;
     TestParseAndCompareOption(config_options,
                               OptionTypeInfo(0, OptionType::kCompactionStyle),
                               "CompactionStyle", iter.first, &e1, &e2);
     ASSERT_EQ(e1, iter.second);
   }
-  for (auto iter : OptionsHelper::compaction_pri_string_map) {
+  for (const auto& iter : OptionsHelper::compaction_pri_string_map) {
     CompactionPri e1, e2;
     TestParseAndCompareOption(config_options,
                               OptionTypeInfo(0, OptionType::kCompactionPri),
                               "CompactionPri", iter.first, &e1, &e2);
     ASSERT_EQ(e1, iter.second);
   }
-  for (auto iter : OptionsHelper::compression_type_string_map) {
+  for (const auto& iter : OptionsHelper::compression_type_string_map) {
     CompressionType e1, e2;
     TestParseAndCompareOption(config_options,
                               OptionTypeInfo(0, OptionType::kCompressionType),
                               "CompressionType", iter.first, &e1, &e2);
     ASSERT_EQ(e1, iter.second);
   }
-  for (auto iter : OptionsHelper::compaction_stop_style_string_map) {
+  for (const auto& iter : OptionsHelper::compaction_stop_style_string_map) {
     CompactionStopStyle e1, e2;
     TestParseAndCompareOption(
         config_options, OptionTypeInfo(0, OptionType::kCompactionStopStyle),
         "CompactionStopStyle", iter.first, &e1, &e2);
     ASSERT_EQ(e1, iter.second);
   }
-  for (auto iter : OptionsHelper::checksum_type_string_map) {
+  for (const auto& iter : OptionsHelper::checksum_type_string_map) {
     ChecksumType e1, e2;
     TestParseAndCompareOption(config_options,
                               OptionTypeInfo(0, OptionType::kChecksumType),
                               "CheckSumType", iter.first, &e1, &e2);
     ASSERT_EQ(e1, iter.second);
   }
-  for (auto iter : OptionsHelper::encoding_type_string_map) {
+  for (const auto& iter : OptionsHelper::encoding_type_string_map) {
     EncodingType e1, e2;
     TestParseAndCompareOption(config_options,
                               OptionTypeInfo(0, OptionType::kEncodingType),
@@ -11,20 +11,20 @@

 #include "port/port_posix.h"

-#include <assert.h>
+#include <cassert>
 #if defined(__i386__) || defined(__x86_64__)
 #include <cpuid.h>
 #endif
-#include <errno.h>
 #include <sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <string.h>
 #include <sys/resource.h>
 #include <sys/time.h>
 #include <unistd.h>

+#include <cerrno>
+#include <csignal>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
 #include <fstream>
 #include <string>

@@ -26,12 +26,13 @@ void* SaveStack(int* /*num_frames*/, int /*first_frames_to_skip*/) {
 #include <cxxabi.h>
 #include <execinfo.h>
 #include <pthread.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
 #include <unistd.h>

+#include <csignal>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
 #ifdef OS_OPENBSD
 #include <sys/wait.h>
 #include <sys/sysctl.h>
@@ -49,8 +49,12 @@ struct DecodeEntry {
       // Fast path: all three values are encoded in one byte each
       p += 3;
     } else {
-      if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
-      if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
+      if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) {
+        return nullptr;
+      }
+      if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) {
+        return nullptr;
+      }
       if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) {
         return nullptr;
       }

@@ -82,8 +86,12 @@ struct CheckAndDecodeEntry {
       // Fast path: all three values are encoded in one byte each
       p += 3;
     } else {
-      if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
-      if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
+      if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) {
+        return nullptr;
+      }
+      if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) {
+        return nullptr;
+      }
       if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) {
         return nullptr;
       }

@@ -113,15 +121,21 @@ struct DecodeKeyV4 {
     // We need 2 bytes for shared and non_shared size. We also need one more
     // byte either for value size or the actual value in case of value delta
    // encoding.
-    if (limit - p < 3) return nullptr;
+    if (limit - p < 3) {
+      return nullptr;
+    }
     *shared = reinterpret_cast<const unsigned char*>(p)[0];
     *non_shared = reinterpret_cast<const unsigned char*>(p)[1];
     if ((*shared | *non_shared) < 128) {
       // Fast path: all three values are encoded in one byte each
       p += 2;
     } else {
-      if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
-      if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
+      if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) {
+        return nullptr;
+      }
+      if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) {
+        return nullptr;
+      }
     }
     return p;
   }

@@ -140,7 +154,9 @@ struct DecodeEntryV4 {

 void DataBlockIter::NextImpl() {
 #ifndef NDEBUG
-  if (TEST_Corrupt_Callback("DataBlockIter::NextImpl")) return;
+  if (TEST_Corrupt_Callback("DataBlockIter::NextImpl")) {
+    return;
+  }
 #endif
   bool is_shared = false;
   ParseNextDataKey(&is_shared);

@@ -446,7 +462,9 @@ bool DataBlockIter::SeekForGetImpl(const Slice& target) {

 void IndexBlockIter::SeekImpl(const Slice& target) {
 #ifndef NDEBUG
-  if (TEST_Corrupt_Callback("IndexBlockIter::SeekImpl")) return;
+  if (TEST_Corrupt_Callback("IndexBlockIter::SeekImpl")) {
+    return;
+  }
 #endif
   TEST_SYNC_POINT("IndexBlockIter::Seek:0");
   PERF_TIMER_GUARD(block_seek_nanos);

@@ -560,7 +578,9 @@ void MetaBlockIter::SeekToFirstImpl() {

 void IndexBlockIter::SeekToFirstImpl() {
 #ifndef NDEBUG
-  if (TEST_Corrupt_Callback("IndexBlockIter::SeekToFirstImpl")) return;
+  if (TEST_Corrupt_Callback("IndexBlockIter::SeekToFirstImpl")) {
+    return;
+  }
 #endif
   if (data_ == nullptr) {  // Not init yet
     return;

@@ -910,7 +930,9 @@ bool IndexBlockIter::BinaryBlockIndexSeek(const Slice& target,
       // Key at "target" is <= "mid". Therefore all blocks
       // after "mid" are uninteresting.
       // If there is only one block left, we found it.
-      if (left == right) break;
+      if (left == right) {
+        break;
+      }
       right = mid;
     }
   }
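The DecodeEntry/DecodeKeyV4 hunks above keep their existing structure: each block entry starts with varint32-encoded lengths, and when every leading length byte is below 128 each varint occupies exactly one byte, so the decoder can skip the general varint parser. A self-contained sketch of that fast-path idea follows; the ReadVarint32 helper only mimics the shape of a GetVarint32Ptr-style function and is written here for illustration, not taken from RocksDB:

#include <cstdint>

// Reads a varint32 starting at p, stopping before limit. Returns the pointer
// just past the varint, or nullptr if the input is truncated or overlong.
const char* ReadVarint32(const char* p, const char* limit, uint32_t* value) {
  uint32_t result = 0;
  for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
    uint32_t byte = static_cast<unsigned char>(*p++);
    result |= (byte & 0x7f) << shift;
    if ((byte & 0x80) == 0) {
      *value = result;
      return p;
    }
  }
  return nullptr;
}

// Decodes three length fields. If the three leading bytes are all < 128,
// each varint is exactly one byte, so they can be read directly.
const char* DecodeLengths(const char* p, const char* limit, uint32_t* shared,
                          uint32_t* non_shared, uint32_t* value_length) {
  if (limit - p < 3) {
    return nullptr;
  }
  *shared = static_cast<unsigned char>(p[0]);
  *non_shared = static_cast<unsigned char>(p[1]);
  *value_length = static_cast<unsigned char>(p[2]);
  if ((*shared | *non_shared | *value_length) < 128) {
    return p + 3;  // fast path: one byte per length
  }
  if ((p = ReadVarint32(p, limit, shared)) == nullptr) {
    return nullptr;
  }
  if ((p = ReadVarint32(p, limit, non_shared)) == nullptr) {
    return nullptr;
  }
  if ((p = ReadVarint32(p, limit, value_length)) == nullptr) {
    return nullptr;
  }
  return p;
}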
@@ -9,10 +9,9 @@

 #include "table/block_based/block_based_table_builder.h"

-#include <assert.h>
-#include <stdio.h>
-
 #include <atomic>
+#include <cassert>
+#include <cstdio>
 #include <list>
 #include <map>
 #include <memory>

@@ -231,7 +230,6 @@ class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector
                         uint64_t /* block_compressed_bytes_slow */) override {
     // Intentionally left blank. No interest in collecting stats for
     // blocks.
-    return;
   }

   Status Finish(UserCollectedProperties* properties) override {

@@ -985,7 +983,9 @@ BlockBasedTableBuilder::~BlockBasedTableBuilder() {
 void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
   Rep* r = rep_;
   assert(rep_->state != Rep::State::kClosed);
-  if (!ok()) return;
+  if (!ok()) {
+    return;
+  }
   ValueType value_type = ExtractValueType(key);
   if (IsValueType(value_type)) {
 #ifndef NDEBUG

@@ -1097,8 +1097,12 @@ void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
 void BlockBasedTableBuilder::Flush() {
   Rep* r = rep_;
   assert(rep_->state != Rep::State::kClosed);
-  if (!ok()) return;
-  if (r->data_block.empty()) return;
+  if (!ok()) {
+    return;
+  }
+  if (r->data_block.empty()) {
+    return;
+  }
   if (r->IsParallelCompressionEnabled() &&
       r->state == Rep::State::kUnbuffered) {
     r->data_block.Finish();
@@ -9,9 +9,8 @@

 #include "table/block_based/block_based_table_factory.h"

-#include <stdint.h>
-
 #include <cinttypes>
+#include <cstdint>
 #include <memory>
 #include <string>

@@ -3053,7 +3053,7 @@ Status BlockBasedTable::DumpIndexBlock(std::ostream& out_stream) {
                << " size " << blockhandles_iter->value().handle.size() << "\n";

     std::string str_key = user_key.ToString();
-    std::string res_key("");
+    std::string res_key;
     char cspace = ' ';
     for (size_t i = 0; i < str_key.size(); i++) {
       res_key.append(&str_key[i], 1);

@@ -3154,7 +3154,7 @@ void BlockBasedTable::DumpKeyValue(const Slice& key, const Slice& value,

   std::string str_key = ikey.user_key().ToString();
   std::string str_value = value.ToString();
-  std::string res_key(""), res_value("");
+  std::string res_key, res_value;
   char cspace = ' ';
   for (size_t i = 0; i < str_key.size(); i++) {
     if (str_key[i] == '\0') {
@@ -33,9 +33,8 @@

 #include "table/block_based/block_builder.h"

-#include <assert.h>
-
 #include <algorithm>
+#include <cassert>

 #include "db/dbformat.h"
 #include "rocksdb/comparator.h"
@@ -6,9 +6,8 @@

 #include "table/block_based/block.h"

-#include <stdio.h>
-
 #include <algorithm>
+#include <cstdio>
 #include <set>
 #include <string>
 #include <unordered_set>
@@ -72,7 +72,7 @@ class XXPH3FilterBitsBuilder : public BuiltinFilterBitsBuilder {
         detect_filter_construct_corruption_(
             detect_filter_construct_corruption) {}

-  ~XXPH3FilterBitsBuilder() override {}
+  ~XXPH3FilterBitsBuilder() override = default;

   virtual void AddKey(const Slice& key) override {
     uint64_t hash = GetSliceHash64(key);

@@ -321,7 +321,7 @@ class FastLocalBloomBitsBuilder : public XXPH3FilterBitsBuilder {
   FastLocalBloomBitsBuilder(const FastLocalBloomBitsBuilder&) = delete;
   void operator=(const FastLocalBloomBitsBuilder&) = delete;

-  ~FastLocalBloomBitsBuilder() override {}
+  ~FastLocalBloomBitsBuilder() override = default;

   using FilterBitsBuilder::Finish;

@@ -525,7 +525,7 @@ class FastLocalBloomBitsReader : public BuiltinFilterBitsReader {
   FastLocalBloomBitsReader(const FastLocalBloomBitsReader&) = delete;
   void operator=(const FastLocalBloomBitsReader&) = delete;

-  ~FastLocalBloomBitsReader() override {}
+  ~FastLocalBloomBitsReader() override = default;

   bool MayMatch(const Slice& key) override {
     uint64_t h = GetSliceHash64(key);

@@ -606,7 +606,7 @@ class Standard128RibbonBitsBuilder : public XXPH3FilterBitsBuilder {
   Standard128RibbonBitsBuilder(const Standard128RibbonBitsBuilder&) = delete;
   void operator=(const Standard128RibbonBitsBuilder&) = delete;

-  ~Standard128RibbonBitsBuilder() override {}
+  ~Standard128RibbonBitsBuilder() override = default;

   using FilterBitsBuilder::Finish;

@@ -967,7 +967,7 @@ class Standard128RibbonBitsReader : public BuiltinFilterBitsReader {
   Standard128RibbonBitsReader(const Standard128RibbonBitsReader&) = delete;
   void operator=(const Standard128RibbonBitsReader&) = delete;

-  ~Standard128RibbonBitsReader() override {}
+  ~Standard128RibbonBitsReader() override = default;

   bool MayMatch(const Slice& key) override {
     uint64_t h = GetSliceHash64(key);

@@ -1070,7 +1070,7 @@ LegacyBloomBitsBuilder::LegacyBloomBitsBuilder(const int bits_per_key,
   assert(bits_per_key_);
 }

-LegacyBloomBitsBuilder::~LegacyBloomBitsBuilder() {}
+LegacyBloomBitsBuilder::~LegacyBloomBitsBuilder() = default;

 void LegacyBloomBitsBuilder::AddKey(const Slice& key) {
   uint32_t hash = BloomHash(key);

@@ -1220,7 +1220,7 @@ class LegacyBloomBitsReader : public BuiltinFilterBitsReader {
   LegacyBloomBitsReader(const LegacyBloomBitsReader&) = delete;
   void operator=(const LegacyBloomBitsReader&) = delete;

-  ~LegacyBloomBitsReader() override {}
+  ~LegacyBloomBitsReader() override = default;

   // "contents" contains the data built by a preceding call to
   // FilterBitsBuilder::Finish. MayMatch must return true if the key was

@@ -1359,7 +1359,7 @@ BloomLikeFilterPolicy::BloomLikeFilterPolicy(double bits_per_key)
     whole_bits_per_key_ = (millibits_per_key_ + 500) / 1000;
 }

-BloomLikeFilterPolicy::~BloomLikeFilterPolicy() {}
+BloomLikeFilterPolicy::~BloomLikeFilterPolicy() = default;
 const char* BloomLikeFilterPolicy::kClassName() {
   return "rocksdb.internal.BloomLikeFilter";
 }

@@ -1805,7 +1805,7 @@ FilterBuildingContext::FilterBuildingContext(
     const BlockBasedTableOptions& _table_options)
     : table_options(_table_options) {}

-FilterPolicy::~FilterPolicy() {}
+FilterPolicy::~FilterPolicy() = default;

 std::shared_ptr<const FilterPolicy> BloomLikeFilterPolicy::Create(
     const std::string& name, double bits_per_key) {
@@ -259,7 +259,7 @@ void FullFilterBlockReader::MayMatch(MultiGetRange* range, bool no_io,
     }
   }

-  filter_bits_reader->MayMatch(num_keys, &keys[0], &may_match[0]);
+  filter_bits_reader->MayMatch(num_keys, keys.data(), may_match.data());

   int i = 0;
   for (auto iter = filter_range.begin(); iter != filter_range.end(); ++iter) {
@@ -23,7 +23,7 @@ namespace ROCKSDB_NAMESPACE {

 class TestFilterBitsBuilder : public FilterBitsBuilder {
  public:
-  explicit TestFilterBitsBuilder() {}
+  explicit TestFilterBitsBuilder() = default;

   // Add Key to filter
   void AddKey(const Slice& key) override {

@@ -197,7 +197,7 @@ class CountUniqueFilterBitsBuilderWrapper : public FilterBitsBuilder {
  public:
   explicit CountUniqueFilterBitsBuilderWrapper(FilterBitsBuilder* b) : b_(b) {}

-  ~CountUniqueFilterBitsBuilderWrapper() override {}
+  ~CountUniqueFilterBitsBuilderWrapper() override = default;

   void AddKey(const Slice& key) override {
     b_->AddKey(key);
@@ -9,8 +9,7 @@

 #include "table/block_based/index_builder.h"

-#include <assert.h>
-
+#include <cassert>
 #include <cinttypes>
 #include <list>
 #include <string>
@@ -87,7 +87,7 @@ class PartitionedFilterBlockTest
     table_options_.index_block_restart_interval = 3;
   }

-  ~PartitionedFilterBlockTest() override {}
+  ~PartitionedFilterBlockTest() override = default;

   static constexpr int kKeyNum = 4;
   static constexpr int kMissingKeyNum = 2;

@@ -200,7 +200,7 @@ class PartitionedFilterBlockTest
     // Querying added keys
     const bool no_io = true;
     std::vector<std::string> keys = PrepareKeys(keys_without_ts, kKeyNum);
-    for (auto key : keys) {
+    for (const auto& key : keys) {
       auto ikey = InternalKey(key, 0, ValueType::kTypeValue);
       const Slice ikey_slice = Slice(*ikey.rep());
       ASSERT_TRUE(reader->KeyMayMatch(

@@ -220,7 +220,7 @@ class PartitionedFilterBlockTest
     // querying missing keys
     std::vector<std::string> missing_keys =
        PrepareKeys(missing_keys_without_ts, kMissingKeyNum);
-    for (auto key : missing_keys) {
+    for (const auto& key : missing_keys) {
       auto ikey = InternalKey(key, 0, ValueType::kTypeValue);
       const Slice ikey_slice = Slice(*ikey.rep());
       if (empty) {

@@ -386,7 +386,7 @@ TEST_P(PartitionedFilterBlockTest, SamePrefixInMultipleBlocks) {
   CutABlock(pib.get(), pkeys[2]);
   std::unique_ptr<PartitionedFilterBlockReader> reader(
       NewReader(builder.get(), pib.get()));
-  for (auto key : pkeys) {
+  for (const auto& key : pkeys) {
     auto ikey = InternalKey(key, 0, ValueType::kTypeValue);
     const Slice ikey_slice = Slice(*ikey.rep());
     ASSERT_TRUE(reader->PrefixMayMatch(prefix_extractor->Transform(key),

@@ -400,7 +400,7 @@ TEST_P(PartitionedFilterBlockTest, SamePrefixInMultipleBlocks) {
                                              "p-key31"};
   std::vector<std::string> pnonkeys =
       PrepareKeys(pnonkeys_without_ts, 4 /* number_of_keys */);
-  for (auto key : pnonkeys) {
+  for (const auto& key : pnonkeys) {
     auto ikey = InternalKey(key, 0, ValueType::kTypeValue);
     const Slice ikey_slice = Slice(*ikey.rep());
     ASSERT_TRUE(reader->PrefixMayMatch(prefix_extractor->Transform(key),

@@ -440,7 +440,7 @@ TEST_P(PartitionedFilterBlockTest, PrefixInWrongPartitionBug) {
   CutABlock(pib.get(), pkeys[4]);
   std::unique_ptr<PartitionedFilterBlockReader> reader(
       NewReader(builder.get(), pib.get()));
-  for (auto key : pkeys) {
+  for (const auto& key : pkeys) {
     auto prefix = prefix_extractor->Transform(key);
     auto ikey = InternalKey(key, 0, ValueType::kTypeValue);
     const Slice ikey_slice = Slice(*ikey.rep());
@@ -134,7 +134,9 @@ class BlockFetcherTest : public testing::Test {
       std::array<TestStats, NumModes> expected_stats_by_mode) {
     for (CompressionType compression_type : GetSupportedCompressions()) {
       bool do_compress = compression_type != kNoCompression;
-      if (compressed != do_compress) continue;
+      if (compressed != do_compress) {
+        continue;
+      }
       std::string compression_type_str =
           CompressionTypeToString(compression_type);

@@ -5,9 +5,8 @@

 #include "table/cuckoo/cuckoo_table_builder.h"

-#include <assert.h>
-
 #include <algorithm>
+#include <cassert>
 #include <limits>
 #include <string>
 #include <vector>

@@ -481,7 +480,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(
     uint64_t bid = hash_vals[hash_cnt];
     (*buckets)[static_cast<size_t>(bid)].make_space_for_key_call_id =
         make_space_for_key_call_id;
-    tree.push_back(CuckooNode(bid, 0, 0));
+    tree.emplace_back(bid, 0, 0);
   }
   bool null_found = false;
   uint32_t curr_pos = 0;

@@ -507,7 +506,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(
       }
       (*buckets)[static_cast<size_t>(child_bucket_id)]
           .make_space_for_key_call_id = make_space_for_key_call_id;
-      tree.push_back(CuckooNode(child_bucket_id, curr_depth + 1, curr_pos));
+      tree.emplace_back(child_bucket_id, curr_depth + 1, curr_pos);
      if ((*buckets)[static_cast<size_t>(child_bucket_id)].vector_idx ==
          kMaxVectorIdx) {
        null_found = true;