mirror of https://github.com/facebook/rocksdb.git
Automated modernization (#12210)
Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/12210
Reviewed By: hx235
Differential Revision: D52559771
Pulled By: ajkr
fbshipit-source-id: 1ccdd3a0180cc02bc0441f20b0e4a1db50841b03
parent 5da900f28a
commit 5a9ecf6614
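The hunks below are mechanical C++ cleanups of a few recurring kinds: defaulted empty constructors and destructors, emplace_back instead of push_back with a temporary, const auto& in range-for loops, braces around single-statement if bodies, dropping redundant `= ""` initializers, and container .data() instead of &container[0]. A minimal standalone sketch of those before/after patterns (illustrative only, not code taken from this diff):

// Standalone illustration of the modernization patterns applied below.
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

struct Collector {
  Collector() = default;   // was: Collector() {}
  ~Collector() = default;  // was: ~Collector() {}

  void Add(std::string name) {
    // was: items_.push_back(std::make_pair(std::string(), name));
    items_.emplace_back(std::string(), std::move(name));
  }

  int CountNonEmpty() const {
    int count = 0;
    // was: for (auto item : items_) { ... }  -- copies each pair
    for (const auto& item : items_) {
      if (!item.second.empty()) {  // was: if (!item.second.empty()) count++;
        count++;
      }
    }
    return count;
  }

  const std::pair<std::string, std::string>* Data() const {
    // was: &items_[0]; data() is also valid for an empty vector
    return items_.data();
  }

 private:
  std::vector<std::pair<std::string, std::string>> items_;
  std::string last_;  // was: std::string last_ = "";
};

int main() {
  Collector c;
  c.Add("foo");
  c.Add("");
  std::printf("%d\n", c.CountNonEmpty());  // prints 1
  return 0;
}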
@@ -5,10 +5,9 @@
 
 #include "db/db_info_dumper.h"
 
-#include <stdio.h>
-
 #include <algorithm>
 #include <cinttypes>
+#include <cstdio>
 #include <string>
 #include <vector>
 
@@ -50,7 +50,9 @@ struct Entry {
   bool visible = true;
 
   bool operator<(const Entry& e) const {
-    if (key != e.key) return key < e.key;
+    if (key != e.key) {
+      return key < e.key;
+    }
     return std::tie(sequence, type) > std::tie(e.sequence, e.type);
   }
 };

@@ -177,7 +179,9 @@ struct StressTestIterator : public InternalIterator {
   }
 
   void SeekToFirst() override {
-    if (MaybeFail()) return;
+    if (MaybeFail()) {
+      return;
+    }
     MaybeMutate();
 
     status_ = Status::OK();

@@ -185,7 +189,9 @@ struct StressTestIterator : public InternalIterator {
     SkipForward();
   }
   void SeekToLast() override {
-    if (MaybeFail()) return;
+    if (MaybeFail()) {
+      return;
+    }
     MaybeMutate();
 
     status_ = Status::OK();

@@ -194,7 +200,9 @@ struct StressTestIterator : public InternalIterator {
   }
 
   void Seek(const Slice& target) override {
-    if (MaybeFail()) return;
+    if (MaybeFail()) {
+      return;
+    }
     MaybeMutate();
 
     status_ = Status::OK();

@@ -206,7 +214,9 @@ struct StressTestIterator : public InternalIterator {
     SkipForward();
   }
   void SeekForPrev(const Slice& target) override {
-    if (MaybeFail()) return;
+    if (MaybeFail()) {
+      return;
+    }
     MaybeMutate();
 
     status_ = Status::OK();

@@ -221,14 +231,18 @@ struct StressTestIterator : public InternalIterator {
 
   void Next() override {
     assert(Valid());
-    if (MaybeFail()) return;
+    if (MaybeFail()) {
+      return;
+    }
     MaybeMutate();
     ++iter;
     SkipForward();
   }
   void Prev() override {
     assert(Valid());
-    if (MaybeFail()) return;
+    if (MaybeFail()) {
+      return;
+    }
     MaybeMutate();
     --iter;
     SkipBackward();

@@ -318,7 +332,9 @@ struct ReferenceIterator {
         return false;
       }
       assert(e.sequence <= sequence);
-      if (!e.visible) continue;
+      if (!e.visible) {
+        continue;
+      }
       if (e.type == kTypeDeletion) {
         return false;
       }

@@ -339,11 +355,13 @@ struct ReferenceIterator {
         break;
       }
       assert(e.sequence <= sequence);
-      if (!e.visible) continue;
+      if (!e.visible) {
+        continue;
+      }
       if (e.type == kTypeDeletion) {
         break;
       }
-      operands.push_back(e.value);
+      operands.emplace_back(e.value);
       if (e.type == kTypeValue) {
         break;
       }

@@ -588,15 +606,17 @@ TEST_F(DBIteratorStressTest, StressTest) {
 
           // Check that the key moved in the right direction.
           if (forward) {
-            if (seek)
+            if (seek) {
               ASSERT_GE(db_iter->key().ToString(), old_key);
-            else
+            } else {
               ASSERT_GT(db_iter->key().ToString(), old_key);
+            }
           } else {
-            if (seek)
+            if (seek) {
               ASSERT_LE(db_iter->key().ToString(), old_key);
-            else
+            } else {
               ASSERT_LT(db_iter->key().ToString(), old_key);
+            }
           }
 
           if (ref_iter->Valid()) {
@@ -65,8 +65,7 @@ class TestIterator : public InternalIterator {
                    size_t seq_num, bool update_iter = false) {
     valid_ = true;
     ParsedInternalKey internal_key(argkey, seq_num, type);
-    data_.push_back(
-        std::pair<std::string, std::string>(std::string(), argvalue));
+    data_.emplace_back(std::string(), argvalue);
     AppendInternalKey(&data_.back().first, internal_key);
     if (update_iter && valid_ && cmp.Compare(data_.back().first, key()) < 0) {
       // insert a key smaller than current key

@@ -2617,7 +2616,7 @@ class DBIterWithMergeIterTest : public testing::Test {
     child_iters.push_back(internal_iter2_);
     InternalKeyComparator icomp(BytewiseComparator());
     InternalIterator* merge_iter =
-        NewMergingIterator(&icomp_, &child_iters[0], 2u);
+        NewMergingIterator(&icomp_, child_iters.data(), 2u);
 
     db_iter_.reset(NewDBIterator(
         env_, ro_, ImmutableOptions(options_), MutableCFOptions(options_),
@@ -86,7 +86,7 @@ TEST_F(DBIteratorBaseTest, APICallsWithPerfContext) {
 class DBIteratorTest : public DBIteratorBaseTest,
                        public testing::WithParamInterface<bool> {
  public:
-  DBIteratorTest() {}
+  DBIteratorTest() = default;
 
   Iterator* NewIterator(const ReadOptions& read_options,
                         ColumnFamilyHandle* column_family = nullptr) {
@@ -658,7 +658,7 @@ TEST_F(DbKVChecksumWALToWriteBatchTest, WriteBatchChecksumHandoff) {
   Options options = CurrentOptions();
   Reopen(options);
   ASSERT_OK(db_->Put(WriteOptions(), "key", "val"));
-  std::string content = "";
+  std::string content;
   SyncPoint::GetInstance()->SetCallBack(
       "DBImpl::RecoverLogFiles:BeforeUpdateProtectionInfo:batch",
       [&](void* batch_ptr) {
@@ -1146,11 +1146,11 @@ TEST_F(DBOptionsTest, OffpeakTimes) {
       "1:0000000000000-2:000000000042",  // Weird, but we can parse the int.
   };
 
-  for (std::string invalid_case : invalid_cases) {
+  for (const std::string& invalid_case : invalid_cases) {
     options.daily_offpeak_time_utc = invalid_case;
     verify_invalid();
   }
-  for (std::string valid_case : valid_cases) {
+  for (const std::string& valid_case : valid_cases) {
     options.daily_offpeak_time_utc = valid_case;
     verify_valid();
   }
@@ -7,9 +7,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#include <stdio.h>
-
 #include <algorithm>
+#include <cstdio>
 #include <string>
 
 #include "db/db_test_util.h"

@@ -22,7 +22,7 @@ class DBRangeDelTest : public DBTestBase {
     uint64_t uint64_key = static_cast<uint64_t>(key);
     std::string str;
     str.resize(8);
-    memcpy(&str[0], static_cast<void*>(&uint64_key), 8);
+    memcpy(str.data(), static_cast<void*>(&uint64_key), 8);
     return str;
   }
 };
@@ -103,7 +103,7 @@ void DBSecondaryTestBase::CheckFileTypeCounts(const std::string& dir,
   ASSERT_OK(env_->GetChildren(dir, &filenames));
 
   int log_cnt = 0, sst_cnt = 0, manifest_cnt = 0;
-  for (auto file : filenames) {
+  for (const auto& file : filenames) {
     uint64_t number;
     FileType type;
     if (ParseFileName(file, &number, &type)) {
@@ -27,8 +27,8 @@ class DBSSTTest : public DBTestBase {
 // A class which remembers the name of each flushed file.
 class FlushedFileCollector : public EventListener {
  public:
-  FlushedFileCollector() {}
-  ~FlushedFileCollector() override {}
+  FlushedFileCollector() = default;
+  ~FlushedFileCollector() override = default;
 
   void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
     std::lock_guard<std::mutex> lock(mutex_);

@@ -38,7 +38,7 @@ class FlushedFileCollector : public EventListener {
   std::vector<std::string> GetFlushedFiles() {
     std::lock_guard<std::mutex> lock(mutex_);
     std::vector<std::string> result;
-    for (auto fname : flushed_files_) {
+    for (const auto& fname : flushed_files_) {
      result.push_back(fname);
     }
     return result;

@@ -661,7 +661,7 @@ class DBSSTTestRateLimit : public DBSSTTest,
                            public ::testing::WithParamInterface<bool> {
  public:
   DBSSTTestRateLimit() : DBSSTTest() {}
-  ~DBSSTTestRateLimit() override {}
+  ~DBSSTTestRateLimit() override = default;
 };
 
 TEST_P(DBSSTTestRateLimit, RateLimitedDelete) {

@@ -1093,10 +1093,10 @@ TEST_F(DBSSTTest, DestroyDBWithRateLimitedDelete) {
   int num_wal_files = 0;
   std::vector<std::string> db_files;
   ASSERT_OK(env_->GetChildren(dbname_, &db_files));
-  for (std::string f : db_files) {
-    if (f.substr(f.find_last_of(".") + 1) == "sst") {
+  for (const std::string& f : db_files) {
+    if (f.substr(f.find_last_of('.') + 1) == "sst") {
       num_sst_files++;
-    } else if (f.substr(f.find_last_of(".") + 1) == "log") {
+    } else if (f.substr(f.find_last_of('.') + 1) == "log") {
       num_wal_files++;
     }
   }
@@ -246,7 +246,7 @@ DBTablePropertiesTest::TestGetPropertiesOfTablesInRange(
   // run the query
   TablePropertiesCollection props;
   EXPECT_OK(db_->GetPropertiesOfTablesInRange(
-      db_->DefaultColumnFamily(), &ranges[0], ranges.size(), &props));
+      db_->DefaultColumnFamily(), ranges.data(), ranges.size(), &props));
 
   // Make sure that we've received properties for those and for those files
   // only which fall within requested ranges

@@ -363,7 +363,7 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfTablesInRange) {
   std::vector<Range> ranges;
   auto it = random_keys.begin();
   while (it != random_keys.end()) {
-    ranges.push_back(Range(*it, *(it + 1)));
+    ranges.emplace_back(*it, *(it + 1));
     it += 2;
   }
 

@@ -453,7 +453,7 @@ TEST_F(DBTablePropertiesTest, FactoryReturnsNull) {
       std::make_shared<SometimesTablePropertiesCollectorFactory>());
   // For plain table
   options.prefix_extractor.reset(NewFixedPrefixTransform(4));
-  for (std::shared_ptr<TableFactory> tf :
+  for (const std::shared_ptr<TableFactory>& tf :
        {options.table_factory,
         std::shared_ptr<TableFactory>(NewPlainTableFactory({}))}) {
     SCOPED_TRACE("Table factory = " + std::string(tf->Name()));
@@ -360,8 +360,9 @@ TEST_P(DBTestTailingIterator, TailingIteratorDeletes) {
 
   // make sure we can read all new records using the existing iterator
   int count = 0;
-  for (; iter->Valid(); iter->Next(), ++count)
+  for (; iter->Valid(); iter->Next(), ++count) {
     ;
+  }
   ASSERT_OK(iter->status());
   ASSERT_EQ(count, num_records);
 }
@@ -678,8 +678,8 @@ TEST_F(DBTest, ReadFromPersistedTier) {
     multiget_cfs.push_back(handles_[1]);
     multiget_cfs.push_back(handles_[1]);
     std::vector<Slice> multiget_keys;
-    multiget_keys.push_back("foo");
-    multiget_keys.push_back("bar");
+    multiget_keys.emplace_back("foo");
+    multiget_keys.emplace_back("bar");
     std::vector<std::string> multiget_values;
     for (int i = 0; i < 2; i++) {
       bool batched = i == 0;

@@ -714,7 +714,7 @@ TEST_F(DBTest, ReadFromPersistedTier) {
 
     // Expect same result in multiget
     multiget_cfs.push_back(handles_[1]);
-    multiget_keys.push_back("rocksdb");
+    multiget_keys.emplace_back("rocksdb");
     multiget_values.clear();
 
     for (int i = 0; i < 2; i++) {

@@ -2701,7 +2701,7 @@ TEST_F(DBTest, PurgeInfoLogs) {
     ASSERT_OK(env_->GetChildren(
         options.db_log_dir.empty() ? dbname_ : options.db_log_dir, &files));
     int info_log_count = 0;
-    for (std::string file : files) {
+    for (const std::string& file : files) {
      if (file.find("LOG") != std::string::npos) {
        info_log_count++;
      }

@@ -2719,7 +2719,7 @@ TEST_F(DBTest, PurgeInfoLogs) {
     if (mode == 1) {
       // Cleaning up
       ASSERT_OK(env_->GetChildren(options.db_log_dir, &files));
-      for (std::string file : files) {
+      for (const std::string& file : files) {
        ASSERT_OK(env_->DeleteFile(options.db_log_dir + "/" + file));
      }
      ASSERT_OK(env_->DeleteDir(options.db_log_dir));

@@ -2879,7 +2879,9 @@ class MultiThreadedDBTest
 };
 
 TEST_P(MultiThreadedDBTest, MultiThreaded) {
-  if (option_config_ == kPipelinedWrite) return;
+  if (option_config_ == kPipelinedWrite) {
+    return;
+  }
   anon::OptionsOverride options_override;
   options_override.skip_policy = kSkipNoSnapshot;
   Options options = CurrentOptions(options_override);

@@ -2978,7 +2980,7 @@ TEST_F(DBTest, GroupCommitTest) {
 
   Iterator* itr = db_->NewIterator(ReadOptions());
   itr->SeekToFirst();
-  for (auto x : expected_db) {
+  for (const auto& x : expected_db) {
     ASSERT_TRUE(itr->Valid());
     ASSERT_EQ(itr->key().ToString(), x);
     ASSERT_EQ(itr->value().ToString(), x);

@@ -3302,9 +3304,9 @@ class ModelDB : public DB {
     return Status::NotSupported("Not supported operation.");
   }
 
-  void EnableManualCompaction() override { return; }
+  void EnableManualCompaction() override {}
 
-  void DisableManualCompaction() override { return; }
+  void DisableManualCompaction() override {}
 
   virtual Status WaitForCompact(
       const WaitForCompactOptions& /* wait_for_compact_options */) override {

@@ -3425,7 +3427,9 @@ class ModelDB : public DB {
     ModelIter(const KVMap* map, bool owned)
         : map_(map), owned_(owned), iter_(map_->end()) {}
     ~ModelIter() override {
-      if (owned_) delete map_;
+      if (owned_) {
+        delete map_;
+      }
     }
     bool Valid() const override { return iter_ != map_->end(); }
     void SeekToFirst() override { iter_ = map_->begin(); }

@@ -3463,7 +3467,7 @@ class ModelDB : public DB {
   };
   const Options options_;
   KVMap map_;
-  std::string name_ = "";
+  std::string name_;
 };
 
 #if !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)

@@ -3611,8 +3615,12 @@ TEST_P(DBTestRandomized, Randomized) {
       // Save a snapshot from each DB this time that we'll use next
       // time we compare things, to make sure the current state is
       // preserved with the snapshot
-      if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
-      if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
+      if (model_snap != nullptr) {
+        model.ReleaseSnapshot(model_snap);
+      }
+      if (db_snap != nullptr) {
+        db_->ReleaseSnapshot(db_snap);
+      }
 
       Reopen(options);
       ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));

@@ -3621,8 +3629,12 @@ TEST_P(DBTestRandomized, Randomized) {
       db_snap = db_->GetSnapshot();
     }
   }
-  if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
-  if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
+  if (model_snap != nullptr) {
+    model.ReleaseSnapshot(model_snap);
+  }
+  if (db_snap != nullptr) {
+    db_->ReleaseSnapshot(db_snap);
+  }
 }
 #endif  // !defined(ROCKSDB_VALGRIND_RUN) || defined(ROCKSDB_FULL_VALGRIND_RUN)
 
@@ -4249,9 +4261,9 @@ TEST_F(DBTest, DISABLED_RateLimitingTest) {
 // (e.g, RateLimiter::GetTotalPendingRequests())
 class MockedRateLimiterWithNoOptionalAPIImpl : public RateLimiter {
  public:
-  MockedRateLimiterWithNoOptionalAPIImpl() {}
+  MockedRateLimiterWithNoOptionalAPIImpl() = default;
 
-  ~MockedRateLimiterWithNoOptionalAPIImpl() override {}
+  ~MockedRateLimiterWithNoOptionalAPIImpl() override = default;
 
   void SetBytesPerSecond(int64_t bytes_per_second) override {
     (void)bytes_per_second;

@@ -4653,7 +4665,7 @@ void VerifyOperationCount(Env* env, ThreadStatus::OperationType op_type,
   int op_count = 0;
   std::vector<ThreadStatus> thread_list;
   ASSERT_OK(env->GetThreadList(&thread_list));
-  for (auto thread : thread_list) {
+  for (const auto& thread : thread_list) {
     if (thread.operation_type == op_type) {
       op_count++;
     }

@@ -4693,7 +4705,7 @@ TEST_F(DBTest, GetThreadStatus) {
     s = env_->GetThreadList(&thread_list);
     ASSERT_OK(s);
     memset(thread_type_counts, 0, sizeof(thread_type_counts));
-    for (auto thread : thread_list) {
+    for (const auto& thread : thread_list) {
       ASSERT_LT(thread.thread_type, ThreadStatus::NUM_THREAD_TYPES);
       thread_type_counts[thread.thread_type]++;
     }

@@ -4974,7 +4986,7 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
         }
 
         ASSERT_OK(env_->GetThreadList(&thread_list));
-        for (auto thread : thread_list) {
+        for (const auto& thread : thread_list) {
          operation_count[thread.operation_type]++;
        }
 

@@ -4999,7 +5011,7 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
     operation_count[i] = 0;
   }
   ASSERT_OK(env_->GetThreadList(&thread_list));
-  for (auto thread : thread_list) {
+  for (const auto& thread : thread_list) {
     operation_count[thread.operation_type]++;
   }
   ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0);

@@ -5061,7 +5073,7 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
         }
 
         ASSERT_OK(env_->GetThreadList(&thread_list));
-        for (auto thread : thread_list) {
+        for (const auto& thread : thread_list) {
          operation_count[thread.operation_type]++;
        }
 

@@ -5086,7 +5098,7 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
     operation_count[i] = 0;
   }
   ASSERT_OK(env_->GetThreadList(&thread_list));
-  for (auto thread : thread_list) {
+  for (const auto& thread : thread_list) {
     operation_count[thread.operation_type]++;
   }
   ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0);

@@ -5171,7 +5183,7 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
   }));
   ColumnFamilyMetaData cf_meta;
   db_->GetColumnFamilyMetaData(&cf_meta);
-  for (auto file : cf_meta.levels[4].files) {
+  for (const auto& file : cf_meta.levels[4].files) {
     listener->SetExpectedFileName(dbname_ + file.name);
     ASSERT_OK(dbfull()->DeleteFile(file.name));
   }

@@ -5669,7 +5681,7 @@ TEST_F(DBTest, FileCreationRandomFailure) {
 
   std::vector<std::string> values;
   for (int i = 0; i < kTestSize; ++i) {
-    values.push_back("NOT_FOUND");
+    values.emplace_back("NOT_FOUND");
   }
   for (int j = 0; j < kTotalIteration; ++j) {
     if (j == kRandomFailureTest) {

@@ -7115,7 +7127,7 @@ TEST_F(DBTest, ReusePinnableSlice) {
 
   {
     std::vector<Slice> multiget_keys;
-    multiget_keys.push_back("foo");
+    multiget_keys.emplace_back("foo");
     std::vector<PinnableSlice> multiget_values(1);
     std::vector<Status> statuses({Status::NotFound()});
     ReadOptions ropt;

@@ -7142,7 +7154,7 @@ TEST_F(DBTest, ReusePinnableSlice) {
     std::vector<ColumnFamilyHandle*> multiget_cfs;
     multiget_cfs.push_back(dbfull()->DefaultColumnFamily());
     std::vector<Slice> multiget_keys;
-    multiget_keys.push_back("foo");
+    multiget_keys.emplace_back("foo");
     std::vector<PinnableSlice> multiget_values(1);
     std::vector<Status> statuses({Status::NotFound()});
     ReadOptions ropt;
@@ -80,9 +80,8 @@ TEST_F(DBTest2, OpenForReadOnlyWithColumnFamilies) {
 
   ColumnFamilyOptions cf_options(options);
   std::vector<ColumnFamilyDescriptor> column_families;
-  column_families.push_back(
-      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
-  column_families.push_back(ColumnFamilyDescriptor("goku", cf_options));
+  column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
+  column_families.emplace_back("goku", cf_options);
   std::vector<ColumnFamilyHandle*> handles;
   // OpenForReadOnly should fail but will create <dbname> in the file system
   ASSERT_NOK(

@@ -748,7 +747,7 @@ TEST_F(DBTest2, WalFilterTest) {
       // we expect all records to be processed
       for (size_t i = 0; i < batch_keys.size(); i++) {
         for (size_t j = 0; j < batch_keys[i].size(); j++) {
-          keys_must_exist.push_back(Slice(batch_keys[i][j]));
+          keys_must_exist.emplace_back(batch_keys[i][j]);
         }
       }
       break;

@@ -762,9 +761,9 @@ TEST_F(DBTest2, WalFilterTest) {
      for (size_t i = 0; i < batch_keys.size(); i++) {
        for (size_t j = 0; j < batch_keys[i].size(); j++) {
          if (i == apply_option_for_record_index) {
-            keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
+            keys_must_not_exist.emplace_back(batch_keys[i][j]);
          } else {
-            keys_must_exist.push_back(Slice(batch_keys[i][j]));
+            keys_must_exist.emplace_back(batch_keys[i][j]);
          }
        }
      }

@@ -780,9 +779,9 @@ TEST_F(DBTest2, WalFilterTest) {
      for (size_t i = 0; i < batch_keys.size(); i++) {
        for (size_t j = 0; j < batch_keys[i].size(); j++) {
          if (i >= apply_option_for_record_index) {
-            keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
+            keys_must_not_exist.emplace_back(batch_keys[i][j]);
          } else {
-            keys_must_exist.push_back(Slice(batch_keys[i][j]));
+            keys_must_exist.emplace_back(batch_keys[i][j]);
          }
        }
      }

@@ -922,9 +921,9 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatch) {
      for (size_t i = 0; i < batch_keys.size(); i++) {
        for (size_t j = 0; j < batch_keys[i].size(); j++) {
          if (i >= change_records_from_index && j >= num_keys_to_add_in_new_batch) {
-            keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
+            keys_must_not_exist.emplace_back(batch_keys[i][j]);
          } else {
-            keys_must_exist.push_back(Slice(batch_keys[i][j]));
+            keys_must_exist.emplace_back(batch_keys[i][j]);
          }
        }
      }

@@ -1012,7 +1011,7 @@ TEST_F(DBTest2, WalFilterTestWithChangeBatchExtraKeys) {
 
   for (size_t i = 0; i < batch_keys.size(); i++) {
     for (size_t j = 0; j < batch_keys[i].size(); j++) {
-      keys_must_exist.push_back(Slice(batch_keys[i][j]));
+      keys_must_exist.emplace_back(batch_keys[i][j]);
     }
   }
 

@@ -2350,7 +2349,7 @@ class MockPersistentCache : public PersistentCache {
         "GetUniqueIdFromFile:FS_IOC_GETVERSION", UniqueIdCallback);
   }
 
-  ~MockPersistentCache() override {}
+  ~MockPersistentCache() override = default;
 
   PersistentCache::StatsType Stats() override {
     return PersistentCache::StatsType();

@@ -3036,7 +3035,7 @@ TEST_F(DBTest2, PausingManualCompaction1) {
   // Remember file name before compaction is triggered
   std::vector<LiveFileMetaData> files_meta;
   dbfull()->GetLiveFilesMetaData(&files_meta);
-  for (auto file : files_meta) {
+  for (const auto& file : files_meta) {
     files_before_compact.push_back(file.name);
   }
 

@@ -3051,7 +3050,7 @@ TEST_F(DBTest2, PausingManualCompaction1) {
   // Get file names after compaction is stopped
   files_meta.clear();
   dbfull()->GetLiveFilesMetaData(&files_meta);
-  for (auto file : files_meta) {
+  for (const auto& file : files_meta) {
     files_after_compact.push_back(file.name);
   }
 

@@ -3071,7 +3070,7 @@ TEST_F(DBTest2, PausingManualCompaction1) {
   files_meta.clear();
   files_after_compact.clear();
   dbfull()->GetLiveFilesMetaData(&files_meta);
-  for (auto file : files_meta) {
+  for (const auto& file : files_meta) {
     files_after_compact.push_back(file.name);
   }
 

@@ -4213,8 +4212,8 @@ TEST_F(DBTest2, TestNumPread) {
 
 class TraceExecutionResultHandler : public TraceRecordResult::Handler {
  public:
-  TraceExecutionResultHandler() {}
-  ~TraceExecutionResultHandler() override {}
+  TraceExecutionResultHandler() = default;
+  ~TraceExecutionResultHandler() override = default;
 
   virtual Status Handle(const StatusOnlyTraceExecutionResult& result) override {
     if (result.GetStartTimestamp() > result.GetEndTimestamp()) {
@@ -4399,9 +4398,8 @@ TEST_F(DBTest2, TraceAndReplay) {
   std::vector<ColumnFamilyDescriptor> column_families;
   ColumnFamilyOptions cf_options;
   cf_options.merge_operator = MergeOperators::CreatePutOperator();
-  column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
-  column_families.push_back(
-      ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
+  column_families.emplace_back("default", cf_options);
+  column_families.emplace_back("pikachu", ColumnFamilyOptions());
   std::vector<ColumnFamilyHandle*> handles;
   DBOptions db_opts;
   db_opts.env = env_;

@@ -4591,9 +4589,8 @@ TEST_F(DBTest2, TraceAndManualReplay) {
   std::vector<ColumnFamilyDescriptor> column_families;
   ColumnFamilyOptions cf_options;
   cf_options.merge_operator = MergeOperators::CreatePutOperator();
-  column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
-  column_families.push_back(
-      ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
+  column_families.emplace_back("default", cf_options);
+  column_families.emplace_back("pikachu", ColumnFamilyOptions());
   std::vector<ColumnFamilyHandle*> handles;
   DBOptions db_opts;
   db_opts.env = env_;

@@ -4868,9 +4865,8 @@ TEST_F(DBTest2, TraceWithLimit) {
   std::vector<ColumnFamilyDescriptor> column_families;
   ColumnFamilyOptions cf_options;
   cf_options.merge_operator = MergeOperators::CreatePutOperator();
-  column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
-  column_families.push_back(
-      ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
+  column_families.emplace_back("default", cf_options);
+  column_families.emplace_back("pikachu", ColumnFamilyOptions());
   std::vector<ColumnFamilyHandle*> handles;
   DBOptions db_opts;
   db_opts.env = env_;

@@ -4942,9 +4938,8 @@ TEST_F(DBTest2, TraceWithSampling) {
   DB* db2 = nullptr;
   std::vector<ColumnFamilyDescriptor> column_families;
   ColumnFamilyOptions cf_options;
-  column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
-  column_families.push_back(
-      ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
+  column_families.emplace_back("default", cf_options);
+  column_families.emplace_back("pikachu", ColumnFamilyOptions());
   std::vector<ColumnFamilyHandle*> handles;
   DBOptions db_opts;
   db_opts.env = env_;

@@ -5048,9 +5043,8 @@ TEST_F(DBTest2, TraceWithFilter) {
   std::vector<ColumnFamilyDescriptor> column_families;
   ColumnFamilyOptions cf_options;
   cf_options.merge_operator = MergeOperators::CreatePutOperator();
-  column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
-  column_families.push_back(
-      ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
+  column_families.emplace_back("default", cf_options);
+  column_families.emplace_back("pikachu", ColumnFamilyOptions());
   std::vector<ColumnFamilyHandle*> handles;
   DBOptions db_opts;
   db_opts.env = env_;

@@ -5098,9 +5092,8 @@ TEST_F(DBTest2, TraceWithFilter) {
   delete db3_init;
 
   column_families.clear();
-  column_families.push_back(ColumnFamilyDescriptor("default", cf_options));
-  column_families.push_back(
-      ColumnFamilyDescriptor("pikachu", ColumnFamilyOptions()));
+  column_families.emplace_back("default", cf_options);
+  column_families.emplace_back("pikachu", ColumnFamilyOptions());
   handles.clear();
 
   DB* db3 = nullptr;

@@ -6441,7 +6434,7 @@ class RenameCurrentTest : public DBTestBase,
       : DBTestBase("rename_current_test", /*env_do_fsync=*/true),
         sync_point_(GetParam()) {}
 
-  ~RenameCurrentTest() override {}
+  ~RenameCurrentTest() override = default;
 
   void SetUp() override {
     env_->no_file_overwrite_.store(true, std::memory_order_release);

@@ -7010,7 +7003,7 @@ TEST_F(DBTest2, CheckpointFileTemperature) {
   std::vector<LiveFileStorageInfo> infos;
   ASSERT_OK(
       dbfull()->GetLiveFilesStorageInfo(LiveFilesStorageInfoOptions(), &infos));
-  for (auto info : infos) {
+  for (const auto& info : infos) {
     temperatures.emplace(info.file_number, info.temperature);
   }
 
@@ -73,7 +73,7 @@ DBTestBase::DBTestBase(const std::string path, bool env_do_fsync)
   if (getenv("ENCRYPTED_ENV")) {
     std::shared_ptr<EncryptionProvider> provider;
     std::string provider_id = getenv("ENCRYPTED_ENV");
-    if (provider_id.find("=") == std::string::npos &&
+    if (provider_id.find('=') == std::string::npos &&
         !EndsWith(provider_id, "://test")) {
       provider_id = provider_id + "://test";
     }

@@ -588,7 +588,7 @@ void DBTestBase::CreateColumnFamilies(const std::vector<std::string>& cfs,
   ColumnFamilyOptions cf_opts(options);
   size_t cfi = handles_.size();
   handles_.resize(cfi + cfs.size());
-  for (auto cf : cfs) {
+  for (const auto& cf : cfs) {
     Status s = db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]);
     ASSERT_OK(s);
   }

@@ -651,7 +651,7 @@ Status DBTestBase::TryReopenWithColumnFamilies(
   EXPECT_EQ(cfs.size(), options.size());
   std::vector<ColumnFamilyDescriptor> column_families;
   for (size_t i = 0; i < cfs.size(); ++i) {
-    column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
+    column_families.emplace_back(cfs[i], options[i]);
   }
   DBOptions db_opts = DBOptions(options[0]);
   last_options_ = options[0];

@@ -828,7 +828,7 @@ std::vector<std::string> DBTestBase::MultiGet(std::vector<int> cfs,
 
   for (unsigned int i = 0; i < cfs.size(); ++i) {
     handles.push_back(handles_[cfs[i]]);
-    keys.push_back(k[i]);
+    keys.emplace_back(k[i]);
   }
   std::vector<Status> s;
   if (!batched) {

@@ -875,7 +875,7 @@ std::vector<std::string> DBTestBase::MultiGet(const std::vector<std::string>& k,
   std::vector<PinnableSlice> pin_values(k.size());
 
   for (size_t i = 0; i < k.size(); ++i) {
-    keys.push_back(k[i]);
+    keys.emplace_back(k[i]);
   }
   db_->MultiGet(options, dbfull()->DefaultColumnFamily(), keys.size(),
                 keys.data(), pin_values.data(), statuses.data());

@@ -1614,7 +1614,7 @@ void DBTestBase::VerifyDBFromMap(std::map<std::string, std::string> true_data,
         << iter_cnt << " / " << true_data.size();
 
     // Verify Iterator::Seek()
-    for (auto kv : true_data) {
+    for (const auto& kv : true_data) {
       iter->Seek(kv.first);
       ASSERT_EQ(kv.first, iter->key().ToString());
       ASSERT_EQ(kv.second, iter->value().ToString());

@@ -1644,7 +1644,7 @@ void DBTestBase::VerifyDBFromMap(std::map<std::string, std::string> true_data,
         << iter_cnt << " / " << true_data.size();
 
     // Verify ForwardIterator::Seek()
-    for (auto kv : true_data) {
+    for (const auto& kv : true_data) {
      iter->Seek(kv.first);
      ASSERT_EQ(kv.first, iter->key().ToString());
      ASSERT_EQ(kv.second, iter->value().ToString());

@@ -1667,7 +1667,7 @@ void DBTestBase::VerifyDBInternal(
   auto iter =
       dbfull()->NewInternalIterator(read_options, &arena, kMaxSequenceNumber);
   iter->SeekToFirst();
-  for (auto p : true_data) {
+  for (const auto& p : true_data) {
     ASSERT_TRUE(iter->Valid());
     ParsedInternalKey ikey;
     ASSERT_OK(ParseInternalKey(iter->key(), &ikey, true /* log_err_key */));
@@ -558,7 +558,7 @@ TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) {
   ColumnFamilyMetaData cf_meta;
   dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
   std::vector<std::string> compaction_input_file_names;
-  for (auto file : cf_meta.levels[0].files) {
+  for (const auto& file : cf_meta.levels[0].files) {
     if (rnd.OneIn(2)) {
       compaction_input_file_names.push_back(file.name);
     }
@@ -107,9 +107,9 @@ class EnrichedSpecialEnv : public SpecialEnv {
 
   InstrumentedMutex env_mutex_;
   // the wal whose actual delete was skipped by the env
-  std::string skipped_wal = "";
+  std::string skipped_wal;
   // the largest WAL that was requested to be deleted
-  std::string largest_deleted_wal = "";
+  std::string largest_deleted_wal;
   // number of WALs that were successfully deleted
   std::atomic<size_t> deleted_wal_cnt = {0};
   // the WAL whose delete from fs was skipped is reopened during recovery

@@ -2227,8 +2227,7 @@ TEST_P(DBWALTestWithParamsVaryingRecoveryMode,
   ReadOptions ropt;
   Iterator* iter = dbfull()->NewIterator(ropt);
   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
-    data.push_back(
-        std::make_pair(iter->key().ToString(), iter->value().ToString()));
+    data.emplace_back(iter->key().ToString(), iter->value().ToString());
   }
   EXPECT_OK(iter->status());
   delete iter;

@@ -2434,7 +2433,7 @@ TEST_F(DBWALTest, TruncateLastLogAfterRecoverWALEmpty) {
   std::string last_log;
   uint64_t last_log_num = 0;
   ASSERT_OK(env_->GetChildren(dbname_, &filenames));
-  for (auto fname : filenames) {
+  for (const auto& fname : filenames) {
     uint64_t number;
     FileType type;
     if (ParseFileName(fname, &number, &type, nullptr)) {
@ -375,7 +375,7 @@ TEST_F(DBBasicTestWithTimestamp, UpdateFullHistoryTsLowWithPublicAPI) {
|
||||||
ts_low_str_long);
|
ts_low_str_long);
|
||||||
ASSERT_EQ(s, Status::InvalidArgument());
|
ASSERT_EQ(s, Status::InvalidArgument());
|
||||||
// test IncreaseFullHistoryTsLow with a timestamp which is null
|
// test IncreaseFullHistoryTsLow with a timestamp which is null
|
||||||
std::string ts_low_str_null = "";
|
std::string ts_low_str_null;
|
||||||
s = db_->IncreaseFullHistoryTsLow(db_->DefaultColumnFamily(),
|
s = db_->IncreaseFullHistoryTsLow(db_->DefaultColumnFamily(),
|
||||||
ts_low_str_null);
|
ts_low_str_null);
|
||||||
ASSERT_EQ(s, Status::InvalidArgument());
|
ASSERT_EQ(s, Status::InvalidArgument());
|
||||||
|
@ -430,8 +430,8 @@ TEST_F(DBBasicTestWithTimestamp, GetApproximateSizes) {
|
||||||
std::vector<Range> ranges;
|
std::vector<Range> ranges;
|
||||||
std::string start_tmp = Key(10);
|
std::string start_tmp = Key(10);
|
||||||
std::string end_tmp = Key(20);
|
std::string end_tmp = Key(20);
|
||||||
ranges.emplace_back(Range(start_tmp, end_tmp));
|
ranges.emplace_back(start_tmp, end_tmp);
|
||||||
ranges.emplace_back(Range(start, end));
|
ranges.emplace_back(start, end);
|
||||||
uint64_t range_sizes[2];
|
uint64_t range_sizes[2];
|
||||||
ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf,
|
ASSERT_OK(db_->GetApproximateSizes(size_approx_options, default_cf,
|
||||||
ranges.data(), 2, range_sizes));
|
ranges.data(), 2, range_sizes));
|
||||||
|
@ -598,8 +598,7 @@ TEST_F(DBBasicTestWithTimestamp, TrimHistoryTest) {
|
||||||
|
|
||||||
ColumnFamilyOptions cf_options(options);
|
ColumnFamilyOptions cf_options(options);
|
||||||
std::vector<ColumnFamilyDescriptor> column_families;
|
std::vector<ColumnFamilyDescriptor> column_families;
|
||||||
column_families.push_back(
|
column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
|
||||||
ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
|
|
||||||
DBOptions db_options(options);
|
DBOptions db_options(options);
|
||||||
|
|
||||||
// Trim data whose version > Timestamp(5, 0), read(k1, ts(7)) <- NOT_FOUND.
|
// Trim data whose version > Timestamp(5, 0), read(k1, ts(7)) <- NOT_FOUND.
|
||||||
|
@ -642,8 +641,7 @@ TEST_F(DBBasicTestWithTimestamp, OpenAndTrimHistoryInvalidOptionTest) {
|
||||||
|
|
||||||
ColumnFamilyOptions cf_options(options);
|
ColumnFamilyOptions cf_options(options);
|
||||||
std::vector<ColumnFamilyDescriptor> column_families;
|
std::vector<ColumnFamilyDescriptor> column_families;
|
||||||
column_families.push_back(
|
column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
|
||||||
ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
|
|
||||||
DBOptions db_options(options);
|
DBOptions db_options(options);
|
||||||
|
|
||||||
// OpenAndTrimHistory should not work with avoid_flush_during_recovery
|
// OpenAndTrimHistory should not work with avoid_flush_during_recovery
|
||||||
|
@ -2634,7 +2632,7 @@ TEST_F(DataVisibilityTest, MultiGetWithoutSnapshot) {
|
||||||
auto ss = db_->MultiGet(read_opts, keys, &values);
|
auto ss = db_->MultiGet(read_opts, keys, &values);
|
||||||
|
|
||||||
writer_thread.join();
|
writer_thread.join();
|
||||||
for (auto s : ss) {
|
for (const auto& s : ss) {
|
||||||
ASSERT_TRUE(s.IsNotFound());
|
ASSERT_TRUE(s.IsNotFound());
|
||||||
}
|
}
|
||||||
VerifyDefaultCF();
|
VerifyDefaultCF();
|
||||||
|
@ -2904,8 +2902,8 @@ TEST_P(DBBasicTestWithTimestampCompressionSettings, PutDeleteGet) {
|
||||||
// A class which remembers the name of each flushed file.
|
// A class which remembers the name of each flushed file.
|
||||||
class FlushedFileCollector : public EventListener {
|
class FlushedFileCollector : public EventListener {
|
||||||
public:
|
public:
|
||||||
FlushedFileCollector() {}
|
FlushedFileCollector() = default;
|
||||||
~FlushedFileCollector() override {}
|
~FlushedFileCollector() override = default;
|
||||||
|
|
||||||
void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
|
void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
|
||||||
InstrumentedMutexLock lock(&mutex_);
|
InstrumentedMutexLock lock(&mutex_);
|
||||||
|
@ -3087,7 +3085,7 @@ TEST_F(DBBasicTestWithTimestamp, BatchWriteAndMultiGet) {
|
||||||
key_vals.push_back(Key1(j));
|
key_vals.push_back(Key1(j));
|
||||||
}
|
}
|
||||||
for (size_t j = 0; j != kNumKeysPerTimestamp; ++j) {
|
for (size_t j = 0; j != kNumKeysPerTimestamp; ++j) {
|
||||||
keys.push_back(key_vals[j]);
|
keys.emplace_back(key_vals[j]);
|
||||||
}
|
}
|
||||||
|
|
||||||
ReadOptions ropts;
|
ReadOptions ropts;
|
||||||
|
@@ -3793,12 +3791,12 @@ TEST_F(DBBasicTestWithTimestamp, FullHistoryTsLowSanityCheckFail) {
     std::vector<Slice> keys;
     std::vector<std::string> values;
     for (size_t j = 0; j < 2; ++j) {
-      keys.push_back(key_vals[j]);
+      keys.emplace_back(key_vals[j]);
     }

     std::vector<Status> statuses =
         db_->MultiGet(read_opts, cfhs, keys, &values);
-    for (auto status : statuses) {
+    for (const auto& status : statuses) {
       ASSERT_TRUE(status.IsInvalidArgument());
     }
   }

@@ -3810,12 +3808,12 @@ TEST_F(DBBasicTestWithTimestamp, FullHistoryTsLowSanityCheckFail) {
     std::vector<Slice> keys;
     std::vector<std::string> values;
     for (size_t j = 0; j < 1; ++j) {
-      keys.push_back(key_vals[j]);
+      keys.emplace_back(key_vals[j]);
     }

     std::vector<Status> statuses =
         db_->MultiGet(read_opts, one_cfh, keys, &values);
-    for (auto status : statuses) {
+    for (const auto& status : statuses) {
       ASSERT_TRUE(status.IsInvalidArgument());
     }
   }

@@ -3828,7 +3826,7 @@ TEST_F(DBBasicTestWithTimestamp, FullHistoryTsLowSanityCheckFail) {
     Status statuses[] = {Status::OK(), Status::OK()};
     db_->MultiGet(read_opts, /*num_keys=*/2, &column_families[0], &keys[0],
                   &values[0], &statuses[0], /*sorted_input=*/false);
-    for (auto status : statuses) {
+    for (const auto& status : statuses) {
       ASSERT_TRUE(status.IsInvalidArgument());
     }
   }

@@ -3841,7 +3839,7 @@ TEST_F(DBBasicTestWithTimestamp, FullHistoryTsLowSanityCheckFail) {
     Status statuses[] = {Status::OK()};
    db_->MultiGet(read_opts, /*num_keys=*/1, &one_column_family[0], &keys[0],
                  &values[0], &statuses[0], /*sorted_input=*/false);
-    for (auto status : statuses) {
+    for (const auto& status : statuses) {
       ASSERT_TRUE(status.IsInvalidArgument());
     }
   }
@@ -4556,8 +4554,8 @@ TEST_F(DBBasicTestWithTimestamp, TimestampFilterTableReadOnGet) {
   Slice read_ts_slice = Slice(read_ts_str);
   ReadOptions read_opts;
   read_opts.timestamp = &read_ts_slice;
-  std::string value_from_get = "";
-  std::string timestamp_from_get = "";
+  std::string value_from_get;
+  std::string timestamp_from_get;
   auto status =
       db_->Get(read_opts, Key1(3), &value_from_get, &timestamp_from_get);
   ASSERT_TRUE(status.IsNotFound());
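Initializing a std::string with `= ""` is redundant: the default constructor already produces an empty string, so the explicit empty literal only adds noise. A minimal check of the equivalence, reusing the variable names from the hunk above:

    #include <cassert>
    #include <string>

    int main() {
      std::string value_from_get;            // default-constructed: already empty
      std::string timestamp_from_get = "";   // same result, stated more verbosely
      assert(value_from_get.empty() && timestamp_from_get.empty());
      assert(value_from_get == timestamp_from_get);
      return 0;
    }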
@@ -172,8 +172,8 @@ TEST_F(TimestampCompatibleCompactionTest, MultipleSubCompactions) {

 class TestFilePartitioner : public SstPartitioner {
  public:
-  explicit TestFilePartitioner() {}
-  ~TestFilePartitioner() override {}
+  explicit TestFilePartitioner() = default;
+  ~TestFilePartitioner() override = default;

   const char* Name() const override { return "TestFilePartitioner"; }
   PartitionerResult ShouldPartition(

@@ -188,7 +188,7 @@ class TestFilePartitioner : public SstPartitioner {

 class TestFilePartitionerFactory : public SstPartitionerFactory {
  public:
-  explicit TestFilePartitionerFactory() {}
+  explicit TestFilePartitionerFactory() = default;
   std::unique_ptr<SstPartitioner> CreatePartitioner(
       const SstPartitioner::Context& /*context*/) const override {
     std::unique_ptr<SstPartitioner> ret =
@ -188,7 +188,7 @@ class TestFilePartitioner : public SstPartitioner {
|
||||||
|
|
||||||
class TestFilePartitionerFactory : public SstPartitionerFactory {
|
class TestFilePartitionerFactory : public SstPartitionerFactory {
|
||||||
public:
|
public:
|
||||||
explicit TestFilePartitionerFactory() {}
|
explicit TestFilePartitionerFactory() = default;
|
||||||
std::unique_ptr<SstPartitioner> CreatePartitioner(
|
std::unique_ptr<SstPartitioner> CreatePartitioner(
|
||||||
const SstPartitioner::Context& /*context*/) const override {
|
const SstPartitioner::Context& /*context*/) const override {
|
||||||
std::unique_ptr<SstPartitioner> ret =
|
std::unique_ptr<SstPartitioner> ret =
|
||||||
|
|
|
@@ -296,7 +296,7 @@ TEST_P(DBWriteTest, IOErrorOnWALWritePropagateToWriteThreadFollower) {
   });
   SyncPoint::GetInstance()->EnableProcessing();
   for (int i = 0; i < kNumThreads; i++) {
-    threads.push_back(port::Thread(
+    threads.emplace_back(
         [&](int index) {
           // All threads should fail.
           auto res = Put("key" + std::to_string(index), "value");

@@ -313,7 +313,7 @@ TEST_P(DBWriteTest, IOErrorOnWALWritePropagateToWriteThreadFollower) {
             ASSERT_FALSE(res.ok());
           }
         },
-        i));
+        i);
   }
   for (int i = 0; i < kNumThreads; i++) {
     threads[i].join();
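These two hunks are one change: `threads.push_back(port::Thread(lambda, i))` constructs a thread object and then moves it into the vector, while `threads.emplace_back(lambda, i)` forwards the arguments to the element's constructor and builds the thread in place. A sketch using std::thread as an assumed stand-in for port::Thread:

    #include <iostream>
    #include <thread>
    #include <vector>

    int main() {
      constexpr int kNumThreads = 4;
      std::vector<std::thread> threads;
      threads.reserve(kNumThreads);
      for (int i = 0; i < kNumThreads; i++) {
        // Arguments are forwarded to std::thread's constructor; no temporary
        // thread object is created and moved into the vector.
        threads.emplace_back([](int index) {
          std::cout << "worker " << index << "\n";
        }, i);
      }
      for (auto& t : threads) {
        t.join();
      }
      return 0;
    }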
@@ -8,9 +8,8 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 #include "db/dbformat.h"

-#include <stdio.h>
-
 #include <cinttypes>
+#include <cstdio>

 #include "db/lookup_key.h"
 #include "monitoring/perf_context_imp.h"

@@ -28,7 +27,7 @@ namespace ROCKSDB_NAMESPACE {
 // ValueType, not the lowest).
 const ValueType kValueTypeForSeek = kTypeWideColumnEntity;
 const ValueType kValueTypeForSeekForPrev = kTypeDeletion;
-const std::string kDisableUserTimestamp("");
+const std::string kDisableUserTimestamp;

 EntryType GetEntryType(ValueType value_type) {
   switch (value_type) {
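Swapping `<stdio.h>` for `<cstdio>` (and, below, `<stdlib.h>` for `<cstdlib>`) is the standard C++ spelling of the same facilities: the C++ headers guarantee the declarations in namespace std, and in practice also expose them globally. A minimal usage sketch:

    #include <cstdio>
    #include <cstdlib>

    int main() {
      // The C++ headers place these names in namespace std.
      std::printf("answer: %d\n", std::abs(-42));
      return EXIT_SUCCESS;
    }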
@@ -7,9 +7,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

-#include <stdlib.h>
-
+#include <cstdlib>
 #include <map>
 #include <string>
 #include <vector>

@@ -106,7 +104,7 @@ class DeleteFileTest : public DBTestBase {
     ASSERT_OK(env_->GetChildren(dir, &filenames));

     int log_cnt = 0, sst_cnt = 0, manifest_cnt = 0;
-    for (auto file : filenames) {
+    for (const auto& file : filenames) {
       uint64_t number;
       FileType type;
       if (ParseFileName(file, &number, &type)) {

@@ -148,9 +146,9 @@ TEST_F(DeleteFileTest, AddKeysAndQueryLevels) {
   std::vector<LiveFileMetaData> metadata;
   db_->GetLiveFilesMetaData(&metadata);

-  std::string level1file = "";
+  std::string level1file;
   int level1keycount = 0;
-  std::string level2file = "";
+  std::string level2file;
   int level2keycount = 0;
   int level1index = 0;
   int level2index = 1;
@@ -796,7 +796,6 @@ void ErrorHandler::RecoverFromRetryableBGIOError() {
       RecordInHistogram(bg_error_stats_.get(),
                         ERROR_HANDLER_AUTORESUME_RETRY_COUNT, retry_count);
     }
-    return;
 }

 void ErrorHandler::CheckAndSetRecoveryAndBGError(const Status& bg_err) {

@@ -809,7 +808,6 @@ void ErrorHandler::CheckAndSetRecoveryAndBGError(const Status& bg_err) {
   if (bg_error_.severity() >= Status::Severity::kHardError) {
     is_db_stopped_.store(true, std::memory_order_release);
   }
-  return;
 }

 void ErrorHandler::EndAutoRecovery() {

@@ -827,7 +825,6 @@ void ErrorHandler::EndAutoRecovery() {
     db_mutex_->Lock();
   }
   TEST_SYNC_POINT("PostEndAutoRecovery");
-  return;
 }

 }  // namespace ROCKSDB_NAMESPACE
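The dropped `return;` statements all sit at the very end of void functions, where control already falls off the end, so removing them changes nothing observable. A trivial illustration with made-up function names:

    #include <iostream>

    void EndWithExplicitReturn() {
      std::cout << "done\n";
      return;  // redundant: the function ends here anyway
    }

    void EndByFallingOff() {
      std::cout << "done\n";
    }  // equivalent behavior, one line shorter

    int main() {
      EndWithExplicitReturn();
      EndByFallingOff();
      return 0;
    }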
@@ -244,7 +244,7 @@ class ChecksumVerifyHelper {

  public:
  ChecksumVerifyHelper(Options& options) : options_(options) {}
-  ~ChecksumVerifyHelper() {}
+  ~ChecksumVerifyHelper() = default;

  Status GetSingleFileChecksumAndFuncName(
      const std::string& file_path, std::string* file_checksum,

@@ -472,7 +472,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) {
   std::vector<LiveFileMetaData> live_files;
   dbfull()->GetLiveFilesMetaData(&live_files);
   std::set<std::string> set1;
-  for (auto f : live_files) {
+  for (const auto& f : live_files) {
     set1.insert(f.name);
     ASSERT_EQ(f.file_checksum, kUnknownFileChecksum);
     ASSERT_EQ(f.file_checksum_func_name, kUnknownFileChecksumFuncName);

@@ -521,7 +521,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) {
   ASSERT_OK(s) << s.ToString();
   std::vector<LiveFileMetaData> live_files1;
   dbfull()->GetLiveFilesMetaData(&live_files1);
-  for (auto f : live_files1) {
+  for (const auto& f : live_files1) {
     if (set1.find(f.name) == set1.end()) {
       ASSERT_EQ(f.file_checksum, file_checksum2);
       ASSERT_EQ(f.file_checksum_func_name, file_checksum_func_name2);

@@ -538,7 +538,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) {
   ASSERT_OK(s) << s.ToString();
   std::vector<LiveFileMetaData> live_files2;
   dbfull()->GetLiveFilesMetaData(&live_files2);
-  for (auto f : live_files2) {
+  for (const auto& f : live_files2) {
     if (set1.find(f.name) == set1.end()) {
       ASSERT_EQ(f.file_checksum, file_checksum3);
       ASSERT_EQ(f.file_checksum_func_name, file_checksum_func_name3);

@@ -561,7 +561,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) {
   ASSERT_OK(s) << s.ToString();
   std::vector<LiveFileMetaData> live_files3;
   dbfull()->GetLiveFilesMetaData(&live_files3);
-  for (auto f : live_files3) {
+  for (const auto& f : live_files3) {
     if (set1.find(f.name) == set1.end()) {
       ASSERT_FALSE(f.file_checksum == file_checksum4);
       ASSERT_EQ(f.file_checksum, "asd");

@@ -581,7 +581,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) {
   ASSERT_OK(s) << s.ToString();
   std::vector<LiveFileMetaData> live_files4;
   dbfull()->GetLiveFilesMetaData(&live_files4);
-  for (auto f : live_files4) {
+  for (const auto& f : live_files4) {
     if (set1.find(f.name) == set1.end()) {
       std::string cur_checksum5, cur_checksum_func_name5;
       ASSERT_OK(checksum_helper.GetSingleFileChecksumAndFuncName(

@@ -603,7 +603,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) {
   ASSERT_OK(s) << s.ToString();
   std::vector<LiveFileMetaData> live_files6;
   dbfull()->GetLiveFilesMetaData(&live_files6);
-  for (auto f : live_files6) {
+  for (const auto& f : live_files6) {
     if (set1.find(f.name) == set1.end()) {
       ASSERT_EQ(f.file_checksum, file_checksum6);
       ASSERT_EQ(f.file_checksum_func_name, file_checksum_func_name6);
@@ -92,7 +92,7 @@ class ExternalSSTFileTest
     : public ExternalSSTFileTestBase,
       public ::testing::WithParamInterface<std::tuple<bool, bool>> {
  public:
-  ExternalSSTFileTest() {}
+  ExternalSSTFileTest() = default;

  Status GenerateOneExternalFile(
      const Options& options, ColumnFamilyHandle* cfh,

@@ -832,7 +832,7 @@ TEST_F(ExternalSSTFileTest, AddList) {
   TablePropertiesCollection props;
   ASSERT_OK(db_->GetPropertiesOfAllTables(&props));
   ASSERT_EQ(props.size(), 2);
-  for (auto file_props : props) {
+  for (const auto& file_props : props) {
     auto user_props = file_props.second->user_collected_properties;
     ASSERT_EQ(user_props["abc_SstFileWriterCollector"], "YES");
     ASSERT_EQ(user_props["xyz_SstFileWriterCollector"], "YES");

@@ -855,7 +855,7 @@ TEST_F(ExternalSSTFileTest, AddList) {

   ASSERT_OK(db_->GetPropertiesOfAllTables(&props));
   ASSERT_EQ(props.size(), 3);
-  for (auto file_props : props) {
+  for (const auto& file_props : props) {
     auto user_props = file_props.second->user_collected_properties;
     ASSERT_EQ(user_props["abc_SstFileWriterCollector"], "YES");
     ASSERT_EQ(user_props["xyz_SstFileWriterCollector"], "YES");

@@ -2878,7 +2878,7 @@ TEST_P(ExternalSSTFileTest, IngestFilesTriggerFlushingWithTwoWriteQueue) {
     // currently at the front of the 2nd writer queue. We must make
     // sure that it won't enter the 2nd writer queue for the second time.
     std::vector<std::pair<std::string, std::string>> data;
-    data.push_back(std::make_pair("1001", "v2"));
+    data.emplace_back("1001", "v2");
     ASSERT_OK(GenerateAndAddExternalFile(options, data, -1, true));
   }