Run internal cpp modernizer on RocksDB repo (#12398)

Summary:
When internal cpp modernizer attempts to format rocksdb code, it will replace macro `ROCKSDB_NAMESPACE`  with its default definition `rocksdb` when collapsing nested namespace. We filed a feedback for the tool T180254030 and the team filed a bug for this: https://github.com/llvm/llvm-project/issues/83452. At the same time, they suggested us to run the modernizer tool ourselves so future auto codemod attempts will be smaller. This diff contains:

Running
`xplat/scripts/codemod_service/cpp_modernizer.sh`
in fbcode/internal_repo_rocksdb/repo (excluding some directories in utilities/transactions/lock/range/range_tree/lib that have a non-Meta copyright comment)
without swapping out the namespace macro `ROCKSDB_NAMESPACE`

Followed by RocksDB's own
`make format`
Pull Request resolved: https://github.com/facebook/rocksdb/pull/12398

Test Plan: Auto tests

Reviewed By: hx235

Differential Revision: D54382532

Pulled By: jowlyzhang

fbshipit-source-id: e7d5b40f9b113b60e5a503558c181f080b9d02fa
This commit is contained in:
yuzhangyu@fb.com 2024-03-04 10:08:32 -08:00 committed by Facebook GitHub Bot
parent d7b8756976
commit 1cfdece85d
119 changed files with 592 additions and 641 deletions

View File

@ -194,7 +194,7 @@ class SharedState {
: cv_(&mu_),
cache_bench_(cache_bench) {}
~SharedState() {}
~SharedState() = default;
port::Mutex* GetMutex() { return &mu_; }
@ -425,7 +425,7 @@ class CacheBench {
}
}
~CacheBench() {}
~CacheBench() = default;
void PopulateCache() {
Random64 rnd(FLAGS_seed);

8
cache/cache_test.cc vendored
View File

@ -106,7 +106,7 @@ class CacheTest : public testing::Test,
type_ = GetParam();
}
~CacheTest() override {}
~CacheTest() override = default;
// These functions encode/decode keys in tests cases that use
// int keys.
@ -766,7 +766,9 @@ TEST_P(CacheTest, OverCapacity) {
std::string key = EncodeKey(i + 1);
auto h = cache.Lookup(key);
ASSERT_TRUE(h != nullptr);
if (h) cache.Release(h);
if (h) {
cache.Release(h);
}
}
// the cache is over capacity since nothing could be evicted
@ -777,7 +779,7 @@ TEST_P(CacheTest, OverCapacity) {
if (IsHyperClock()) {
// Make sure eviction is triggered.
ASSERT_OK(cache.Insert(EncodeKey(-1), nullptr, 1, &handles[0]));
ASSERT_OK(cache.Insert(EncodeKey(-1), nullptr, 1, handles.data()));
// cache is under capacity now since elements were released
ASSERT_GE(n, cache.get()->GetUsage());

View File

@ -26,7 +26,7 @@ CompressedSecondaryCache::CompressedSecondaryCache(
cache_))),
disable_cache_(opts.capacity == 0) {}
CompressedSecondaryCache::~CompressedSecondaryCache() {}
CompressedSecondaryCache::~CompressedSecondaryCache() = default;
std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
const Slice& key, const Cache::CacheItemHelper* helper,

View File

@ -33,7 +33,7 @@ const std::string key3 = "____ ____key3";
class CompressedSecondaryCacheTestBase : public testing::Test,
public WithCacheType {
public:
CompressedSecondaryCacheTestBase() {}
CompressedSecondaryCacheTestBase() = default;
~CompressedSecondaryCacheTestBase() override = default;
protected:

View File

@ -32,7 +32,7 @@ namespace ROCKSDB_NAMESPACE {
class LRUCacheTest : public testing::Test {
public:
LRUCacheTest() {}
LRUCacheTest() = default;
~LRUCacheTest() override { DeleteCache(); }
void DeleteCache() {
@ -378,7 +378,7 @@ class ClockCacheTest : public testing::Test {
using Table = typename Shard::Table;
using TableOpts = typename Table::Opts;
ClockCacheTest() {}
ClockCacheTest() = default;
~ClockCacheTest() override { DeleteShard(); }
void DeleteShard() {
@ -1976,7 +1976,7 @@ TEST_P(BasicSecondaryCacheTest, BasicWaitAllTest) {
ah.priority = Cache::Priority::LOW;
cache->StartAsyncLookup(ah);
}
cache->WaitAll(&async_handles[0], async_handles.size());
cache->WaitAll(async_handles.data(), async_handles.size());
for (size_t i = 0; i < async_handles.size(); ++i) {
SCOPED_TRACE("i = " + std::to_string(i));
Cache::Handle* result = async_handles[i].Result();

View File

@ -386,7 +386,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
keys.push_back(Key(8));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u);
@ -400,7 +400,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
keys.push_back(Key(20));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@ -414,7 +414,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
keys.push_back(Key(8));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@ -428,7 +428,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
keys.push_back(Key(8));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@ -442,7 +442,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
keys.push_back(Key(8));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@ -456,7 +456,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
keys.push_back(Key(20));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@ -470,7 +470,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
keys.push_back(Key(20));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@ -484,7 +484,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) {
keys.push_back(Key(20));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@ -528,7 +528,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) {
keys.push_back(Key(8));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u);
@ -542,7 +542,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) {
keys.push_back(Key(20));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@ -561,7 +561,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) {
keys.push_back(Key(36));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 10u);
@ -582,7 +582,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) {
keys.push_back(Key(8));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 10u);
@ -629,7 +629,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
keys.push_back(Key(8));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u);
@ -644,7 +644,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
keys.push_back(Key(20));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@ -659,7 +659,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
keys.push_back(Key(8));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u);
@ -676,7 +676,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
keys.push_back(Key(36));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 8u);
@ -691,7 +691,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) {
keys.push_back(Key(36));
values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true);
ASSERT_EQ(values.size(), keys.size());
for (auto value : values) {
for (const auto& value : values) {
ASSERT_EQ(1007, value.size());
}
ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 8u);

View File

@ -405,7 +405,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) {
requests_buf[0] =
BlobReadRequest(key_refs[0], blob_offsets[0], blob_sizes[0],
kNoCompression, nullptr, &statuses_buf[0]);
kNoCompression, nullptr, statuses_buf.data());
requests_buf[1] =
BlobReadRequest(key_refs[1], blob_offsets[1], blob_sizes[1] + 1,
kNoCompression, nullptr, &statuses_buf[1]);

View File

@ -168,8 +168,8 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) {
uint64_t file_size = BlobLogHeader::kSize;
for (size_t i = 0; i < num_blobs; ++i) {
keys.push_back({key_strs[i]});
blobs.push_back({blob_strs[i]});
keys.emplace_back(key_strs[i]);
blobs.emplace_back(blob_strs[i]);
file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size();
}
file_size += BlobLogFooter::kSize;
@ -482,8 +482,8 @@ TEST_F(BlobSourceTest, GetCompressedBlobs) {
std::vector<Slice> blobs;
for (size_t i = 0; i < num_blobs; ++i) {
keys.push_back({key_strs[i]});
blobs.push_back({blob_strs[i]});
keys.emplace_back(key_strs[i]);
blobs.emplace_back(blob_strs[i]);
}
std::vector<uint64_t> blob_offsets(keys.size());
@ -610,8 +610,8 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromMultiFiles) {
uint64_t file_size = BlobLogHeader::kSize;
uint64_t blob_value_bytes = 0;
for (size_t i = 0; i < num_blobs; ++i) {
keys.push_back({key_strs[i]});
blobs.push_back({blob_strs[i]});
keys.emplace_back(key_strs[i]);
blobs.emplace_back(blob_strs[i]);
blob_value_bytes += blobs[i].size();
file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size();
}
@ -802,8 +802,8 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) {
uint64_t file_size = BlobLogHeader::kSize;
for (size_t i = 0; i < num_blobs; ++i) {
keys.push_back({key_strs[i]});
blobs.push_back({blob_strs[i]});
keys.emplace_back(key_strs[i]);
blobs.emplace_back(blob_strs[i]);
file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size();
}
file_size += BlobLogFooter::kSize;
@ -1164,7 +1164,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number,
blob_offsets[0], file_size, blob_sizes[0],
kNoCompression, nullptr /* prefetch_buffer */,
&values[0], nullptr /* bytes_read */));
values.data(), nullptr /* bytes_read */));
// Release cache handle
values[0].Reset();
@ -1183,7 +1183,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number,
blob_offsets[0], file_size, blob_sizes[0],
kNoCompression, nullptr /* prefetch_buffer */,
&values[0], nullptr /* bytes_read */));
values.data(), nullptr /* bytes_read */));
ASSERT_EQ(values[0], blobs[0]);
ASSERT_TRUE(
blob_source.TEST_BlobInCache(file_number, file_size, blob_offsets[0]));
@ -1263,7 +1263,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) {
ASSERT_OK(blob_source.GetBlob(
read_options, keys[0], file_number, blob_offsets[0], file_size,
blob_sizes[0], kNoCompression, nullptr /* prefetch_buffer */,
&values[0], nullptr /* bytes_read */));
values.data(), nullptr /* bytes_read */));
ASSERT_EQ(values[0], blobs[0]);
// Release cache handle
@ -1365,8 +1365,8 @@ class BlobSourceCacheReservationTest : public DBTestBase {
blob_file_size_ = BlobLogHeader::kSize;
for (size_t i = 0; i < kNumBlobs; ++i) {
keys_.push_back({key_strs_[i]});
blobs_.push_back({blob_strs_[i]});
keys_.emplace_back(key_strs_[i]);
blobs_.emplace_back(blob_strs_[i]);
blob_file_size_ +=
BlobLogRecord::kHeaderSize + keys_[i].size() + blobs_[i].size();
}

View File

@ -418,8 +418,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobs) {
std::array<PinnableSlice, num_keys> values;
std::array<Status, num_keys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
keys.data(), values.data(), statuses.data());
ASSERT_OK(statuses[0]);
ASSERT_EQ(values[0], first_value);
@ -441,8 +441,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobs) {
std::array<PinnableSlice, num_keys> values;
std::array<Status, num_keys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
keys.data(), values.data(), statuses.data());
ASSERT_OK(statuses[0]);
ASSERT_EQ(values[0], first_value);
@ -512,8 +512,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) {
std::array<PinnableSlice, num_keys> values;
std::array<Status, num_keys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
keys.data(), values.data(), statuses.data());
ASSERT_OK(statuses[0]);
ASSERT_EQ(values[0], first_value);
@ -534,8 +534,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) {
std::array<PinnableSlice, num_keys> values;
std::array<Status, num_keys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
keys.data(), values.data(), statuses.data());
ASSERT_OK(statuses[0]);
ASSERT_EQ(values[0], first_value);
@ -553,8 +553,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) {
std::array<PinnableSlice, num_keys> values;
std::array<Status, num_keys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
keys.data(), values.data(), statuses.data());
ASSERT_OK(statuses[0]);
ASSERT_EQ(values[0], first_value);
@ -574,8 +574,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) {
std::array<PinnableSlice, num_keys> values;
std::array<Status, num_keys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
keys.data(), values.data(), statuses.data());
ASSERT_OK(statuses[0]);
ASSERT_EQ(values[0], first_value);
@ -758,8 +758,8 @@ TEST_F(DBBlobBasicTest, MultiGetWithDirectIO) {
//
// [offset=0, len=12288]
db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys,
keys.data(), values.data(), statuses.data());
SyncPoint::GetInstance()->DisableProcessing();
SyncPoint::GetInstance()->ClearAllCallBacks();
@ -829,8 +829,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) {
{
std::array<PinnableSlice, kNumKeys> values;
std::array<Status, kNumKeys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys,
keys.data(), values.data(), statuses.data());
for (size_t i = 0; i < kNumKeys; ++i) {
ASSERT_OK(statuses[i]);
@ -843,8 +843,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) {
{
std::array<PinnableSlice, kNumKeys> values;
std::array<Status, kNumKeys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys,
keys.data(), values.data(), statuses.data());
for (size_t i = 0; i < kNumKeys; ++i) {
ASSERT_TRUE(statuses[i].IsIncomplete());
@ -858,8 +858,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) {
{
std::array<PinnableSlice, kNumKeys> values;
std::array<Status, kNumKeys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys,
keys.data(), values.data(), statuses.data());
for (size_t i = 0; i < kNumKeys; ++i) {
ASSERT_OK(statuses[i]);
@ -872,8 +872,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) {
{
std::array<PinnableSlice, kNumKeys> values;
std::array<Status, kNumKeys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys,
keys.data(), values.data(), statuses.data());
for (size_t i = 0; i < kNumKeys; ++i) {
ASSERT_OK(statuses[i]);
@ -1206,8 +1206,8 @@ TEST_F(DBBlobBasicTest, MultiGetMergeBlobWithPut) {
std::array<PinnableSlice, num_keys> values;
std::array<Status, num_keys> statuses;
db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys,
keys.data(), values.data(), statuses.data());
ASSERT_OK(statuses[0]);
ASSERT_EQ(values[0], "v0_0,v0_1,v0_2");
@ -1470,8 +1470,8 @@ TEST_P(DBBlobBasicIOErrorMultiGetTest, MultiGetBlobs_IOError) {
});
SyncPoint::GetInstance()->EnableProcessing();
db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys,
keys.data(), values.data(), statuses.data());
SyncPoint::GetInstance()->DisableProcessing();
SyncPoint::GetInstance()->ClearAllCallBacks();
@ -1820,7 +1820,7 @@ TEST_F(DBBlobBasicTest, GetEntityBlob) {
std::array<Status, num_keys> statuses;
db_->MultiGetEntity(ReadOptions(), db_->DefaultColumnFamily(), num_keys,
&keys[0], &results[0], &statuses[0]);
keys.data(), results.data(), statuses.data());
ASSERT_OK(statuses[0]);
ASSERT_EQ(results[0].columns(), expected_columns);
@ -1917,8 +1917,8 @@ TEST_F(DBBlobWithTimestampTest, MultiGetBlobs) {
std::array<PinnableSlice, num_keys> values;
std::array<Status, num_keys> statuses;
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys,
keys.data(), values.data(), statuses.data());
ASSERT_OK(statuses[0]);
ASSERT_EQ(values[0], first_value);
@ -2001,8 +2001,8 @@ TEST_F(DBBlobWithTimestampTest, MultiGetMergeBlobWithPut) {
std::array<PinnableSlice, num_keys> values;
std::array<Status, num_keys> statuses;
db_->MultiGet(read_opts, db_->DefaultColumnFamily(), num_keys, &keys[0],
&values[0], &statuses[0]);
db_->MultiGet(read_opts, db_->DefaultColumnFamily(), num_keys, keys.data(),
values.data(), statuses.data());
ASSERT_OK(statuses[0]);
ASSERT_EQ(values[0], "v0_0,v0_1,v0_2");

47
db/c.cc
View File

@ -446,7 +446,7 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
size_t new_value_len;
char* tmp_new_value = (*full_merge_)(
state_, merge_in.key.data(), merge_in.key.size(), existing_value_data,
existing_value_len, &operand_pointers[0], &operand_sizes[0],
existing_value_len, operand_pointers.data(), operand_sizes.data(),
static_cast<int>(n), &success, &new_value_len);
merge_out->new_value.assign(tmp_new_value, new_value_len);
@ -475,8 +475,9 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
unsigned char success;
size_t new_value_len;
char* tmp_new_value = (*partial_merge_)(
state_, key.data(), key.size(), &operand_pointers[0], &operand_sizes[0],
static_cast<int>(operand_count), &success, &new_value_len);
state_, key.data(), key.size(), operand_pointers.data(),
operand_sizes.data(), static_cast<int>(operand_count), &success,
&new_value_len);
new_value->assign(tmp_new_value, new_value_len);
if (delete_value_ != nullptr) {
@ -886,9 +887,9 @@ rocksdb_t* rocksdb_open_and_trim_history(
size_t trim_tslen, char** errptr) {
std::vector<ColumnFamilyDescriptor> column_families;
for (int i = 0; i < num_column_families; i++) {
column_families.push_back(ColumnFamilyDescriptor(
column_families.emplace_back(
std::string(column_family_names[i]),
ColumnFamilyOptions(column_family_options[i]->rep)));
ColumnFamilyOptions(column_family_options[i]->rep));
}
std::string trim_ts_(trim_ts, trim_tslen);
@ -919,9 +920,9 @@ rocksdb_t* rocksdb_open_column_families(
rocksdb_column_family_handle_t** column_family_handles, char** errptr) {
std::vector<ColumnFamilyDescriptor> column_families;
for (int i = 0; i < num_column_families; i++) {
column_families.push_back(ColumnFamilyDescriptor(
column_families.emplace_back(
std::string(column_family_names[i]),
ColumnFamilyOptions(column_family_options[i]->rep)));
ColumnFamilyOptions(column_family_options[i]->rep));
}
DB* db;
@ -953,9 +954,9 @@ rocksdb_t* rocksdb_open_column_families_with_ttl(
for (int i = 0; i < num_column_families; i++) {
ttls_vec.push_back(ttls[i]);
column_families.push_back(ColumnFamilyDescriptor(
column_families.emplace_back(
std::string(column_family_names[i]),
ColumnFamilyOptions(column_family_options[i]->rep)));
ColumnFamilyOptions(column_family_options[i]->rep));
}
ROCKSDB_NAMESPACE::DBWithTTL* db;
@ -985,9 +986,9 @@ rocksdb_t* rocksdb_open_for_read_only_column_families(
unsigned char error_if_wal_file_exists, char** errptr) {
std::vector<ColumnFamilyDescriptor> column_families;
for (int i = 0; i < num_column_families; i++) {
column_families.push_back(ColumnFamilyDescriptor(
column_families.emplace_back(
std::string(column_family_names[i]),
ColumnFamilyOptions(column_family_options[i]->rep)));
ColumnFamilyOptions(column_family_options[i]->rep));
}
DB* db;
@ -1081,7 +1082,7 @@ rocksdb_column_family_handle_t** rocksdb_create_column_families(
std::vector<ColumnFamilyHandle*> handles;
std::vector<std::string> names;
for (int i = 0; i != num_column_families; ++i) {
names.push_back(std::string(column_family_names[i]));
names.emplace_back(column_family_names[i]);
}
SaveError(errptr, db->rep->CreateColumnFamilies(
ColumnFamilyOptions(column_family_options->rep), names,
@ -2788,7 +2789,9 @@ void rocksdb_options_set_cuckoo_table_factory(
void rocksdb_set_options(rocksdb_t* db, int count, const char* const keys[],
const char* const values[], char** errptr) {
std::unordered_map<std::string, std::string> options_map;
for (int i = 0; i < count; i++) options_map[keys[i]] = values[i];
for (int i = 0; i < count; i++) {
options_map[keys[i]] = values[i];
}
SaveError(errptr, db->rep->SetOptions(options_map));
}
@ -2797,7 +2800,9 @@ void rocksdb_set_options_cf(rocksdb_t* db,
const char* const keys[],
const char* const values[], char** errptr) {
std::unordered_map<std::string, std::string> options_map;
for (int i = 0; i < count; i++) options_map[keys[i]] = values[i];
for (int i = 0; i < count; i++) {
options_map[keys[i]] = values[i];
}
SaveError(errptr, db->rep->SetOptions(handle->rep, options_map));
}
@ -5060,7 +5065,9 @@ void rocksdb_env_lower_high_priority_thread_pool_cpu_priority(
}
void rocksdb_env_destroy(rocksdb_env_t* env) {
if (!env->is_default) delete env->rep;
if (!env->is_default) {
delete env->rep;
}
delete env;
}
@ -5524,7 +5531,7 @@ size_t rocksdb_column_family_metadata_get_level_count(
rocksdb_level_metadata_t* rocksdb_column_family_metadata_get_level_metadata(
rocksdb_column_family_metadata_t* cf_meta, size_t i) {
if (i >= cf_meta->rep.levels.size()) {
return NULL;
return nullptr;
}
rocksdb_level_metadata_t* level_meta =
(rocksdb_level_metadata_t*)malloc(sizeof(rocksdb_level_metadata_t));
@ -5739,9 +5746,9 @@ rocksdb_transactiondb_t* rocksdb_transactiondb_open_column_families(
rocksdb_column_family_handle_t** column_family_handles, char** errptr) {
std::vector<ColumnFamilyDescriptor> column_families;
for (int i = 0; i < num_column_families; i++) {
column_families.push_back(ColumnFamilyDescriptor(
column_families.emplace_back(
std::string(column_family_names[i]),
ColumnFamilyOptions(column_family_options[i]->rep)));
ColumnFamilyOptions(column_family_options[i]->rep));
}
TransactionDB* txn_db;
@ -6533,9 +6540,9 @@ rocksdb_optimistictransactiondb_open_column_families(
rocksdb_column_family_handle_t** column_family_handles, char** errptr) {
std::vector<ColumnFamilyDescriptor> column_families;
for (int i = 0; i < num_column_families; i++) {
column_families.push_back(ColumnFamilyDescriptor(
column_families.emplace_back(
std::string(column_family_names[i]),
ColumnFamilyOptions(column_family_options[i]->rep)));
ColumnFamilyOptions(column_family_options[i]->rep));
}
OptimisticTransactionDB* otxn_db;

View File

@ -50,14 +50,15 @@ static void StartPhase(const char* name) {
#endif
static const char* GetTempDir(void) {
const char* ret = getenv("TEST_TMPDIR");
if (ret == NULL || ret[0] == '\0')
if (ret == NULL || ret[0] == '\0') {
#ifdef OS_WIN
ret = getenv("TEMP");
#else
ret = "/tmp";
}
#endif
return ret;
}
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
@ -206,10 +207,11 @@ static int CmpCompare(void* arg, const char* a, size_t alen, const char* b,
size_t n = (alen < blen) ? alen : blen;
int r = memcmp(a, b, n);
if (r == 0) {
if (alen < blen)
if (alen < blen) {
r = -1;
else if (alen > blen)
} else if (alen > blen) {
r = +1;
}
}
return r;
}

View File

@ -270,7 +270,7 @@ class ColumnFamilyTestBase : public testing::Test {
void Reopen(const std::vector<ColumnFamilyOptions> options = {}) {
std::vector<std::string> names;
for (auto name : names_) {
for (const auto& name : names_) {
if (name != "") {
names.push_back(name);
}
@ -607,7 +607,7 @@ TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest) {
// Preserve file system state up to here to simulate a crash condition.
fault_env->SetFilesystemActive(false);
std::vector<std::string> names;
for (auto name : names_) {
for (const auto& name : names_) {
if (name != "") {
names.push_back(name);
}
@ -669,7 +669,7 @@ TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest2) {
// Preserve file system state up to here to simulate a crash condition.
fault_env->SetFilesystemActive(false);
std::vector<std::string> names;
for (auto name : names_) {
for (const auto& name : names_) {
if (name != "") {
names.push_back(name);
}
@ -1034,7 +1034,7 @@ TEST_P(ColumnFamilyTest, CrashAfterFlush) {
fault_env->SetFilesystemActive(false);
std::vector<std::string> names;
for (auto name : names_) {
for (const auto& name : names_) {
if (name != "") {
names.push_back(name);
}
@ -3407,9 +3407,13 @@ TEST_P(ColumnFamilyTest, DISABLED_LogTruncationTest) {
for (size_t i = 0; i < filenames.size(); i++) {
uint64_t number;
FileType type;
if (!(ParseFileName(filenames[i], &number, &type))) continue;
if (!(ParseFileName(filenames[i], &number, &type))) {
continue;
}
if (type != kWalFile) continue;
if (type != kWalFile) {
continue;
}
logfs.push_back(filenames[i]);
}

View File

@ -34,8 +34,8 @@ class CompactFilesTest : public testing::Test {
// A class which remembers the name of each flushed file.
class FlushedFileCollector : public EventListener {
public:
FlushedFileCollector() {}
~FlushedFileCollector() override {}
FlushedFileCollector() = default;
~FlushedFileCollector() override = default;
void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
std::lock_guard<std::mutex> lock(mutex_);
@ -45,7 +45,7 @@ class FlushedFileCollector : public EventListener {
std::vector<std::string> GetFlushedFiles() {
std::lock_guard<std::mutex> lock(mutex_);
std::vector<std::string> result;
for (auto fname : flushed_files_) {
for (const auto& fname : flushed_files_) {
result.push_back(fname);
}
return result;
@ -159,7 +159,9 @@ TEST_F(CompactFilesTest, MultipleLevel) {
// Compact files except the file in L3
std::vector<std::string> files;
for (int i = 0; i < 6; ++i) {
if (i == 3) continue;
if (i == 3) {
continue;
}
for (auto& file : meta.levels[i].files) {
files.push_back(file.db_path + "/" + file.name);
}
@ -228,7 +230,7 @@ TEST_F(CompactFilesTest, ObsoleteFiles) {
ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForCompact());
// verify all compaction input files are deleted
for (auto fname : l0_files) {
for (const auto& fname : l0_files) {
ASSERT_EQ(Status::NotFound(), env_->FileExists(fname));
}
delete db;
@ -492,4 +494,3 @@ int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -160,7 +160,9 @@ std::vector<CompactionInputFiles> Compaction::PopulateWithAtomicBoundaries(
AtomicCompactionUnitBoundary cur_boundary;
size_t first_atomic_idx = 0;
auto add_unit_boundary = [&](size_t to) {
if (first_atomic_idx == to) return;
if (first_atomic_idx == to) {
return;
}
for (size_t k = first_atomic_idx; k < to; k++) {
inputs[i].atomic_compaction_unit_boundaries.push_back(cur_boundary);
}
@ -753,7 +755,9 @@ int InputSummary(const std::vector<FileMetaData*>& files, char* output,
AppendHumanBytes(files.at(i)->fd.GetFileSize(), sztxt, 16);
ret = snprintf(output + write, sz, "%" PRIu64 "(%s) ",
files.at(i)->fd.GetNumber(), sztxt);
if (ret < 0 || ret >= sz) break;
if (ret < 0 || ret >= sz) {
break;
}
write += ret;
}
// if files.size() is non-zero, overwrite the last space

View File

@ -404,7 +404,9 @@ void CompactionJob::AcquireSubcompactionResources(
void CompactionJob::ShrinkSubcompactionResources(uint64_t num_extra_resources) {
// Do nothing when we have zero resources to shrink
if (num_extra_resources == 0) return;
if (num_extra_resources == 0) {
return;
}
db_mutex_->Lock();
// We cannot release threads more than what we reserved before
int extra_num_subcompaction_threads_released = env_->ReleaseThreads(
@ -584,7 +586,9 @@ void CompactionJob::GenSubcompactionBoundaries() {
TEST_SYNC_POINT_CALLBACK("CompactionJob::GenSubcompactionBoundaries:0",
&num_planned_subcompactions);
if (num_planned_subcompactions == 1) return;
if (num_planned_subcompactions == 1) {
return;
}
// Group the ranges into subcompactions
uint64_t target_range_size = std::max(
@ -641,7 +645,7 @@ Status CompactionJob::Run() {
// Always schedule the first subcompaction (whether or not there are also
// others) in the current thread to be efficient with resources
ProcessKeyValueCompaction(&compact_->sub_compact_states[0]);
ProcessKeyValueCompaction(compact_->sub_compact_states.data());
// Wait for all other threads (if there are any) to finish execution
for (auto& thread : thread_pool) {

View File

@ -131,7 +131,7 @@ class CompactionJobStatsTest : public testing::Test,
ColumnFamilyOptions cf_opts(options);
size_t cfi = handles_.size();
handles_.resize(cfi + cfs.size());
for (auto cf : cfs) {
for (const auto& cf : cfs) {
ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
}
}
@ -160,7 +160,7 @@ class CompactionJobStatsTest : public testing::Test,
EXPECT_EQ(cfs.size(), options.size());
std::vector<ColumnFamilyDescriptor> column_families;
for (size_t i = 0; i < cfs.size(); ++i) {
column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
column_families.emplace_back(cfs[i], options[i]);
}
DBOptions db_opts = DBOptions(options[0]);
return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);

View File

@ -308,7 +308,7 @@ class CompactionJobTestBase : public testing::Test {
kDefaultColumnFamilyName, -1 /* level */),
file_writer.get()));
// Build table.
for (auto kv : contents) {
for (const auto& kv : contents) {
std::string key;
std::string value;
std::tie(key, value) = kv;
@ -327,7 +327,7 @@ class CompactionJobTestBase : public testing::Test {
SequenceNumber smallest_seqno = kMaxSequenceNumber;
SequenceNumber largest_seqno = 0;
uint64_t oldest_blob_file_number = kInvalidBlobFileNumber;
for (auto kv : contents) {
for (const auto& kv : contents) {
ParsedInternalKey key;
std::string skey;
std::string value;

View File

@ -130,7 +130,7 @@ CompactionPicker::CompactionPicker(const ImmutableOptions& ioptions,
const InternalKeyComparator* icmp)
: ioptions_(ioptions), icmp_(icmp) {}
CompactionPicker::~CompactionPicker() {}
CompactionPicker::~CompactionPicker() = default;
// Delete this compaction from the list of running compactions.
void CompactionPicker::ReleaseCompactionFiles(Compaction* c, Status status) {

View File

@ -355,7 +355,9 @@ void LevelCompactionBuilder::SetupOtherFilesWithRoundRobinExpansion() {
TEST_SYNC_POINT("LevelCompactionPicker::RoundRobin");
// Only expand the inputs when we have selected a file in start_level_inputs_
if (start_level_inputs_.size() == 0) return;
if (start_level_inputs_.size() == 0) {
return;
}
uint64_t start_lvl_bytes_no_compacting = 0;
uint64_t curr_bytes_to_compact = 0;

View File

@ -77,7 +77,7 @@ class CompactionPickerTestBase : public testing::Test {
ioptions_.level_compaction_dynamic_level_bytes = false;
}
~CompactionPickerTestBase() override {}
~CompactionPickerTestBase() override = default;
void NewVersionStorage(int num_levels, CompactionStyle style) {
DeleteVersionStorage();
@ -214,7 +214,7 @@ class CompactionPickerTest : public CompactionPickerTestBase {
explicit CompactionPickerTest()
: CompactionPickerTestBase(BytewiseComparator()) {}
~CompactionPickerTest() override {}
~CompactionPickerTest() override = default;
};
class CompactionPickerU64TsTest : public CompactionPickerTestBase {
@ -222,7 +222,7 @@ class CompactionPickerU64TsTest : public CompactionPickerTestBase {
explicit CompactionPickerU64TsTest()
: CompactionPickerTestBase(test::BytewiseComparatorWithU64TsWrapper()) {}
~CompactionPickerU64TsTest() override {}
~CompactionPickerU64TsTest() override = default;
};
TEST_F(CompactionPickerTest, Empty) {

View File

@ -563,10 +563,10 @@ TEST_F(CompactionServiceTest, ConcurrentCompaction) {
std::vector<std::thread> threads;
for (const auto& file : meta.levels[1].files) {
threads.emplace_back(std::thread([&]() {
threads.emplace_back([&]() {
std::string fname = file.db_path + "/" + file.name;
ASSERT_OK(db_->CompactFiles(CompactionOptions(), {fname}, 2));
}));
});
}
for (auto& thread : threads) {

View File

@ -170,7 +170,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
class DoubleComparator : public Comparator {
public:
DoubleComparator() {}
DoubleComparator() = default;
const char* Name() const override { return "DoubleComparator"; }
@ -198,7 +198,7 @@ class DoubleComparator : public Comparator {
class HashComparator : public Comparator {
public:
HashComparator() {}
HashComparator() = default;
const char* Name() const override { return "HashComparator"; }
@ -221,7 +221,7 @@ class HashComparator : public Comparator {
class TwoStrComparator : public Comparator {
public:
TwoStrComparator() {}
TwoStrComparator() = default;
const char* Name() const override { return "TwoStrComparator"; }
@ -372,7 +372,7 @@ TEST_P(ComparatorDBTest, Uint64Comparator) {
uint64_t r = rnd64.Next();
std::string str;
str.resize(8);
memcpy(&str[0], static_cast<void*>(&r), 8);
memcpy(str.data(), static_cast<void*>(&r), 8);
source_strings.push_back(str);
}

View File

@ -209,7 +209,7 @@ static std::string Key(int i) {
static std::string Uint64Key(uint64_t i) {
std::string str;
str.resize(8);
memcpy(&str[0], static_cast<void*>(&i), 8);
memcpy(str.data(), static_cast<void*>(&i), 8);
return str;
}
} // namespace.

View File

@ -1368,9 +1368,9 @@ TEST_P(DBMultiGetTestWithParam, MultiGetMultiCF) {
for (int i = 0; i < num_keys; ++i) {
int cf = i / 3;
int cf_key = 1 % 3;
cf_kv_vec.emplace_back(std::make_tuple(
cf_kv_vec.emplace_back(
cf, "cf" + std::to_string(cf) + "_key_" + std::to_string(cf_key),
"cf" + std::to_string(cf) + "_val_" + std::to_string(cf_key)));
"cf" + std::to_string(cf) + "_val_" + std::to_string(cf_key));
ASSERT_OK(Put(std::get<0>(cf_kv_vec[i]), std::get<1>(cf_kv_vec[i]),
std::get<2>(cf_kv_vec[i])));
}
@ -2607,9 +2607,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1) {
key_strs.push_back(Key(33));
key_strs.push_back(Key(54));
key_strs.push_back(Key(102));
keys.push_back(key_strs[0]);
keys.push_back(key_strs[1]);
keys.push_back(key_strs[2]);
keys.emplace_back(key_strs[0]);
keys.emplace_back(key_strs[1]);
keys.emplace_back(key_strs[2]);
values.resize(keys.size());
statuses.resize(keys.size());
@ -2652,9 +2652,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1Error) {
key_strs.push_back(Key(33));
key_strs.push_back(Key(54));
key_strs.push_back(Key(102));
keys.push_back(key_strs[0]);
keys.push_back(key_strs[1]);
keys.push_back(key_strs[2]);
keys.emplace_back(key_strs[0]);
keys.emplace_back(key_strs[1]);
keys.emplace_back(key_strs[2]);
values.resize(keys.size());
statuses.resize(keys.size());
@ -2717,9 +2717,9 @@ TEST_P(DBMultiGetAsyncIOTest, LastKeyInFile) {
key_strs.push_back(Key(21));
key_strs.push_back(Key(54));
key_strs.push_back(Key(102));
keys.push_back(key_strs[0]);
keys.push_back(key_strs[1]);
keys.push_back(key_strs[2]);
keys.emplace_back(key_strs[0]);
keys.emplace_back(key_strs[1]);
keys.emplace_back(key_strs[2]);
values.resize(keys.size());
statuses.resize(keys.size());
@ -2762,9 +2762,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1AndL2) {
key_strs.push_back(Key(33));
key_strs.push_back(Key(56));
key_strs.push_back(Key(102));
keys.push_back(key_strs[0]);
keys.push_back(key_strs[1]);
keys.push_back(key_strs[2]);
keys.emplace_back(key_strs[0]);
keys.emplace_back(key_strs[1]);
keys.emplace_back(key_strs[2]);
values.resize(keys.size());
statuses.resize(keys.size());
@ -2805,8 +2805,8 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL2WithRangeOverlapL0L1) {
// 19 and 26 are in L2, but overlap with L0 and L1 file ranges
key_strs.push_back(Key(19));
key_strs.push_back(Key(26));
keys.push_back(key_strs[0]);
keys.push_back(key_strs[1]);
keys.emplace_back(key_strs[0]);
keys.emplace_back(key_strs[1]);
values.resize(keys.size());
statuses.resize(keys.size());
@ -2841,8 +2841,8 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL2WithRangeDelInL1) {
// 139 and 163 are in L2, but overlap with a range deletes in L1
key_strs.push_back(Key(139));
key_strs.push_back(Key(163));
keys.push_back(key_strs[0]);
keys.push_back(key_strs[1]);
keys.emplace_back(key_strs[0]);
keys.emplace_back(key_strs[1]);
values.resize(keys.size());
statuses.resize(keys.size());
@ -2871,9 +2871,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1AndL2WithRangeDelInL1) {
key_strs.push_back(Key(139));
key_strs.push_back(Key(144));
key_strs.push_back(Key(163));
keys.push_back(key_strs[0]);
keys.push_back(key_strs[1]);
keys.push_back(key_strs[2]);
keys.emplace_back(key_strs[0]);
keys.emplace_back(key_strs[1]);
keys.emplace_back(key_strs[2]);
values.resize(keys.size());
statuses.resize(keys.size());
@ -2904,9 +2904,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetNoIOUring) {
key_strs.push_back(Key(33));
key_strs.push_back(Key(54));
key_strs.push_back(Key(102));
keys.push_back(key_strs[0]);
keys.push_back(key_strs[1]);
keys.push_back(key_strs[2]);
keys.emplace_back(key_strs[0]);
keys.emplace_back(key_strs[1]);
keys.emplace_back(key_strs[2]);
values.resize(keys.size());
statuses.resize(keys.size());
@ -3285,9 +3285,9 @@ TEST_F(DBBasicTest, MultiGetIOBufferOverrun) {
// Warm up the cache first
key_data.emplace_back(Key(0));
keys.emplace_back(Slice(key_data.back()));
keys.emplace_back(key_data.back());
key_data.emplace_back(Key(50));
keys.emplace_back(Slice(key_data.back()));
keys.emplace_back(key_data.back());
statuses.resize(keys.size());
dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
@ -3661,10 +3661,10 @@ TEST_F(DBBasicTest, ConcurrentlyCloseDB) {
DestroyAndReopen(options);
std::vector<std::thread> workers;
for (int i = 0; i < 10; i++) {
workers.push_back(std::thread([&]() {
workers.emplace_back([&]() {
auto s = db_->Close();
ASSERT_OK(s);
}));
});
}
for (auto& w : workers) {
w.join();
@ -3938,9 +3938,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
// Warm up the cache first
key_data.emplace_back(Key(0));
keys.emplace_back(Slice(key_data.back()));
keys.emplace_back(key_data.back());
key_data.emplace_back(Key(50));
keys.emplace_back(Slice(key_data.back()));
keys.emplace_back(key_data.back());
statuses.resize(keys.size());
dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
@ -4119,9 +4119,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetDirectIO) {
// Warm up the cache first
key_data.emplace_back(Key(0));
keys.emplace_back(Slice(key_data.back()));
keys.emplace_back(key_data.back());
key_data.emplace_back(Key(50));
keys.emplace_back(Slice(key_data.back()));
keys.emplace_back(key_data.back());
statuses.resize(keys.size());
dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
@ -4189,9 +4189,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetWithChecksumMismatch) {
// Warm up the cache first
key_data.emplace_back(Key(0));
keys.emplace_back(Slice(key_data.back()));
keys.emplace_back(key_data.back());
key_data.emplace_back(Key(50));
keys.emplace_back(Slice(key_data.back()));
keys.emplace_back(key_data.back());
statuses.resize(keys.size());
dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
@ -4237,9 +4237,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetWithMissingFile) {
// Warm up the cache first
key_data.emplace_back(Key(0));
keys.emplace_back(Slice(key_data.back()));
keys.emplace_back(key_data.back());
key_data.emplace_back(Key(50));
keys.emplace_back(Slice(key_data.back()));
keys.emplace_back(key_data.back());
statuses.resize(keys.size());
dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
@ -4743,7 +4743,7 @@ TEST_F(DBBasicTest, VerifyFileChecksumsReadahead) {
uint64_t number;
FileType type;
ASSERT_OK(env_->GetChildren(dbname_, &filenames));
for (auto name : filenames) {
for (const auto& name : filenames) {
if (ParseFileName(name, &number, &type)) {
if (type == kTableFile) {
sst_cnt++;

View File

@ -744,7 +744,7 @@ TEST_F(DBBlockCacheTest, AddRedundantStats) {
const size_t capacity = size_t{1} << 25;
const int num_shard_bits = 0; // 1 shard
int iterations_tested = 0;
for (std::shared_ptr<Cache> base_cache :
for (const std::shared_ptr<Cache>& base_cache :
{NewLRUCache(capacity, num_shard_bits),
// FixedHyperClockCache
HyperClockCacheOptions(
@ -990,7 +990,7 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) {
int iterations_tested = 0;
for (bool partition : {false, true}) {
SCOPED_TRACE("Partition? " + std::to_string(partition));
for (std::shared_ptr<Cache> cache :
for (const std::shared_ptr<Cache>& cache :
{NewLRUCache(capacity),
HyperClockCacheOptions(
capacity,
@ -1251,7 +1251,7 @@ void DummyFillCache(Cache& cache, size_t entry_size,
class CountingLogger : public Logger {
public:
~CountingLogger() override {}
~CountingLogger() override = default;
using Logger::Logv;
void Logv(const InfoLogLevel log_level, const char* format,
va_list /*ap*/) override {
@ -1373,7 +1373,7 @@ class StableCacheKeyTestFS : public FaultInjectionTestFS {
SetFailGetUniqueId(true);
}
~StableCacheKeyTestFS() override {}
~StableCacheKeyTestFS() override = default;
IOStatus LinkFile(const std::string&, const std::string&, const IOOptions&,
IODebugContext*) override {
@ -1566,7 +1566,7 @@ class CacheKeyTest : public testing::Test {
tp_.db_id = std::to_string(db_id_);
tp_.orig_file_number = file_number;
bool is_stable;
std::string cur_session_id = ""; // ignored
std::string cur_session_id; // ignored
uint64_t cur_file_number = 42; // ignored
OffsetableCacheKey rv;
BlockBasedTable::SetupBaseCacheKey(&tp_, cur_session_id, cur_file_number,

View File

@ -78,7 +78,7 @@ class DBBloomFilterTestWithParam
DBBloomFilterTestWithParam()
: DBTestBase("db_bloom_filter_tests", /*env_do_fsync=*/true) {}
~DBBloomFilterTestWithParam() override {}
~DBBloomFilterTestWithParam() override = default;
void SetUp() override {
bfp_impl_ = std::get<0>(GetParam());
@ -2051,7 +2051,7 @@ class DBBloomFilterTestVaryPrefixAndFormatVer
DBBloomFilterTestVaryPrefixAndFormatVer()
: DBTestBase("db_bloom_filter_tests", /*env_do_fsync=*/true) {}
~DBBloomFilterTestVaryPrefixAndFormatVer() override {}
~DBBloomFilterTestVaryPrefixAndFormatVer() override = default;
void SetUp() override {
use_prefix_ = std::get<0>(GetParam());
@ -2126,8 +2126,9 @@ TEST_P(DBBloomFilterTestVaryPrefixAndFormatVer, PartitionedMultiGet) {
values[i] = PinnableSlice();
}
db_->MultiGet(ropts, Q, &column_families[0], &key_slices[0], &values[0],
/*timestamps=*/nullptr, &statuses[0], true);
db_->MultiGet(ropts, Q, column_families.data(), key_slices.data(),
values.data(),
/*timestamps=*/nullptr, statuses.data(), true);
// Confirm correct status results
uint32_t number_not_found = 0;
@ -2177,8 +2178,9 @@ TEST_P(DBBloomFilterTestVaryPrefixAndFormatVer, PartitionedMultiGet) {
values[i] = PinnableSlice();
}
db_->MultiGet(ropts, Q, &column_families[0], &key_slices[0], &values[0],
/*timestamps=*/nullptr, &statuses[0], true);
db_->MultiGet(ropts, Q, column_families.data(), key_slices.data(),
values.data(),
/*timestamps=*/nullptr, statuses.data(), true);
// Confirm correct status results
uint32_t number_not_found = 0;

View File

@ -150,7 +150,7 @@ class ConditionalFilter : public CompactionFilter {
class ChangeFilter : public CompactionFilter {
public:
explicit ChangeFilter() {}
explicit ChangeFilter() = default;
bool Filter(int /*level*/, const Slice& /*key*/, const Slice& /*value*/,
std::string* new_value, bool* value_changed) const override {
@ -289,7 +289,7 @@ class ConditionalFilterFactory : public CompactionFilterFactory {
class ChangeFilterFactory : public CompactionFilterFactory {
public:
explicit ChangeFilterFactory() {}
explicit ChangeFilterFactory() = default;
std::unique_ptr<CompactionFilter> CreateCompactionFilter(
const CompactionFilter::Context& /*context*/) override {

View File

@ -41,7 +41,7 @@ class CompactionStatsCollector : public EventListener {
}
}
~CompactionStatsCollector() override {}
~CompactionStatsCollector() override = default;
void OnCompactionCompleted(DB* /* db */,
const CompactionJobInfo& info) override {
@ -241,8 +241,8 @@ class RoundRobinSubcompactionsAgainstResources
namespace {
class FlushedFileCollector : public EventListener {
public:
FlushedFileCollector() {}
~FlushedFileCollector() override {}
FlushedFileCollector() = default;
~FlushedFileCollector() override = default;
void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override {
std::lock_guard<std::mutex> lock(mutex_);
@ -252,7 +252,7 @@ class FlushedFileCollector : public EventListener {
std::vector<std::string> GetFlushedFiles() {
std::lock_guard<std::mutex> lock(mutex_);
std::vector<std::string> result;
for (auto fname : flushed_files_) {
for (const auto& fname : flushed_files_) {
result.push_back(fname);
}
return result;
@ -2090,9 +2090,9 @@ TEST_P(DBDeleteFileRangeTest, DeleteFilesInRanges) {
Slice begin2(begin_str2), end2(end_str2);
Slice begin3(begin_str3), end3(end_str3);
std::vector<RangePtr> ranges;
ranges.push_back(RangePtr(&begin1, &end1));
ranges.push_back(RangePtr(&begin2, &end2));
ranges.push_back(RangePtr(&begin3, &end3));
ranges.emplace_back(&begin1, &end1);
ranges.emplace_back(&begin2, &end2);
ranges.emplace_back(&begin3, &end3);
ASSERT_OK(DeleteFilesInRanges(db_, db_->DefaultColumnFamily(),
ranges.data(), ranges.size()));
ASSERT_EQ("0,3,7", FilesPerLevel(0));
@ -2117,9 +2117,9 @@ TEST_P(DBDeleteFileRangeTest, DeleteFilesInRanges) {
Slice begin2(begin_str2), end2(end_str2);
Slice begin3(begin_str3), end3(end_str3);
std::vector<RangePtr> ranges;
ranges.push_back(RangePtr(&begin1, &end1));
ranges.push_back(RangePtr(&begin2, &end2));
ranges.push_back(RangePtr(&begin3, &end3));
ranges.emplace_back(&begin1, &end1);
ranges.emplace_back(&begin2, &end2);
ranges.emplace_back(&begin3, &end3);
ASSERT_OK(DeleteFilesInRanges(db_, db_->DefaultColumnFamily(),
ranges.data(), ranges.size(), false));
ASSERT_EQ("0,1,4", FilesPerLevel(0));
@ -6641,7 +6641,7 @@ TEST_F(DBCompactionTest, RoundRobinCutOutputAtCompactCursor) {
class NoopMergeOperator : public MergeOperator {
public:
NoopMergeOperator() {}
NoopMergeOperator() = default;
bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
MergeOperationOutput* merge_out) const override {
@ -9878,7 +9878,7 @@ TEST_F(DBCompactionTest, TurnOnLevelCompactionDynamicLevelBytesUCToLC) {
options.compaction_style = CompactionStyle::kCompactionStyleLevel;
options.level_compaction_dynamic_level_bytes = true;
ReopenWithColumnFamilies({"default", "pikachu"}, options);
std::string expected_lsm = "";
std::string expected_lsm;
for (int i = 0; i < 49; ++i) {
expected_lsm += "0,";
}
@ -10394,20 +10394,20 @@ TEST_F(DBCompactionTest, ReleaseCompactionDuringManifestWrite) {
SyncPoint::GetInstance()->EnableProcessing();
std::vector<std::thread> threads;
threads.emplace_back(std::thread([&]() {
threads.emplace_back([&]() {
std::string k1_str = Key(1);
std::string k2_str = Key(2);
Slice k1 = k1_str;
Slice k2 = k2_str;
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &k1, &k2));
}));
threads.emplace_back(std::thread([&]() {
});
threads.emplace_back([&]() {
std::string k10_str = Key(10);
std::string k11_str = Key(11);
Slice k10 = k10_str;
Slice k11 = k11_str;
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &k10, &k11));
}));
});
std::string k100_str = Key(100);
std::string k101_str = Key(101);
Slice k100 = k100_str;

View File

@ -1367,14 +1367,15 @@ TEST_F(DBFlushTest, MemPurgeDeleteAndDeleteRange) {
ASSERT_OK(iter->status());
key = (iter->key()).ToString(false);
value = (iter->value()).ToString(false);
if (key.compare(KEY3) == 0)
if (key.compare(KEY3) == 0) {
ASSERT_EQ(value, p_v3b);
else if (key.compare(KEY4) == 0)
} else if (key.compare(KEY4) == 0) {
ASSERT_EQ(value, p_v4);
else if (key.compare(KEY5) == 0)
} else if (key.compare(KEY5) == 0) {
ASSERT_EQ(value, p_v5);
else
} else {
ASSERT_EQ(value, NOT_FOUND);
}
count++;
}
ASSERT_OK(iter->status());
@ -1404,22 +1405,25 @@ TEST_F(DBFlushTest, MemPurgeDeleteAndDeleteRange) {
ASSERT_OK(iter->status());
key = (iter->key()).ToString(false);
value = (iter->value()).ToString(false);
if (key.compare(KEY2) == 0)
if (key.compare(KEY2) == 0) {
ASSERT_EQ(value, p_v2);
else if (key.compare(KEY3) == 0)
} else if (key.compare(KEY3) == 0) {
ASSERT_EQ(value, p_v3b);
else if (key.compare(KEY4) == 0)
} else if (key.compare(KEY4) == 0) {
ASSERT_EQ(value, p_v4);
else if (key.compare(KEY5) == 0)
} else if (key.compare(KEY5) == 0) {
ASSERT_EQ(value, p_v5);
else
} else {
ASSERT_EQ(value, NOT_FOUND);
}
count++;
}
// Expected count here is 4: KEY2, KEY3, KEY4, KEY5.
ASSERT_EQ(count, EXPECTED_COUNT_END);
if (iter) delete iter;
if (iter) {
delete iter;
}
Close();
}
@ -2499,7 +2503,7 @@ TEST_F(DBFlushTest, TombstoneVisibleInSnapshot) {
class SimpleTestFlushListener : public EventListener {
public:
explicit SimpleTestFlushListener(DBFlushTest* _test) : test_(_test) {}
~SimpleTestFlushListener() override {}
~SimpleTestFlushListener() override = default;
void OnFlushBegin(DB* db, const FlushJobInfo& info) override {
ASSERT_EQ(static_cast<uint32_t>(0), info.cf_id);

View File

@ -21,7 +21,7 @@ CompactedDBImpl::CompactedDBImpl(const DBOptions& options,
version_(nullptr),
user_comparator_(nullptr) {}
CompactedDBImpl::~CompactedDBImpl() {}
CompactedDBImpl::~CompactedDBImpl() = default;
size_t CompactedDBImpl::FindFile(const Slice& key) {
size_t right = files_.num_files - 1;

View File

@ -8,7 +8,7 @@
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/db_impl/db_impl.h"
#include <stdint.h>
#include <cstdint>
#ifdef OS_SOLARIS
#include <alloca.h>
#endif
@ -959,7 +959,9 @@ size_t DBImpl::EstimateInMemoryStatsHistorySize() const {
stats_history_mutex_.AssertHeld();
size_t size_total =
sizeof(std::map<uint64_t, std::map<std::string, uint64_t>>);
if (stats_history_.size() == 0) return size_total;
if (stats_history_.size() == 0) {
return size_total;
}
size_t size_per_slice =
sizeof(uint64_t) + sizeof(std::map<std::string, uint64_t>);
// non-empty map, stats_history_.begin() guaranteed to exist
@ -1085,7 +1087,9 @@ bool DBImpl::FindStatsByTime(uint64_t start_time, uint64_t end_time,
std::map<std::string, uint64_t>* stats_map) {
assert(new_time);
assert(stats_map);
if (!new_time || !stats_map) return false;
if (!new_time || !stats_map) {
return false;
}
// lock when search for start_time
{
InstrumentedMutexLock l(&stats_history_mutex_);
@ -1492,7 +1496,9 @@ int DBImpl::FindMinimumEmptyLevelFitting(
int minimum_level = level;
for (int i = level - 1; i > 0; --i) {
// stop if level i is not empty
if (vstorage->NumLevelFiles(i) > 0) break;
if (vstorage->NumLevelFiles(i) > 0) {
break;
}
// stop if level i is too small (cannot fit the level files)
if (vstorage->MaxBytesForLevel(i) < vstorage->NumLevelBytes(level)) {
break;
@ -4615,9 +4621,9 @@ Status DBImpl::DeleteFile(std::string name) {
read_options, write_options, &edit, &mutex_,
directories_.GetDbDir());
if (status.ok()) {
InstallSuperVersionAndScheduleWork(cfd,
&job_context.superversion_contexts[0],
*cfd->GetLatestMutableCFOptions());
InstallSuperVersionAndScheduleWork(
cfd, job_context.superversion_contexts.data(),
*cfd->GetLatestMutableCFOptions());
}
FindObsoleteFiles(&job_context, false);
} // lock released here
@ -4728,9 +4734,9 @@ Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
read_options, write_options, &edit, &mutex_,
directories_.GetDbDir());
if (status.ok()) {
InstallSuperVersionAndScheduleWork(cfd,
&job_context.superversion_contexts[0],
*cfd->GetLatestMutableCFOptions());
InstallSuperVersionAndScheduleWork(
cfd, job_context.superversion_contexts.data(),
*cfd->GetLatestMutableCFOptions());
}
for (auto* deleted_file : deleted_files) {
deleted_file->being_compacted = false;
@ -4965,7 +4971,7 @@ Status DB::DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family) {
return Status::OK();
}
DB::~DB() {}
DB::~DB() = default;
Status DBImpl::Close() {
InstrumentedMutexLock closing_lock_guard(&closing_mutex_);
@ -4992,7 +4998,7 @@ Status DB::ListColumnFamilies(const DBOptions& db_options,
return VersionSet::ListColumnFamilies(column_families, name, fs.get());
}
Snapshot::~Snapshot() {}
Snapshot::~Snapshot() = default;
Status DestroyDB(const std::string& dbname, const Options& options,
const std::vector<ColumnFamilyDescriptor>& column_families) {
@ -6024,8 +6030,8 @@ Status DBImpl::ClipColumnFamily(ColumnFamilyHandle* column_family,
if (status.ok()) {
// DeleteFilesInRanges non-overlap files except L0
std::vector<RangePtr> ranges;
ranges.push_back(RangePtr(nullptr, &begin_key));
ranges.push_back(RangePtr(&end_key, nullptr));
ranges.emplace_back(nullptr, &begin_key);
ranges.emplace_back(&end_key, nullptr);
status = DeleteFilesInRanges(column_family, ranges.data(), ranges.size());
}
@ -6273,7 +6279,7 @@ void DBImpl::NotifyOnExternalFileIngested(
info.internal_file_path = f.internal_file_path;
info.global_seqno = f.assigned_seqno;
info.table_properties = f.table_properties;
for (auto listener : immutable_db_options_.listeners) {
for (const auto& listener : immutable_db_options_.listeners) {
listener->OnExternalFileIngested(this, info);
}
}

View File

@ -970,7 +970,7 @@ void DBImpl::NotifyOnFlushBegin(ColumnFamilyData* cfd, FileMetaData* file_meta,
info.smallest_seqno = file_meta->fd.smallest_seqno;
info.largest_seqno = file_meta->fd.largest_seqno;
info.flush_reason = flush_reason;
for (auto listener : immutable_db_options_.listeners) {
for (const auto& listener : immutable_db_options_.listeners) {
listener->OnFlushBegin(this, info);
}
}
@ -1002,7 +1002,7 @@ void DBImpl::NotifyOnFlushCompleted(
for (auto& info : *flush_jobs_info) {
info->triggered_writes_slowdown = triggered_writes_slowdown;
info->triggered_writes_stop = triggered_writes_stop;
for (auto listener : immutable_db_options_.listeners) {
for (const auto& listener : immutable_db_options_.listeners) {
listener->OnFlushCompleted(this, *info);
}
TEST_SYNC_POINT(
@ -1609,9 +1609,9 @@ Status DBImpl::CompactFilesImpl(
}
if (status.ok()) {
assert(compaction_job.io_status().ok());
InstallSuperVersionAndScheduleWork(c->column_family_data(),
&job_context->superversion_contexts[0],
*c->mutable_cf_options());
InstallSuperVersionAndScheduleWork(
c->column_family_data(), job_context->superversion_contexts.data(),
*c->mutable_cf_options());
}
// status above captures any error during compaction_job.Install, so its ok
// not check compaction_job.io_status() explicitly if we're not calling
@ -1731,7 +1731,7 @@ void DBImpl::NotifyOnCompactionBegin(ColumnFamilyData* cfd, Compaction* c,
{
CompactionJobInfo info{};
BuildCompactionJobInfo(cfd, c, st, job_stats, job_id, &info);
for (auto listener : immutable_db_options_.listeners) {
for (const auto& listener : immutable_db_options_.listeners) {
listener->OnCompactionBegin(this, info);
}
info.status.PermitUncheckedError();
@ -1760,7 +1760,7 @@ void DBImpl::NotifyOnCompactionCompleted(
{
CompactionJobInfo info{};
BuildCompactionJobInfo(cfd, c, st, compaction_job_stats, job_id, &info);
for (auto listener : immutable_db_options_.listeners) {
for (const auto& listener : immutable_db_options_.listeners) {
listener->OnCompactionCompleted(this, info);
}
}
@ -3221,7 +3221,7 @@ Status DBImpl::BackgroundFlush(bool* made_progress, JobContext* job_context,
column_families_not_to_flush.push_back(cfd);
continue;
}
superversion_contexts.emplace_back(SuperVersionContext(true));
superversion_contexts.emplace_back(true);
bg_flush_args.emplace_back(cfd, max_memtable_id,
&(superversion_contexts.back()), flush_reason);
}
@ -3726,9 +3726,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
compaction_released = true;
});
io_s = versions_->io_status();
InstallSuperVersionAndScheduleWork(c->column_family_data(),
&job_context->superversion_contexts[0],
*c->mutable_cf_options());
InstallSuperVersionAndScheduleWork(
c->column_family_data(), job_context->superversion_contexts.data(),
*c->mutable_cf_options());
ROCKS_LOG_BUFFER(log_buffer, "[%s] Deleted %d files\n",
c->column_family_data()->GetName().c_str(),
c->num_input_files(0));
@ -3801,9 +3801,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
});
io_s = versions_->io_status();
// Use latest MutableCFOptions
InstallSuperVersionAndScheduleWork(c->column_family_data(),
&job_context->superversion_contexts[0],
*c->mutable_cf_options());
InstallSuperVersionAndScheduleWork(
c->column_family_data(), job_context->superversion_contexts.data(),
*c->mutable_cf_options());
VersionStorageInfo::LevelSummaryStorage tmp;
c->column_family_data()->internal_stats()->IncBytesMoved(c->output_level(),
@ -3896,9 +3896,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
compaction_job.Install(*c->mutable_cf_options(), &compaction_released);
io_s = compaction_job.io_status();
if (status.ok()) {
InstallSuperVersionAndScheduleWork(c->column_family_data(),
&job_context->superversion_contexts[0],
*c->mutable_cf_options());
InstallSuperVersionAndScheduleWork(
c->column_family_data(), job_context->superversion_contexts.data(),
*c->mutable_cf_options());
}
*made_progress = true;
TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:AfterCompaction",
@ -4045,7 +4045,6 @@ void DBImpl::RemoveManualCompaction(DBImpl::ManualCompactionState* m) {
++it;
}
assert(false);
return;
}
bool DBImpl::ShouldntRunManualCompaction(ManualCompactionState* m) {

View File

@ -104,7 +104,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
return status;
}
if (i == 0) continue;
if (i == 0) {
continue;
}
auto prev_f = l0_files[i - 1];
if (icmp->Compare(prev_f->largest, f->smallest) >= 0) {
ROCKS_LOG_INFO(immutable_db_options_.info_log,
@ -148,9 +150,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
read_options, write_options, &edit, &mutex_,
directories_.GetDbDir());
if (status.ok()) {
InstallSuperVersionAndScheduleWork(cfd,
&job_context.superversion_contexts[0],
*cfd->GetLatestMutableCFOptions());
InstallSuperVersionAndScheduleWork(
cfd, job_context.superversion_contexts.data(),
*cfd->GetLatestMutableCFOptions());
}
} // lock released here
LogFlush(immutable_db_options_.info_log);

View File

@ -1799,11 +1799,9 @@ Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) {
DBOptions db_options(options);
ColumnFamilyOptions cf_options(options);
std::vector<ColumnFamilyDescriptor> column_families;
column_families.push_back(
ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
if (db_options.persist_stats_to_disk) {
column_families.push_back(
ColumnFamilyDescriptor(kPersistentStatsColumnFamilyName, cf_options));
column_families.emplace_back(kPersistentStatsColumnFamilyName, cf_options);
}
std::vector<ColumnFamilyHandle*> handles;
Status s = DB::Open(db_options, dbname, column_families, &handles, dbptr);
@ -1972,7 +1970,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
handles->clear();
size_t max_write_buffer_size = 0;
for (auto cf : column_families) {
for (const auto& cf : column_families) {
max_write_buffer_size =
std::max(max_write_buffer_size, cf.options.write_buffer_size);
}
@ -2044,8 +2042,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
}
if (s.ok()) {
impl->alive_log_files_.push_back(
DBImpl::LogFileNumberSize(impl->logfile_number_));
impl->alive_log_files_.emplace_back(impl->logfile_number_);
// In WritePrepared there could be gap in sequence numbers. This breaks
// the trick we use in kPointInTimeRecovery which assumes the first seq in
// the log right after the corrupted log is one larger than the last seq
@ -2093,7 +2090,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
if (s.ok()) {
// set column family handles
for (auto cf : column_families) {
for (const auto& cf : column_families) {
auto cfd =
impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name);
if (cfd != nullptr) {

View File

@ -26,7 +26,7 @@ DBImplReadOnly::DBImplReadOnly(const DBOptions& db_options,
LogFlush(immutable_db_options_.info_log);
}
DBImplReadOnly::~DBImplReadOnly() {}
DBImplReadOnly::~DBImplReadOnly() = default;
// Implementations of the DB interface
Status DBImplReadOnly::GetImpl(const ReadOptions& read_options,
@ -293,8 +293,7 @@ Status DB::OpenForReadOnly(const Options& options, const std::string& dbname,
DBOptions db_options(options);
ColumnFamilyOptions cf_options(options);
std::vector<ColumnFamilyDescriptor> column_families;
column_families.push_back(
ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
std::vector<ColumnFamilyHandle*> handles;
s = DBImplReadOnly::OpenForReadOnlyWithoutCheck(
@ -339,7 +338,7 @@ Status DBImplReadOnly::OpenForReadOnlyWithoutCheck(
error_if_wal_file_exists);
if (s.ok()) {
// set column family handles
for (auto cf : column_families) {
for (const auto& cf : column_families) {
auto cfd =
impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name);
if (cfd == nullptr) {

View File

@ -28,7 +28,7 @@ DBImplSecondary::DBImplSecondary(const DBOptions& db_options,
LogFlush(immutable_db_options_.info_log);
}
DBImplSecondary::~DBImplSecondary() {}
DBImplSecondary::~DBImplSecondary() = default;
Status DBImplSecondary::Recover(
const std::vector<ColumnFamilyDescriptor>& column_families,
@ -804,7 +804,7 @@ Status DB::OpenAsSecondary(
impl->mutex_.Lock();
s = impl->Recover(column_families, true, false, false);
if (s.ok()) {
for (auto cf : column_families) {
for (const auto& cf : column_families) {
auto cfd =
impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name);
if (nullptr == cfd) {

View File

@ -2135,7 +2135,7 @@ void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* /*cfd*/,
}
mutex_.Unlock();
for (auto listener : immutable_db_options_.listeners) {
for (const auto& listener : immutable_db_options_.listeners) {
listener->OnMemTableSealed(mem_table_info);
}
mutex_.Lock();
@ -2252,7 +2252,7 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
log_empty_ = true;
log_dir_synced_ = false;
logs_.emplace_back(logfile_number_, new_log);
alive_log_files_.push_back(LogFileNumberSize(logfile_number_));
alive_log_files_.emplace_back(logfile_number_);
}
}

View File

@ -280,7 +280,7 @@ class DBTablePropertiesInRangeTest : public DBTestBase,
// run the query
TablePropertiesCollection props;
ColumnFamilyHandle* default_cf = db_->DefaultColumnFamily();
EXPECT_OK(db_->GetPropertiesOfTablesInRange(default_cf, &ranges[0],
EXPECT_OK(db_->GetPropertiesOfTablesInRange(default_cf, ranges.data(),
ranges.size(), &props));
const Comparator* ucmp = default_cf->GetComparator();

View File

@ -17,9 +17,7 @@
#include "logging/logging.h"
#include "util/atomic.h"
namespace ROCKSDB_NAMESPACE {
namespace experimental {
namespace ROCKSDB_NAMESPACE::experimental {
Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family,
const Slice* begin, const Slice* end) {
@ -378,7 +376,7 @@ enum BuiltinSstQueryFilters : char {
class SstQueryFilterBuilder {
public:
virtual ~SstQueryFilterBuilder() {}
virtual ~SstQueryFilterBuilder() = default;
virtual void Add(const Slice& key,
const KeySegmentsExtractor::Result& extracted,
const Slice* prev_key,
@ -395,7 +393,7 @@ class SstQueryFilterConfigImpl : public SstQueryFilterConfig {
const KeySegmentsExtractor::KeyCategorySet& categories)
: input_(input), categories_(categories) {}
virtual ~SstQueryFilterConfigImpl() {}
virtual ~SstQueryFilterConfigImpl() = default;
virtual std::unique_ptr<SstQueryFilterBuilder> NewBuilder(
bool sanity_checks) const = 0;
@ -1210,5 +1208,4 @@ Status SstQueryFilterConfigsManager::MakeShared(
return s;
}
} // namespace experimental
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::experimental

View File

@ -18,8 +18,7 @@
#include "util/coding.h"
#include "util/crc32c.h"
namespace ROCKSDB_NAMESPACE {
namespace log {
namespace ROCKSDB_NAMESPACE::log {
Reader::Reporter::~Reporter() = default;
@ -937,5 +936,4 @@ bool FragmentBufferedReader::TryReadFragment(
}
}
} // namespace log
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::log

View File

@ -19,8 +19,7 @@
#include "util/random.h"
#include "utilities/memory_allocators.h"
namespace ROCKSDB_NAMESPACE {
namespace log {
namespace ROCKSDB_NAMESPACE::log {
// Construct a string of the specified length made out of the supplied
// partial string.
@ -1206,8 +1205,7 @@ INSTANTIATE_TEST_CASE_P(
kBlockSize * 2),
::testing::Values(CompressionType::kZSTD)));
} // namespace log
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::log
int main(int argc, char** argv) {
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();

View File

@ -18,8 +18,7 @@
#include "util/crc32c.h"
#include "util/udt_util.h"
namespace ROCKSDB_NAMESPACE {
namespace log {
namespace ROCKSDB_NAMESPACE::log {
Writer::Writer(std::unique_ptr<WritableFileWriter>&& dest, uint64_t log_number,
bool recycle_log_files, bool manual_flush,
@ -297,5 +296,4 @@ IOStatus Writer::EmitPhysicalRecord(const WriteOptions& write_options,
return s;
}
} // namespace log
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::log

View File

@ -422,7 +422,7 @@ bool SeqnoToTimeMapping::Append(SequenceNumber seqno, uint64_t time) {
// TODO: consider changing?
} else if (pairs_.empty()) {
enforced_ = true;
pairs_.push_back({seqno, time});
pairs_.emplace_back(seqno, time);
// skip normal enforced check below
return true;
} else {
@ -437,13 +437,13 @@ bool SeqnoToTimeMapping::Append(SequenceNumber seqno, uint64_t time) {
// reset
assert(false);
} else {
pairs_.push_back({seqno, time});
pairs_.emplace_back(seqno, time);
added = true;
}
}
} else if (!enforced_) {
// Treat like AddUnenforced and fix up below
pairs_.push_back({seqno, time});
pairs_.emplace_back(seqno, time);
added = true;
} else {
// Out of order append attempted

View File

@ -29,8 +29,7 @@ using GFLAGS_NAMESPACE::ParseCommandLineFlags;
DEFINE_bool(enable_print, false, "Print options generated to console.");
#endif // GFLAGS
namespace ROCKSDB_NAMESPACE {
namespace test {
namespace ROCKSDB_NAMESPACE::test {
class StringLogger : public Logger {
public:
using Logger::Logv;
@ -849,8 +848,7 @@ INSTANTIATE_TEST_CASE_P(
"block_size=1024;"
"no_block_cache=true;")));
} // namespace test
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::test
int main(int argc, char** argv) {
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);

View File

@ -55,8 +55,7 @@ void* SaveStack(int* /*num_frames*/, int /*first_frames_to_skip*/) {
#include "port/lang.h"
namespace ROCKSDB_NAMESPACE {
namespace port {
namespace ROCKSDB_NAMESPACE::port {
namespace {
@ -413,7 +412,6 @@ void InstallStackTraceHandler() {
#endif
}
} // namespace port
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::port
#endif

View File

@ -13,8 +13,7 @@
#include "table/get_context.h"
#include "util/coding.h"
namespace ROCKSDB_NAMESPACE {
namespace mock {
namespace ROCKSDB_NAMESPACE::mock {
KVVector MakeMockFile(std::initializer_list<KVPair> l) { return KVVector(l); }
@ -347,5 +346,4 @@ void MockTableFactory::AssertLatestFiles(
}
}
} // namespace mock
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::mock

View File

@ -431,7 +431,7 @@ class SstFileReaderTimestampNotPersistedTest
sst_name_ = test::PerThreadDBPath("sst_file_ts_not_persisted");
}
~SstFileReaderTimestampNotPersistedTest() {}
~SstFileReaderTimestampNotPersistedTest() = default;
};
TEST_F(SstFileReaderTimestampNotPersistedTest, Basic) {

View File

@ -7,9 +7,7 @@
#include <array>
namespace ROCKSDB_NAMESPACE {
namespace secondary_cache_test_util {
namespace ROCKSDB_NAMESPACE::secondary_cache_test_util {
namespace {
using TestItem = WithCacheType::TestItem;
@ -92,6 +90,4 @@ const Cache::CacheItemHelper* WithCacheType::GetHelperFail(CacheEntryRole r) {
return GetHelper(r, true, true);
}
} // namespace secondary_cache_test_util
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::secondary_cache_test_util

View File

@ -13,8 +13,7 @@
#include <string>
#include <thread>
namespace ROCKSDB_NAMESPACE {
namespace test {
namespace ROCKSDB_NAMESPACE::test {
#ifdef OS_WIN
#include <windows.h>
@ -103,5 +102,4 @@ bool TestRegex::Matches(const std::string& str) const {
}
}
} // namespace test
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::test

View File

@ -34,8 +34,7 @@
void RegisterCustomObjects(int /*argc*/, char** /*argv*/) {}
#endif
namespace ROCKSDB_NAMESPACE {
namespace test {
namespace ROCKSDB_NAMESPACE::test {
const uint32_t kDefaultFormatVersion = BlockBasedTableOptions().format_version;
const std::set<uint32_t> kFooterFormatVersionsToTest{
@ -749,5 +748,4 @@ void RegisterTestLibrary(const std::string& arg) {
ObjectRegistry::Default()->AddLibrary("test", RegisterTestObjects, arg);
}
}
} // namespace test
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::test

View File

@ -56,8 +56,7 @@ ASSERT_FEATURE_COMPAT_HEADER();
bool pmull_runtime_flag = false;
#endif
namespace ROCKSDB_NAMESPACE {
namespace crc32c {
namespace ROCKSDB_NAMESPACE::crc32c {
#if defined(HAVE_POWER8) && defined(HAS_ALTIVEC)
#ifdef __powerpc64__
@ -1293,5 +1292,4 @@ uint32_t Crc32cCombine(uint32_t crc1, uint32_t crc2, size_t crc2len) {
pure_crc2_with_init);
}
} // namespace crc32c
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::crc32c

View File

@ -12,8 +12,7 @@
#include "util/coding.h"
#include "util/random.h"
namespace ROCKSDB_NAMESPACE {
namespace crc32c {
namespace ROCKSDB_NAMESPACE::crc32c {
class CRC {};
@ -170,8 +169,7 @@ TEST(CRC, Crc32cCombineBigSizeTest) {
ASSERT_EQ(crc1_2, crc1_2_combine);
}
} // namespace crc32c
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::crc32c
// copied from folly
const uint64_t FNV_64_HASH_START = 14695981039346656037ULL;

View File

@ -7,12 +7,10 @@
#include "util/math.h"
namespace ROCKSDB_NAMESPACE {
namespace detail {
namespace ROCKSDB_NAMESPACE::detail {
int CountTrailingZeroBitsForSmallEnumSet(uint64_t v) {
return CountTrailingZeroBits(v);
}
} // namespace detail
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::detail

View File

@ -5,11 +5,7 @@
#include "util/ribbon_config.h"
namespace ROCKSDB_NAMESPACE {
namespace ribbon {
namespace detail {
namespace ROCKSDB_NAMESPACE::ribbon::detail {
// Each instantiation of this struct is sufficiently unique for configuration
// purposes, and is only instantiated for settings where we support the
@ -499,8 +495,4 @@ template struct BandingConfigHelper1MaybeSupported<
template struct BandingConfigHelper1MaybeSupported<kOneIn1000, 64U, /*sm*/ true,
/*hm*/ true, /*sup*/ true>;
} // namespace detail
} // namespace ribbon
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::ribbon::detail

View File

@ -9,9 +9,8 @@
#include "rocksdb/slice.h"
#include <stdio.h>
#include <algorithm>
#include <cstdio>
#include "rocksdb/convenience.h"
#include "rocksdb/slice_transform.h"
@ -128,7 +127,7 @@ class CappedPrefixTransform : public SliceTransform {
class NoopTransform : public SliceTransform {
public:
explicit NoopTransform() {}
explicit NoopTransform() = default;
static const char* kClassName() { return "rocksdb.Noop"; }
const char* Name() const override { return kClassName(); }
@ -173,7 +172,7 @@ static int RegisterBuiltinSliceTransform(ObjectLibrary& library,
.AddNumber(":"),
[](const std::string& uri, std::unique_ptr<const SliceTransform>* guard,
std::string* /*errmsg*/) {
auto colon = uri.find(":");
auto colon = uri.find(':');
auto len = ParseSizeT(uri.substr(colon + 1));
guard->reset(NewFixedPrefixTransform(len));
return guard->get();
@ -193,7 +192,7 @@ static int RegisterBuiltinSliceTransform(ObjectLibrary& library,
.AddNumber(":"),
[](const std::string& uri, std::unique_ptr<const SliceTransform>* guard,
std::string* /*errmsg*/) {
auto colon = uri.find(":");
auto colon = uri.find(':');
auto len = ParseSizeT(uri.substr(colon + 1));
guard->reset(NewCappedPrefixTransform(len));
return guard->get();

View File

@ -169,8 +169,8 @@ TEST_F(PinnableSliceTest, Move) {
// Unit test for SmallEnumSet
class SmallEnumSetTest : public testing::Test {
public:
SmallEnumSetTest() {}
~SmallEnumSetTest() {}
SmallEnumSetTest() = default;
~SmallEnumSetTest() = default;
};
TEST_F(SmallEnumSetTest, SmallEnumSetTest1) {

View File

@ -9,7 +9,7 @@
#include "rocksdb/status.h"
#include <stdio.h>
#include <cstdio>
#ifdef OS_WIN
#include <string.h>
#endif

View File

@ -5,13 +5,12 @@
//
#include "util/string_util.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <cerrno>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <sstream>
#include <string>
#include <utility>
@ -266,7 +265,9 @@ std::string UnescapeOptionString(const std::string& escaped_string) {
}
std::string trim(const std::string& str) {
if (str.empty()) return std::string();
if (str.empty()) {
return std::string();
}
size_t start = 0;
size_t end = str.size() - 1;
while (isspace(str[start]) != 0 && start < end) {
@ -346,14 +347,15 @@ uint64_t ParseUint64(const std::string& value) {
if (endchar < value.length()) {
char c = value[endchar];
if (c == 'k' || c == 'K')
if (c == 'k' || c == 'K') {
num <<= 10LL;
else if (c == 'm' || c == 'M')
} else if (c == 'm' || c == 'M') {
num <<= 20LL;
else if (c == 'g' || c == 'G')
} else if (c == 'g' || c == 'G') {
num <<= 30LL;
else if (c == 't' || c == 'T')
} else if (c == 't' || c == 'T') {
num <<= 40LL;
}
}
return num;
@ -371,14 +373,15 @@ int64_t ParseInt64(const std::string& value) {
if (endchar < value.length()) {
char c = value[endchar];
if (c == 'k' || c == 'K')
if (c == 'k' || c == 'K') {
num <<= 10LL;
else if (c == 'm' || c == 'M')
} else if (c == 'm' || c == 'M') {
num <<= 20LL;
else if (c == 'g' || c == 'G')
} else if (c == 'g' || c == 'G') {
num <<= 30LL;
else if (c == 't' || c == 'T')
} else if (c == 't' || c == 'T') {
num <<= 40LL;
}
}
return num;
@ -396,12 +399,13 @@ int ParseInt(const std::string& value) {
if (endchar < value.length()) {
char c = value[endchar];
if (c == 'k' || c == 'K')
if (c == 'k' || c == 'K') {
num <<= 10;
else if (c == 'm' || c == 'M')
} else if (c == 'm' || c == 'M') {
num <<= 20;
else if (c == 'g' || c == 'G')
} else if (c == 'g' || c == 'G') {
num <<= 30;
}
}
return num;

View File

@ -97,7 +97,7 @@ class SimulatedBackgroundTask {
class ThreadListTest : public testing::Test {
public:
ThreadListTest() {}
ThreadListTest() = default;
};
TEST_F(ThreadListTest, GlobalTables) {
@ -161,7 +161,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
// Verify the number of running threads in each pool.
ASSERT_OK(env->GetThreadList(&thread_list));
int running_count[ThreadStatus::NUM_THREAD_TYPES] = {0};
for (auto thread_status : thread_list) {
for (const auto& thread_status : thread_list) {
if (thread_status.cf_name == "pikachu" &&
thread_status.db_name == "running") {
running_count[thread_status.thread_type]++;
@ -189,7 +189,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
for (int i = 0; i < ThreadStatus::NUM_THREAD_TYPES; ++i) {
running_count[i] = 0;
}
for (auto thread_status : thread_list) {
for (const auto& thread_status : thread_list) {
if (thread_status.cf_name == "pikachu" &&
thread_status.db_name == "running") {
running_count[thread_status.thread_type]++;
@ -204,7 +204,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
namespace {
void UpdateStatusCounts(const std::vector<ThreadStatus>& thread_list,
int operation_counts[], int state_counts[]) {
for (auto thread_status : thread_list) {
for (const auto& thread_status : thread_list) {
operation_counts[thread_status.operation_type]++;
state_counts[thread_status.state_type]++;
}

View File

@ -9,7 +9,7 @@
#include "util/thread_local.h"
#include <stdlib.h>
#include <cstdlib>
#include "port/likely.h"
#include "util/mutexlock.h"

View File

@ -18,11 +18,10 @@
#include <sys/syscall.h>
#endif
#include <stdlib.h>
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdlib>
#include <deque>
#include <mutex>
#include <sstream>
@ -465,7 +464,7 @@ int ThreadPoolImpl::Impl::UnSchedule(void* arg) {
ThreadPoolImpl::ThreadPoolImpl() : impl_(new Impl()) {}
ThreadPoolImpl::~ThreadPoolImpl() {}
ThreadPoolImpl::~ThreadPoolImpl() = default;
void ThreadPoolImpl::JoinAllThreads() { impl_->JoinThreads(false); }

View File

@ -20,16 +20,16 @@ static const std::string kValuePlaceHolder = "value";
class HandleTimestampSizeDifferenceTest : public testing::Test {
public:
HandleTimestampSizeDifferenceTest() {}
HandleTimestampSizeDifferenceTest() = default;
// Test handler used to collect the column family id and user keys contained
// in a WriteBatch for test verification. And verifies the value part stays
// the same if it's available.
class KeyCollector : public WriteBatch::Handler {
public:
explicit KeyCollector() {}
explicit KeyCollector() = default;
~KeyCollector() override {}
~KeyCollector() override = default;
Status PutCF(uint32_t cf, const Slice& key, const Slice& value) override {
if (value.compare(kValuePlaceHolder) != 0) {
@ -90,7 +90,7 @@ class HandleTimestampSizeDifferenceTest : public testing::Test {
private:
Status AddKey(uint32_t cf, const Slice& key) {
keys_.push_back(std::make_pair(cf, key));
keys_.emplace_back(cf, key);
return Status::OK();
}
std::vector<std::pair<uint32_t, const Slice>> keys_;

View File

@ -5,8 +5,7 @@
#include "rocksdb/utilities/agg_merge.h"
#include <assert.h>
#include <cassert>
#include <deque>
#include <memory>
#include <type_traits>
@ -24,7 +23,7 @@
namespace ROCKSDB_NAMESPACE {
static std::unordered_map<std::string, std::unique_ptr<Aggregator>> func_map;
const std::string kUnnamedFuncName = "";
const std::string kUnnamedFuncName;
const std::string kErrorFuncName = "kErrorFuncName";
Status AddAggregator(const std::string& function_name,
@ -37,7 +36,7 @@ Status AddAggregator(const std::string& function_name,
return Status::OK();
}
AggMergeOperator::AggMergeOperator() {}
AggMergeOperator::AggMergeOperator() = default;
std::string EncodeAggFuncAndPayloadNoCheck(const Slice& function_name,
const Slice& value) {
@ -123,7 +122,7 @@ class AggMergeOperator::Accumulator {
}
std::swap(scratch_, aggregated_);
values_.clear();
values_.push_back(aggregated_);
values_.emplace_back(aggregated_);
func_ = my_func;
}
values_.push_back(my_value);

View File

@ -5,8 +5,7 @@
#include "test_agg_merge.h"
#include <assert.h>
#include <cassert>
#include <deque>
#include <vector>

View File

@ -384,7 +384,7 @@ class BackupEngineImpl {
BackupMeta(const BackupMeta&) = delete;
BackupMeta& operator=(const BackupMeta&) = delete;
~BackupMeta() {}
~BackupMeta() = default;
void RecordTimestamp() {
// Best effort
@ -639,11 +639,9 @@ class BackupEngineImpl {
std::string db_session_id;
CopyOrCreateWorkItem()
: src_path(""),
dst_path(""),
src_temperature(Temperature::kUnknown),
: src_temperature(Temperature::kUnknown),
dst_temperature(Temperature::kUnknown),
contents(""),
src_env(nullptr),
dst_env(nullptr),
src_env_options(),
@ -651,10 +649,7 @@ class BackupEngineImpl {
rate_limiter(nullptr),
size_limit(0),
stats(nullptr),
src_checksum_func_name(kUnknownFileChecksumFuncName),
src_checksum_hex(""),
db_id(""),
db_session_id("") {}
src_checksum_func_name(kUnknownFileChecksumFuncName) {}
CopyOrCreateWorkItem(const CopyOrCreateWorkItem&) = delete;
CopyOrCreateWorkItem& operator=(const CopyOrCreateWorkItem&) = delete;
@ -727,12 +722,7 @@ class BackupEngineImpl {
std::string dst_path;
std::string dst_relative;
BackupAfterCopyOrCreateWorkItem()
: shared(false),
needed_to_copy(false),
backup_env(nullptr),
dst_path_tmp(""),
dst_path(""),
dst_relative("") {}
: shared(false), needed_to_copy(false), backup_env(nullptr) {}
BackupAfterCopyOrCreateWorkItem(
BackupAfterCopyOrCreateWorkItem&& o) noexcept {
@ -773,7 +763,7 @@ class BackupEngineImpl {
std::string from_file;
std::string to_file;
std::string checksum_hex;
RestoreAfterCopyOrCreateWorkItem() : checksum_hex("") {}
RestoreAfterCopyOrCreateWorkItem() {}
RestoreAfterCopyOrCreateWorkItem(std::future<CopyOrCreateResult>&& _result,
const std::string& _from_file,
const std::string& _to_file,
@ -874,7 +864,7 @@ class BackupEngineImplThreadSafe : public BackupEngine,
BackupEngineImplThreadSafe(const BackupEngineOptions& options, Env* db_env,
bool read_only = false)
: impl_(options, db_env, read_only) {}
~BackupEngineImplThreadSafe() override {}
~BackupEngineImplThreadSafe() override = default;
using BackupEngine::CreateNewBackupWithMetadata;
IOStatus CreateNewBackupWithMetadata(const CreateBackupOptions& options,

View File

@ -858,8 +858,8 @@ class BackupEngineTest : public testing::Test {
for (auto& dir : child_dirs) {
dir = "private/" + dir;
}
child_dirs.push_back("shared"); // might not exist
child_dirs.push_back("shared_checksum"); // might not exist
child_dirs.emplace_back("shared"); // might not exist
child_dirs.emplace_back("shared_checksum"); // might not exist
for (auto& dir : child_dirs) {
std::vector<std::string> children;
test_backup_env_->GetChildren(backupdir_ + "/" + dir, &children)
@ -927,7 +927,7 @@ class BackupEngineTest : public testing::Test {
void DeleteLogFiles() {
std::vector<std::string> delete_logs;
ASSERT_OK(db_chroot_env_->GetChildren(dbname_, &delete_logs));
for (auto f : delete_logs) {
for (const auto& f : delete_logs) {
uint64_t number;
FileType type;
bool ok = ParseFileName(f, &number, &type);
@ -1925,7 +1925,7 @@ TEST_F(BackupEngineTest, BackupOptions) {
ASSERT_OK(file_manager_->FileExists(OptionsPath(backupdir_, i) + name));
ASSERT_OK(backup_chroot_env_->GetChildren(OptionsPath(backupdir_, i),
&filenames));
for (auto fn : filenames) {
for (const auto& fn : filenames) {
if (fn.compare(0, 7, "OPTIONS") == 0) {
ASSERT_EQ(name, fn);
}
@ -2664,7 +2664,7 @@ TEST_F(BackupEngineTest, DeleteTmpFiles) {
assert(false);
}
CloseDBAndBackupEngine();
for (std::string file_or_dir : tmp_files_and_dirs) {
for (const std::string& file_or_dir : tmp_files_and_dirs) {
if (file_manager_->FileExists(file_or_dir) != Status::NotFound()) {
FAIL() << file_or_dir << " was expected to be deleted." << cleanup_fn;
}
@ -2698,7 +2698,7 @@ class BackupEngineRateLimitingTestWithParam
int /* 0 = single threaded, 1 = multi threaded*/,
std::pair<uint64_t, uint64_t> /* limits */>> {
public:
BackupEngineRateLimitingTestWithParam() {}
BackupEngineRateLimitingTestWithParam() = default;
};
uint64_t const MB = 1024 * 1024;
@ -2848,7 +2848,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) {
true /* include_file_details */));
std::uint64_t bytes_read_during_verify_backup = 0;
for (BackupFileInfo backup_file_info : backup_info.file_details) {
for (const BackupFileInfo& backup_file_info : backup_info.file_details) {
bytes_read_during_verify_backup += backup_file_info.size;
}
auto start_verify_backup = special_env->NowMicros();
@ -2986,7 +2986,7 @@ class BackupEngineRateLimitingTestWithParam2
public testing::WithParamInterface<
std::tuple<std::pair<uint64_t, uint64_t> /* limits */>> {
public:
BackupEngineRateLimitingTestWithParam2() {}
BackupEngineRateLimitingTestWithParam2() = default;
};
INSTANTIATE_TEST_CASE_P(
@ -4212,7 +4212,7 @@ TEST_F(BackupEngineTest, FileTemperatures) {
std::vector<LiveFileStorageInfo> infos;
ASSERT_OK(
db_->GetLiveFilesStorageInfo(LiveFilesStorageInfoOptions(), &infos));
for (auto info : infos) {
for (const auto& info : infos) {
if (info.file_type == kTableFile) {
manifest_temps.emplace(info.file_number, info.temperature);
manifest_temp_counts[info.temperature]++;
@ -4379,7 +4379,7 @@ TEST_F(BackupEngineTest, ExcludeFiles) {
MaybeExcludeBackupFile* files_end) {
for (auto* f = files_begin; f != files_end; ++f) {
std::string s = StringSplit(f->info.relative_file, '/').back();
s = s.substr(0, s.find("_"));
s = s.substr(0, s.find('_'));
int64_t num = std::strtoll(s.c_str(), nullptr, /*base*/ 10);
// Exclude if not a match
f->exclude_decision = (num % modulus) != remainder;

View File

@ -13,8 +13,7 @@
#include "rocksdb/system_clock.h"
#include "test_util/sync_point.h"
namespace ROCKSDB_NAMESPACE {
namespace blob_db {
namespace ROCKSDB_NAMESPACE::blob_db {
BlobIndexCompactionFilterBase::~BlobIndexCompactionFilterBase() {
if (blob_file_) {
@ -488,5 +487,4 @@ BlobIndexCompactionFilterFactoryGC::CreateCompactionFilter(
std::move(user_comp_filter_from_factory), current_time, statistics()));
}
} // namespace blob_db
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::blob_db

View File

@ -11,8 +11,7 @@
#include "logging/logging.h"
#include "utilities/blob_db/blob_db_impl.h"
namespace ROCKSDB_NAMESPACE {
namespace blob_db {
namespace ROCKSDB_NAMESPACE::blob_db {
Status BlobDB::Open(const Options& options, const BlobDBOptions& bdb_options,
const std::string& dbname, BlobDB** blob_db) {
@ -20,8 +19,7 @@ Status BlobDB::Open(const Options& options, const BlobDBOptions& bdb_options,
DBOptions db_options(options);
ColumnFamilyOptions cf_options(options);
std::vector<ColumnFamilyDescriptor> column_families;
column_families.push_back(
ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
std::vector<ColumnFamilyHandle*> handles;
Status s = BlobDB::Open(db_options, bdb_options, dbname, column_families,
&handles, blob_db);
@ -108,5 +106,4 @@ void BlobDBOptions::Dump(Logger* log) const {
disable_background_tasks);
}
} // namespace blob_db
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::blob_db

View File

@ -48,8 +48,7 @@ namespace {
int kBlockBasedTableVersionFormat = 2;
} // end namespace
namespace ROCKSDB_NAMESPACE {
namespace blob_db {
namespace ROCKSDB_NAMESPACE::blob_db {
bool BlobFileComparator::operator()(
const std::shared_ptr<BlobFile>& lhs,
@ -1461,7 +1460,6 @@ void BlobDBImpl::MultiGet(const ReadOptions& _read_options, size_t num_keys,
if (snapshot_created) {
db_->ReleaseSnapshot(read_options.snapshot);
}
return;
}
bool BlobDBImpl::SetSnapshotIfNeeded(ReadOptions* read_options) {
@ -1602,8 +1600,8 @@ Status BlobDBImpl::GetRawBlobFromFile(const Slice& key, uint64_t file_number,
} else {
buf.reserve(static_cast<size_t>(record_size));
s = reader->Read(IOOptions(), record_offset,
static_cast<size_t>(record_size), &blob_record, &buf[0],
nullptr);
static_cast<size_t>(record_size), &blob_record,
buf.data(), nullptr);
}
RecordTick(statistics_, BLOB_DB_BLOB_FILE_BYTES_READ, blob_record.size());
}
@ -1770,7 +1768,7 @@ std::pair<bool, int64_t> BlobDBImpl::SanityCheck(bool aborted) {
uint64_t now = EpochNow();
for (auto blob_file_pair : blob_files_) {
for (const auto& blob_file_pair : blob_files_) {
auto blob_file = blob_file_pair.second;
std::ostringstream buf;
@ -1930,7 +1928,7 @@ std::pair<bool, int64_t> BlobDBImpl::EvictExpiredFiles(bool aborted) {
uint64_t now = EpochNow();
{
ReadLock rl(&mutex_);
for (auto p : blob_files_) {
for (const auto& p : blob_files_) {
auto& blob_file = p.second;
ReadLock file_lock(&blob_file->mutex_);
if (blob_file->HasTTL() && !blob_file->Obsolete() &&
@ -1977,7 +1975,7 @@ Status BlobDBImpl::SyncBlobFiles(const WriteOptions& write_options) {
std::vector<std::shared_ptr<BlobFile>> process_files;
{
ReadLock rl(&mutex_);
for (auto fitr : open_ttl_files_) {
for (const auto& fitr : open_ttl_files_) {
process_files.push_back(fitr);
}
if (open_non_ttl_file_ != nullptr) {
@ -2006,7 +2004,9 @@ Status BlobDBImpl::SyncBlobFiles(const WriteOptions& write_options) {
}
std::pair<bool, int64_t> BlobDBImpl::ReclaimOpenFiles(bool aborted) {
if (aborted) return std::make_pair(false, -1);
if (aborted) {
return std::make_pair(false, -1);
}
if (open_file_count_.load() < kOpenFilesTrigger) {
return std::make_pair(true, -1);
@ -2017,7 +2017,9 @@ std::pair<bool, int64_t> BlobDBImpl::ReclaimOpenFiles(bool aborted) {
ReadLock rl(&mutex_);
for (auto const& ent : blob_files_) {
auto bfile = ent.second;
if (bfile->last_access_.load() == -1) continue;
if (bfile->last_access_.load() == -1) {
continue;
}
WriteLock lockbfile_w(&bfile->mutex_);
CloseRandomAccessLocked(bfile);
@ -2100,7 +2102,7 @@ std::pair<bool, int64_t> BlobDBImpl::DeleteObsoleteFiles(bool aborted) {
// put files back into obsolete if for some reason, delete failed
if (!tobsolete.empty()) {
WriteLock wl(&mutex_);
for (auto bfile : tobsolete) {
for (const auto& bfile : tobsolete) {
blob_files_.insert(std::make_pair(bfile->BlobFileNumber(), bfile));
obsolete_files_.push_front(bfile);
}
@ -2264,5 +2266,4 @@ void BlobDBImpl::TEST_ProcessCompactionJobInfo(const CompactionJobInfo& info) {
#endif // !NDEBUG
} // namespace blob_db
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::blob_db

View File

@ -12,8 +12,7 @@
// BlobDBImpl methods to get snapshot of files, e.g. for replication.
namespace ROCKSDB_NAMESPACE {
namespace blob_db {
namespace ROCKSDB_NAMESPACE::blob_db {
Status BlobDBImpl::DisableFileDeletions() {
// Disable base DB file deletions.
@ -72,7 +71,7 @@ Status BlobDBImpl::GetLiveFiles(std::vector<std::string>& ret,
return s;
}
ret.reserve(ret.size() + blob_files_.size());
for (auto bfile_pair : blob_files_) {
for (const auto& bfile_pair : blob_files_) {
auto blob_file = bfile_pair.second;
// Path should be relative to db_name, but begin with slash.
ret.emplace_back(
@ -87,7 +86,7 @@ void BlobDBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
// Hold a lock in the beginning to avoid updates to base DB during the call
ReadLock rl(&mutex_);
db_->GetLiveFilesMetaData(metadata);
for (auto bfile_pair : blob_files_) {
for (const auto& bfile_pair : blob_files_) {
auto blob_file = bfile_pair.second;
LiveFileMetaData filemetadata;
filemetadata.size = blob_file->GetFileSize();
@ -105,5 +104,4 @@ void BlobDBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
}
}
} // namespace blob_db
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::blob_db

View File

@ -31,8 +31,7 @@
#include "utilities/blob_db/blob_db_impl.h"
#include "utilities/fault_injection_env.h"
namespace ROCKSDB_NAMESPACE {
namespace blob_db {
namespace ROCKSDB_NAMESPACE::blob_db {
class BlobDBTest : public testing::Test {
public:
@ -607,7 +606,7 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) {
VerifyDB(data);
blob_files = blob_db_impl()->TEST_GetBlobFiles();
for (auto bfile : blob_files) {
for (const auto &bfile : blob_files) {
ASSERT_EQ(kNoCompression, bfile->GetCompressionType());
}
@ -627,7 +626,7 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) {
VerifyDB(data);
blob_files = blob_db_impl()->TEST_GetBlobFiles();
for (auto bfile : blob_files) {
for (const auto &bfile : blob_files) {
ASSERT_EQ(kSnappyCompression, bfile->GetCompressionType());
}
}
@ -678,7 +677,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) {
blob_db_impl()->TEST_DeleteObsoleteFiles();
blob_files = blob_db_impl()->TEST_GetBlobFiles();
for (auto bfile : blob_files) {
for (const auto &bfile : blob_files) {
ASSERT_EQ(kSnappyCompression, bfile->GetCompressionType());
}
@ -695,7 +694,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) {
blob_db_impl()->TEST_DeleteObsoleteFiles();
blob_files = blob_db_impl()->TEST_GetBlobFiles();
for (auto bfile : blob_files) {
for (const auto &bfile : blob_files) {
ASSERT_EQ(kNoCompression, bfile->GetCompressionType());
}
@ -719,7 +718,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) {
blob_db_impl()->TEST_DeleteObsoleteFiles();
blob_files = blob_db_impl()->TEST_GetBlobFiles();
for (auto bfile : blob_files) {
for (const auto &bfile : blob_files) {
ASSERT_EQ(kLZ4Compression, bfile->GetCompressionType());
}
}
@ -731,8 +730,8 @@ TEST_F(BlobDBTest, MultipleWriters) {
std::vector<port::Thread> workers;
std::vector<std::map<std::string, std::string>> data_set(10);
for (uint32_t i = 0; i < 10; i++)
workers.push_back(port::Thread(
for (uint32_t i = 0; i < 10; i++) {
workers.emplace_back(
[&](uint32_t id) {
Random rnd(301 + id);
for (int j = 0; j < 100; j++) {
@ -747,7 +746,8 @@ TEST_F(BlobDBTest, MultipleWriters) {
}
}
},
i));
i);
}
std::map<std::string, std::string> data;
for (size_t i = 0; i < 10; i++) {
workers[i].join();
@ -1375,8 +1375,8 @@ TEST_F(BlobDBTest, UserCompactionFilter) {
constexpr uint64_t kMinValueSize = 1 << 6;
constexpr uint64_t kMaxValueSize = 1 << 8;
constexpr uint64_t kMinBlobSize = 1 << 7;
static_assert(kMinValueSize < kMinBlobSize, "");
static_assert(kMaxValueSize > kMinBlobSize, "");
static_assert(kMinValueSize < kMinBlobSize);
static_assert(kMaxValueSize > kMinBlobSize);
BlobDBOptions bdb_options;
bdb_options.min_blob_size = kMinBlobSize;
@ -1747,8 +1747,8 @@ TEST_F(BlobDBTest, GarbageCollection) {
constexpr uint64_t kSmallValueSize = 1 << 6;
constexpr uint64_t kLargeValueSize = 1 << 8;
constexpr uint64_t kMinBlobSize = 1 << 7;
static_assert(kSmallValueSize < kMinBlobSize, "");
static_assert(kLargeValueSize > kMinBlobSize, "");
static_assert(kSmallValueSize < kMinBlobSize);
static_assert(kLargeValueSize > kMinBlobSize);
constexpr size_t kBlobsPerFile = 8;
constexpr size_t kNumBlobFiles = kNumPuts / kBlobsPerFile;
@ -1999,7 +1999,7 @@ TEST_F(BlobDBTest, EvictExpiredFile) {
ASSERT_EQ(0, blob_db_impl()->TEST_GetObsoleteFiles().size());
// Make sure we don't return garbage value after blob file being evicted,
// but the blob index still exists in the LSM tree.
std::string val = "";
std::string val;
ASSERT_TRUE(blob_db_->Get(ReadOptions(), "foo", &val).IsNotFound());
ASSERT_EQ("", val);
}
@ -2413,8 +2413,7 @@ TEST_F(BlobDBTest, SyncBlobFileBeforeCloseIOError) {
ASSERT_TRUE(s.IsIOError());
}
} // namespace blob_db
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::blob_db
// A black-box test for the ttl wrapper around rocksdb
int main(int argc, char **argv) {

View File

@ -5,9 +5,8 @@
#include "utilities/blob_db/blob_dump_tool.h"
#include <stdio.h>
#include <cinttypes>
#include <cstdio>
#include <iostream>
#include <memory>
#include <string>
@ -21,8 +20,7 @@
#include "util/coding.h"
#include "util/string_util.h"
namespace ROCKSDB_NAMESPACE {
namespace blob_db {
namespace ROCKSDB_NAMESPACE::blob_db {
BlobDumpTool::BlobDumpTool()
: reader_(nullptr), buffer_(nullptr), buffer_size_(0) {}
@ -275,5 +273,4 @@ std::string BlobDumpTool::GetString(std::pair<T, T> p) {
return "(" + std::to_string(p.first) + ", " + std::to_string(p.second) + ")";
}
} // namespace blob_db
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::blob_db

View File

@ -5,10 +5,9 @@
// (found in the LICENSE.Apache file in the root directory).
#include "utilities/blob_db/blob_file.h"
#include <stdio.h>
#include <algorithm>
#include <cinttypes>
#include <cstdio>
#include <memory>
#include "db/column_family.h"
@ -19,9 +18,7 @@
#include "logging/logging.h"
#include "utilities/blob_db/blob_db_impl.h"
namespace ROCKSDB_NAMESPACE {
namespace blob_db {
namespace ROCKSDB_NAMESPACE::blob_db {
BlobFile::BlobFile(const BlobDBImpl* p, const std::string& bdir, uint64_t fn,
Logger* info_log)
@ -120,9 +117,11 @@ Status BlobFile::ReadFooter(BlobLogFooter* bf) {
} else {
buf.reserve(BlobLogFooter::kSize + 10);
s = ra_file_reader_->Read(IOOptions(), footer_offset, BlobLogFooter::kSize,
&result, &buf[0], nullptr);
&result, buf.data(), nullptr);
}
if (!s.ok()) {
return s;
}
if (!s.ok()) return s;
if (result.size() != BlobLogFooter::kSize) {
// should not happen
return Status::IOError("EOF reached before footer");
@ -242,7 +241,7 @@ Status BlobFile::ReadMetadata(const std::shared_ptr<FileSystem>& fs,
} else {
header_buf.reserve(BlobLogHeader::kSize);
s = file_reader->Read(IOOptions(), 0, BlobLogHeader::kSize, &header_slice,
&header_buf[0], nullptr);
header_buf.data(), nullptr);
}
if (!s.ok()) {
ROCKS_LOG_ERROR(
@ -283,8 +282,8 @@ Status BlobFile::ReadMetadata(const std::shared_ptr<FileSystem>& fs,
} else {
footer_buf.reserve(BlobLogFooter::kSize);
s = file_reader->Read(IOOptions(), file_size - BlobLogFooter::kSize,
BlobLogFooter::kSize, &footer_slice, &footer_buf[0],
nullptr);
BlobLogFooter::kSize, &footer_slice,
footer_buf.data(), nullptr);
}
if (!s.ok()) {
ROCKS_LOG_ERROR(
@ -309,5 +308,4 @@ Status BlobFile::ReadMetadata(const std::shared_ptr<FileSystem>& fs,
return Status::OK();
}
} // namespace blob_db
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::blob_db

View File

@ -13,8 +13,7 @@
#include "utilities/cassandra/format.h"
#include "utilities/cassandra/merge_operator.h"
namespace ROCKSDB_NAMESPACE {
namespace cassandra {
namespace ROCKSDB_NAMESPACE::cassandra {
static std::unordered_map<std::string, OptionTypeInfo>
cassandra_filter_type_info = {
{"purge_ttl_on_expiration",
@ -102,5 +101,4 @@ int RegisterCassandraObjects(ObjectLibrary& library,
size_t num_types;
return static_cast<int>(library.GetFactoryCount(&num_types));
}
} // namespace cassandra
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::cassandra

View File

@ -11,8 +11,7 @@
#include "utilities/cassandra/serialize.h"
#include "utilities/cassandra/test_utils.h"
namespace ROCKSDB_NAMESPACE {
namespace cassandra {
namespace ROCKSDB_NAMESPACE::cassandra {
TEST(ColumnTest, Column) {
char data[4] = {'d', 'a', 't', 'a'};
@ -367,8 +366,7 @@ TEST(RowValueTest, ExpireTtlShouldConvertExpiredColumnsToTombstones) {
compacted.ConvertExpiredColumnsToTombstones(&changed);
EXPECT_FALSE(changed);
}
} // namespace cassandra
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::cassandra
int main(int argc, char** argv) {
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();

View File

@ -18,8 +18,7 @@
#include "utilities/cassandra/test_utils.h"
#include "utilities/merge_operators.h"
namespace ROCKSDB_NAMESPACE {
namespace cassandra {
namespace ROCKSDB_NAMESPACE::cassandra {
// Path to the database on file system
const std::string kDbName = test::PerThreadDBPath("cassandra_functional_test");
@ -434,8 +433,7 @@ TEST_F(CassandraFunctionalTest, LoadCompactionFilterFactory) {
ASSERT_TRUE(opts->purge_ttl_on_expiration);
}
} // namespace cassandra
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::cassandra
int main(int argc, char** argv) {
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();

View File

@ -9,8 +9,7 @@
#include "utilities/cassandra/format.h"
#include "utilities/cassandra/test_utils.h"
namespace ROCKSDB_NAMESPACE {
namespace cassandra {
namespace ROCKSDB_NAMESPACE::cassandra {
class RowValueMergeTest : public testing::Test {};
@ -88,8 +87,7 @@ TEST(RowValueMergeTest, MergeWithRowTombstone) {
EXPECT_EQ(merged.LastModifiedTime(), 17);
}
} // namespace cassandra
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::cassandra
int main(int argc, char** argv) {
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();

View File

@ -6,8 +6,7 @@
#include "test_util/testharness.h"
#include "utilities/cassandra/serialize.h"
namespace ROCKSDB_NAMESPACE {
namespace cassandra {
namespace ROCKSDB_NAMESPACE::cassandra {
TEST(SerializeTest, SerializeI64) {
std::string dest;
@ -154,8 +153,7 @@ TEST(SerializeTest, DeserializeI8) {
EXPECT_EQ(-128, Deserialize<int8_t>(dest.c_str(), offset));
}
} // namespace cassandra
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::cassandra
int main(int argc, char** argv) {
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();

View File

@ -11,8 +11,7 @@
#include "utilities/cassandra/serialize.h"
namespace ROCKSDB_NAMESPACE {
namespace cassandra {
namespace ROCKSDB_NAMESPACE::cassandra {
namespace {
const int32_t kDefaultLocalDeletionTime = std::numeric_limits<int32_t>::max();
const int64_t kDefaultMarkedForDeleteAt = std::numeric_limits<int64_t>::min();
@ -363,5 +362,4 @@ RowValue RowValue::Merge(std::vector<RowValue>&& values) {
return RowValue(std::move(columns), last_modified_time);
}
} // namespace cassandra
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::cassandra

View File

@ -5,8 +5,7 @@
#include "merge_operator.h"
#include <assert.h>
#include <cassert>
#include <memory>
#include "rocksdb/merge_operator.h"
@ -15,8 +14,7 @@
#include "utilities/cassandra/format.h"
#include "utilities/merge_operators.h"
namespace ROCKSDB_NAMESPACE {
namespace cassandra {
namespace ROCKSDB_NAMESPACE::cassandra {
static std::unordered_map<std::string, OptionTypeInfo>
merge_operator_options_info = {
{"gc_grace_period_in_seconds",
@ -75,6 +73,4 @@ bool CassandraValueMergeOperator::PartialMergeMulti(
return true;
}
} // namespace cassandra
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::cassandra

View File

@ -5,8 +5,7 @@
#include "test_utils.h"
namespace ROCKSDB_NAMESPACE {
namespace cassandra {
namespace ROCKSDB_NAMESPACE::cassandra {
const char kData[] = {'d', 'a', 't', 'a'};
const char kExpiringData[] = {'e', 'd', 'a', 't', 'a'};
const int32_t kTtl = 86400;
@ -65,5 +64,4 @@ int64_t ToMicroSeconds(int64_t seconds) { return seconds * (int64_t)1000000; }
int32_t ToSeconds(int64_t microseconds) {
return (int32_t)(microseconds / (int64_t)1000000);
}
} // namespace cassandra
} // namespace ROCKSDB_NAMESPACE
} // namespace ROCKSDB_NAMESPACE::cassandra

View File

@ -112,7 +112,7 @@ class CheckpointTest : public testing::Test {
ColumnFamilyOptions cf_opts(options);
size_t cfi = handles_.size();
handles_.resize(cfi + cfs.size());
for (auto cf : cfs) {
for (const auto& cf : cfs) {
ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
}
}
@ -141,7 +141,7 @@ class CheckpointTest : public testing::Test {
EXPECT_EQ(cfs.size(), options.size());
std::vector<ColumnFamilyDescriptor> column_families;
for (size_t i = 0; i < cfs.size(); ++i) {
column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
column_families.emplace_back(cfs[i], options[i]);
}
DBOptions db_opts = DBOptions(options[0]);
return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);
@ -507,7 +507,7 @@ TEST_F(CheckpointTest, CheckpointCF) {
cfs = {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"};
std::vector<ColumnFamilyDescriptor> column_families;
for (size_t i = 0; i < cfs.size(); ++i) {
column_families.push_back(ColumnFamilyDescriptor(cfs[i], options));
column_families.emplace_back(cfs[i], options);
}
ASSERT_OK(DB::Open(options, snapshot_name_, column_families, &cphandles,
&snapshotDB));
@ -565,7 +565,7 @@ TEST_F(CheckpointTest, CheckpointCFNoFlush) {
cfs = {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"};
std::vector<ColumnFamilyDescriptor> column_families;
for (size_t i = 0; i < cfs.size(); ++i) {
column_families.push_back(ColumnFamilyDescriptor(cfs[i], options));
column_families.emplace_back(cfs[i], options);
}
ASSERT_OK(DB::Open(options, snapshot_name_, column_families, &cphandles,
&snapshotDB));
@ -717,12 +717,9 @@ TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) {
TransactionDB* snapshotDB;
std::vector<ColumnFamilyDescriptor> column_families;
column_families.push_back(
ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions()));
column_families.push_back(
ColumnFamilyDescriptor("CFA", ColumnFamilyOptions()));
column_families.push_back(
ColumnFamilyDescriptor("CFB", ColumnFamilyOptions()));
column_families.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions());
column_families.emplace_back("CFA", ColumnFamilyOptions());
column_families.emplace_back("CFB", ColumnFamilyOptions());
std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
ASSERT_OK(TransactionDB::Open(options, txn_db_options, snapshot_name_,
column_families, &cf_handles, &snapshotDB));

View File

@ -214,10 +214,11 @@ Status EnvMirror::NewSequentialFile(const std::string& f,
Status as = a_->NewSequentialFile(f, &mf->a_, options);
Status bs = b_->NewSequentialFile(f, &mf->b_, options);
assert(as == bs);
if (as.ok())
if (as.ok()) {
r->reset(mf);
else
} else {
delete mf;
}
return as;
}
@ -231,25 +232,29 @@ Status EnvMirror::NewRandomAccessFile(const std::string& f,
Status as = a_->NewRandomAccessFile(f, &mf->a_, options);
Status bs = b_->NewRandomAccessFile(f, &mf->b_, options);
assert(as == bs);
if (as.ok())
if (as.ok()) {
r->reset(mf);
else
} else {
delete mf;
}
return as;
}
Status EnvMirror::NewWritableFile(const std::string& f,
std::unique_ptr<WritableFile>* r,
const EnvOptions& options) {
if (f.find("/proc/") == 0) return a_->NewWritableFile(f, r, options);
if (f.find("/proc/") == 0) {
return a_->NewWritableFile(f, r, options);
}
WritableFileMirror* mf = new WritableFileMirror(f, options);
Status as = a_->NewWritableFile(f, &mf->a_, options);
Status bs = b_->NewWritableFile(f, &mf->b_, options);
assert(as == bs);
if (as.ok())
if (as.ok()) {
r->reset(mf);
else
} else {
delete mf;
}
return as;
}
@ -257,16 +262,18 @@ Status EnvMirror::ReuseWritableFile(const std::string& fname,
const std::string& old_fname,
std::unique_ptr<WritableFile>* r,
const EnvOptions& options) {
if (fname.find("/proc/") == 0)
if (fname.find("/proc/") == 0) {
return a_->ReuseWritableFile(fname, old_fname, r, options);
}
WritableFileMirror* mf = new WritableFileMirror(fname, options);
Status as = a_->ReuseWritableFile(fname, old_fname, &mf->a_, options);
Status bs = b_->ReuseWritableFile(fname, old_fname, &mf->b_, options);
assert(as == bs);
if (as.ok())
if (as.ok()) {
r->reset(mf);
else
} else {
delete mf;
}
return as;
}

View File

@ -71,7 +71,7 @@ Status Truncate(Env* env, const std::string& filename, uint64_t length) {
// Trim the tailing "/" in the end of `str`
std::string TrimDirname(const std::string& str) {
size_t found = str.find_last_not_of("/");
size_t found = str.find_last_not_of('/');
if (found == std::string::npos) {
return str;
}
@ -528,7 +528,7 @@ Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() {
}
for (auto& pair : map_copy) {
for (std::string name : pair.second) {
for (const std::string& name : pair.second) {
Status s = DeleteFile(pair.first + "/" + name);
if (!s.ok()) {
return s;

View File

@ -33,7 +33,7 @@
namespace ROCKSDB_NAMESPACE {
const std::string kNewFileNoOverwrite = "";
const std::string kNewFileNoOverwrite;
// Assume a filename, and not a directory name like "/foo/bar/"
std::string TestFSGetDirName(const std::string filename) {
@ -47,7 +47,7 @@ std::string TestFSGetDirName(const std::string filename) {
// Trim the tailing "/" in the end of `str`
std::string TestFSTrimDirname(const std::string& str) {
size_t found = str.find_last_not_of("/");
size_t found = str.find_last_not_of('/');
if (found == std::string::npos) {
return str;
}
@ -74,7 +74,6 @@ void CalculateTypedChecksum(const ChecksumType& checksum_type, const char* data,
uint32_t v = XXH32(data, size, 0);
PutFixed32(checksum, v);
}
return;
}
IOStatus FSFileState::DropUnsyncedData() {
@ -1014,7 +1013,7 @@ IOStatus FaultInjectionTestFS::InjectThreadSpecificReadError(
bool FaultInjectionTestFS::TryParseFileName(const std::string& file_name,
uint64_t* number, FileType* type) {
std::size_t found = file_name.find_last_of("/");
std::size_t found = file_name.find_last_of('/');
std::string file = file_name.substr(found);
return ParseFileName(file, number, type);
}

View File

@ -65,7 +65,7 @@ class MemoryTest : public testing::Test {
if (db_impl != nullptr) {
ASSERT_OK(db_impl->TEST_GetAllImmutableCFOptions(&iopts_map));
}
for (auto pair : iopts_map) {
for (const auto& pair : iopts_map) {
GetCachePointersFromTableFactory(pair.second->table_factory.get(),
cache_set);
}

View File

@ -52,7 +52,9 @@ bool SortList::PartialMergeMulti(const Slice& /*key*/,
void SortList::MakeVector(std::vector<int>& operand, Slice slice) const {
do {
const char* begin = slice.data_;
while (*slice.data_ != ',' && *slice.data_) slice.data_++;
while (*slice.data_ != ',' && *slice.data_) {
slice.data_++;
}
operand.push_back(std::stoi(std::string(begin, slice.data_)));
} while (0 != *slice.data_++);
}

View File

@ -7,8 +7,7 @@
#include "stringappend.h"
#include <assert.h>
#include <cassert>
#include <memory>
#include "rocksdb/merge_operator.h"

View File

@ -5,8 +5,7 @@
#include "stringappend2.h"
#include <assert.h>
#include <cassert>
#include <memory>
#include <string>

View File

@ -5,7 +5,7 @@
#include "rocksdb/utilities/object_registry.h"
#include <ctype.h>
#include <cctype>
#include "logging/logging.h"
#include "port/lang.h"

View File

@ -119,7 +119,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate1) {
{
std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
it->SeekToFirst();
for (std::string key : keys) {
for (const std::string& key : keys) {
ASSERT_TRUE(it->Valid());
ASSERT_EQ(key, it->key().ToString());
it->Next();
@ -199,7 +199,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate2) {
{
std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
it->SeekToFirst();
for (std::string key : keys) {
for (const std::string& key : keys) {
ASSERT_TRUE(it->Valid());
ASSERT_EQ(key, it->key().ToString());
it->Next();
@ -285,7 +285,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate3) {
{
std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
it->SeekToFirst();
for (std::string key : keys) {
for (const std::string& key : keys) {
ASSERT_TRUE(it->Valid());
ASSERT_EQ(key, it->key().ToString());
it->Next();
@ -371,7 +371,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate4) {
{
std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
it->SeekToFirst();
for (std::string key : keys) {
for (const std::string& key : keys) {
ASSERT_TRUE(it->Valid());
ASSERT_EQ(key, it->key().ToString());
it->Next();
@ -538,7 +538,7 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) {
{
std::unique_ptr<Iterator> it(db_->NewIterator(ReadOptions()));
it->SeekToFirst();
for (std::string key : keys) {
for (const std::string& key : keys) {
ASSERT_TRUE(it->Valid());
ASSERT_EQ(key, it->key().ToString());
it->Next();

View File

@ -121,8 +121,8 @@ TEST_F(OptionsUtilTest, SaveAndLoadWithCacheCheck) {
std::vector<std::string> cf_names;
cf_names.push_back(kDefaultColumnFamilyName);
cf_names.push_back("cf_sample");
cf_names.push_back("cf_plain_table_sample");
cf_names.emplace_back("cf_sample");
cf_names.emplace_back("cf_plain_table_sample");
// Saving DB in file
const std::string kFileName = "OPTIONS-LOAD_CACHE_123456";
ASSERT_OK(PersistRocksDBOptions(WriteOptions(), db_opt, cf_names, cf_opts,
@ -151,8 +151,8 @@ TEST_F(OptionsUtilTest, SaveAndLoadWithCacheCheck) {
namespace {
class DummyTableFactory : public TableFactory {
public:
DummyTableFactory() {}
~DummyTableFactory() override {}
DummyTableFactory() = default;
~DummyTableFactory() override = default;
const char* Name() const override { return "DummyTableFactory"; }
@ -183,8 +183,8 @@ class DummyTableFactory : public TableFactory {
class DummyMergeOperator : public MergeOperator {
public:
DummyMergeOperator() {}
~DummyMergeOperator() override {}
DummyMergeOperator() = default;
~DummyMergeOperator() override = default;
bool FullMergeV2(const MergeOperationInput& /*merge_in*/,
MergeOperationOutput* /*merge_out*/) const override {
@ -203,8 +203,8 @@ class DummyMergeOperator : public MergeOperator {
class DummySliceTransform : public SliceTransform {
public:
DummySliceTransform() {}
~DummySliceTransform() override {}
DummySliceTransform() = default;
~DummySliceTransform() override = default;
// Return the name of this transformation.
const char* Name() const override { return "DummySliceTransform"; }

View File

@ -78,7 +78,7 @@ bool IsCacheFile(const std::string& file) {
// check if the file has .rc suffix
// Unfortunately regex support across compilers is not even, so we use simple
// string parsing
size_t pos = file.find(".");
size_t pos = file.find('.');
if (pos == std::string::npos) {
return false;
}
@ -97,7 +97,7 @@ Status BlockCacheTier::CleanupCacheFolder(const std::string& folder) {
}
// cleanup files with the patter :digi:.rc
for (auto file : files) {
for (const auto& file : files) {
if (IsCacheFile(file)) {
// cache file
Info(opt_.log, "Removing file %s.", file.c_str());

View File

@ -79,7 +79,7 @@ struct CacheRecordHeader {
};
struct CacheRecord {
CacheRecord() {}
CacheRecord() = default;
CacheRecord(const Slice& key, const Slice& val)
: hdr_(MAGIC, static_cast<uint32_t>(key.size()),
static_cast<uint32_t>(val.size())),

View File

@ -5,8 +5,7 @@
//
#include "utilities/persistent_cache/hash_table.h"
#include <stdlib.h>
#include <cstdlib>
#include <iostream>
#include <set>
#include <string>
@ -17,14 +16,13 @@
#include "util/random.h"
#include "utilities/persistent_cache/hash_table_evictable.h"
namespace ROCKSDB_NAMESPACE {
struct HashTableTest : public testing::Test {
~HashTableTest() override { map_.Clear(&HashTableTest::ClearNode); }
struct Node {
Node() {}
Node() = default;
explicit Node(const uint64_t key, const std::string& val = std::string())
: key_(key), val_(val) {}
@ -55,7 +53,7 @@ struct EvictableHashTableTest : public testing::Test {
}
struct Node : LRUElement<Node> {
Node() {}
Node() = default;
explicit Node(const uint64_t key, const std::string& val = std::string())
: key_(key), val_(val) {}

View File

@ -82,9 +82,9 @@ bool PersistentCacheTier::Erase(const Slice& /*key*/) {
std::string PersistentCacheTier::PrintStats() {
std::ostringstream os;
for (auto tier_stats : Stats()) {
for (const auto& tier_stats : Stats()) {
os << "---- next tier -----" << std::endl;
for (auto stat : tier_stats) {
for (const auto& stat : tier_stats) {
os << stat.first << ": " << stat.second << std::endl;
}
}

View File

@ -157,7 +157,7 @@ class SimCacheImpl : public SimCache {
hit_times_(0),
stats_(nullptr) {}
~SimCacheImpl() override {}
~SimCacheImpl() override = default;
const char* Name() const override { return "SimCache"; }

View File

@ -175,7 +175,7 @@ TEST_F(SimCacheTest, SimCacheLogging) {
sim_cache->StopActivityLogging();
ASSERT_OK(sim_cache->GetActivityLoggingStatus());
std::string file_contents = "";
std::string file_contents;
ASSERT_OK(ReadFileToString(env_, log_file, &file_contents));
std::istringstream contents(file_contents);

View File

@ -7,10 +7,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <stdio.h>
#include "utilities/table_properties_collectors/compact_on_deletion_collector.h"
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>
#include "port/stack_trace.h"
@ -19,7 +20,6 @@
#include "rocksdb/utilities/table_properties_collectors.h"
#include "test_util/testharness.h"
#include "util/random.h"
#include "utilities/table_properties_collectors/compact_on_deletion_collector.h"
namespace ROCKSDB_NAMESPACE {

View File

@ -34,9 +34,8 @@ struct LockInfo {
txn_ids.push_back(id);
}
LockInfo(const LockInfo& lock_info)
: exclusive(lock_info.exclusive),
txn_ids(lock_info.txn_ids),
expiration_time(lock_info.expiration_time) {}
= default;
void operator=(const LockInfo& lock_info) {
exclusive = lock_info.exclusive;
txn_ids = lock_info.txn_ids;

Some files were not shown because too many files have changed in this diff Show More