diff --git a/cache/cache_bench_tool.cc b/cache/cache_bench_tool.cc
index 07cd1a1f6f..041bf34bcb 100644
--- a/cache/cache_bench_tool.cc
+++ b/cache/cache_bench_tool.cc
@@ -194,7 +194,7 @@ class SharedState {
       : cv_(&mu_), cache_bench_(cache_bench) {}
-  ~SharedState() {}
+  ~SharedState() = default;
   port::Mutex* GetMutex() { return &mu_; }
@@ -425,7 +425,7 @@ class CacheBench {
     }
   }
-  ~CacheBench() {}
+  ~CacheBench() = default;
   void PopulateCache() {
     Random64 rnd(FLAGS_seed);
diff --git a/cache/cache_test.cc b/cache/cache_test.cc
index f21efc47a9..adc354a8f1 100644
--- a/cache/cache_test.cc
+++ b/cache/cache_test.cc
@@ -106,7 +106,7 @@ class CacheTest : public testing::Test,
     type_ = GetParam();
   }
-  ~CacheTest() override {}
+  ~CacheTest() override = default;
   // These functions encode/decode keys in tests cases that use
   // int keys.
@@ -766,7 +766,9 @@ TEST_P(CacheTest, OverCapacity) {
     std::string key = EncodeKey(i + 1);
     auto h = cache.Lookup(key);
     ASSERT_TRUE(h != nullptr);
-    if (h) cache.Release(h);
+    if (h) {
+      cache.Release(h);
+    }
   }
   // the cache is over capacity since nothing could be evicted
@@ -777,7 +779,7 @@ TEST_P(CacheTest, OverCapacity) {
   if (IsHyperClock()) {
     // Make sure eviction is triggered.
-    ASSERT_OK(cache.Insert(EncodeKey(-1), nullptr, 1, &handles[0]));
+    ASSERT_OK(cache.Insert(EncodeKey(-1), nullptr, 1, handles.data()));
     // cache is under capacity now since elements were released
     ASSERT_GE(n, cache.get()->GetUsage());
diff --git a/cache/compressed_secondary_cache.cc b/cache/compressed_secondary_cache.cc
index 9e6cd7b5e6..ef2417f8d8 100644
--- a/cache/compressed_secondary_cache.cc
+++ b/cache/compressed_secondary_cache.cc
@@ -26,7 +26,7 @@ CompressedSecondaryCache::CompressedSecondaryCache(
                       cache_))),
       disable_cache_(opts.capacity == 0) {}
-CompressedSecondaryCache::~CompressedSecondaryCache() {}
+CompressedSecondaryCache::~CompressedSecondaryCache() = default;
 std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
     const Slice& key, const Cache::CacheItemHelper* helper,
diff --git a/cache/compressed_secondary_cache_test.cc b/cache/compressed_secondary_cache_test.cc
index 15eb05d5e7..058a80dd71 100644
--- a/cache/compressed_secondary_cache_test.cc
+++ b/cache/compressed_secondary_cache_test.cc
@@ -33,7 +33,7 @@ const std::string key3 = "____ ____key3";
 class CompressedSecondaryCacheTestBase : public testing::Test,
                                          public WithCacheType {
  public:
-  CompressedSecondaryCacheTestBase() {}
+  CompressedSecondaryCacheTestBase() = default;
   ~CompressedSecondaryCacheTestBase() override = default;
  protected:
diff --git a/cache/lru_cache_test.cc b/cache/lru_cache_test.cc
index 587fbfd2cc..13cb52f4a9 100644
--- a/cache/lru_cache_test.cc
+++ b/cache/lru_cache_test.cc
@@ -32,7 +32,7 @@ namespace ROCKSDB_NAMESPACE {
 class LRUCacheTest : public testing::Test {
  public:
-  LRUCacheTest() {}
+  LRUCacheTest() = default;
   ~LRUCacheTest() override { DeleteCache(); }
   void DeleteCache() {
@@ -378,7 +378,7 @@ class ClockCacheTest : public testing::Test {
   using Table = typename Shard::Table;
   using TableOpts = typename Table::Opts;
-  ClockCacheTest() {}
+  ClockCacheTest() = default;
   ~ClockCacheTest() override { DeleteShard(); }
   void DeleteShard() {
@@ -1976,7 +1976,7 @@ TEST_P(BasicSecondaryCacheTest, BasicWaitAllTest) {
     ah.priority = Cache::Priority::LOW;
     cache->StartAsyncLookup(ah);
   }
-  cache->WaitAll(&async_handles[0], async_handles.size());
+  cache->WaitAll(async_handles.data(), async_handles.size());
   for (size_t i = 0; i < async_handles.size(); ++i) {
     SCOPED_TRACE("i = " + std::to_string(i));
     Cache::Handle*
result = async_handles[i].Result(); diff --git a/cache/tiered_secondary_cache_test.cc b/cache/tiered_secondary_cache_test.cc index 28a393325e..6a43b6dd52 100644 --- a/cache/tiered_secondary_cache_test.cc +++ b/cache/tiered_secondary_cache_test.cc @@ -386,7 +386,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) { keys.push_back(Key(8)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u); @@ -400,7 +400,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) { keys.push_back(Key(20)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u); @@ -414,7 +414,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) { keys.push_back(Key(8)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u); @@ -428,7 +428,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) { keys.push_back(Key(8)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u); @@ -442,7 +442,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) { keys.push_back(Key(8)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u); @@ -456,7 +456,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) { keys.push_back(Key(20)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u); @@ -470,7 +470,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) { keys.push_back(Key(20)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u); @@ -484,7 +484,7 @@ TEST_F(DBTieredSecondaryCacheTest, BasicMultiGetTest) { keys.push_back(Key(20)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u); @@ -528,7 +528,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) { keys.push_back(Key(8)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u); @@ -542,7 +542,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) { keys.push_back(Key(20)); values = MultiGet(keys, 
/*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u); @@ -561,7 +561,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) { keys.push_back(Key(36)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 10u); @@ -582,7 +582,7 @@ TEST_F(DBTieredSecondaryCacheTest, WaitAllTest) { keys.push_back(Key(8)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 10u); @@ -629,7 +629,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) { keys.push_back(Key(8)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 3u); @@ -644,7 +644,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) { keys.push_back(Key(20)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u); @@ -659,7 +659,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) { keys.push_back(Key(8)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 6u); @@ -676,7 +676,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) { keys.push_back(Key(36)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 8u); @@ -691,7 +691,7 @@ TEST_F(DBTieredSecondaryCacheTest, ReadyBeforeWaitAllTest) { keys.push_back(Key(36)); values = MultiGet(keys, /*snapshot=*/nullptr, /*async=*/true); ASSERT_EQ(values.size(), keys.size()); - for (auto value : values) { + for (const auto& value : values) { ASSERT_EQ(1007, value.size()); } ASSERT_EQ(nvm_sec_cache()->num_insert_saved(), 8u); diff --git a/db/blob/blob_file_reader_test.cc b/db/blob/blob_file_reader_test.cc index b42b866859..676cbed41e 100644 --- a/db/blob/blob_file_reader_test.cc +++ b/db/blob/blob_file_reader_test.cc @@ -405,7 +405,7 @@ TEST_F(BlobFileReaderTest, CreateReaderAndGetBlob) { requests_buf[0] = BlobReadRequest(key_refs[0], blob_offsets[0], blob_sizes[0], - kNoCompression, nullptr, &statuses_buf[0]); + kNoCompression, nullptr, statuses_buf.data()); requests_buf[1] = BlobReadRequest(key_refs[1], blob_offsets[1], blob_sizes[1] + 1, kNoCompression, nullptr, &statuses_buf[1]); diff --git a/db/blob/blob_source_test.cc b/db/blob/blob_source_test.cc index 9fc1931c1e..a12c210fc2 100644 --- a/db/blob/blob_source_test.cc +++ b/db/blob/blob_source_test.cc @@ -168,8 +168,8 @@ TEST_F(BlobSourceTest, GetBlobsFromCache) { uint64_t file_size 
= BlobLogHeader::kSize; for (size_t i = 0; i < num_blobs; ++i) { - keys.push_back({key_strs[i]}); - blobs.push_back({blob_strs[i]}); + keys.emplace_back(key_strs[i]); + blobs.emplace_back(blob_strs[i]); file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size(); } file_size += BlobLogFooter::kSize; @@ -482,8 +482,8 @@ TEST_F(BlobSourceTest, GetCompressedBlobs) { std::vector blobs; for (size_t i = 0; i < num_blobs; ++i) { - keys.push_back({key_strs[i]}); - blobs.push_back({blob_strs[i]}); + keys.emplace_back(key_strs[i]); + blobs.emplace_back(blob_strs[i]); } std::vector blob_offsets(keys.size()); @@ -610,8 +610,8 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromMultiFiles) { uint64_t file_size = BlobLogHeader::kSize; uint64_t blob_value_bytes = 0; for (size_t i = 0; i < num_blobs; ++i) { - keys.push_back({key_strs[i]}); - blobs.push_back({blob_strs[i]}); + keys.emplace_back(key_strs[i]); + blobs.emplace_back(blob_strs[i]); blob_value_bytes += blobs[i].size(); file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size(); } @@ -802,8 +802,8 @@ TEST_F(BlobSourceTest, MultiGetBlobsFromCache) { uint64_t file_size = BlobLogHeader::kSize; for (size_t i = 0; i < num_blobs; ++i) { - keys.push_back({key_strs[i]}); - blobs.push_back({blob_strs[i]}); + keys.emplace_back(key_strs[i]); + blobs.emplace_back(blob_strs[i]); file_size += BlobLogRecord::kHeaderSize + keys[i].size() + blobs[i].size(); } file_size += BlobLogFooter::kSize; @@ -1164,7 +1164,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) { ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number, blob_offsets[0], file_size, blob_sizes[0], kNoCompression, nullptr /* prefetch_buffer */, - &values[0], nullptr /* bytes_read */)); + values.data(), nullptr /* bytes_read */)); // Release cache handle values[0].Reset(); @@ -1183,7 +1183,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) { ASSERT_OK(blob_source.GetBlob(read_options, keys[0], file_number, blob_offsets[0], file_size, blob_sizes[0], kNoCompression, nullptr /* prefetch_buffer */, - &values[0], nullptr /* bytes_read */)); + values.data(), nullptr /* bytes_read */)); ASSERT_EQ(values[0], blobs[0]); ASSERT_TRUE( blob_source.TEST_BlobInCache(file_number, file_size, blob_offsets[0])); @@ -1263,7 +1263,7 @@ TEST_F(BlobSecondaryCacheTest, GetBlobsFromSecondaryCache) { ASSERT_OK(blob_source.GetBlob( read_options, keys[0], file_number, blob_offsets[0], file_size, blob_sizes[0], kNoCompression, nullptr /* prefetch_buffer */, - &values[0], nullptr /* bytes_read */)); + values.data(), nullptr /* bytes_read */)); ASSERT_EQ(values[0], blobs[0]); // Release cache handle @@ -1365,8 +1365,8 @@ class BlobSourceCacheReservationTest : public DBTestBase { blob_file_size_ = BlobLogHeader::kSize; for (size_t i = 0; i < kNumBlobs; ++i) { - keys_.push_back({key_strs_[i]}); - blobs_.push_back({blob_strs_[i]}); + keys_.emplace_back(key_strs_[i]); + blobs_.emplace_back(blob_strs_[i]); blob_file_size_ += BlobLogRecord::kHeaderSize + keys_[i].size() + blobs_[i].size(); } diff --git a/db/blob/db_blob_basic_test.cc b/db/blob/db_blob_basic_test.cc index 1c0caba93d..ef48844b43 100644 --- a/db/blob/db_blob_basic_test.cc +++ b/db/blob/db_blob_basic_test.cc @@ -418,8 +418,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobs) { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, + keys.data(), values.data(), 
statuses.data()); ASSERT_OK(statuses[0]); ASSERT_EQ(values[0], first_value); @@ -441,8 +441,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobs) { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, + keys.data(), values.data(), statuses.data()); ASSERT_OK(statuses[0]); ASSERT_EQ(values[0], first_value); @@ -512,8 +512,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, + keys.data(), values.data(), statuses.data()); ASSERT_OK(statuses[0]); ASSERT_EQ(values[0], first_value); @@ -534,8 +534,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, + keys.data(), values.data(), statuses.data()); ASSERT_OK(statuses[0]); ASSERT_EQ(values[0], first_value); @@ -553,8 +553,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, + keys.data(), values.data(), statuses.data()); ASSERT_OK(statuses[0]); ASSERT_EQ(values[0], first_value); @@ -574,8 +574,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromCache) { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, + keys.data(), values.data(), statuses.data()); ASSERT_OK(statuses[0]); ASSERT_EQ(values[0], first_value); @@ -758,8 +758,8 @@ TEST_F(DBBlobBasicTest, MultiGetWithDirectIO) { // // [offset=0, len=12288] - db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, + keys.data(), values.data(), statuses.data()); SyncPoint::GetInstance()->DisableProcessing(); SyncPoint::GetInstance()->ClearAllCallBacks(); @@ -829,8 +829,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) { { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, + keys.data(), values.data(), statuses.data()); for (size_t i = 0; i < kNumKeys; ++i) { ASSERT_OK(statuses[i]); @@ -843,8 +843,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) { { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, + keys.data(), values.data(), statuses.data()); for (size_t i = 0; i < kNumKeys; ++i) { ASSERT_TRUE(statuses[i].IsIncomplete()); @@ -858,8 +858,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) { { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, 
db_->DefaultColumnFamily(), kNumKeys, + keys.data(), values.data(), statuses.data()); for (size_t i = 0; i < kNumKeys; ++i) { ASSERT_OK(statuses[i]); @@ -872,8 +872,8 @@ TEST_F(DBBlobBasicTest, MultiGetBlobsFromMultipleFiles) { { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, db_->DefaultColumnFamily(), kNumKeys, + keys.data(), values.data(), statuses.data()); for (size_t i = 0; i < kNumKeys; ++i) { ASSERT_OK(statuses[i]); @@ -1206,8 +1206,8 @@ TEST_F(DBBlobBasicTest, MultiGetMergeBlobWithPut) { std::array values; std::array statuses; - db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, + keys.data(), values.data(), statuses.data()); ASSERT_OK(statuses[0]); ASSERT_EQ(values[0], "v0_0,v0_1,v0_2"); @@ -1470,8 +1470,8 @@ TEST_P(DBBlobBasicIOErrorMultiGetTest, MultiGetBlobs_IOError) { }); SyncPoint::GetInstance()->EnableProcessing(); - db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(ReadOptions(), db_->DefaultColumnFamily(), num_keys, + keys.data(), values.data(), statuses.data()); SyncPoint::GetInstance()->DisableProcessing(); SyncPoint::GetInstance()->ClearAllCallBacks(); @@ -1820,7 +1820,7 @@ TEST_F(DBBlobBasicTest, GetEntityBlob) { std::array statuses; db_->MultiGetEntity(ReadOptions(), db_->DefaultColumnFamily(), num_keys, - &keys[0], &results[0], &statuses[0]); + keys.data(), results.data(), statuses.data()); ASSERT_OK(statuses[0]); ASSERT_EQ(results[0].columns(), expected_columns); @@ -1917,8 +1917,8 @@ TEST_F(DBBlobWithTimestampTest, MultiGetBlobs) { std::array values; std::array statuses; - db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_options, db_->DefaultColumnFamily(), num_keys, + keys.data(), values.data(), statuses.data()); ASSERT_OK(statuses[0]); ASSERT_EQ(values[0], first_value); @@ -2001,8 +2001,8 @@ TEST_F(DBBlobWithTimestampTest, MultiGetMergeBlobWithPut) { std::array values; std::array statuses; - db_->MultiGet(read_opts, db_->DefaultColumnFamily(), num_keys, &keys[0], - &values[0], &statuses[0]); + db_->MultiGet(read_opts, db_->DefaultColumnFamily(), num_keys, keys.data(), + values.data(), statuses.data()); ASSERT_OK(statuses[0]); ASSERT_EQ(values[0], "v0_0,v0_1,v0_2"); diff --git a/db/c.cc b/db/c.cc index c0dcd59b61..69cd665c0b 100644 --- a/db/c.cc +++ b/db/c.cc @@ -446,7 +446,7 @@ struct rocksdb_mergeoperator_t : public MergeOperator { size_t new_value_len; char* tmp_new_value = (*full_merge_)( state_, merge_in.key.data(), merge_in.key.size(), existing_value_data, - existing_value_len, &operand_pointers[0], &operand_sizes[0], + existing_value_len, operand_pointers.data(), operand_sizes.data(), static_cast(n), &success, &new_value_len); merge_out->new_value.assign(tmp_new_value, new_value_len); @@ -475,8 +475,9 @@ struct rocksdb_mergeoperator_t : public MergeOperator { unsigned char success; size_t new_value_len; char* tmp_new_value = (*partial_merge_)( - state_, key.data(), key.size(), &operand_pointers[0], &operand_sizes[0], - static_cast(operand_count), &success, &new_value_len); + state_, key.data(), key.size(), operand_pointers.data(), + operand_sizes.data(), static_cast(operand_count), &success, + &new_value_len); new_value->assign(tmp_new_value, new_value_len); if 
(delete_value_ != nullptr) { @@ -886,9 +887,9 @@ rocksdb_t* rocksdb_open_and_trim_history( size_t trim_tslen, char** errptr) { std::vector column_families; for (int i = 0; i < num_column_families; i++) { - column_families.push_back(ColumnFamilyDescriptor( + column_families.emplace_back( std::string(column_family_names[i]), - ColumnFamilyOptions(column_family_options[i]->rep))); + ColumnFamilyOptions(column_family_options[i]->rep)); } std::string trim_ts_(trim_ts, trim_tslen); @@ -919,9 +920,9 @@ rocksdb_t* rocksdb_open_column_families( rocksdb_column_family_handle_t** column_family_handles, char** errptr) { std::vector column_families; for (int i = 0; i < num_column_families; i++) { - column_families.push_back(ColumnFamilyDescriptor( + column_families.emplace_back( std::string(column_family_names[i]), - ColumnFamilyOptions(column_family_options[i]->rep))); + ColumnFamilyOptions(column_family_options[i]->rep)); } DB* db; @@ -953,9 +954,9 @@ rocksdb_t* rocksdb_open_column_families_with_ttl( for (int i = 0; i < num_column_families; i++) { ttls_vec.push_back(ttls[i]); - column_families.push_back(ColumnFamilyDescriptor( + column_families.emplace_back( std::string(column_family_names[i]), - ColumnFamilyOptions(column_family_options[i]->rep))); + ColumnFamilyOptions(column_family_options[i]->rep)); } ROCKSDB_NAMESPACE::DBWithTTL* db; @@ -985,9 +986,9 @@ rocksdb_t* rocksdb_open_for_read_only_column_families( unsigned char error_if_wal_file_exists, char** errptr) { std::vector column_families; for (int i = 0; i < num_column_families; i++) { - column_families.push_back(ColumnFamilyDescriptor( + column_families.emplace_back( std::string(column_family_names[i]), - ColumnFamilyOptions(column_family_options[i]->rep))); + ColumnFamilyOptions(column_family_options[i]->rep)); } DB* db; @@ -1081,7 +1082,7 @@ rocksdb_column_family_handle_t** rocksdb_create_column_families( std::vector handles; std::vector names; for (int i = 0; i != num_column_families; ++i) { - names.push_back(std::string(column_family_names[i])); + names.emplace_back(column_family_names[i]); } SaveError(errptr, db->rep->CreateColumnFamilies( ColumnFamilyOptions(column_family_options->rep), names, @@ -2788,7 +2789,9 @@ void rocksdb_options_set_cuckoo_table_factory( void rocksdb_set_options(rocksdb_t* db, int count, const char* const keys[], const char* const values[], char** errptr) { std::unordered_map options_map; - for (int i = 0; i < count; i++) options_map[keys[i]] = values[i]; + for (int i = 0; i < count; i++) { + options_map[keys[i]] = values[i]; + } SaveError(errptr, db->rep->SetOptions(options_map)); } @@ -2797,7 +2800,9 @@ void rocksdb_set_options_cf(rocksdb_t* db, const char* const keys[], const char* const values[], char** errptr) { std::unordered_map options_map; - for (int i = 0; i < count; i++) options_map[keys[i]] = values[i]; + for (int i = 0; i < count; i++) { + options_map[keys[i]] = values[i]; + } SaveError(errptr, db->rep->SetOptions(handle->rep, options_map)); } @@ -5060,7 +5065,9 @@ void rocksdb_env_lower_high_priority_thread_pool_cpu_priority( } void rocksdb_env_destroy(rocksdb_env_t* env) { - if (!env->is_default) delete env->rep; + if (!env->is_default) { + delete env->rep; + } delete env; } @@ -5524,7 +5531,7 @@ size_t rocksdb_column_family_metadata_get_level_count( rocksdb_level_metadata_t* rocksdb_column_family_metadata_get_level_metadata( rocksdb_column_family_metadata_t* cf_meta, size_t i) { if (i >= cf_meta->rep.levels.size()) { - return NULL; + return nullptr; } rocksdb_level_metadata_t* level_meta = 
(rocksdb_level_metadata_t*)malloc(sizeof(rocksdb_level_metadata_t)); @@ -5739,9 +5746,9 @@ rocksdb_transactiondb_t* rocksdb_transactiondb_open_column_families( rocksdb_column_family_handle_t** column_family_handles, char** errptr) { std::vector column_families; for (int i = 0; i < num_column_families; i++) { - column_families.push_back(ColumnFamilyDescriptor( + column_families.emplace_back( std::string(column_family_names[i]), - ColumnFamilyOptions(column_family_options[i]->rep))); + ColumnFamilyOptions(column_family_options[i]->rep)); } TransactionDB* txn_db; @@ -6533,9 +6540,9 @@ rocksdb_optimistictransactiondb_open_column_families( rocksdb_column_family_handle_t** column_family_handles, char** errptr) { std::vector column_families; for (int i = 0; i < num_column_families; i++) { - column_families.push_back(ColumnFamilyDescriptor( + column_families.emplace_back( std::string(column_family_names[i]), - ColumnFamilyOptions(column_family_options[i]->rep))); + ColumnFamilyOptions(column_family_options[i]->rep)); } OptimisticTransactionDB* otxn_db; diff --git a/db/c_test.c b/db/c_test.c index b00827fef7..bbd06b5163 100644 --- a/db/c_test.c +++ b/db/c_test.c @@ -50,14 +50,15 @@ static void StartPhase(const char* name) { #endif static const char* GetTempDir(void) { const char* ret = getenv("TEST_TMPDIR"); - if (ret == NULL || ret[0] == '\0') + if (ret == NULL || ret[0] == '\0') { #ifdef OS_WIN ret = getenv("TEMP"); #else ret = "/tmp"; + } #endif return ret; -} + } #ifdef _MSC_VER #pragma warning(pop) #endif @@ -206,10 +207,11 @@ static int CmpCompare(void* arg, const char* a, size_t alen, const char* b, size_t n = (alen < blen) ? alen : blen; int r = memcmp(a, b, n); if (r == 0) { - if (alen < blen) + if (alen < blen) { r = -1; - else if (alen > blen) + } else if (alen > blen) { r = +1; + } } return r; } diff --git a/db/column_family_test.cc b/db/column_family_test.cc index 25805e9139..90f66077ce 100644 --- a/db/column_family_test.cc +++ b/db/column_family_test.cc @@ -270,7 +270,7 @@ class ColumnFamilyTestBase : public testing::Test { void Reopen(const std::vector options = {}) { std::vector names; - for (auto name : names_) { + for (const auto& name : names_) { if (name != "") { names.push_back(name); } @@ -607,7 +607,7 @@ TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest) { // Preserve file system state up to here to simulate a crash condition. fault_env->SetFilesystemActive(false); std::vector names; - for (auto name : names_) { + for (const auto& name : names_) { if (name != "") { names.push_back(name); } @@ -669,7 +669,7 @@ TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest2) { // Preserve file system state up to here to simulate a crash condition. 
fault_env->SetFilesystemActive(false); std::vector names; - for (auto name : names_) { + for (const auto& name : names_) { if (name != "") { names.push_back(name); } @@ -1034,7 +1034,7 @@ TEST_P(ColumnFamilyTest, CrashAfterFlush) { fault_env->SetFilesystemActive(false); std::vector names; - for (auto name : names_) { + for (const auto& name : names_) { if (name != "") { names.push_back(name); } @@ -3407,9 +3407,13 @@ TEST_P(ColumnFamilyTest, DISABLED_LogTruncationTest) { for (size_t i = 0; i < filenames.size(); i++) { uint64_t number; FileType type; - if (!(ParseFileName(filenames[i], &number, &type))) continue; + if (!(ParseFileName(filenames[i], &number, &type))) { + continue; + } - if (type != kWalFile) continue; + if (type != kWalFile) { + continue; + } logfs.push_back(filenames[i]); } diff --git a/db/compact_files_test.cc b/db/compact_files_test.cc index 2d53f2b992..129b29c99f 100644 --- a/db/compact_files_test.cc +++ b/db/compact_files_test.cc @@ -34,8 +34,8 @@ class CompactFilesTest : public testing::Test { // A class which remembers the name of each flushed file. class FlushedFileCollector : public EventListener { public: - FlushedFileCollector() {} - ~FlushedFileCollector() override {} + FlushedFileCollector() = default; + ~FlushedFileCollector() override = default; void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override { std::lock_guard lock(mutex_); @@ -45,7 +45,7 @@ class FlushedFileCollector : public EventListener { std::vector GetFlushedFiles() { std::lock_guard lock(mutex_); std::vector result; - for (auto fname : flushed_files_) { + for (const auto& fname : flushed_files_) { result.push_back(fname); } return result; @@ -159,7 +159,9 @@ TEST_F(CompactFilesTest, MultipleLevel) { // Compact files except the file in L3 std::vector files; for (int i = 0; i < 6; ++i) { - if (i == 3) continue; + if (i == 3) { + continue; + } for (auto& file : meta.levels[i].files) { files.push_back(file.db_path + "/" + file.name); } @@ -228,7 +230,7 @@ TEST_F(CompactFilesTest, ObsoleteFiles) { ASSERT_OK(static_cast_with_check(db)->TEST_WaitForCompact()); // verify all compaction input files are deleted - for (auto fname : l0_files) { + for (const auto& fname : l0_files) { ASSERT_EQ(Status::NotFound(), env_->FileExists(fname)); } delete db; @@ -492,4 +494,3 @@ int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } - diff --git a/db/compaction/compaction.cc b/db/compaction/compaction.cc index d39f47e1a8..d4c994f5f7 100644 --- a/db/compaction/compaction.cc +++ b/db/compaction/compaction.cc @@ -160,7 +160,9 @@ std::vector Compaction::PopulateWithAtomicBoundaries( AtomicCompactionUnitBoundary cur_boundary; size_t first_atomic_idx = 0; auto add_unit_boundary = [&](size_t to) { - if (first_atomic_idx == to) return; + if (first_atomic_idx == to) { + return; + } for (size_t k = first_atomic_idx; k < to; k++) { inputs[i].atomic_compaction_unit_boundaries.push_back(cur_boundary); } @@ -753,7 +755,9 @@ int InputSummary(const std::vector& files, char* output, AppendHumanBytes(files.at(i)->fd.GetFileSize(), sztxt, 16); ret = snprintf(output + write, sz, "%" PRIu64 "(%s) ", files.at(i)->fd.GetNumber(), sztxt); - if (ret < 0 || ret >= sz) break; + if (ret < 0 || ret >= sz) { + break; + } write += ret; } // if files.size() is non-zero, overwrite the last space diff --git a/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc index ff2cc9f957..0bdc9fd522 100644 --- a/db/compaction/compaction_job.cc +++ b/db/compaction/compaction_job.cc @@ 
-404,7 +404,9 @@ void CompactionJob::AcquireSubcompactionResources( void CompactionJob::ShrinkSubcompactionResources(uint64_t num_extra_resources) { // Do nothing when we have zero resources to shrink - if (num_extra_resources == 0) return; + if (num_extra_resources == 0) { + return; + } db_mutex_->Lock(); // We cannot release threads more than what we reserved before int extra_num_subcompaction_threads_released = env_->ReleaseThreads( @@ -584,7 +586,9 @@ void CompactionJob::GenSubcompactionBoundaries() { TEST_SYNC_POINT_CALLBACK("CompactionJob::GenSubcompactionBoundaries:0", &num_planned_subcompactions); - if (num_planned_subcompactions == 1) return; + if (num_planned_subcompactions == 1) { + return; + } // Group the ranges into subcompactions uint64_t target_range_size = std::max( @@ -641,7 +645,7 @@ Status CompactionJob::Run() { // Always schedule the first subcompaction (whether or not there are also // others) in the current thread to be efficient with resources - ProcessKeyValueCompaction(&compact_->sub_compact_states[0]); + ProcessKeyValueCompaction(compact_->sub_compact_states.data()); // Wait for all other threads (if there are any) to finish execution for (auto& thread : thread_pool) { diff --git a/db/compaction/compaction_job_stats_test.cc b/db/compaction/compaction_job_stats_test.cc index 56fc51d058..1cc6b31148 100644 --- a/db/compaction/compaction_job_stats_test.cc +++ b/db/compaction/compaction_job_stats_test.cc @@ -131,7 +131,7 @@ class CompactionJobStatsTest : public testing::Test, ColumnFamilyOptions cf_opts(options); size_t cfi = handles_.size(); handles_.resize(cfi + cfs.size()); - for (auto cf : cfs) { + for (const auto& cf : cfs) { ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++])); } } @@ -160,7 +160,7 @@ class CompactionJobStatsTest : public testing::Test, EXPECT_EQ(cfs.size(), options.size()); std::vector column_families; for (size_t i = 0; i < cfs.size(); ++i) { - column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i])); + column_families.emplace_back(cfs[i], options[i]); } DBOptions db_opts = DBOptions(options[0]); return DB::Open(db_opts, dbname_, column_families, &handles_, &db_); diff --git a/db/compaction/compaction_job_test.cc b/db/compaction/compaction_job_test.cc index 9a135e1c4b..11a757fd68 100644 --- a/db/compaction/compaction_job_test.cc +++ b/db/compaction/compaction_job_test.cc @@ -308,7 +308,7 @@ class CompactionJobTestBase : public testing::Test { kDefaultColumnFamilyName, -1 /* level */), file_writer.get())); // Build table. - for (auto kv : contents) { + for (const auto& kv : contents) { std::string key; std::string value; std::tie(key, value) = kv; @@ -327,7 +327,7 @@ class CompactionJobTestBase : public testing::Test { SequenceNumber smallest_seqno = kMaxSequenceNumber; SequenceNumber largest_seqno = 0; uint64_t oldest_blob_file_number = kInvalidBlobFileNumber; - for (auto kv : contents) { + for (const auto& kv : contents) { ParsedInternalKey key; std::string skey; std::string value; diff --git a/db/compaction/compaction_picker.cc b/db/compaction/compaction_picker.cc index 74985b46cc..53ef7bc6df 100644 --- a/db/compaction/compaction_picker.cc +++ b/db/compaction/compaction_picker.cc @@ -130,7 +130,7 @@ CompactionPicker::CompactionPicker(const ImmutableOptions& ioptions, const InternalKeyComparator* icmp) : ioptions_(ioptions), icmp_(icmp) {} -CompactionPicker::~CompactionPicker() {} +CompactionPicker::~CompactionPicker() = default; // Delete this compaction from the list of running compactions. 
void CompactionPicker::ReleaseCompactionFiles(Compaction* c, Status status) { diff --git a/db/compaction/compaction_picker_level.cc b/db/compaction/compaction_picker_level.cc index 67f1916876..3cb4521129 100644 --- a/db/compaction/compaction_picker_level.cc +++ b/db/compaction/compaction_picker_level.cc @@ -355,7 +355,9 @@ void LevelCompactionBuilder::SetupOtherFilesWithRoundRobinExpansion() { TEST_SYNC_POINT("LevelCompactionPicker::RoundRobin"); // Only expand the inputs when we have selected a file in start_level_inputs_ - if (start_level_inputs_.size() == 0) return; + if (start_level_inputs_.size() == 0) { + return; + } uint64_t start_lvl_bytes_no_compacting = 0; uint64_t curr_bytes_to_compact = 0; diff --git a/db/compaction/compaction_picker_test.cc b/db/compaction/compaction_picker_test.cc index bde61a3667..beac419d98 100644 --- a/db/compaction/compaction_picker_test.cc +++ b/db/compaction/compaction_picker_test.cc @@ -77,7 +77,7 @@ class CompactionPickerTestBase : public testing::Test { ioptions_.level_compaction_dynamic_level_bytes = false; } - ~CompactionPickerTestBase() override {} + ~CompactionPickerTestBase() override = default; void NewVersionStorage(int num_levels, CompactionStyle style) { DeleteVersionStorage(); @@ -214,7 +214,7 @@ class CompactionPickerTest : public CompactionPickerTestBase { explicit CompactionPickerTest() : CompactionPickerTestBase(BytewiseComparator()) {} - ~CompactionPickerTest() override {} + ~CompactionPickerTest() override = default; }; class CompactionPickerU64TsTest : public CompactionPickerTestBase { @@ -222,7 +222,7 @@ class CompactionPickerU64TsTest : public CompactionPickerTestBase { explicit CompactionPickerU64TsTest() : CompactionPickerTestBase(test::BytewiseComparatorWithU64TsWrapper()) {} - ~CompactionPickerU64TsTest() override {} + ~CompactionPickerU64TsTest() override = default; }; TEST_F(CompactionPickerTest, Empty) { diff --git a/db/compaction/compaction_service_test.cc b/db/compaction/compaction_service_test.cc index 3fd6ad83bc..5da4b0ad32 100644 --- a/db/compaction/compaction_service_test.cc +++ b/db/compaction/compaction_service_test.cc @@ -563,10 +563,10 @@ TEST_F(CompactionServiceTest, ConcurrentCompaction) { std::vector threads; for (const auto& file : meta.levels[1].files) { - threads.emplace_back(std::thread([&]() { + threads.emplace_back([&]() { std::string fname = file.db_path + "/" + file.name; ASSERT_OK(db_->CompactFiles(CompactionOptions(), {fname}, 2)); - })); + }); } for (auto& thread : threads) { diff --git a/db/comparator_db_test.cc b/db/comparator_db_test.cc index 0bf79bef19..f9c0f47ef7 100644 --- a/db/comparator_db_test.cc +++ b/db/comparator_db_test.cc @@ -170,7 +170,7 @@ void DoRandomIteraratorTest(DB* db, std::vector source_strings, class DoubleComparator : public Comparator { public: - DoubleComparator() {} + DoubleComparator() = default; const char* Name() const override { return "DoubleComparator"; } @@ -198,7 +198,7 @@ class DoubleComparator : public Comparator { class HashComparator : public Comparator { public: - HashComparator() {} + HashComparator() = default; const char* Name() const override { return "HashComparator"; } @@ -221,7 +221,7 @@ class HashComparator : public Comparator { class TwoStrComparator : public Comparator { public: - TwoStrComparator() {} + TwoStrComparator() = default; const char* Name() const override { return "TwoStrComparator"; } @@ -372,7 +372,7 @@ TEST_P(ComparatorDBTest, Uint64Comparator) { uint64_t r = rnd64.Next(); std::string str; str.resize(8); - memcpy(&str[0], 
static_cast(&r), 8); + memcpy(str.data(), static_cast(&r), 8); source_strings.push_back(str); } diff --git a/db/cuckoo_table_db_test.cc b/db/cuckoo_table_db_test.cc index c35ab4593d..84cd5f8838 100644 --- a/db/cuckoo_table_db_test.cc +++ b/db/cuckoo_table_db_test.cc @@ -209,7 +209,7 @@ static std::string Key(int i) { static std::string Uint64Key(uint64_t i) { std::string str; str.resize(8); - memcpy(&str[0], static_cast(&i), 8); + memcpy(str.data(), static_cast(&i), 8); return str; } } // namespace. diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc index 9c2af83585..60eaa8486e 100644 --- a/db/db_basic_test.cc +++ b/db/db_basic_test.cc @@ -1368,9 +1368,9 @@ TEST_P(DBMultiGetTestWithParam, MultiGetMultiCF) { for (int i = 0; i < num_keys; ++i) { int cf = i / 3; int cf_key = 1 % 3; - cf_kv_vec.emplace_back(std::make_tuple( + cf_kv_vec.emplace_back( cf, "cf" + std::to_string(cf) + "_key_" + std::to_string(cf_key), - "cf" + std::to_string(cf) + "_val_" + std::to_string(cf_key))); + "cf" + std::to_string(cf) + "_val_" + std::to_string(cf_key)); ASSERT_OK(Put(std::get<0>(cf_kv_vec[i]), std::get<1>(cf_kv_vec[i]), std::get<2>(cf_kv_vec[i]))); } @@ -2607,9 +2607,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1) { key_strs.push_back(Key(33)); key_strs.push_back(Key(54)); key_strs.push_back(Key(102)); - keys.push_back(key_strs[0]); - keys.push_back(key_strs[1]); - keys.push_back(key_strs[2]); + keys.emplace_back(key_strs[0]); + keys.emplace_back(key_strs[1]); + keys.emplace_back(key_strs[2]); values.resize(keys.size()); statuses.resize(keys.size()); @@ -2652,9 +2652,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1Error) { key_strs.push_back(Key(33)); key_strs.push_back(Key(54)); key_strs.push_back(Key(102)); - keys.push_back(key_strs[0]); - keys.push_back(key_strs[1]); - keys.push_back(key_strs[2]); + keys.emplace_back(key_strs[0]); + keys.emplace_back(key_strs[1]); + keys.emplace_back(key_strs[2]); values.resize(keys.size()); statuses.resize(keys.size()); @@ -2717,9 +2717,9 @@ TEST_P(DBMultiGetAsyncIOTest, LastKeyInFile) { key_strs.push_back(Key(21)); key_strs.push_back(Key(54)); key_strs.push_back(Key(102)); - keys.push_back(key_strs[0]); - keys.push_back(key_strs[1]); - keys.push_back(key_strs[2]); + keys.emplace_back(key_strs[0]); + keys.emplace_back(key_strs[1]); + keys.emplace_back(key_strs[2]); values.resize(keys.size()); statuses.resize(keys.size()); @@ -2762,9 +2762,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1AndL2) { key_strs.push_back(Key(33)); key_strs.push_back(Key(56)); key_strs.push_back(Key(102)); - keys.push_back(key_strs[0]); - keys.push_back(key_strs[1]); - keys.push_back(key_strs[2]); + keys.emplace_back(key_strs[0]); + keys.emplace_back(key_strs[1]); + keys.emplace_back(key_strs[2]); values.resize(keys.size()); statuses.resize(keys.size()); @@ -2805,8 +2805,8 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL2WithRangeOverlapL0L1) { // 19 and 26 are in L2, but overlap with L0 and L1 file ranges key_strs.push_back(Key(19)); key_strs.push_back(Key(26)); - keys.push_back(key_strs[0]); - keys.push_back(key_strs[1]); + keys.emplace_back(key_strs[0]); + keys.emplace_back(key_strs[1]); values.resize(keys.size()); statuses.resize(keys.size()); @@ -2841,8 +2841,8 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL2WithRangeDelInL1) { // 139 and 163 are in L2, but overlap with a range deletes in L1 key_strs.push_back(Key(139)); key_strs.push_back(Key(163)); - keys.push_back(key_strs[0]); - keys.push_back(key_strs[1]); + keys.emplace_back(key_strs[0]); + keys.emplace_back(key_strs[1]); values.resize(keys.size()); 
statuses.resize(keys.size()); @@ -2871,9 +2871,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetFromL1AndL2WithRangeDelInL1) { key_strs.push_back(Key(139)); key_strs.push_back(Key(144)); key_strs.push_back(Key(163)); - keys.push_back(key_strs[0]); - keys.push_back(key_strs[1]); - keys.push_back(key_strs[2]); + keys.emplace_back(key_strs[0]); + keys.emplace_back(key_strs[1]); + keys.emplace_back(key_strs[2]); values.resize(keys.size()); statuses.resize(keys.size()); @@ -2904,9 +2904,9 @@ TEST_P(DBMultiGetAsyncIOTest, GetNoIOUring) { key_strs.push_back(Key(33)); key_strs.push_back(Key(54)); key_strs.push_back(Key(102)); - keys.push_back(key_strs[0]); - keys.push_back(key_strs[1]); - keys.push_back(key_strs[2]); + keys.emplace_back(key_strs[0]); + keys.emplace_back(key_strs[1]); + keys.emplace_back(key_strs[2]); values.resize(keys.size()); statuses.resize(keys.size()); @@ -3285,9 +3285,9 @@ TEST_F(DBBasicTest, MultiGetIOBufferOverrun) { // Warm up the cache first key_data.emplace_back(Key(0)); - keys.emplace_back(Slice(key_data.back())); + keys.emplace_back(key_data.back()); key_data.emplace_back(Key(50)); - keys.emplace_back(Slice(key_data.back())); + keys.emplace_back(key_data.back()); statuses.resize(keys.size()); dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(), @@ -3661,10 +3661,10 @@ TEST_F(DBBasicTest, ConcurrentlyCloseDB) { DestroyAndReopen(options); std::vector workers; for (int i = 0; i < 10; i++) { - workers.push_back(std::thread([&]() { + workers.emplace_back([&]() { auto s = db_->Close(); ASSERT_OK(s); - })); + }); } for (auto& w : workers) { w.join(); @@ -3938,9 +3938,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) { // Warm up the cache first key_data.emplace_back(Key(0)); - keys.emplace_back(Slice(key_data.back())); + keys.emplace_back(key_data.back()); key_data.emplace_back(Key(50)); - keys.emplace_back(Slice(key_data.back())); + keys.emplace_back(key_data.back()); statuses.resize(keys.size()); dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(), @@ -4119,9 +4119,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetDirectIO) { // Warm up the cache first key_data.emplace_back(Key(0)); - keys.emplace_back(Slice(key_data.back())); + keys.emplace_back(key_data.back()); key_data.emplace_back(Key(50)); - keys.emplace_back(Slice(key_data.back())); + keys.emplace_back(key_data.back()); statuses.resize(keys.size()); dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(), @@ -4189,9 +4189,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetWithChecksumMismatch) { // Warm up the cache first key_data.emplace_back(Key(0)); - keys.emplace_back(Slice(key_data.back())); + keys.emplace_back(key_data.back()); key_data.emplace_back(Key(50)); - keys.emplace_back(Slice(key_data.back())); + keys.emplace_back(key_data.back()); statuses.resize(keys.size()); dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(), @@ -4237,9 +4237,9 @@ TEST_P(DBBasicTestWithParallelIO, MultiGetWithMissingFile) { // Warm up the cache first key_data.emplace_back(Key(0)); - keys.emplace_back(Slice(key_data.back())); + keys.emplace_back(key_data.back()); key_data.emplace_back(Key(50)); - keys.emplace_back(Slice(key_data.back())); + keys.emplace_back(key_data.back()); statuses.resize(keys.size()); dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(), @@ -4743,7 +4743,7 @@ TEST_F(DBBasicTest, VerifyFileChecksumsReadahead) { uint64_t number; FileType type; ASSERT_OK(env_->GetChildren(dbname_, &filenames)); - for (auto name : filenames) { + for (const auto& name : 
filenames) { if (ParseFileName(name, &number, &type)) { if (type == kTableFile) { sst_cnt++; diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index 83b20b9017..a4ceb908fd 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -744,7 +744,7 @@ TEST_F(DBBlockCacheTest, AddRedundantStats) { const size_t capacity = size_t{1} << 25; const int num_shard_bits = 0; // 1 shard int iterations_tested = 0; - for (std::shared_ptr base_cache : + for (const std::shared_ptr& base_cache : {NewLRUCache(capacity, num_shard_bits), // FixedHyperClockCache HyperClockCacheOptions( @@ -990,7 +990,7 @@ TEST_F(DBBlockCacheTest, CacheEntryRoleStats) { int iterations_tested = 0; for (bool partition : {false, true}) { SCOPED_TRACE("Partition? " + std::to_string(partition)); - for (std::shared_ptr cache : + for (const std::shared_ptr& cache : {NewLRUCache(capacity), HyperClockCacheOptions( capacity, @@ -1251,7 +1251,7 @@ void DummyFillCache(Cache& cache, size_t entry_size, class CountingLogger : public Logger { public: - ~CountingLogger() override {} + ~CountingLogger() override = default; using Logger::Logv; void Logv(const InfoLogLevel log_level, const char* format, va_list /*ap*/) override { @@ -1373,7 +1373,7 @@ class StableCacheKeyTestFS : public FaultInjectionTestFS { SetFailGetUniqueId(true); } - ~StableCacheKeyTestFS() override {} + ~StableCacheKeyTestFS() override = default; IOStatus LinkFile(const std::string&, const std::string&, const IOOptions&, IODebugContext*) override { @@ -1566,7 +1566,7 @@ class CacheKeyTest : public testing::Test { tp_.db_id = std::to_string(db_id_); tp_.orig_file_number = file_number; bool is_stable; - std::string cur_session_id = ""; // ignored + std::string cur_session_id; // ignored uint64_t cur_file_number = 42; // ignored OffsetableCacheKey rv; BlockBasedTable::SetupBaseCacheKey(&tp_, cur_session_id, cur_file_number, diff --git a/db/db_bloom_filter_test.cc b/db/db_bloom_filter_test.cc index 510884e8d6..fa2d45d250 100644 --- a/db/db_bloom_filter_test.cc +++ b/db/db_bloom_filter_test.cc @@ -78,7 +78,7 @@ class DBBloomFilterTestWithParam DBBloomFilterTestWithParam() : DBTestBase("db_bloom_filter_tests", /*env_do_fsync=*/true) {} - ~DBBloomFilterTestWithParam() override {} + ~DBBloomFilterTestWithParam() override = default; void SetUp() override { bfp_impl_ = std::get<0>(GetParam()); @@ -2051,7 +2051,7 @@ class DBBloomFilterTestVaryPrefixAndFormatVer DBBloomFilterTestVaryPrefixAndFormatVer() : DBTestBase("db_bloom_filter_tests", /*env_do_fsync=*/true) {} - ~DBBloomFilterTestVaryPrefixAndFormatVer() override {} + ~DBBloomFilterTestVaryPrefixAndFormatVer() override = default; void SetUp() override { use_prefix_ = std::get<0>(GetParam()); @@ -2126,8 +2126,9 @@ TEST_P(DBBloomFilterTestVaryPrefixAndFormatVer, PartitionedMultiGet) { values[i] = PinnableSlice(); } - db_->MultiGet(ropts, Q, &column_families[0], &key_slices[0], &values[0], - /*timestamps=*/nullptr, &statuses[0], true); + db_->MultiGet(ropts, Q, column_families.data(), key_slices.data(), + values.data(), + /*timestamps=*/nullptr, statuses.data(), true); // Confirm correct status results uint32_t number_not_found = 0; @@ -2177,8 +2178,9 @@ TEST_P(DBBloomFilterTestVaryPrefixAndFormatVer, PartitionedMultiGet) { values[i] = PinnableSlice(); } - db_->MultiGet(ropts, Q, &column_families[0], &key_slices[0], &values[0], - /*timestamps=*/nullptr, &statuses[0], true); + db_->MultiGet(ropts, Q, column_families.data(), key_slices.data(), + values.data(), + /*timestamps=*/nullptr, statuses.data(), 
true); // Confirm correct status results uint32_t number_not_found = 0; diff --git a/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc index 44c406c496..6cedb6fd53 100644 --- a/db/db_compaction_filter_test.cc +++ b/db/db_compaction_filter_test.cc @@ -150,7 +150,7 @@ class ConditionalFilter : public CompactionFilter { class ChangeFilter : public CompactionFilter { public: - explicit ChangeFilter() {} + explicit ChangeFilter() = default; bool Filter(int /*level*/, const Slice& /*key*/, const Slice& /*value*/, std::string* new_value, bool* value_changed) const override { @@ -289,7 +289,7 @@ class ConditionalFilterFactory : public CompactionFilterFactory { class ChangeFilterFactory : public CompactionFilterFactory { public: - explicit ChangeFilterFactory() {} + explicit ChangeFilterFactory() = default; std::unique_ptr CreateCompactionFilter( const CompactionFilter::Context& /*context*/) override { diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc index e0ecff6776..612a1f21d6 100644 --- a/db/db_compaction_test.cc +++ b/db/db_compaction_test.cc @@ -41,7 +41,7 @@ class CompactionStatsCollector : public EventListener { } } - ~CompactionStatsCollector() override {} + ~CompactionStatsCollector() override = default; void OnCompactionCompleted(DB* /* db */, const CompactionJobInfo& info) override { @@ -241,8 +241,8 @@ class RoundRobinSubcompactionsAgainstResources namespace { class FlushedFileCollector : public EventListener { public: - FlushedFileCollector() {} - ~FlushedFileCollector() override {} + FlushedFileCollector() = default; + ~FlushedFileCollector() override = default; void OnFlushCompleted(DB* /*db*/, const FlushJobInfo& info) override { std::lock_guard lock(mutex_); @@ -252,7 +252,7 @@ class FlushedFileCollector : public EventListener { std::vector GetFlushedFiles() { std::lock_guard lock(mutex_); std::vector result; - for (auto fname : flushed_files_) { + for (const auto& fname : flushed_files_) { result.push_back(fname); } return result; @@ -2090,9 +2090,9 @@ TEST_P(DBDeleteFileRangeTest, DeleteFilesInRanges) { Slice begin2(begin_str2), end2(end_str2); Slice begin3(begin_str3), end3(end_str3); std::vector ranges; - ranges.push_back(RangePtr(&begin1, &end1)); - ranges.push_back(RangePtr(&begin2, &end2)); - ranges.push_back(RangePtr(&begin3, &end3)); + ranges.emplace_back(&begin1, &end1); + ranges.emplace_back(&begin2, &end2); + ranges.emplace_back(&begin3, &end3); ASSERT_OK(DeleteFilesInRanges(db_, db_->DefaultColumnFamily(), ranges.data(), ranges.size())); ASSERT_EQ("0,3,7", FilesPerLevel(0)); @@ -2117,9 +2117,9 @@ TEST_P(DBDeleteFileRangeTest, DeleteFilesInRanges) { Slice begin2(begin_str2), end2(end_str2); Slice begin3(begin_str3), end3(end_str3); std::vector ranges; - ranges.push_back(RangePtr(&begin1, &end1)); - ranges.push_back(RangePtr(&begin2, &end2)); - ranges.push_back(RangePtr(&begin3, &end3)); + ranges.emplace_back(&begin1, &end1); + ranges.emplace_back(&begin2, &end2); + ranges.emplace_back(&begin3, &end3); ASSERT_OK(DeleteFilesInRanges(db_, db_->DefaultColumnFamily(), ranges.data(), ranges.size(), false)); ASSERT_EQ("0,1,4", FilesPerLevel(0)); @@ -6641,7 +6641,7 @@ TEST_F(DBCompactionTest, RoundRobinCutOutputAtCompactCursor) { class NoopMergeOperator : public MergeOperator { public: - NoopMergeOperator() {} + NoopMergeOperator() = default; bool FullMergeV2(const MergeOperationInput& /*merge_in*/, MergeOperationOutput* merge_out) const override { @@ -9878,7 +9878,7 @@ TEST_F(DBCompactionTest, TurnOnLevelCompactionDynamicLevelBytesUCToLC) { 
options.compaction_style = CompactionStyle::kCompactionStyleLevel; options.level_compaction_dynamic_level_bytes = true; ReopenWithColumnFamilies({"default", "pikachu"}, options); - std::string expected_lsm = ""; + std::string expected_lsm; for (int i = 0; i < 49; ++i) { expected_lsm += "0,"; } @@ -10394,20 +10394,20 @@ TEST_F(DBCompactionTest, ReleaseCompactionDuringManifestWrite) { SyncPoint::GetInstance()->EnableProcessing(); std::vector threads; - threads.emplace_back(std::thread([&]() { + threads.emplace_back([&]() { std::string k1_str = Key(1); std::string k2_str = Key(2); Slice k1 = k1_str; Slice k2 = k2_str; ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &k1, &k2)); - })); - threads.emplace_back(std::thread([&]() { + }); + threads.emplace_back([&]() { std::string k10_str = Key(10); std::string k11_str = Key(11); Slice k10 = k10_str; Slice k11 = k11_str; ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &k10, &k11)); - })); + }); std::string k100_str = Key(100); std::string k101_str = Key(101); Slice k100 = k100_str; diff --git a/db/db_flush_test.cc b/db/db_flush_test.cc index e7a8657d82..6846d96d0e 100644 --- a/db/db_flush_test.cc +++ b/db/db_flush_test.cc @@ -1367,14 +1367,15 @@ TEST_F(DBFlushTest, MemPurgeDeleteAndDeleteRange) { ASSERT_OK(iter->status()); key = (iter->key()).ToString(false); value = (iter->value()).ToString(false); - if (key.compare(KEY3) == 0) + if (key.compare(KEY3) == 0) { ASSERT_EQ(value, p_v3b); - else if (key.compare(KEY4) == 0) + } else if (key.compare(KEY4) == 0) { ASSERT_EQ(value, p_v4); - else if (key.compare(KEY5) == 0) + } else if (key.compare(KEY5) == 0) { ASSERT_EQ(value, p_v5); - else + } else { ASSERT_EQ(value, NOT_FOUND); + } count++; } ASSERT_OK(iter->status()); @@ -1404,22 +1405,25 @@ TEST_F(DBFlushTest, MemPurgeDeleteAndDeleteRange) { ASSERT_OK(iter->status()); key = (iter->key()).ToString(false); value = (iter->value()).ToString(false); - if (key.compare(KEY2) == 0) + if (key.compare(KEY2) == 0) { ASSERT_EQ(value, p_v2); - else if (key.compare(KEY3) == 0) + } else if (key.compare(KEY3) == 0) { ASSERT_EQ(value, p_v3b); - else if (key.compare(KEY4) == 0) + } else if (key.compare(KEY4) == 0) { ASSERT_EQ(value, p_v4); - else if (key.compare(KEY5) == 0) + } else if (key.compare(KEY5) == 0) { ASSERT_EQ(value, p_v5); - else + } else { ASSERT_EQ(value, NOT_FOUND); + } count++; } // Expected count here is 4: KEY2, KEY3, KEY4, KEY5. 
   ASSERT_EQ(count, EXPECTED_COUNT_END);
-  if (iter) delete iter;
+  if (iter) {
+    delete iter;
+  }
   Close();
 }
@@ -2499,7 +2503,7 @@ TEST_F(DBFlushTest, TombstoneVisibleInSnapshot) {
 class SimpleTestFlushListener : public EventListener {
  public:
   explicit SimpleTestFlushListener(DBFlushTest* _test) : test_(_test) {}
-  ~SimpleTestFlushListener() override {}
+  ~SimpleTestFlushListener() override = default;
   void OnFlushBegin(DB* db, const FlushJobInfo& info) override {
     ASSERT_EQ(static_cast<uint32_t>(0), info.cf_id);
diff --git a/db/db_impl/compacted_db_impl.cc b/db/db_impl/compacted_db_impl.cc
index d1c2db17b6..0e92ffd232 100644
--- a/db/db_impl/compacted_db_impl.cc
+++ b/db/db_impl/compacted_db_impl.cc
@@ -21,7 +21,7 @@ CompactedDBImpl::CompactedDBImpl(const DBOptions& options,
       version_(nullptr),
       user_comparator_(nullptr) {}
-CompactedDBImpl::~CompactedDBImpl() {}
+CompactedDBImpl::~CompactedDBImpl() = default;
 size_t CompactedDBImpl::FindFile(const Slice& key) {
   size_t right = files_.num_files - 1;
diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc
index 8e83ad0831..a8ea205b4a 100644
--- a/db/db_impl/db_impl.cc
+++ b/db/db_impl/db_impl.cc
@@ -8,7 +8,7 @@
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 #include "db/db_impl/db_impl.h"
-#include <stdint.h>
+#include <cstdint>
 #ifdef OS_SOLARIS
 #include <alloca.h>
 #endif
@@ -959,7 +959,9 @@ size_t DBImpl::EstimateInMemoryStatsHistorySize() const {
   stats_history_mutex_.AssertHeld();
   size_t size_total =
      sizeof(std::map<uint64_t, std::map<std::string, uint64_t>>);
-  if (stats_history_.size() == 0) return size_total;
+  if (stats_history_.size() == 0) {
+    return size_total;
+  }
   size_t size_per_slice =
      sizeof(uint64_t) + sizeof(std::map<std::string, uint64_t>);
   // non-empty map, stats_history_.begin() guaranteed to exist
@@ -1085,7 +1087,9 @@ bool DBImpl::FindStatsByTime(uint64_t start_time, uint64_t end_time,
                              std::map<std::string, uint64_t>* stats_map) {
   assert(new_time);
   assert(stats_map);
-  if (!new_time || !stats_map) return false;
+  if (!new_time || !stats_map) {
+    return false;
+  }
   // lock when search for start_time
   {
     InstrumentedMutexLock l(&stats_history_mutex_);
@@ -1492,7 +1496,9 @@ int DBImpl::FindMinimumEmptyLevelFitting(
   int minimum_level = level;
   for (int i = level - 1; i > 0; --i) {
     // stop if level i is not empty
-    if (vstorage->NumLevelFiles(i) > 0) break;
+    if (vstorage->NumLevelFiles(i) > 0) {
+      break;
+    }
     // stop if level i is too small (cannot fit the level files)
     if (vstorage->MaxBytesForLevel(i) < vstorage->NumLevelBytes(level)) {
       break;
     }
@@ -4615,9 +4621,9 @@ Status DBImpl::DeleteFile(std::string name) {
         read_options, write_options, &edit, &mutex_, directories_.GetDbDir());
     if (status.ok()) {
-      InstallSuperVersionAndScheduleWork(cfd,
-                                         &job_context.superversion_contexts[0],
-                                         *cfd->GetLatestMutableCFOptions());
+      InstallSuperVersionAndScheduleWork(
+          cfd, job_context.superversion_contexts.data(),
+          *cfd->GetLatestMutableCFOptions());
     }
     FindObsoleteFiles(&job_context, false);
   }  // lock released here
@@ -4728,9 +4734,9 @@ Status DBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
         read_options, write_options, &edit, &mutex_, directories_.GetDbDir());
     if (status.ok()) {
-      InstallSuperVersionAndScheduleWork(cfd,
-                                         &job_context.superversion_contexts[0],
-                                         *cfd->GetLatestMutableCFOptions());
+      InstallSuperVersionAndScheduleWork(
+          cfd, job_context.superversion_contexts.data(),
+          *cfd->GetLatestMutableCFOptions());
     }
     for (auto* deleted_file : deleted_files) {
       deleted_file->being_compacted = false;
@@ -4965,7 +4971,7 @@ Status DB::DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family) {
   return Status::OK();
 }
-DB::~DB() {}
+DB::~DB() = default;
 Status DBImpl::Close() {
   InstrumentedMutexLock closing_lock_guard(&closing_mutex_);
@@ -4992,7 +4998,7 @@ Status DB::ListColumnFamilies(const DBOptions& db_options,
   return VersionSet::ListColumnFamilies(column_families, name, fs.get());
 }
-Snapshot::~Snapshot() {}
+Snapshot::~Snapshot() = default;
 Status DestroyDB(const std::string& dbname, const Options& options,
                  const std::vector<ColumnFamilyDescriptor>& column_families) {
@@ -6024,8 +6030,8 @@ Status DBImpl::ClipColumnFamily(ColumnFamilyHandle* column_family,
   if (status.ok()) {
     // DeleteFilesInRanges non-overlap files except L0
     std::vector<RangePtr> ranges;
-    ranges.push_back(RangePtr(nullptr, &begin_key));
-    ranges.push_back(RangePtr(&end_key, nullptr));
+    ranges.emplace_back(nullptr, &begin_key);
+    ranges.emplace_back(&end_key, nullptr);
     status = DeleteFilesInRanges(column_family, ranges.data(), ranges.size());
   }
@@ -6273,7 +6279,7 @@ void DBImpl::NotifyOnExternalFileIngested(
     info.internal_file_path = f.internal_file_path;
     info.global_seqno = f.assigned_seqno;
     info.table_properties = f.table_properties;
-    for (auto listener : immutable_db_options_.listeners) {
+    for (const auto& listener : immutable_db_options_.listeners) {
       listener->OnExternalFileIngested(this, info);
     }
   }
diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index 63f280b99d..f97b955dbc 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -970,7 +970,7 @@ void DBImpl::NotifyOnFlushBegin(ColumnFamilyData* cfd, FileMetaData* file_meta,
     info.smallest_seqno = file_meta->fd.smallest_seqno;
     info.largest_seqno = file_meta->fd.largest_seqno;
     info.flush_reason = flush_reason;
-    for (auto listener : immutable_db_options_.listeners) {
+    for (const auto& listener : immutable_db_options_.listeners) {
       listener->OnFlushBegin(this, info);
     }
   }
@@ -1002,7 +1002,7 @@ void DBImpl::NotifyOnFlushCompleted(
   for (auto& info : *flush_jobs_info) {
     info->triggered_writes_slowdown = triggered_writes_slowdown;
     info->triggered_writes_stop = triggered_writes_stop;
-    for (auto listener : immutable_db_options_.listeners) {
+    for (const auto& listener : immutable_db_options_.listeners) {
       listener->OnFlushCompleted(this, *info);
     }
     TEST_SYNC_POINT(
@@ -1609,9 +1609,9 @@ Status DBImpl::CompactFilesImpl(
   }
   if (status.ok()) {
     assert(compaction_job.io_status().ok());
-    InstallSuperVersionAndScheduleWork(c->column_family_data(),
-                                       &job_context->superversion_contexts[0],
-                                       *c->mutable_cf_options());
+    InstallSuperVersionAndScheduleWork(
+        c->column_family_data(), job_context->superversion_contexts.data(),
+        *c->mutable_cf_options());
   }
   // status above captures any error during compaction_job.Install, so its ok
   // not check compaction_job.io_status() explicitly if we're not calling
@@ -1731,7 +1731,7 @@ void DBImpl::NotifyOnCompactionBegin(ColumnFamilyData* cfd, Compaction* c,
   {
     CompactionJobInfo info{};
     BuildCompactionJobInfo(cfd, c, st, job_stats, job_id, &info);
-    for (auto listener : immutable_db_options_.listeners) {
+    for (const auto& listener : immutable_db_options_.listeners) {
       listener->OnCompactionBegin(this, info);
     }
     info.status.PermitUncheckedError();
@@ -1760,7 +1760,7 @@ void DBImpl::NotifyOnCompactionCompleted(
   {
     CompactionJobInfo info{};
     BuildCompactionJobInfo(cfd, c, st, compaction_job_stats, job_id, &info);
-    for (auto listener : immutable_db_options_.listeners) {
+    for (const auto& listener : immutable_db_options_.listeners) {
      listener->OnCompactionCompleted(this, info);
     }
   }
@@ -3221,7 +3221,7 @@ Status
DBImpl::BackgroundFlush(bool* made_progress, JobContext* job_context, column_families_not_to_flush.push_back(cfd); continue; } - superversion_contexts.emplace_back(SuperVersionContext(true)); + superversion_contexts.emplace_back(true); bg_flush_args.emplace_back(cfd, max_memtable_id, &(superversion_contexts.back()), flush_reason); } @@ -3726,9 +3726,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress, compaction_released = true; }); io_s = versions_->io_status(); - InstallSuperVersionAndScheduleWork(c->column_family_data(), - &job_context->superversion_contexts[0], - *c->mutable_cf_options()); + InstallSuperVersionAndScheduleWork( + c->column_family_data(), job_context->superversion_contexts.data(), + *c->mutable_cf_options()); ROCKS_LOG_BUFFER(log_buffer, "[%s] Deleted %d files\n", c->column_family_data()->GetName().c_str(), c->num_input_files(0)); @@ -3801,9 +3801,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress, }); io_s = versions_->io_status(); // Use latest MutableCFOptions - InstallSuperVersionAndScheduleWork(c->column_family_data(), - &job_context->superversion_contexts[0], - *c->mutable_cf_options()); + InstallSuperVersionAndScheduleWork( + c->column_family_data(), job_context->superversion_contexts.data(), + *c->mutable_cf_options()); VersionStorageInfo::LevelSummaryStorage tmp; c->column_family_data()->internal_stats()->IncBytesMoved(c->output_level(), @@ -3896,9 +3896,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress, compaction_job.Install(*c->mutable_cf_options(), &compaction_released); io_s = compaction_job.io_status(); if (status.ok()) { - InstallSuperVersionAndScheduleWork(c->column_family_data(), - &job_context->superversion_contexts[0], - *c->mutable_cf_options()); + InstallSuperVersionAndScheduleWork( + c->column_family_data(), job_context->superversion_contexts.data(), + *c->mutable_cf_options()); } *made_progress = true; TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:AfterCompaction", @@ -4045,7 +4045,6 @@ void DBImpl::RemoveManualCompaction(DBImpl::ManualCompactionState* m) { ++it; } assert(false); - return; } bool DBImpl::ShouldntRunManualCompaction(ManualCompactionState* m) { diff --git a/db/db_impl/db_impl_experimental.cc b/db/db_impl/db_impl_experimental.cc index c90df262e8..113a7f42ff 100644 --- a/db/db_impl/db_impl_experimental.cc +++ b/db/db_impl/db_impl_experimental.cc @@ -104,7 +104,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) { return status; } - if (i == 0) continue; + if (i == 0) { + continue; + } auto prev_f = l0_files[i - 1]; if (icmp->Compare(prev_f->largest, f->smallest) >= 0) { ROCKS_LOG_INFO(immutable_db_options_.info_log, @@ -148,9 +150,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) { read_options, write_options, &edit, &mutex_, directories_.GetDbDir()); if (status.ok()) { - InstallSuperVersionAndScheduleWork(cfd, - &job_context.superversion_contexts[0], - *cfd->GetLatestMutableCFOptions()); + InstallSuperVersionAndScheduleWork( + cfd, job_context.superversion_contexts.data(), + *cfd->GetLatestMutableCFOptions()); } } // lock released here LogFlush(immutable_db_options_.info_log); diff --git a/db/db_impl/db_impl_open.cc b/db/db_impl/db_impl_open.cc index 9417721318..d6ac98c86b 100644 --- a/db/db_impl/db_impl_open.cc +++ b/db/db_impl/db_impl_open.cc @@ -1799,11 +1799,9 @@ Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) { DBOptions db_options(options); ColumnFamilyOptions cf_options(options); std::vector 
column_families; - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options)); + column_families.emplace_back(kDefaultColumnFamilyName, cf_options); if (db_options.persist_stats_to_disk) { - column_families.push_back( - ColumnFamilyDescriptor(kPersistentStatsColumnFamilyName, cf_options)); + column_families.emplace_back(kPersistentStatsColumnFamilyName, cf_options); } std::vector handles; Status s = DB::Open(db_options, dbname, column_families, &handles, dbptr); @@ -1972,7 +1970,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname, handles->clear(); size_t max_write_buffer_size = 0; - for (auto cf : column_families) { + for (const auto& cf : column_families) { max_write_buffer_size = std::max(max_write_buffer_size, cf.options.write_buffer_size); } @@ -2044,8 +2042,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname, } if (s.ok()) { - impl->alive_log_files_.push_back( - DBImpl::LogFileNumberSize(impl->logfile_number_)); + impl->alive_log_files_.emplace_back(impl->logfile_number_); // In WritePrepared there could be gap in sequence numbers. This breaks // the trick we use in kPointInTimeRecovery which assumes the first seq in // the log right after the corrupted log is one larger than the last seq @@ -2093,7 +2090,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname, if (s.ok()) { // set column family handles - for (auto cf : column_families) { + for (const auto& cf : column_families) { auto cfd = impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name); if (cfd != nullptr) { diff --git a/db/db_impl/db_impl_readonly.cc b/db/db_impl/db_impl_readonly.cc index facc4bb1a1..e0d8d3b31a 100644 --- a/db/db_impl/db_impl_readonly.cc +++ b/db/db_impl/db_impl_readonly.cc @@ -26,7 +26,7 @@ DBImplReadOnly::DBImplReadOnly(const DBOptions& db_options, LogFlush(immutable_db_options_.info_log); } -DBImplReadOnly::~DBImplReadOnly() {} +DBImplReadOnly::~DBImplReadOnly() = default; // Implementations of the DB interface Status DBImplReadOnly::GetImpl(const ReadOptions& read_options, @@ -293,8 +293,7 @@ Status DB::OpenForReadOnly(const Options& options, const std::string& dbname, DBOptions db_options(options); ColumnFamilyOptions cf_options(options); std::vector column_families; - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options)); + column_families.emplace_back(kDefaultColumnFamilyName, cf_options); std::vector handles; s = DBImplReadOnly::OpenForReadOnlyWithoutCheck( @@ -339,7 +338,7 @@ Status DBImplReadOnly::OpenForReadOnlyWithoutCheck( error_if_wal_file_exists); if (s.ok()) { // set column family handles - for (auto cf : column_families) { + for (const auto& cf : column_families) { auto cfd = impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name); if (cfd == nullptr) { diff --git a/db/db_impl/db_impl_secondary.cc b/db/db_impl/db_impl_secondary.cc index 29c8990c9b..f0502fc0a2 100644 --- a/db/db_impl/db_impl_secondary.cc +++ b/db/db_impl/db_impl_secondary.cc @@ -28,7 +28,7 @@ DBImplSecondary::DBImplSecondary(const DBOptions& db_options, LogFlush(immutable_db_options_.info_log); } -DBImplSecondary::~DBImplSecondary() {} +DBImplSecondary::~DBImplSecondary() = default; Status DBImplSecondary::Recover( const std::vector& column_families, @@ -804,7 +804,7 @@ Status DB::OpenAsSecondary( impl->mutex_.Lock(); s = impl->Recover(column_families, true, false, false); if (s.ok()) { - for (auto cf : column_families) { + for (const auto& cf : 
column_families) { auto cfd = impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name); if (nullptr == cfd) { diff --git a/db/db_impl/db_impl_write.cc b/db/db_impl/db_impl_write.cc index 3c940ef517..ee103a57a9 100644 --- a/db/db_impl/db_impl_write.cc +++ b/db/db_impl/db_impl_write.cc @@ -2135,7 +2135,7 @@ void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* /*cfd*/, } mutex_.Unlock(); - for (auto listener : immutable_db_options_.listeners) { + for (const auto& listener : immutable_db_options_.listeners) { listener->OnMemTableSealed(mem_table_info); } mutex_.Lock(); @@ -2252,7 +2252,7 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) { log_empty_ = true; log_dir_synced_ = false; logs_.emplace_back(logfile_number_, new_log); - alive_log_files_.push_back(LogFileNumberSize(logfile_number_)); + alive_log_files_.emplace_back(logfile_number_); } } diff --git a/db/db_table_properties_test.cc b/db/db_table_properties_test.cc index 274e16c4c4..dca36721f8 100644 --- a/db/db_table_properties_test.cc +++ b/db/db_table_properties_test.cc @@ -280,7 +280,7 @@ class DBTablePropertiesInRangeTest : public DBTestBase, // run the query TablePropertiesCollection props; ColumnFamilyHandle* default_cf = db_->DefaultColumnFamily(); - EXPECT_OK(db_->GetPropertiesOfTablesInRange(default_cf, &ranges[0], + EXPECT_OK(db_->GetPropertiesOfTablesInRange(default_cf, ranges.data(), ranges.size(), &props)); const Comparator* ucmp = default_cf->GetComparator(); diff --git a/db/experimental.cc b/db/experimental.cc index 9c11b4c1ba..402dd95408 100644 --- a/db/experimental.cc +++ b/db/experimental.cc @@ -17,9 +17,7 @@ #include "logging/logging.h" #include "util/atomic.h" -namespace ROCKSDB_NAMESPACE { -namespace experimental { - +namespace ROCKSDB_NAMESPACE::experimental { Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family, const Slice* begin, const Slice* end) { @@ -378,7 +376,7 @@ enum BuiltinSstQueryFilters : char { class SstQueryFilterBuilder { public: - virtual ~SstQueryFilterBuilder() {} + virtual ~SstQueryFilterBuilder() = default; virtual void Add(const Slice& key, const KeySegmentsExtractor::Result& extracted, const Slice* prev_key, @@ -395,7 +393,7 @@ class SstQueryFilterConfigImpl : public SstQueryFilterConfig { const KeySegmentsExtractor::KeyCategorySet& categories) : input_(input), categories_(categories) {} - virtual ~SstQueryFilterConfigImpl() {} + virtual ~SstQueryFilterConfigImpl() = default; virtual std::unique_ptr NewBuilder( bool sanity_checks) const = 0; @@ -1210,5 +1208,4 @@ Status SstQueryFilterConfigsManager::MakeShared( return s; } -} // namespace experimental -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::experimental diff --git a/db/log_reader.cc b/db/log_reader.cc index b29517a19f..48380a735c 100644 --- a/db/log_reader.cc +++ b/db/log_reader.cc @@ -18,8 +18,7 @@ #include "util/coding.h" #include "util/crc32c.h" -namespace ROCKSDB_NAMESPACE { -namespace log { +namespace ROCKSDB_NAMESPACE::log { Reader::Reporter::~Reporter() = default; @@ -937,5 +936,4 @@ bool FragmentBufferedReader::TryReadFragment( } } -} // namespace log -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::log diff --git a/db/log_test.cc b/db/log_test.cc index bd5aaf6d6b..79ff02a04b 100644 --- a/db/log_test.cc +++ b/db/log_test.cc @@ -19,8 +19,7 @@ #include "util/random.h" #include "utilities/memory_allocators.h" -namespace ROCKSDB_NAMESPACE { -namespace log { +namespace ROCKSDB_NAMESPACE::log { // Construct a string of the specified length 
made out of the supplied // partial string. @@ -1206,8 +1205,7 @@ INSTANTIATE_TEST_CASE_P( kBlockSize * 2), ::testing::Values(CompressionType::kZSTD))); -} // namespace log -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::log int main(int argc, char** argv) { ROCKSDB_NAMESPACE::port::InstallStackTraceHandler(); diff --git a/db/log_writer.cc b/db/log_writer.cc index 8e0f7a4a9d..4fbdb978fa 100644 --- a/db/log_writer.cc +++ b/db/log_writer.cc @@ -18,8 +18,7 @@ #include "util/crc32c.h" #include "util/udt_util.h" -namespace ROCKSDB_NAMESPACE { -namespace log { +namespace ROCKSDB_NAMESPACE::log { Writer::Writer(std::unique_ptr&& dest, uint64_t log_number, bool recycle_log_files, bool manual_flush, @@ -297,5 +296,4 @@ IOStatus Writer::EmitPhysicalRecord(const WriteOptions& write_options, return s; } -} // namespace log -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::log diff --git a/db/seqno_to_time_mapping.cc b/db/seqno_to_time_mapping.cc index e7bee3e46f..ec547ff2a6 100644 --- a/db/seqno_to_time_mapping.cc +++ b/db/seqno_to_time_mapping.cc @@ -422,7 +422,7 @@ bool SeqnoToTimeMapping::Append(SequenceNumber seqno, uint64_t time) { // TODO: consider changing? } else if (pairs_.empty()) { enforced_ = true; - pairs_.push_back({seqno, time}); + pairs_.emplace_back(seqno, time); // skip normal enforced check below return true; } else { @@ -437,13 +437,13 @@ bool SeqnoToTimeMapping::Append(SequenceNumber seqno, uint64_t time) { // reset assert(false); } else { - pairs_.push_back({seqno, time}); + pairs_.emplace_back(seqno, time); added = true; } } } else if (!enforced_) { // Treat like AddUnenforced and fix up below - pairs_.push_back({seqno, time}); + pairs_.emplace_back(seqno, time); added = true; } else { // Out of order append attempted diff --git a/options/configurable_test.cc b/options/configurable_test.cc index 9284e8622a..3ed2d23e3d 100644 --- a/options/configurable_test.cc +++ b/options/configurable_test.cc @@ -29,8 +29,7 @@ using GFLAGS_NAMESPACE::ParseCommandLineFlags; DEFINE_bool(enable_print, false, "Print options generated to console."); #endif // GFLAGS -namespace ROCKSDB_NAMESPACE { -namespace test { +namespace ROCKSDB_NAMESPACE::test { class StringLogger : public Logger { public: using Logger::Logv; @@ -849,8 +848,7 @@ INSTANTIATE_TEST_CASE_P( "block_size=1024;" "no_block_cache=true;"))); -} // namespace test -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::test int main(int argc, char** argv) { ROCKSDB_NAMESPACE::port::InstallStackTraceHandler(); ::testing::InitGoogleTest(&argc, argv); diff --git a/port/stack_trace.cc b/port/stack_trace.cc index 5ce459ba88..f4909f91d5 100644 --- a/port/stack_trace.cc +++ b/port/stack_trace.cc @@ -55,8 +55,7 @@ void* SaveStack(int* /*num_frames*/, int /*first_frames_to_skip*/) { #include "port/lang.h" -namespace ROCKSDB_NAMESPACE { -namespace port { +namespace ROCKSDB_NAMESPACE::port { namespace { @@ -413,7 +412,6 @@ void InstallStackTraceHandler() { #endif } -} // namespace port -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::port #endif diff --git a/table/mock_table.cc b/table/mock_table.cc index fe3bd854c6..ef12060d74 100644 --- a/table/mock_table.cc +++ b/table/mock_table.cc @@ -13,8 +13,7 @@ #include "table/get_context.h" #include "util/coding.h" -namespace ROCKSDB_NAMESPACE { -namespace mock { +namespace ROCKSDB_NAMESPACE::mock { KVVector MakeMockFile(std::initializer_list l) { return KVVector(l); } @@ -347,5 +346,4 @@ void MockTableFactory::AssertLatestFiles( } } 
-} // namespace mock -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::mock diff --git a/table/sst_file_reader_test.cc b/table/sst_file_reader_test.cc index 3dcb820cab..b5e00caded 100644 --- a/table/sst_file_reader_test.cc +++ b/table/sst_file_reader_test.cc @@ -431,7 +431,7 @@ class SstFileReaderTimestampNotPersistedTest sst_name_ = test::PerThreadDBPath("sst_file_ts_not_persisted"); } - ~SstFileReaderTimestampNotPersistedTest() {} + ~SstFileReaderTimestampNotPersistedTest() = default; }; TEST_F(SstFileReaderTimestampNotPersistedTest, Basic) { diff --git a/test_util/secondary_cache_test_util.cc b/test_util/secondary_cache_test_util.cc index 6f0bd38494..b5693de059 100644 --- a/test_util/secondary_cache_test_util.cc +++ b/test_util/secondary_cache_test_util.cc @@ -7,9 +7,7 @@ #include -namespace ROCKSDB_NAMESPACE { - -namespace secondary_cache_test_util { +namespace ROCKSDB_NAMESPACE::secondary_cache_test_util { namespace { using TestItem = WithCacheType::TestItem; @@ -92,6 +90,4 @@ const Cache::CacheItemHelper* WithCacheType::GetHelperFail(CacheEntryRole r) { return GetHelper(r, true, true); } -} // namespace secondary_cache_test_util - -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::secondary_cache_test_util diff --git a/test_util/testharness.cc b/test_util/testharness.cc index 3c7b835d2f..89c7fd9775 100644 --- a/test_util/testharness.cc +++ b/test_util/testharness.cc @@ -13,8 +13,7 @@ #include #include -namespace ROCKSDB_NAMESPACE { -namespace test { +namespace ROCKSDB_NAMESPACE::test { #ifdef OS_WIN #include @@ -103,5 +102,4 @@ bool TestRegex::Matches(const std::string& str) const { } } -} // namespace test -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::test diff --git a/test_util/testutil.cc b/test_util/testutil.cc index f90688a945..5372126ef5 100644 --- a/test_util/testutil.cc +++ b/test_util/testutil.cc @@ -34,8 +34,7 @@ void RegisterCustomObjects(int /*argc*/, char** /*argv*/) {} #endif -namespace ROCKSDB_NAMESPACE { -namespace test { +namespace ROCKSDB_NAMESPACE::test { const uint32_t kDefaultFormatVersion = BlockBasedTableOptions().format_version; const std::set kFooterFormatVersionsToTest{ @@ -749,5 +748,4 @@ void RegisterTestLibrary(const std::string& arg) { ObjectRegistry::Default()->AddLibrary("test", RegisterTestObjects, arg); } } -} // namespace test -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::test diff --git a/util/crc32c.cc b/util/crc32c.cc index 38a69bb505..c00276d765 100644 --- a/util/crc32c.cc +++ b/util/crc32c.cc @@ -56,8 +56,7 @@ ASSERT_FEATURE_COMPAT_HEADER(); bool pmull_runtime_flag = false; #endif -namespace ROCKSDB_NAMESPACE { -namespace crc32c { +namespace ROCKSDB_NAMESPACE::crc32c { #if defined(HAVE_POWER8) && defined(HAS_ALTIVEC) #ifdef __powerpc64__ @@ -1293,5 +1292,4 @@ uint32_t Crc32cCombine(uint32_t crc1, uint32_t crc2, size_t crc2len) { pure_crc2_with_init); } -} // namespace crc32c -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::crc32c diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc index 715d63e2de..ecb43d7fb8 100644 --- a/util/crc32c_test.cc +++ b/util/crc32c_test.cc @@ -12,8 +12,7 @@ #include "util/coding.h" #include "util/random.h" -namespace ROCKSDB_NAMESPACE { -namespace crc32c { +namespace ROCKSDB_NAMESPACE::crc32c { class CRC {}; @@ -170,8 +169,7 @@ TEST(CRC, Crc32cCombineBigSizeTest) { ASSERT_EQ(crc1_2, crc1_2_combine); } -} // namespace crc32c -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::crc32c // copied from 
folly const uint64_t FNV_64_HASH_START = 14695981039346656037ULL; diff --git a/util/data_structure.cc b/util/data_structure.cc index d647df5d5b..04d0442a5f 100644 --- a/util/data_structure.cc +++ b/util/data_structure.cc @@ -7,12 +7,10 @@ #include "util/math.h" -namespace ROCKSDB_NAMESPACE { -namespace detail { +namespace ROCKSDB_NAMESPACE::detail { int CountTrailingZeroBitsForSmallEnumSet(uint64_t v) { return CountTrailingZeroBits(v); } -} // namespace detail -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::detail diff --git a/util/ribbon_config.cc b/util/ribbon_config.cc index c1046f4aaa..792a6d7c27 100644 --- a/util/ribbon_config.cc +++ b/util/ribbon_config.cc @@ -5,11 +5,7 @@ #include "util/ribbon_config.h" -namespace ROCKSDB_NAMESPACE { - -namespace ribbon { - -namespace detail { +namespace ROCKSDB_NAMESPACE::ribbon::detail { // Each instantiation of this struct is sufficiently unique for configuration // purposes, and is only instantiated for settings where we support the @@ -499,8 +495,4 @@ template struct BandingConfigHelper1MaybeSupported< template struct BandingConfigHelper1MaybeSupported; -} // namespace detail - -} // namespace ribbon - -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::ribbon::detail diff --git a/util/slice.cc b/util/slice.cc index 22dd7ee6bb..9ec0af132c 100644 --- a/util/slice.cc +++ b/util/slice.cc @@ -9,9 +9,8 @@ #include "rocksdb/slice.h" -#include - #include +#include #include "rocksdb/convenience.h" #include "rocksdb/slice_transform.h" @@ -128,7 +127,7 @@ class CappedPrefixTransform : public SliceTransform { class NoopTransform : public SliceTransform { public: - explicit NoopTransform() {} + explicit NoopTransform() = default; static const char* kClassName() { return "rocksdb.Noop"; } const char* Name() const override { return kClassName(); } @@ -173,7 +172,7 @@ static int RegisterBuiltinSliceTransform(ObjectLibrary& library, .AddNumber(":"), [](const std::string& uri, std::unique_ptr* guard, std::string* /*errmsg*/) { - auto colon = uri.find(":"); + auto colon = uri.find(':'); auto len = ParseSizeT(uri.substr(colon + 1)); guard->reset(NewFixedPrefixTransform(len)); return guard->get(); @@ -193,7 +192,7 @@ static int RegisterBuiltinSliceTransform(ObjectLibrary& library, .AddNumber(":"), [](const std::string& uri, std::unique_ptr* guard, std::string* /*errmsg*/) { - auto colon = uri.find(":"); + auto colon = uri.find(':'); auto len = ParseSizeT(uri.substr(colon + 1)); guard->reset(NewCappedPrefixTransform(len)); return guard->get(); diff --git a/util/slice_test.cc b/util/slice_test.cc index dd59b2a1bc..d59afe053d 100644 --- a/util/slice_test.cc +++ b/util/slice_test.cc @@ -169,8 +169,8 @@ TEST_F(PinnableSliceTest, Move) { // Unit test for SmallEnumSet class SmallEnumSetTest : public testing::Test { public: - SmallEnumSetTest() {} - ~SmallEnumSetTest() {} + SmallEnumSetTest() = default; + ~SmallEnumSetTest() = default; }; TEST_F(SmallEnumSetTest, SmallEnumSetTest1) { diff --git a/util/status.cc b/util/status.cc index 160755d54d..8f49077406 100644 --- a/util/status.cc +++ b/util/status.cc @@ -9,7 +9,7 @@ #include "rocksdb/status.h" -#include +#include #ifdef OS_WIN #include #endif diff --git a/util/string_util.cc b/util/string_util.cc index 7ffda08875..6e99723c18 100644 --- a/util/string_util.cc +++ b/util/string_util.cc @@ -5,13 +5,12 @@ // #include "util/string_util.h" -#include -#include -#include - #include +#include #include #include +#include +#include #include #include #include @@ -266,7 +265,9 @@ std::string 
UnescapeOptionString(const std::string& escaped_string) { } std::string trim(const std::string& str) { - if (str.empty()) return std::string(); + if (str.empty()) { + return std::string(); + } size_t start = 0; size_t end = str.size() - 1; while (isspace(str[start]) != 0 && start < end) { @@ -346,14 +347,15 @@ uint64_t ParseUint64(const std::string& value) { if (endchar < value.length()) { char c = value[endchar]; - if (c == 'k' || c == 'K') + if (c == 'k' || c == 'K') { num <<= 10LL; - else if (c == 'm' || c == 'M') + } else if (c == 'm' || c == 'M') { num <<= 20LL; - else if (c == 'g' || c == 'G') + } else if (c == 'g' || c == 'G') { num <<= 30LL; - else if (c == 't' || c == 'T') + } else if (c == 't' || c == 'T') { num <<= 40LL; + } } return num; @@ -371,14 +373,15 @@ int64_t ParseInt64(const std::string& value) { if (endchar < value.length()) { char c = value[endchar]; - if (c == 'k' || c == 'K') + if (c == 'k' || c == 'K') { num <<= 10LL; - else if (c == 'm' || c == 'M') + } else if (c == 'm' || c == 'M') { num <<= 20LL; - else if (c == 'g' || c == 'G') + } else if (c == 'g' || c == 'G') { num <<= 30LL; - else if (c == 't' || c == 'T') + } else if (c == 't' || c == 'T') { num <<= 40LL; + } } return num; @@ -396,12 +399,13 @@ int ParseInt(const std::string& value) { if (endchar < value.length()) { char c = value[endchar]; - if (c == 'k' || c == 'K') + if (c == 'k' || c == 'K') { num <<= 10; - else if (c == 'm' || c == 'M') + } else if (c == 'm' || c == 'M') { num <<= 20; - else if (c == 'g' || c == 'G') + } else if (c == 'g' || c == 'G') { num <<= 30; + } } return num; diff --git a/util/thread_list_test.cc b/util/thread_list_test.cc index 47d5fcb5bd..4899b98ac4 100644 --- a/util/thread_list_test.cc +++ b/util/thread_list_test.cc @@ -97,7 +97,7 @@ class SimulatedBackgroundTask { class ThreadListTest : public testing::Test { public: - ThreadListTest() {} + ThreadListTest() = default; }; TEST_F(ThreadListTest, GlobalTables) { @@ -161,7 +161,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) { // Verify the number of running threads in each pool. 
ASSERT_OK(env->GetThreadList(&thread_list)); int running_count[ThreadStatus::NUM_THREAD_TYPES] = {0}; - for (auto thread_status : thread_list) { + for (const auto& thread_status : thread_list) { if (thread_status.cf_name == "pikachu" && thread_status.db_name == "running") { running_count[thread_status.thread_type]++; @@ -189,7 +189,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) { for (int i = 0; i < ThreadStatus::NUM_THREAD_TYPES; ++i) { running_count[i] = 0; } - for (auto thread_status : thread_list) { + for (const auto& thread_status : thread_list) { if (thread_status.cf_name == "pikachu" && thread_status.db_name == "running") { running_count[thread_status.thread_type]++; @@ -204,7 +204,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) { namespace { void UpdateStatusCounts(const std::vector& thread_list, int operation_counts[], int state_counts[]) { - for (auto thread_status : thread_list) { + for (const auto& thread_status : thread_list) { operation_counts[thread_status.operation_type]++; state_counts[thread_status.state_type]++; } diff --git a/util/thread_local.cc b/util/thread_local.cc index 969639d9bc..805a0aad63 100644 --- a/util/thread_local.cc +++ b/util/thread_local.cc @@ -9,7 +9,7 @@ #include "util/thread_local.h" -#include +#include #include "port/likely.h" #include "util/mutexlock.h" diff --git a/util/threadpool_imp.cc b/util/threadpool_imp.cc index 3d224804a4..8397c4b390 100644 --- a/util/threadpool_imp.cc +++ b/util/threadpool_imp.cc @@ -18,11 +18,10 @@ #include #endif -#include - #include #include #include +#include #include #include #include @@ -465,7 +464,7 @@ int ThreadPoolImpl::Impl::UnSchedule(void* arg) { ThreadPoolImpl::ThreadPoolImpl() : impl_(new Impl()) {} -ThreadPoolImpl::~ThreadPoolImpl() {} +ThreadPoolImpl::~ThreadPoolImpl() = default; void ThreadPoolImpl::JoinAllThreads() { impl_->JoinThreads(false); } diff --git a/util/udt_util_test.cc b/util/udt_util_test.cc index 44ee567f74..8f45d564a5 100644 --- a/util/udt_util_test.cc +++ b/util/udt_util_test.cc @@ -20,16 +20,16 @@ static const std::string kValuePlaceHolder = "value"; class HandleTimestampSizeDifferenceTest : public testing::Test { public: - HandleTimestampSizeDifferenceTest() {} + HandleTimestampSizeDifferenceTest() = default; // Test handler used to collect the column family id and user keys contained // in a WriteBatch for test verification. And verifies the value part stays // the same if it's available. 
class KeyCollector : public WriteBatch::Handler { public: - explicit KeyCollector() {} + explicit KeyCollector() = default; - ~KeyCollector() override {} + ~KeyCollector() override = default; Status PutCF(uint32_t cf, const Slice& key, const Slice& value) override { if (value.compare(kValuePlaceHolder) != 0) { @@ -90,7 +90,7 @@ class HandleTimestampSizeDifferenceTest : public testing::Test { private: Status AddKey(uint32_t cf, const Slice& key) { - keys_.push_back(std::make_pair(cf, key)); + keys_.emplace_back(cf, key); return Status::OK(); } std::vector> keys_; diff --git a/utilities/agg_merge/agg_merge.cc b/utilities/agg_merge/agg_merge.cc index 8e5c536f55..a13d861e84 100644 --- a/utilities/agg_merge/agg_merge.cc +++ b/utilities/agg_merge/agg_merge.cc @@ -5,8 +5,7 @@ #include "rocksdb/utilities/agg_merge.h" -#include - +#include #include #include #include @@ -24,7 +23,7 @@ namespace ROCKSDB_NAMESPACE { static std::unordered_map> func_map; -const std::string kUnnamedFuncName = ""; +const std::string kUnnamedFuncName; const std::string kErrorFuncName = "kErrorFuncName"; Status AddAggregator(const std::string& function_name, @@ -37,7 +36,7 @@ Status AddAggregator(const std::string& function_name, return Status::OK(); } -AggMergeOperator::AggMergeOperator() {} +AggMergeOperator::AggMergeOperator() = default; std::string EncodeAggFuncAndPayloadNoCheck(const Slice& function_name, const Slice& value) { @@ -123,7 +122,7 @@ class AggMergeOperator::Accumulator { } std::swap(scratch_, aggregated_); values_.clear(); - values_.push_back(aggregated_); + values_.emplace_back(aggregated_); func_ = my_func; } values_.push_back(my_value); diff --git a/utilities/agg_merge/test_agg_merge.cc b/utilities/agg_merge/test_agg_merge.cc index 63b89cccd6..03bb2a2cd6 100644 --- a/utilities/agg_merge/test_agg_merge.cc +++ b/utilities/agg_merge/test_agg_merge.cc @@ -5,8 +5,7 @@ #include "test_agg_merge.h" -#include - +#include #include #include diff --git a/utilities/backup/backup_engine.cc b/utilities/backup/backup_engine.cc index b2353888c2..ba46e04e02 100644 --- a/utilities/backup/backup_engine.cc +++ b/utilities/backup/backup_engine.cc @@ -384,7 +384,7 @@ class BackupEngineImpl { BackupMeta(const BackupMeta&) = delete; BackupMeta& operator=(const BackupMeta&) = delete; - ~BackupMeta() {} + ~BackupMeta() = default; void RecordTimestamp() { // Best effort @@ -639,11 +639,9 @@ class BackupEngineImpl { std::string db_session_id; CopyOrCreateWorkItem() - : src_path(""), - dst_path(""), - src_temperature(Temperature::kUnknown), + : src_temperature(Temperature::kUnknown), dst_temperature(Temperature::kUnknown), - contents(""), + src_env(nullptr), dst_env(nullptr), src_env_options(), @@ -651,10 +649,7 @@ class BackupEngineImpl { rate_limiter(nullptr), size_limit(0), stats(nullptr), - src_checksum_func_name(kUnknownFileChecksumFuncName), - src_checksum_hex(""), - db_id(""), - db_session_id("") {} + src_checksum_func_name(kUnknownFileChecksumFuncName) {} CopyOrCreateWorkItem(const CopyOrCreateWorkItem&) = delete; CopyOrCreateWorkItem& operator=(const CopyOrCreateWorkItem&) = delete; @@ -727,12 +722,7 @@ class BackupEngineImpl { std::string dst_path; std::string dst_relative; BackupAfterCopyOrCreateWorkItem() - : shared(false), - needed_to_copy(false), - backup_env(nullptr), - dst_path_tmp(""), - dst_path(""), - dst_relative("") {} + : shared(false), needed_to_copy(false), backup_env(nullptr) {} BackupAfterCopyOrCreateWorkItem( BackupAfterCopyOrCreateWorkItem&& o) noexcept { @@ -773,7 +763,7 @@ class BackupEngineImpl { 
std::string from_file; std::string to_file; std::string checksum_hex; - RestoreAfterCopyOrCreateWorkItem() : checksum_hex("") {} + RestoreAfterCopyOrCreateWorkItem() {} RestoreAfterCopyOrCreateWorkItem(std::future&& _result, const std::string& _from_file, const std::string& _to_file, @@ -874,7 +864,7 @@ class BackupEngineImplThreadSafe : public BackupEngine, BackupEngineImplThreadSafe(const BackupEngineOptions& options, Env* db_env, bool read_only = false) : impl_(options, db_env, read_only) {} - ~BackupEngineImplThreadSafe() override {} + ~BackupEngineImplThreadSafe() override = default; using BackupEngine::CreateNewBackupWithMetadata; IOStatus CreateNewBackupWithMetadata(const CreateBackupOptions& options, diff --git a/utilities/backup/backup_engine_test.cc b/utilities/backup/backup_engine_test.cc index 51f95a9e2b..917effceab 100644 --- a/utilities/backup/backup_engine_test.cc +++ b/utilities/backup/backup_engine_test.cc @@ -858,8 +858,8 @@ class BackupEngineTest : public testing::Test { for (auto& dir : child_dirs) { dir = "private/" + dir; } - child_dirs.push_back("shared"); // might not exist - child_dirs.push_back("shared_checksum"); // might not exist + child_dirs.emplace_back("shared"); // might not exist + child_dirs.emplace_back("shared_checksum"); // might not exist for (auto& dir : child_dirs) { std::vector children; test_backup_env_->GetChildren(backupdir_ + "/" + dir, &children) @@ -927,7 +927,7 @@ class BackupEngineTest : public testing::Test { void DeleteLogFiles() { std::vector delete_logs; ASSERT_OK(db_chroot_env_->GetChildren(dbname_, &delete_logs)); - for (auto f : delete_logs) { + for (const auto& f : delete_logs) { uint64_t number; FileType type; bool ok = ParseFileName(f, &number, &type); @@ -1925,7 +1925,7 @@ TEST_F(BackupEngineTest, BackupOptions) { ASSERT_OK(file_manager_->FileExists(OptionsPath(backupdir_, i) + name)); ASSERT_OK(backup_chroot_env_->GetChildren(OptionsPath(backupdir_, i), &filenames)); - for (auto fn : filenames) { + for (const auto& fn : filenames) { if (fn.compare(0, 7, "OPTIONS") == 0) { ASSERT_EQ(name, fn); } @@ -2664,7 +2664,7 @@ TEST_F(BackupEngineTest, DeleteTmpFiles) { assert(false); } CloseDBAndBackupEngine(); - for (std::string file_or_dir : tmp_files_and_dirs) { + for (const std::string& file_or_dir : tmp_files_and_dirs) { if (file_manager_->FileExists(file_or_dir) != Status::NotFound()) { FAIL() << file_or_dir << " was expected to be deleted." 
<< cleanup_fn; } @@ -2698,7 +2698,7 @@ class BackupEngineRateLimitingTestWithParam int /* 0 = single threaded, 1 = multi threaded*/, std::pair /* limits */>> { public: - BackupEngineRateLimitingTestWithParam() {} + BackupEngineRateLimitingTestWithParam() = default; }; uint64_t const MB = 1024 * 1024; @@ -2848,7 +2848,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) { true /* include_file_details */)); std::uint64_t bytes_read_during_verify_backup = 0; - for (BackupFileInfo backup_file_info : backup_info.file_details) { + for (const BackupFileInfo& backup_file_info : backup_info.file_details) { bytes_read_during_verify_backup += backup_file_info.size; } auto start_verify_backup = special_env->NowMicros(); @@ -2986,7 +2986,7 @@ class BackupEngineRateLimitingTestWithParam2 public testing::WithParamInterface< std::tuple /* limits */>> { public: - BackupEngineRateLimitingTestWithParam2() {} + BackupEngineRateLimitingTestWithParam2() = default; }; INSTANTIATE_TEST_CASE_P( @@ -4212,7 +4212,7 @@ TEST_F(BackupEngineTest, FileTemperatures) { std::vector infos; ASSERT_OK( db_->GetLiveFilesStorageInfo(LiveFilesStorageInfoOptions(), &infos)); - for (auto info : infos) { + for (const auto& info : infos) { if (info.file_type == kTableFile) { manifest_temps.emplace(info.file_number, info.temperature); manifest_temp_counts[info.temperature]++; @@ -4379,7 +4379,7 @@ TEST_F(BackupEngineTest, ExcludeFiles) { MaybeExcludeBackupFile* files_end) { for (auto* f = files_begin; f != files_end; ++f) { std::string s = StringSplit(f->info.relative_file, '/').back(); - s = s.substr(0, s.find("_")); + s = s.substr(0, s.find('_')); int64_t num = std::strtoll(s.c_str(), nullptr, /*base*/ 10); // Exclude if not a match f->exclude_decision = (num % modulus) != remainder; diff --git a/utilities/blob_db/blob_compaction_filter.cc b/utilities/blob_db/blob_compaction_filter.cc index 97543214db..f22a169457 100644 --- a/utilities/blob_db/blob_compaction_filter.cc +++ b/utilities/blob_db/blob_compaction_filter.cc @@ -13,8 +13,7 @@ #include "rocksdb/system_clock.h" #include "test_util/sync_point.h" -namespace ROCKSDB_NAMESPACE { -namespace blob_db { +namespace ROCKSDB_NAMESPACE::blob_db { BlobIndexCompactionFilterBase::~BlobIndexCompactionFilterBase() { if (blob_file_) { @@ -488,5 +487,4 @@ BlobIndexCompactionFilterFactoryGC::CreateCompactionFilter( std::move(user_comp_filter_from_factory), current_time, statistics())); } -} // namespace blob_db -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::blob_db diff --git a/utilities/blob_db/blob_db.cc b/utilities/blob_db/blob_db.cc index b6fe039036..25960bdd6c 100644 --- a/utilities/blob_db/blob_db.cc +++ b/utilities/blob_db/blob_db.cc @@ -11,8 +11,7 @@ #include "logging/logging.h" #include "utilities/blob_db/blob_db_impl.h" -namespace ROCKSDB_NAMESPACE { -namespace blob_db { +namespace ROCKSDB_NAMESPACE::blob_db { Status BlobDB::Open(const Options& options, const BlobDBOptions& bdb_options, const std::string& dbname, BlobDB** blob_db) { @@ -20,8 +19,7 @@ Status BlobDB::Open(const Options& options, const BlobDBOptions& bdb_options, DBOptions db_options(options); ColumnFamilyOptions cf_options(options); std::vector column_families; - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options)); + column_families.emplace_back(kDefaultColumnFamilyName, cf_options); std::vector handles; Status s = BlobDB::Open(db_options, bdb_options, dbname, column_families, &handles, blob_db); @@ -108,5 +106,4 @@ void 
BlobDBOptions::Dump(Logger* log) const { disable_background_tasks); } -} // namespace blob_db -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::blob_db diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc index c4c336a357..c88c3e8a70 100644 --- a/utilities/blob_db/blob_db_impl.cc +++ b/utilities/blob_db/blob_db_impl.cc @@ -48,8 +48,7 @@ namespace { int kBlockBasedTableVersionFormat = 2; } // end namespace -namespace ROCKSDB_NAMESPACE { -namespace blob_db { +namespace ROCKSDB_NAMESPACE::blob_db { bool BlobFileComparator::operator()( const std::shared_ptr& lhs, @@ -1461,7 +1460,6 @@ void BlobDBImpl::MultiGet(const ReadOptions& _read_options, size_t num_keys, if (snapshot_created) { db_->ReleaseSnapshot(read_options.snapshot); } - return; } bool BlobDBImpl::SetSnapshotIfNeeded(ReadOptions* read_options) { @@ -1602,8 +1600,8 @@ Status BlobDBImpl::GetRawBlobFromFile(const Slice& key, uint64_t file_number, } else { buf.reserve(static_cast(record_size)); s = reader->Read(IOOptions(), record_offset, - static_cast(record_size), &blob_record, &buf[0], - nullptr); + static_cast(record_size), &blob_record, + buf.data(), nullptr); } RecordTick(statistics_, BLOB_DB_BLOB_FILE_BYTES_READ, blob_record.size()); } @@ -1770,7 +1768,7 @@ std::pair BlobDBImpl::SanityCheck(bool aborted) { uint64_t now = EpochNow(); - for (auto blob_file_pair : blob_files_) { + for (const auto& blob_file_pair : blob_files_) { auto blob_file = blob_file_pair.second; std::ostringstream buf; @@ -1930,7 +1928,7 @@ std::pair BlobDBImpl::EvictExpiredFiles(bool aborted) { uint64_t now = EpochNow(); { ReadLock rl(&mutex_); - for (auto p : blob_files_) { + for (const auto& p : blob_files_) { auto& blob_file = p.second; ReadLock file_lock(&blob_file->mutex_); if (blob_file->HasTTL() && !blob_file->Obsolete() && @@ -1977,7 +1975,7 @@ Status BlobDBImpl::SyncBlobFiles(const WriteOptions& write_options) { std::vector> process_files; { ReadLock rl(&mutex_); - for (auto fitr : open_ttl_files_) { + for (const auto& fitr : open_ttl_files_) { process_files.push_back(fitr); } if (open_non_ttl_file_ != nullptr) { @@ -2006,7 +2004,9 @@ Status BlobDBImpl::SyncBlobFiles(const WriteOptions& write_options) { } std::pair BlobDBImpl::ReclaimOpenFiles(bool aborted) { - if (aborted) return std::make_pair(false, -1); + if (aborted) { + return std::make_pair(false, -1); + } if (open_file_count_.load() < kOpenFilesTrigger) { return std::make_pair(true, -1); @@ -2017,7 +2017,9 @@ std::pair BlobDBImpl::ReclaimOpenFiles(bool aborted) { ReadLock rl(&mutex_); for (auto const& ent : blob_files_) { auto bfile = ent.second; - if (bfile->last_access_.load() == -1) continue; + if (bfile->last_access_.load() == -1) { + continue; + } WriteLock lockbfile_w(&bfile->mutex_); CloseRandomAccessLocked(bfile); @@ -2100,7 +2102,7 @@ std::pair BlobDBImpl::DeleteObsoleteFiles(bool aborted) { // put files back into obsolete if for some reason, delete failed if (!tobsolete.empty()) { WriteLock wl(&mutex_); - for (auto bfile : tobsolete) { + for (const auto& bfile : tobsolete) { blob_files_.insert(std::make_pair(bfile->BlobFileNumber(), bfile)); obsolete_files_.push_front(bfile); } @@ -2264,5 +2266,4 @@ void BlobDBImpl::TEST_ProcessCompactionJobInfo(const CompactionJobInfo& info) { #endif // !NDEBUG -} // namespace blob_db -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::blob_db diff --git a/utilities/blob_db/blob_db_impl_filesnapshot.cc b/utilities/blob_db/blob_db_impl_filesnapshot.cc index 106c0202e8..7f9189b12e 
100644 --- a/utilities/blob_db/blob_db_impl_filesnapshot.cc +++ b/utilities/blob_db/blob_db_impl_filesnapshot.cc @@ -12,8 +12,7 @@ // BlobDBImpl methods to get snapshot of files, e.g. for replication. -namespace ROCKSDB_NAMESPACE { -namespace blob_db { +namespace ROCKSDB_NAMESPACE::blob_db { Status BlobDBImpl::DisableFileDeletions() { // Disable base DB file deletions. @@ -72,7 +71,7 @@ Status BlobDBImpl::GetLiveFiles(std::vector& ret, return s; } ret.reserve(ret.size() + blob_files_.size()); - for (auto bfile_pair : blob_files_) { + for (const auto& bfile_pair : blob_files_) { auto blob_file = bfile_pair.second; // Path should be relative to db_name, but begin with slash. ret.emplace_back( @@ -87,7 +86,7 @@ void BlobDBImpl::GetLiveFilesMetaData(std::vector* metadata) { // Hold a lock in the beginning to avoid updates to base DB during the call ReadLock rl(&mutex_); db_->GetLiveFilesMetaData(metadata); - for (auto bfile_pair : blob_files_) { + for (const auto& bfile_pair : blob_files_) { auto blob_file = bfile_pair.second; LiveFileMetaData filemetadata; filemetadata.size = blob_file->GetFileSize(); @@ -105,5 +104,4 @@ void BlobDBImpl::GetLiveFilesMetaData(std::vector* metadata) { } } -} // namespace blob_db -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::blob_db diff --git a/utilities/blob_db/blob_db_test.cc b/utilities/blob_db/blob_db_test.cc index 48856fef54..3f57cbfaa2 100644 --- a/utilities/blob_db/blob_db_test.cc +++ b/utilities/blob_db/blob_db_test.cc @@ -31,8 +31,7 @@ #include "utilities/blob_db/blob_db_impl.h" #include "utilities/fault_injection_env.h" -namespace ROCKSDB_NAMESPACE { -namespace blob_db { +namespace ROCKSDB_NAMESPACE::blob_db { class BlobDBTest : public testing::Test { public: @@ -607,7 +606,7 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) { VerifyDB(data); blob_files = blob_db_impl()->TEST_GetBlobFiles(); - for (auto bfile : blob_files) { + for (const auto &bfile : blob_files) { ASSERT_EQ(kNoCompression, bfile->GetCompressionType()); } @@ -627,7 +626,7 @@ TEST_F(BlobDBTest, EnableDisableCompressionGC) { VerifyDB(data); blob_files = blob_db_impl()->TEST_GetBlobFiles(); - for (auto bfile : blob_files) { + for (const auto &bfile : blob_files) { ASSERT_EQ(kSnappyCompression, bfile->GetCompressionType()); } } @@ -678,7 +677,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) { blob_db_impl()->TEST_DeleteObsoleteFiles(); blob_files = blob_db_impl()->TEST_GetBlobFiles(); - for (auto bfile : blob_files) { + for (const auto &bfile : blob_files) { ASSERT_EQ(kSnappyCompression, bfile->GetCompressionType()); } @@ -695,7 +694,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) { blob_db_impl()->TEST_DeleteObsoleteFiles(); blob_files = blob_db_impl()->TEST_GetBlobFiles(); - for (auto bfile : blob_files) { + for (const auto &bfile : blob_files) { ASSERT_EQ(kNoCompression, bfile->GetCompressionType()); } @@ -719,7 +718,7 @@ TEST_F(BlobDBTest, ChangeCompressionGC) { blob_db_impl()->TEST_DeleteObsoleteFiles(); blob_files = blob_db_impl()->TEST_GetBlobFiles(); - for (auto bfile : blob_files) { + for (const auto &bfile : blob_files) { ASSERT_EQ(kLZ4Compression, bfile->GetCompressionType()); } } @@ -731,8 +730,8 @@ TEST_F(BlobDBTest, MultipleWriters) { std::vector workers; std::vector> data_set(10); - for (uint32_t i = 0; i < 10; i++) - workers.push_back(port::Thread( + for (uint32_t i = 0; i < 10; i++) { + workers.emplace_back( [&](uint32_t id) { Random rnd(301 + id); for (int j = 0; j < 100; j++) { @@ -747,7 +746,8 @@ TEST_F(BlobDBTest, MultipleWriters) { } } }, - i)); + i); 
+ } std::map data; for (size_t i = 0; i < 10; i++) { workers[i].join(); @@ -1375,8 +1375,8 @@ TEST_F(BlobDBTest, UserCompactionFilter) { constexpr uint64_t kMinValueSize = 1 << 6; constexpr uint64_t kMaxValueSize = 1 << 8; constexpr uint64_t kMinBlobSize = 1 << 7; - static_assert(kMinValueSize < kMinBlobSize, ""); - static_assert(kMaxValueSize > kMinBlobSize, ""); + static_assert(kMinValueSize < kMinBlobSize); + static_assert(kMaxValueSize > kMinBlobSize); BlobDBOptions bdb_options; bdb_options.min_blob_size = kMinBlobSize; @@ -1747,8 +1747,8 @@ TEST_F(BlobDBTest, GarbageCollection) { constexpr uint64_t kSmallValueSize = 1 << 6; constexpr uint64_t kLargeValueSize = 1 << 8; constexpr uint64_t kMinBlobSize = 1 << 7; - static_assert(kSmallValueSize < kMinBlobSize, ""); - static_assert(kLargeValueSize > kMinBlobSize, ""); + static_assert(kSmallValueSize < kMinBlobSize); + static_assert(kLargeValueSize > kMinBlobSize); constexpr size_t kBlobsPerFile = 8; constexpr size_t kNumBlobFiles = kNumPuts / kBlobsPerFile; @@ -1999,7 +1999,7 @@ TEST_F(BlobDBTest, EvictExpiredFile) { ASSERT_EQ(0, blob_db_impl()->TEST_GetObsoleteFiles().size()); // Make sure we don't return garbage value after blob file being evicted, // but the blob index still exists in the LSM tree. - std::string val = ""; + std::string val; ASSERT_TRUE(blob_db_->Get(ReadOptions(), "foo", &val).IsNotFound()); ASSERT_EQ("", val); } @@ -2413,8 +2413,7 @@ TEST_F(BlobDBTest, SyncBlobFileBeforeCloseIOError) { ASSERT_TRUE(s.IsIOError()); } -} // namespace blob_db -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::blob_db // A black-box test for the ttl wrapper around rocksdb int main(int argc, char **argv) { diff --git a/utilities/blob_db/blob_dump_tool.cc b/utilities/blob_db/blob_dump_tool.cc index 0c2fef5e15..933803f8f3 100644 --- a/utilities/blob_db/blob_dump_tool.cc +++ b/utilities/blob_db/blob_dump_tool.cc @@ -5,9 +5,8 @@ #include "utilities/blob_db/blob_dump_tool.h" -#include - #include +#include #include #include #include @@ -21,8 +20,7 @@ #include "util/coding.h" #include "util/string_util.h" -namespace ROCKSDB_NAMESPACE { -namespace blob_db { +namespace ROCKSDB_NAMESPACE::blob_db { BlobDumpTool::BlobDumpTool() : reader_(nullptr), buffer_(nullptr), buffer_size_(0) {} @@ -275,5 +273,4 @@ std::string BlobDumpTool::GetString(std::pair p) { return "(" + std::to_string(p.first) + ", " + std::to_string(p.second) + ")"; } -} // namespace blob_db -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::blob_db diff --git a/utilities/blob_db/blob_file.cc b/utilities/blob_db/blob_file.cc index c4c0556fb1..5a479dc8bd 100644 --- a/utilities/blob_db/blob_file.cc +++ b/utilities/blob_db/blob_file.cc @@ -5,10 +5,9 @@ // (found in the LICENSE.Apache file in the root directory). 
#include "utilities/blob_db/blob_file.h" -#include - #include #include +#include #include #include "db/column_family.h" @@ -19,9 +18,7 @@ #include "logging/logging.h" #include "utilities/blob_db/blob_db_impl.h" -namespace ROCKSDB_NAMESPACE { - -namespace blob_db { +namespace ROCKSDB_NAMESPACE::blob_db { BlobFile::BlobFile(const BlobDBImpl* p, const std::string& bdir, uint64_t fn, Logger* info_log) @@ -120,9 +117,11 @@ Status BlobFile::ReadFooter(BlobLogFooter* bf) { } else { buf.reserve(BlobLogFooter::kSize + 10); s = ra_file_reader_->Read(IOOptions(), footer_offset, BlobLogFooter::kSize, - &result, &buf[0], nullptr); + &result, buf.data(), nullptr); + } + if (!s.ok()) { + return s; } - if (!s.ok()) return s; if (result.size() != BlobLogFooter::kSize) { // should not happen return Status::IOError("EOF reached before footer"); @@ -242,7 +241,7 @@ Status BlobFile::ReadMetadata(const std::shared_ptr& fs, } else { header_buf.reserve(BlobLogHeader::kSize); s = file_reader->Read(IOOptions(), 0, BlobLogHeader::kSize, &header_slice, - &header_buf[0], nullptr); + header_buf.data(), nullptr); } if (!s.ok()) { ROCKS_LOG_ERROR( @@ -283,8 +282,8 @@ Status BlobFile::ReadMetadata(const std::shared_ptr& fs, } else { footer_buf.reserve(BlobLogFooter::kSize); s = file_reader->Read(IOOptions(), file_size - BlobLogFooter::kSize, - BlobLogFooter::kSize, &footer_slice, &footer_buf[0], - nullptr); + BlobLogFooter::kSize, &footer_slice, + footer_buf.data(), nullptr); } if (!s.ok()) { ROCKS_LOG_ERROR( @@ -309,5 +308,4 @@ Status BlobFile::ReadMetadata(const std::shared_ptr& fs, return Status::OK(); } -} // namespace blob_db -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::blob_db diff --git a/utilities/cassandra/cassandra_compaction_filter.cc b/utilities/cassandra/cassandra_compaction_filter.cc index b7da2ba0cb..21a81e1960 100644 --- a/utilities/cassandra/cassandra_compaction_filter.cc +++ b/utilities/cassandra/cassandra_compaction_filter.cc @@ -13,8 +13,7 @@ #include "utilities/cassandra/format.h" #include "utilities/cassandra/merge_operator.h" -namespace ROCKSDB_NAMESPACE { -namespace cassandra { +namespace ROCKSDB_NAMESPACE::cassandra { static std::unordered_map cassandra_filter_type_info = { {"purge_ttl_on_expiration", @@ -102,5 +101,4 @@ int RegisterCassandraObjects(ObjectLibrary& library, size_t num_types; return static_cast(library.GetFactoryCount(&num_types)); } -} // namespace cassandra -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::cassandra diff --git a/utilities/cassandra/cassandra_format_test.cc b/utilities/cassandra/cassandra_format_test.cc index 4f12947ad9..128dad4fea 100644 --- a/utilities/cassandra/cassandra_format_test.cc +++ b/utilities/cassandra/cassandra_format_test.cc @@ -11,8 +11,7 @@ #include "utilities/cassandra/serialize.h" #include "utilities/cassandra/test_utils.h" -namespace ROCKSDB_NAMESPACE { -namespace cassandra { +namespace ROCKSDB_NAMESPACE::cassandra { TEST(ColumnTest, Column) { char data[4] = {'d', 'a', 't', 'a'}; @@ -367,8 +366,7 @@ TEST(RowValueTest, ExpireTtlShouldConvertExpiredColumnsToTombstones) { compacted.ConvertExpiredColumnsToTombstones(&changed); EXPECT_FALSE(changed); } -} // namespace cassandra -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::cassandra int main(int argc, char** argv) { ROCKSDB_NAMESPACE::port::InstallStackTraceHandler(); diff --git a/utilities/cassandra/cassandra_functional_test.cc b/utilities/cassandra/cassandra_functional_test.cc index e3266a0dc1..28fba3acb8 100644 --- 
a/utilities/cassandra/cassandra_functional_test.cc +++ b/utilities/cassandra/cassandra_functional_test.cc @@ -18,8 +18,7 @@ #include "utilities/cassandra/test_utils.h" #include "utilities/merge_operators.h" -namespace ROCKSDB_NAMESPACE { -namespace cassandra { +namespace ROCKSDB_NAMESPACE::cassandra { // Path to the database on file system const std::string kDbName = test::PerThreadDBPath("cassandra_functional_test"); @@ -434,8 +433,7 @@ TEST_F(CassandraFunctionalTest, LoadCompactionFilterFactory) { ASSERT_TRUE(opts->purge_ttl_on_expiration); } -} // namespace cassandra -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::cassandra int main(int argc, char** argv) { ROCKSDB_NAMESPACE::port::InstallStackTraceHandler(); diff --git a/utilities/cassandra/cassandra_row_merge_test.cc b/utilities/cassandra/cassandra_row_merge_test.cc index 0b4a892871..c54398458d 100644 --- a/utilities/cassandra/cassandra_row_merge_test.cc +++ b/utilities/cassandra/cassandra_row_merge_test.cc @@ -9,8 +9,7 @@ #include "utilities/cassandra/format.h" #include "utilities/cassandra/test_utils.h" -namespace ROCKSDB_NAMESPACE { -namespace cassandra { +namespace ROCKSDB_NAMESPACE::cassandra { class RowValueMergeTest : public testing::Test {}; @@ -88,8 +87,7 @@ TEST(RowValueMergeTest, MergeWithRowTombstone) { EXPECT_EQ(merged.LastModifiedTime(), 17); } -} // namespace cassandra -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::cassandra int main(int argc, char** argv) { ROCKSDB_NAMESPACE::port::InstallStackTraceHandler(); diff --git a/utilities/cassandra/cassandra_serialize_test.cc b/utilities/cassandra/cassandra_serialize_test.cc index c14d8fd809..f05fd44085 100644 --- a/utilities/cassandra/cassandra_serialize_test.cc +++ b/utilities/cassandra/cassandra_serialize_test.cc @@ -6,8 +6,7 @@ #include "test_util/testharness.h" #include "utilities/cassandra/serialize.h" -namespace ROCKSDB_NAMESPACE { -namespace cassandra { +namespace ROCKSDB_NAMESPACE::cassandra { TEST(SerializeTest, SerializeI64) { std::string dest; @@ -154,8 +153,7 @@ TEST(SerializeTest, DeserializeI8) { EXPECT_EQ(-128, Deserialize(dest.c_str(), offset)); } -} // namespace cassandra -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::cassandra int main(int argc, char** argv) { ROCKSDB_NAMESPACE::port::InstallStackTraceHandler(); diff --git a/utilities/cassandra/format.cc b/utilities/cassandra/format.cc index cc1dd2f280..dc2548bd94 100644 --- a/utilities/cassandra/format.cc +++ b/utilities/cassandra/format.cc @@ -11,8 +11,7 @@ #include "utilities/cassandra/serialize.h" -namespace ROCKSDB_NAMESPACE { -namespace cassandra { +namespace ROCKSDB_NAMESPACE::cassandra { namespace { const int32_t kDefaultLocalDeletionTime = std::numeric_limits::max(); const int64_t kDefaultMarkedForDeleteAt = std::numeric_limits::min(); @@ -363,5 +362,4 @@ RowValue RowValue::Merge(std::vector&& values) { return RowValue(std::move(columns), last_modified_time); } -} // namespace cassandra -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::cassandra diff --git a/utilities/cassandra/merge_operator.cc b/utilities/cassandra/merge_operator.cc index 366d8fa443..5c9727f74f 100644 --- a/utilities/cassandra/merge_operator.cc +++ b/utilities/cassandra/merge_operator.cc @@ -5,8 +5,7 @@ #include "merge_operator.h" -#include - +#include #include #include "rocksdb/merge_operator.h" @@ -15,8 +14,7 @@ #include "utilities/cassandra/format.h" #include "utilities/merge_operators.h" -namespace ROCKSDB_NAMESPACE { -namespace cassandra { 
+namespace ROCKSDB_NAMESPACE::cassandra { static std::unordered_map merge_operator_options_info = { {"gc_grace_period_in_seconds", @@ -75,6 +73,4 @@ bool CassandraValueMergeOperator::PartialMergeMulti( return true; } -} // namespace cassandra - -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::cassandra diff --git a/utilities/cassandra/test_utils.cc b/utilities/cassandra/test_utils.cc index ec6e5752d0..3615813500 100644 --- a/utilities/cassandra/test_utils.cc +++ b/utilities/cassandra/test_utils.cc @@ -5,8 +5,7 @@ #include "test_utils.h" -namespace ROCKSDB_NAMESPACE { -namespace cassandra { +namespace ROCKSDB_NAMESPACE::cassandra { const char kData[] = {'d', 'a', 't', 'a'}; const char kExpiringData[] = {'e', 'd', 'a', 't', 'a'}; const int32_t kTtl = 86400; @@ -65,5 +64,4 @@ int64_t ToMicroSeconds(int64_t seconds) { return seconds * (int64_t)1000000; } int32_t ToSeconds(int64_t microseconds) { return (int32_t)(microseconds / (int64_t)1000000); } -} // namespace cassandra -} // namespace ROCKSDB_NAMESPACE +} // namespace ROCKSDB_NAMESPACE::cassandra diff --git a/utilities/checkpoint/checkpoint_test.cc b/utilities/checkpoint/checkpoint_test.cc index a9cea1c058..cdda1c0595 100644 --- a/utilities/checkpoint/checkpoint_test.cc +++ b/utilities/checkpoint/checkpoint_test.cc @@ -112,7 +112,7 @@ class CheckpointTest : public testing::Test { ColumnFamilyOptions cf_opts(options); size_t cfi = handles_.size(); handles_.resize(cfi + cfs.size()); - for (auto cf : cfs) { + for (const auto& cf : cfs) { ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++])); } } @@ -141,7 +141,7 @@ class CheckpointTest : public testing::Test { EXPECT_EQ(cfs.size(), options.size()); std::vector column_families; for (size_t i = 0; i < cfs.size(); ++i) { - column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i])); + column_families.emplace_back(cfs[i], options[i]); } DBOptions db_opts = DBOptions(options[0]); return DB::Open(db_opts, dbname_, column_families, &handles_, &db_); @@ -507,7 +507,7 @@ TEST_F(CheckpointTest, CheckpointCF) { cfs = {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"}; std::vector column_families; for (size_t i = 0; i < cfs.size(); ++i) { - column_families.push_back(ColumnFamilyDescriptor(cfs[i], options)); + column_families.emplace_back(cfs[i], options); } ASSERT_OK(DB::Open(options, snapshot_name_, column_families, &cphandles, &snapshotDB)); @@ -565,7 +565,7 @@ TEST_F(CheckpointTest, CheckpointCFNoFlush) { cfs = {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"}; std::vector column_families; for (size_t i = 0; i < cfs.size(); ++i) { - column_families.push_back(ColumnFamilyDescriptor(cfs[i], options)); + column_families.emplace_back(cfs[i], options); } ASSERT_OK(DB::Open(options, snapshot_name_, column_families, &cphandles, &snapshotDB)); @@ -717,12 +717,9 @@ TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) { TransactionDB* snapshotDB; std::vector column_families; - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions())); - column_families.push_back( - ColumnFamilyDescriptor("CFA", ColumnFamilyOptions())); - column_families.push_back( - ColumnFamilyDescriptor("CFB", ColumnFamilyOptions())); + column_families.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions()); + column_families.emplace_back("CFA", ColumnFamilyOptions()); + column_families.emplace_back("CFB", ColumnFamilyOptions()); std::vector cf_handles; ASSERT_OK(TransactionDB::Open(options, txn_db_options, 
snapshot_name_, column_families, &cf_handles, &snapshotDB)); diff --git a/utilities/env_mirror.cc b/utilities/env_mirror.cc index 0edb33df45..b8c74e5a71 100644 --- a/utilities/env_mirror.cc +++ b/utilities/env_mirror.cc @@ -214,10 +214,11 @@ Status EnvMirror::NewSequentialFile(const std::string& f, Status as = a_->NewSequentialFile(f, &mf->a_, options); Status bs = b_->NewSequentialFile(f, &mf->b_, options); assert(as == bs); - if (as.ok()) + if (as.ok()) { r->reset(mf); - else + } else { delete mf; + } return as; } @@ -231,25 +232,29 @@ Status EnvMirror::NewRandomAccessFile(const std::string& f, Status as = a_->NewRandomAccessFile(f, &mf->a_, options); Status bs = b_->NewRandomAccessFile(f, &mf->b_, options); assert(as == bs); - if (as.ok()) + if (as.ok()) { r->reset(mf); - else + } else { delete mf; + } return as; } Status EnvMirror::NewWritableFile(const std::string& f, std::unique_ptr* r, const EnvOptions& options) { - if (f.find("/proc/") == 0) return a_->NewWritableFile(f, r, options); + if (f.find("/proc/") == 0) { + return a_->NewWritableFile(f, r, options); + } WritableFileMirror* mf = new WritableFileMirror(f, options); Status as = a_->NewWritableFile(f, &mf->a_, options); Status bs = b_->NewWritableFile(f, &mf->b_, options); assert(as == bs); - if (as.ok()) + if (as.ok()) { r->reset(mf); - else + } else { delete mf; + } return as; } @@ -257,16 +262,18 @@ Status EnvMirror::ReuseWritableFile(const std::string& fname, const std::string& old_fname, std::unique_ptr* r, const EnvOptions& options) { - if (fname.find("/proc/") == 0) + if (fname.find("/proc/") == 0) { return a_->ReuseWritableFile(fname, old_fname, r, options); + } WritableFileMirror* mf = new WritableFileMirror(fname, options); Status as = a_->ReuseWritableFile(fname, old_fname, &mf->a_, options); Status bs = b_->ReuseWritableFile(fname, old_fname, &mf->b_, options); assert(as == bs); - if (as.ok()) + if (as.ok()) { r->reset(mf); - else + } else { delete mf; + } return as; } diff --git a/utilities/fault_injection_env.cc b/utilities/fault_injection_env.cc index b0495a8c18..fb443cc87f 100644 --- a/utilities/fault_injection_env.cc +++ b/utilities/fault_injection_env.cc @@ -71,7 +71,7 @@ Status Truncate(Env* env, const std::string& filename, uint64_t length) { // Trim the tailing "/" in the end of `str` std::string TrimDirname(const std::string& str) { - size_t found = str.find_last_not_of("/"); + size_t found = str.find_last_not_of('/'); if (found == std::string::npos) { return str; } @@ -528,7 +528,7 @@ Status FaultInjectionTestEnv::DeleteFilesCreatedAfterLastDirSync() { } for (auto& pair : map_copy) { - for (std::string name : pair.second) { + for (const std::string& name : pair.second) { Status s = DeleteFile(pair.first + "/" + name); if (!s.ok()) { return s; diff --git a/utilities/fault_injection_fs.cc b/utilities/fault_injection_fs.cc index 8c3c3a447e..7777877010 100644 --- a/utilities/fault_injection_fs.cc +++ b/utilities/fault_injection_fs.cc @@ -33,7 +33,7 @@ namespace ROCKSDB_NAMESPACE { -const std::string kNewFileNoOverwrite = ""; +const std::string kNewFileNoOverwrite; // Assume a filename, and not a directory name like "/foo/bar/" std::string TestFSGetDirName(const std::string filename) { @@ -47,7 +47,7 @@ std::string TestFSGetDirName(const std::string filename) { // Trim the tailing "/" in the end of `str` std::string TestFSTrimDirname(const std::string& str) { - size_t found = str.find_last_not_of("/"); + size_t found = str.find_last_not_of('/'); if (found == std::string::npos) { return str; } @@ -74,7 +74,6 
@@ void CalculateTypedChecksum(const ChecksumType& checksum_type, const char* data, uint32_t v = XXH32(data, size, 0); PutFixed32(checksum, v); } - return; } IOStatus FSFileState::DropUnsyncedData() { @@ -1014,7 +1013,7 @@ IOStatus FaultInjectionTestFS::InjectThreadSpecificReadError( bool FaultInjectionTestFS::TryParseFileName(const std::string& file_name, uint64_t* number, FileType* type) { - std::size_t found = file_name.find_last_of("/"); + std::size_t found = file_name.find_last_of('/'); std::string file = file_name.substr(found); return ParseFileName(file, number, type); } diff --git a/utilities/memory/memory_test.cc b/utilities/memory/memory_test.cc index 8255a6cad7..3a64fc3fa9 100644 --- a/utilities/memory/memory_test.cc +++ b/utilities/memory/memory_test.cc @@ -65,7 +65,7 @@ class MemoryTest : public testing::Test { if (db_impl != nullptr) { ASSERT_OK(db_impl->TEST_GetAllImmutableCFOptions(&iopts_map)); } - for (auto pair : iopts_map) { + for (const auto& pair : iopts_map) { GetCachePointersFromTableFactory(pair.second->table_factory.get(), cache_set); } diff --git a/utilities/merge_operators/sortlist.cc b/utilities/merge_operators/sortlist.cc index 67bfc7e5ea..ff0063779f 100644 --- a/utilities/merge_operators/sortlist.cc +++ b/utilities/merge_operators/sortlist.cc @@ -52,7 +52,9 @@ bool SortList::PartialMergeMulti(const Slice& /*key*/, void SortList::MakeVector(std::vector& operand, Slice slice) const { do { const char* begin = slice.data_; - while (*slice.data_ != ',' && *slice.data_) slice.data_++; + while (*slice.data_ != ',' && *slice.data_) { + slice.data_++; + } operand.push_back(std::stoi(std::string(begin, slice.data_))); } while (0 != *slice.data_++); } diff --git a/utilities/merge_operators/string_append/stringappend.cc b/utilities/merge_operators/string_append/stringappend.cc index 748e5c89f6..4ea250c4d7 100644 --- a/utilities/merge_operators/string_append/stringappend.cc +++ b/utilities/merge_operators/string_append/stringappend.cc @@ -7,8 +7,7 @@ #include "stringappend.h" -#include - +#include #include #include "rocksdb/merge_operator.h" diff --git a/utilities/merge_operators/string_append/stringappend2.cc b/utilities/merge_operators/string_append/stringappend2.cc index bd0716cc3c..31972402fe 100644 --- a/utilities/merge_operators/string_append/stringappend2.cc +++ b/utilities/merge_operators/string_append/stringappend2.cc @@ -5,8 +5,7 @@ #include "stringappend2.h" -#include - +#include #include #include diff --git a/utilities/object_registry.cc b/utilities/object_registry.cc index 786f2ee2e4..105d52bf5a 100644 --- a/utilities/object_registry.cc +++ b/utilities/object_registry.cc @@ -5,7 +5,7 @@ #include "rocksdb/utilities/object_registry.h" -#include +#include #include "logging/logging.h" #include "port/lang.h" diff --git a/utilities/option_change_migration/option_change_migration_test.cc b/utilities/option_change_migration/option_change_migration_test.cc index 1cb42a0cac..d6114f8331 100644 --- a/utilities/option_change_migration/option_change_migration_test.cc +++ b/utilities/option_change_migration/option_change_migration_test.cc @@ -119,7 +119,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate1) { { std::unique_ptr it(db_->NewIterator(ReadOptions())); it->SeekToFirst(); - for (std::string key : keys) { + for (const std::string& key : keys) { ASSERT_TRUE(it->Valid()); ASSERT_EQ(key, it->key().ToString()); it->Next(); @@ -199,7 +199,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate2) { { std::unique_ptr it(db_->NewIterator(ReadOptions())); it->SeekToFirst(); - for 
(std::string key : keys) { + for (const std::string& key : keys) { ASSERT_TRUE(it->Valid()); ASSERT_EQ(key, it->key().ToString()); it->Next(); @@ -285,7 +285,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate3) { { std::unique_ptr it(db_->NewIterator(ReadOptions())); it->SeekToFirst(); - for (std::string key : keys) { + for (const std::string& key : keys) { ASSERT_TRUE(it->Valid()); ASSERT_EQ(key, it->key().ToString()); it->Next(); @@ -371,7 +371,7 @@ TEST_P(DBOptionChangeMigrationTests, Migrate4) { { std::unique_ptr it(db_->NewIterator(ReadOptions())); it->SeekToFirst(); - for (std::string key : keys) { + for (const std::string& key : keys) { ASSERT_TRUE(it->Valid()); ASSERT_EQ(key, it->key().ToString()); it->Next(); @@ -538,7 +538,7 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) { { std::unique_ptr it(db_->NewIterator(ReadOptions())); it->SeekToFirst(); - for (std::string key : keys) { + for (const std::string& key : keys) { ASSERT_TRUE(it->Valid()); ASSERT_EQ(key, it->key().ToString()); it->Next(); diff --git a/utilities/options/options_util_test.cc b/utilities/options/options_util_test.cc index 2d08c3dd06..5c4530e617 100644 --- a/utilities/options/options_util_test.cc +++ b/utilities/options/options_util_test.cc @@ -121,8 +121,8 @@ TEST_F(OptionsUtilTest, SaveAndLoadWithCacheCheck) { std::vector cf_names; cf_names.push_back(kDefaultColumnFamilyName); - cf_names.push_back("cf_sample"); - cf_names.push_back("cf_plain_table_sample"); + cf_names.emplace_back("cf_sample"); + cf_names.emplace_back("cf_plain_table_sample"); // Saving DB in file const std::string kFileName = "OPTIONS-LOAD_CACHE_123456"; ASSERT_OK(PersistRocksDBOptions(WriteOptions(), db_opt, cf_names, cf_opts, @@ -151,8 +151,8 @@ TEST_F(OptionsUtilTest, SaveAndLoadWithCacheCheck) { namespace { class DummyTableFactory : public TableFactory { public: - DummyTableFactory() {} - ~DummyTableFactory() override {} + DummyTableFactory() = default; + ~DummyTableFactory() override = default; const char* Name() const override { return "DummyTableFactory"; } @@ -183,8 +183,8 @@ class DummyTableFactory : public TableFactory { class DummyMergeOperator : public MergeOperator { public: - DummyMergeOperator() {} - ~DummyMergeOperator() override {} + DummyMergeOperator() = default; + ~DummyMergeOperator() override = default; bool FullMergeV2(const MergeOperationInput& /*merge_in*/, MergeOperationOutput* /*merge_out*/) const override { @@ -203,8 +203,8 @@ class DummyMergeOperator : public MergeOperator { class DummySliceTransform : public SliceTransform { public: - DummySliceTransform() {} - ~DummySliceTransform() override {} + DummySliceTransform() = default; + ~DummySliceTransform() override = default; // Return the name of this transformation. 
const char* Name() const override { return "DummySliceTransform"; } diff --git a/utilities/persistent_cache/block_cache_tier.cc b/utilities/persistent_cache/block_cache_tier.cc index 3118fc2df6..864cb682ea 100644 --- a/utilities/persistent_cache/block_cache_tier.cc +++ b/utilities/persistent_cache/block_cache_tier.cc @@ -78,7 +78,7 @@ bool IsCacheFile(const std::string& file) { // check if the file has .rc suffix // Unfortunately regex support across compilers is not even, so we use simple // string parsing - size_t pos = file.find("."); + size_t pos = file.find('.'); if (pos == std::string::npos) { return false; } @@ -97,7 +97,7 @@ Status BlockCacheTier::CleanupCacheFolder(const std::string& folder) { } // cleanup files with the patter :digi:.rc - for (auto file : files) { + for (const auto& file : files) { if (IsCacheFile(file)) { // cache file Info(opt_.log, "Removing file %s.", file.c_str()); diff --git a/utilities/persistent_cache/block_cache_tier_file.cc b/utilities/persistent_cache/block_cache_tier_file.cc index ff01c1abcf..493b922367 100644 --- a/utilities/persistent_cache/block_cache_tier_file.cc +++ b/utilities/persistent_cache/block_cache_tier_file.cc @@ -79,7 +79,7 @@ struct CacheRecordHeader { }; struct CacheRecord { - CacheRecord() {} + CacheRecord() = default; CacheRecord(const Slice& key, const Slice& val) : hdr_(MAGIC, static_cast(key.size()), static_cast(val.size())), diff --git a/utilities/persistent_cache/hash_table_test.cc b/utilities/persistent_cache/hash_table_test.cc index faae2cf214..7ae6a4a643 100644 --- a/utilities/persistent_cache/hash_table_test.cc +++ b/utilities/persistent_cache/hash_table_test.cc @@ -5,8 +5,7 @@ // #include "utilities/persistent_cache/hash_table.h" -#include - +#include #include #include #include @@ -17,14 +16,13 @@ #include "util/random.h" #include "utilities/persistent_cache/hash_table_evictable.h" - namespace ROCKSDB_NAMESPACE { struct HashTableTest : public testing::Test { ~HashTableTest() override { map_.Clear(&HashTableTest::ClearNode); } struct Node { - Node() {} + Node() = default; explicit Node(const uint64_t key, const std::string& val = std::string()) : key_(key), val_(val) {} @@ -55,7 +53,7 @@ struct EvictableHashTableTest : public testing::Test { } struct Node : LRUElement { - Node() {} + Node() = default; explicit Node(const uint64_t key, const std::string& val = std::string()) : key_(key), val_(val) {} diff --git a/utilities/persistent_cache/persistent_cache_tier.cc b/utilities/persistent_cache/persistent_cache_tier.cc index 773aafbf26..cfa3722b48 100644 --- a/utilities/persistent_cache/persistent_cache_tier.cc +++ b/utilities/persistent_cache/persistent_cache_tier.cc @@ -82,9 +82,9 @@ bool PersistentCacheTier::Erase(const Slice& /*key*/) { std::string PersistentCacheTier::PrintStats() { std::ostringstream os; - for (auto tier_stats : Stats()) { + for (const auto& tier_stats : Stats()) { os << "---- next tier -----" << std::endl; - for (auto stat : tier_stats) { + for (const auto& stat : tier_stats) { os << stat.first << ": " << stat.second << std::endl; } } diff --git a/utilities/simulator_cache/sim_cache.cc b/utilities/simulator_cache/sim_cache.cc index 6d2bf098c4..1daaeb2199 100644 --- a/utilities/simulator_cache/sim_cache.cc +++ b/utilities/simulator_cache/sim_cache.cc @@ -157,7 +157,7 @@ class SimCacheImpl : public SimCache { hit_times_(0), stats_(nullptr) {} - ~SimCacheImpl() override {} + ~SimCacheImpl() override = default; const char* Name() const override { return "SimCache"; } diff --git 
a/utilities/simulator_cache/sim_cache_test.cc b/utilities/simulator_cache/sim_cache_test.cc index 2e37cd3479..e9e3fcd9d9 100644 --- a/utilities/simulator_cache/sim_cache_test.cc +++ b/utilities/simulator_cache/sim_cache_test.cc @@ -175,7 +175,7 @@ TEST_F(SimCacheTest, SimCacheLogging) { sim_cache->StopActivityLogging(); ASSERT_OK(sim_cache->GetActivityLoggingStatus()); - std::string file_contents = ""; + std::string file_contents; ASSERT_OK(ReadFileToString(env_, log_file, &file_contents)); std::istringstream contents(file_contents); diff --git a/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc b/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc index 5de18e2624..34a47dbf3f 100644 --- a/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc +++ b/utilities/table_properties_collectors/compact_on_deletion_collector_test.cc @@ -7,10 +7,11 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. -#include +#include "utilities/table_properties_collectors/compact_on_deletion_collector.h" #include #include +#include #include #include "port/stack_trace.h" @@ -19,7 +20,6 @@ #include "rocksdb/utilities/table_properties_collectors.h" #include "test_util/testharness.h" #include "util/random.h" -#include "utilities/table_properties_collectors/compact_on_deletion_collector.h" namespace ROCKSDB_NAMESPACE { diff --git a/utilities/transactions/lock/point/point_lock_manager.cc b/utilities/transactions/lock/point/point_lock_manager.cc index b73b3fe761..0521097778 100644 --- a/utilities/transactions/lock/point/point_lock_manager.cc +++ b/utilities/transactions/lock/point/point_lock_manager.cc @@ -34,9 +34,8 @@ struct LockInfo { txn_ids.push_back(id); } LockInfo(const LockInfo& lock_info) - : exclusive(lock_info.exclusive), - txn_ids(lock_info.txn_ids), - expiration_time(lock_info.expiration_time) {} + + = default; void operator=(const LockInfo& lock_info) { exclusive = lock_info.exclusive; txn_ids = lock_info.txn_ids; diff --git a/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc b/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc index 6dc86cc999..b1fb736a36 100644 --- a/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc +++ b/utilities/transactions/lock/range/range_tree/lib/standalone_port.cc @@ -7,7 +7,7 @@ /* This is a dump ground to make Lock Tree work without the rest of TokuDB. */ -#include +#include #include "db.h" #include "ft/ft-status.h" @@ -53,7 +53,9 @@ size_t toku_memory_footprint(void *, size_t touched) { return touched; } // "TOKU" LTM_STATUS_S ltm_status; void LTM_STATUS_S::init() { - if (m_initialized) return; + if (m_initialized) { + return; + } #define LTM_STATUS_INIT(k, c, t, l) \ TOKUFT_STATUS_INIT((*this), k, c, t, "locktree: " l, \ TOKU_ENGINE_STATUS | TOKU_GLOBAL_STATUS) @@ -104,7 +106,9 @@ void LTM_STATUS_S::init() { #undef LTM_STATUS_INIT } void LTM_STATUS_S::destroy() { - if (!m_initialized) return; + if (!m_initialized) { + return; + } for (int i = 0; i < LTM_STATUS_NUM_ROWS; ++i) { if (status[i].type == STATUS_PARCOUNT) { // PORT: TODO?? 
destroy_partitioned_counter(status[i].value.parcount); diff --git a/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc b/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc index 65ca91b0ba..584d9ebc27 100644 --- a/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc +++ b/utilities/transactions/lock/range/range_tree/range_tree_lock_manager.cc @@ -149,7 +149,7 @@ Status RangeTreeLockManager::TryLock(PessimisticTransaction* txn, // the lock waits that are in progress. void wait_callback_for_locktree(void*, toku::lock_wait_infos* infos) { TEST_SYNC_POINT("RangeTreeLockManager::TryRangeLock:EnterWaitingTxn"); - for (auto wait_info : *infos) { + for (const auto& wait_info : *infos) { // As long as we hold the lock on the locktree's pending request queue // this should be safe. auto txn = (PessimisticTransaction*)wait_info.waiter; @@ -305,7 +305,7 @@ std::vector RangeTreeLockManager::GetDeadlockInfoBuffer() { path.push_back( {it2->m_txn_id, it2->m_cf_id, it2->m_exclusive, it2->m_start.slice}); } - res.push_back(DeadlockPath(path, it->deadlock_time)); + res.emplace_back(path, it->deadlock_time); } return res; } @@ -489,7 +489,7 @@ LockManager::RangeLockStatus RangeTreeLockManager::GetRangeLockStatus() { LockManager::RangeLockStatus data; { InstrumentedMutexLock l(<ree_map_mutex_); - for (auto it : ltree_map_) { + for (const auto& it : ltree_map_) { LOCK_PRINT_CONTEXT ctx = {&data, it.first}; it.second->dump_locks((void*)&ctx, push_into_lock_status_data); } diff --git a/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc b/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc index 5bfb863376..8457332534 100644 --- a/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc +++ b/utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc @@ -12,7 +12,9 @@ namespace ROCKSDB_NAMESPACE { RangeLockList *RangeTreeLockTracker::getOrCreateList() { - if (range_list_) return range_list_.get(); + if (range_list_) { + return range_list_.get(); + } // Doesn't exist, create range_list_.reset(new RangeLockList()); @@ -103,7 +105,7 @@ void RangeLockList::ReleaseLocks(RangeTreeLockManager *mgr, releasing_locks_.store(true); } - for (auto it : buffers_) { + for (const auto &it : buffers_) { // Don't try to call release_locks() if the buffer is empty! 
if we are // not holding any locks, the lock tree might be in the STO-mode with // another transaction, and our attempt to release an empty set of locks diff --git a/utilities/transactions/optimistic_transaction.cc b/utilities/transactions/optimistic_transaction.cc index e8506f2816..bbd99575fb 100644 --- a/utilities/transactions/optimistic_transaction.cc +++ b/utilities/transactions/optimistic_transaction.cc @@ -19,7 +19,6 @@ #include "util/defer.h" #include "util/string_util.h" #include "utilities/transactions/lock/point/point_lock_tracker.h" -#include "utilities/transactions/optimistic_transaction.h" #include "utilities/transactions/optimistic_transaction_db_impl.h" #include "utilities/transactions/transaction_util.h" @@ -50,7 +49,7 @@ void OptimisticTransaction::Reinitialize( Initialize(txn_options); } -OptimisticTransaction::~OptimisticTransaction() {} +OptimisticTransaction::~OptimisticTransaction() = default; void OptimisticTransaction::Clear() { TransactionBaseImpl::Clear(); } diff --git a/utilities/transactions/optimistic_transaction_db_impl.cc b/utilities/transactions/optimistic_transaction_db_impl.cc index 564bf8b6aa..817cbdd688 100644 --- a/utilities/transactions/optimistic_transaction_db_impl.cc +++ b/utilities/transactions/optimistic_transaction_db_impl.cc @@ -43,8 +43,7 @@ Status OptimisticTransactionDB::Open(const Options& options, DBOptions db_options(options); ColumnFamilyOptions cf_options(options); std::vector column_families; - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options)); + column_families.emplace_back(kDefaultColumnFamilyName, cf_options); std::vector handles; Status s = Open(db_options, dbname, column_families, &handles, dbptr); if (s.ok()) { diff --git a/utilities/transactions/optimistic_transaction_test.cc b/utilities/transactions/optimistic_transaction_test.cc index 7334941804..690b6bdd00 100644 --- a/utilities/transactions/optimistic_transaction_test.cc +++ b/utilities/transactions/optimistic_transaction_test.cc @@ -62,8 +62,7 @@ class OptimisticTransactionTest ColumnFamilyOptions cf_options(options); std::vector column_families; std::vector handles; - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options)); + column_families.emplace_back(kDefaultColumnFamilyName, cf_options); OptimisticTransactionDB* raw_txn_db = nullptr; Status s = OptimisticTransactionDB::Open( options, occ_opts, dbname, column_families, &handles, &raw_txn_db); @@ -654,13 +653,10 @@ TEST_P(OptimisticTransactionTest, ColumnFamiliesTest) { // open DB with three column families std::vector column_families; // have to open default column family - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions())); + column_families.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions()); // open the new column families - column_families.push_back( - ColumnFamilyDescriptor("CFA", ColumnFamilyOptions())); - column_families.push_back( - ColumnFamilyDescriptor("CFB", ColumnFamilyOptions())); + column_families.emplace_back("CFA", ColumnFamilyOptions()); + column_families.emplace_back("CFB", ColumnFamilyOptions()); std::vector handles; OptimisticTransactionDB* raw_txn_db = nullptr; ASSERT_OK(OptimisticTransactionDB::Open( diff --git a/utilities/transactions/pessimistic_transaction.cc b/utilities/transactions/pessimistic_transaction.cc index 5f6f7f157f..70a623ec4c 100644 --- a/utilities/transactions/pessimistic_transaction.cc +++ b/utilities/transactions/pessimistic_transaction.cc @@ 
-884,7 +884,7 @@ Status PessimisticTransaction::LockBatch(WriteBatch* batch, // what the sorting is as long as it's consistent. std::map> keys_; - Handler() {} + Handler() = default; void RecordKey(uint32_t column_family_id, const Slice& key) { auto& cfh_keys = keys_[column_family_id]; diff --git a/utilities/transactions/pessimistic_transaction_db.cc b/utilities/transactions/pessimistic_transaction_db.cc index 661e6bc4d7..57c14b5f7b 100644 --- a/utilities/transactions/pessimistic_transaction_db.cc +++ b/utilities/transactions/pessimistic_transaction_db.cc @@ -204,8 +204,7 @@ Status TransactionDB::Open(const Options& options, DBOptions db_options(options); ColumnFamilyOptions cf_options(options); std::vector column_families; - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options)); + column_families.emplace_back(kDefaultColumnFamilyName, cf_options); std::vector handles; Status s = TransactionDB::Open(db_options, txn_db_options, dbname, column_families, &handles, dbptr); diff --git a/utilities/transactions/timestamped_snapshot_test.cc b/utilities/transactions/timestamped_snapshot_test.cc index 9681b0157a..1ca265aa15 100644 --- a/utilities/transactions/timestamped_snapshot_test.cc +++ b/utilities/transactions/timestamped_snapshot_test.cc @@ -27,7 +27,7 @@ class TsCheckingTxnNotifier : public TransactionNotifier { public: explicit TsCheckingTxnNotifier() = default; - ~TsCheckingTxnNotifier() override {} + ~TsCheckingTxnNotifier() override = default; void SnapshotCreated(const Snapshot* new_snapshot) override { assert(new_snapshot); diff --git a/utilities/transactions/transaction_db_mutex_impl.cc b/utilities/transactions/transaction_db_mutex_impl.cc index c893aec69e..9f26b20911 100644 --- a/utilities/transactions/transaction_db_mutex_impl.cc +++ b/utilities/transactions/transaction_db_mutex_impl.cc @@ -17,8 +17,8 @@ namespace ROCKSDB_NAMESPACE { class TransactionDBMutexImpl : public TransactionDBMutex { public: - TransactionDBMutexImpl() {} - ~TransactionDBMutexImpl() override {} + TransactionDBMutexImpl() = default; + ~TransactionDBMutexImpl() override = default; Status Lock() override; @@ -34,8 +34,8 @@ class TransactionDBMutexImpl : public TransactionDBMutex { class TransactionDBCondVarImpl : public TransactionDBCondVar { public: - TransactionDBCondVarImpl() {} - ~TransactionDBCondVarImpl() override {} + TransactionDBCondVarImpl() = default; + ~TransactionDBCondVarImpl() override = default; Status Wait(std::shared_ptr mutex) override; diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index 24cd53f714..ff06558b75 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -2787,13 +2787,10 @@ TEST_P(TransactionTest, ColumnFamiliesTest) { // open DB with three column families std::vector column_families; // have to open default column family - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions())); + column_families.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions()); // open the new column families - column_families.push_back( - ColumnFamilyDescriptor("CFA", ColumnFamilyOptions())); - column_families.push_back( - ColumnFamilyDescriptor("CFB", ColumnFamilyOptions())); + column_families.emplace_back("CFA", ColumnFamilyOptions()); + column_families.emplace_back("CFB", ColumnFamilyOptions()); std::vector handles; @@ -2951,11 +2948,10 @@ TEST_P(TransactionTest, MultiGetBatchedTest) { // open DB with three 
column families std::vector column_families; // have to open default column family - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions())); + column_families.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions()); // open the new column families cf_options.merge_operator = MergeOperators::CreateStringAppendOperator(); - column_families.push_back(ColumnFamilyDescriptor("CF", cf_options)); + column_families.emplace_back("CF", cf_options); std::vector handles; @@ -3045,11 +3041,10 @@ TEST_P(TransactionTest, MultiGetLargeBatchedTest) { // open DB with three column families std::vector column_families; // have to open default column family - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions())); + column_families.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions()); // open the new column families cf_options.merge_operator = MergeOperators::CreateStringAppendOperator(); - column_families.push_back(ColumnFamilyDescriptor("CF", cf_options)); + column_families.emplace_back("CF", cf_options); std::vector handles; @@ -5457,13 +5452,10 @@ TEST_P(TransactionTest, ToggleAutoCompactionTest) { // open DB with three column families std::vector column_families; // have to open default column family - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions())); + column_families.emplace_back(kDefaultColumnFamilyName, ColumnFamilyOptions()); // open the new column families - column_families.push_back( - ColumnFamilyDescriptor("CFA", ColumnFamilyOptions())); - column_families.push_back( - ColumnFamilyDescriptor("CFB", ColumnFamilyOptions())); + column_families.emplace_back("CFA", ColumnFamilyOptions()); + column_families.emplace_back("CFB", ColumnFamilyOptions()); ColumnFamilyOptions* cf_opt_default = &column_families[0].options; ColumnFamilyOptions* cf_opt_cfa = &column_families[1].options; @@ -5860,7 +5852,7 @@ TEST_P(TransactionTest, Optimizations) { // A comparator that uses only the first three bytes class ThreeBytewiseComparator : public Comparator { public: - ThreeBytewiseComparator() {} + ThreeBytewiseComparator() = default; const char* Name() const override { return "test.ThreeBytewiseComparator"; } int Compare(const Slice& a, const Slice& b) const override { Slice na = Slice(a.data(), a.size() < 3 ? 
a.size() : 3); @@ -6481,10 +6473,9 @@ TEST_P(TransactionTest, DoubleCrashInRecovery) { // Recover from corruption std::vector handles; std::vector column_families; - column_families.push_back(ColumnFamilyDescriptor(kDefaultColumnFamilyName, - ColumnFamilyOptions())); - column_families.push_back( - ColumnFamilyDescriptor("two", ColumnFamilyOptions())); + column_families.emplace_back(kDefaultColumnFamilyName, + ColumnFamilyOptions()); + column_families.emplace_back("two", ColumnFamilyOptions()); ASSERT_OK(ReOpenNoDelete(column_families, &handles)); assert(db != nullptr); @@ -6628,7 +6619,7 @@ TEST_P(TransactionTest, WriteWithBulkCreatedColumnFamilies) { std::vector cf_names; std::vector cf_handles; - cf_names.push_back("test_cf"); + cf_names.emplace_back("test_cf"); ASSERT_OK(db->CreateColumnFamilies(cf_options, cf_names, &cf_handles)); ASSERT_OK(db->Put(write_options, cf_handles[0], "foo", "bar")); diff --git a/utilities/transactions/write_prepared_transaction_test.cc b/utilities/transactions/write_prepared_transaction_test.cc index 994d372c79..b9c12bcb80 100644 --- a/utilities/transactions/write_prepared_transaction_test.cc +++ b/utilities/transactions/write_prepared_transaction_test.cc @@ -1197,7 +1197,9 @@ TEST_P(SnapshotConcurrentAccessTest, SnapshotConcurrentAccess) { // create a common_snapshots for each combination. size_t new_comb_cnt = size_t(1) << old_size; for (size_t new_comb = 0; new_comb < new_comb_cnt; new_comb++, loop_id++) { - if (loop_id % split_cnt_ != split_id_) continue; + if (loop_id % split_cnt_ != split_id_) { + continue; + } printf("."); // To signal progress fflush(stdout); std::vector common_snapshots; @@ -1619,7 +1621,7 @@ TEST_P(WritePreparedTransactionTest, SmallestUnCommittedSeq) { } }); ROCKSDB_NAMESPACE::port::Thread read_thread([&]() { - while (1) { + while (true) { MutexLock l(&mutex); if (txns.empty()) { break; @@ -1668,7 +1670,9 @@ TEST_P(SeqAdvanceConcurrentTest, SeqAdvanceConcurrent) { ASSERT_OK(ReOpen()); } - if (n % split_cnt_ != split_id_) continue; + if (n % split_cnt_ != split_id_) { + continue; + } if (n % 1000 == 0) { printf("Tested %" ROCKSDB_PRIszt " cases so far\n", n); } diff --git a/utilities/transactions/write_prepared_txn_db.cc b/utilities/transactions/write_prepared_txn_db.cc index 3641a1e6a8..a68e635f63 100644 --- a/utilities/transactions/write_prepared_txn_db.cc +++ b/utilities/transactions/write_prepared_txn_db.cc @@ -46,7 +46,7 @@ Status WritePreparedTxnDB::Initialize( assert(dbimpl != nullptr); auto rtxns = dbimpl->recovered_transactions(); std::map ordered_seq_cnt; - for (auto rtxn : rtxns) { + for (const auto& rtxn : rtxns) { // There should only one batch for WritePrepared policy. assert(rtxn.second->batches_.size() == 1); const auto& seq = rtxn.second->batches_.begin()->first; @@ -369,7 +369,6 @@ void WritePreparedTxnDB::MultiGet(const ReadOptions& _read_options, statuses[i] = this->GetImpl(read_options, column_families[i], keys[i], &values[i]); } - return; } // Struct to hold ownership of snapshot and read callback for iterator cleanup. 
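The hunks above repeat the same two clang-tidy modernizations over and over: replacing push_back(ColumnFamilyDescriptor(...)) with emplace_back(...) so the element is constructed in place rather than via a temporary, and taking range-for loop variables by const reference so map and vector elements are not copied on every iteration. The following is a minimal standalone sketch of that pattern, not code from the RocksDB tree; the Descriptor type and its fields are hypothetical stand-ins for ColumnFamilyDescriptor.

#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for ColumnFamilyDescriptor: a name plus an options value.
struct Descriptor {
  Descriptor(std::string n, int opts) : name(std::move(n)), options(opts) {}
  std::string name;
  int options;
};

int main() {
  std::vector<Descriptor> descriptors;
  // push_back(Descriptor("default", 0)) would build a temporary and then move it;
  // emplace_back forwards the arguments and constructs the element in place,
  // which is the transformation applied throughout the diff above.
  descriptors.emplace_back("default", 0);
  descriptors.emplace_back("CFA", 1);

  // Iterating by const reference avoids copying each Descriptor per iteration,
  // which is what the "for (const auto& ...)" conversions in the diff are after.
  long total = 0;
  for (const auto& d : descriptors) {
    total += static_cast<long>(d.name.size()) + d.options;
  }
  return total > 0 ? 0 : 1;
}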
diff --git a/utilities/transactions/write_unprepared_txn_db.cc b/utilities/transactions/write_unprepared_txn_db.cc index 304a3c200d..0f52cd2861 100644 --- a/utilities/transactions/write_unprepared_txn_db.cc +++ b/utilities/transactions/write_unprepared_txn_db.cc @@ -250,7 +250,7 @@ Status WriteUnpreparedTxnDB::Initialize( // create 'real' transactions from recovered shell transactions auto rtxns = dbimpl->recovered_transactions(); std::map ordered_seq_cnt; - for (auto rtxn : rtxns) { + for (const auto& rtxn : rtxns) { auto recovered_trx = rtxn.second; assert(recovered_trx); assert(recovered_trx->batches_.size() >= 1); @@ -334,7 +334,7 @@ Status WriteUnpreparedTxnDB::Initialize( Status s; // Rollback unprepared transactions. - for (auto rtxn : rtxns) { + for (const auto& rtxn : rtxns) { auto recovered_trx = rtxn.second; if (recovered_trx->unprepared_) { s = RollbackRecoveredTransaction(recovered_trx); diff --git a/utilities/ttl/db_ttl_impl.cc b/utilities/ttl/db_ttl_impl.cc index f2c02e860a..55354c6cbc 100644 --- a/utilities/ttl/db_ttl_impl.cc +++ b/utilities/ttl/db_ttl_impl.cc @@ -109,8 +109,7 @@ bool TtlMergeOperator::PartialMergeMulti(const Slice& key, return false; } - operands_without_ts.push_back( - Slice(operand.data(), operand.size() - ts_len)); + operands_without_ts.emplace_back(operand.data(), operand.size() - ts_len); } // Apply the user partial-merge operator (store result in *new_value) @@ -339,8 +338,7 @@ Status DBWithTTL::Open(const Options& options, const std::string& dbname, DBOptions db_options(options); ColumnFamilyOptions cf_options(options); std::vector column_families; - column_families.push_back( - ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options)); + column_families.emplace_back(kDefaultColumnFamilyName, cf_options); std::vector handles; Status s = DBWithTTL::Open(db_options, dbname, column_families, &handles, dbptr, {ttl}, read_only); @@ -631,7 +629,9 @@ void DBWithTTLImpl::SetTtl(ColumnFamilyHandle* h, int32_t ttl) { opts = GetOptions(h); filter = std::static_pointer_cast( opts.compaction_filter_factory); - if (!filter) return; + if (!filter) { + return; + } filter->SetTtl(ttl); } diff --git a/utilities/ttl/ttl_test.cc b/utilities/ttl/ttl_test.cc index da1d2d0da9..bab175d6f3 100644 --- a/utilities/ttl/ttl_test.cc +++ b/utilities/ttl/ttl_test.cc @@ -674,10 +674,10 @@ TEST_F(TtlTest, ColumnFamiliesTest) { delete db; std::vector column_families; - column_families.push_back(ColumnFamilyDescriptor( - kDefaultColumnFamilyName, ColumnFamilyOptions(options))); - column_families.push_back(ColumnFamilyDescriptor( - "ttl_column_family", ColumnFamilyOptions(options))); + column_families.emplace_back(kDefaultColumnFamilyName, + ColumnFamilyOptions(options)); + column_families.emplace_back("ttl_column_family", + ColumnFamilyOptions(options)); std::vector handles; diff --git a/utilities/util_merge_operators_test.cc b/utilities/util_merge_operators_test.cc index fed6f1a75a..692f1f0071 100644 --- a/utilities/util_merge_operators_test.cc +++ b/utilities/util_merge_operators_test.cc @@ -11,7 +11,7 @@ namespace ROCKSDB_NAMESPACE { class UtilMergeOperatorTest : public testing::Test { public: - UtilMergeOperatorTest() {} + UtilMergeOperatorTest() = default; std::string FullMergeV2(std::string existing_value, std::vector operands, diff --git a/utilities/write_batch_with_index/write_batch_with_index.cc b/utilities/write_batch_with_index/write_batch_with_index.cc index d5a2f03510..8597589318 100644 --- a/utilities/write_batch_with_index/write_batch_with_index.cc +++ 
b/utilities/write_batch_with_index/write_batch_with_index.cc @@ -283,7 +283,7 @@ WriteBatchWithIndex::WriteBatchWithIndex( : rep(new Rep(default_index_comparator, reserved_bytes, max_bytes, overwrite_key, protection_bytes_per_key)) {} -WriteBatchWithIndex::~WriteBatchWithIndex() {} +WriteBatchWithIndex::~WriteBatchWithIndex() = default; WriteBatchWithIndex::WriteBatchWithIndex(WriteBatchWithIndex&&) = default; diff --git a/utilities/write_batch_with_index/write_batch_with_index_test.cc b/utilities/write_batch_with_index/write_batch_with_index_test.cc index 90438ff2e0..8286b34147 100644 --- a/utilities/write_batch_with_index/write_batch_with_index_test.cc +++ b/utilities/write_batch_with_index/write_batch_with_index_test.cc @@ -267,7 +267,7 @@ void AssertItersEqual(Iterator* iter1, Iterator* iter2) { void AssertIterEqual(WBWIIteratorImpl* wbwii, const std::vector& keys) { wbwii->SeekToFirst(); - for (auto k : keys) { + for (const auto& k : keys) { ASSERT_TRUE(wbwii->Valid()); ASSERT_EQ(wbwii->Entry().key, k); wbwii->NextKey(); @@ -410,7 +410,7 @@ void TestValueAsSecondaryIndexHelper(std::vector entries, } else { iter->Seek(""); } - for (auto pair : data_map) { + for (const auto& pair : data_map) { for (auto v : pair.second) { ASSERT_OK(iter->status()); ASSERT_TRUE(iter->Valid()); @@ -451,7 +451,7 @@ void TestValueAsSecondaryIndexHelper(std::vector entries, } else { iter->Seek(""); } - for (auto pair : index_map) { + for (const auto& pair : index_map) { for (auto v : pair.second) { ASSERT_OK(iter->status()); ASSERT_TRUE(iter->Valid()); @@ -536,7 +536,7 @@ void TestValueAsSecondaryIndexHelper(std::vector entries, { ASSERT_EQ(entries.size(), handler.seen[data.GetID()].size()); size_t i = 0; - for (auto e : handler.seen[data.GetID()]) { + for (const auto& e : handler.seen[data.GetID()]) { auto write_entry = entries[i++]; ASSERT_EQ(e.type, write_entry.type); ASSERT_EQ(e.key, write_entry.key); @@ -550,7 +550,7 @@ void TestValueAsSecondaryIndexHelper(std::vector entries, { ASSERT_EQ(entries.size(), handler.seen[index.GetID()].size()); size_t i = 0; - for (auto e : handler.seen[index.GetID()]) { + for (const auto& e : handler.seen[index.GetID()]) { auto write_entry = entries[i++]; ASSERT_EQ(e.key, write_entry.value); if (write_entry.type != kDeleteRecord) { @@ -823,7 +823,7 @@ TEST_P(WriteBatchWithIndexTest, TestRandomIteraratorWithBase) { KVMap map; KVMap merged_map; - for (auto key : source_strings) { + for (const auto& key : source_strings) { std::string value = key + key; int type = rnd.Uniform(6); switch (type) { @@ -2371,7 +2371,7 @@ TEST_P(WriteBatchWithIndexTest, GetAfterMergeDelete) { TEST_F(WBWIOverwriteTest, TestBadMergeOperator) { class FailingMergeOperator : public MergeOperator { public: - FailingMergeOperator() {} + FailingMergeOperator() = default; bool FullMergeV2(const MergeOperationInput& /*merge_in*/, MergeOperationOutput* /*merge_out*/) const override {