From 64324e329eb0a9b4e77241a425a1615ff524c7f1 Mon Sep 17 00:00:00 2001
From: cngzhnp
Date: Wed, 5 Sep 2018 18:07:53 -0700
Subject: [PATCH] Support pragma once in all header files and cleanup some warnings (#4339)

Summary:
As you know, almost all compilers support the "pragma once" directive instead of include guards. To keep the header files consistent, all of them are edited to use it. Besides this, the patch fixes some warnings about loss of data.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/4339

Differential Revision: D9654990

Pulled By: ajkr

fbshipit-source-id: c2cf3d2d03a599847684bed81378c401920ca848
---
 db/c.cc | 2 +-
 db/compaction_picker.cc | 2 +-
 db/db_impl.cc | 2 +-
 db/external_sst_file_ingestion_job.cc | 2 +-
 db/merge_helper.h | 5 +--
 db/transaction_log_impl.cc | 2 +-
 db/version_set.cc | 15 ++++----
 db/wal_manager.cc | 6 ++--
 env/mock_env.cc | 2 +-
 include/rocksdb/c.h | 5 ---
 include/rocksdb/cleanable.h | 5 +--
 include/rocksdb/compaction_filter.h | 5 +--
 include/rocksdb/comparator.h | 5 +--
 include/rocksdb/db.h | 5 +--
 include/rocksdb/env.h | 5 +--
 include/rocksdb/filter_policy.h | 5 +--
 include/rocksdb/iterator.h | 5 +--
 include/rocksdb/merge_operator.h | 5 +--
 include/rocksdb/metadata.h | 4 +--
 include/rocksdb/options.h | 5 +--
 include/rocksdb/perf_context.h | 5 +--
 include/rocksdb/perf_level.h | 5 +--
 include/rocksdb/slice.h | 7 ++--
 include/rocksdb/slice_transform.h | 5 +--
 include/rocksdb/statistics.h | 5 +--
 include/rocksdb/status.h | 5 +--
 include/rocksdb/table.h | 1 +
 include/rocksdb/transaction_log.h | 5 +--
 include/rocksdb/types.h | 5 +--
 include/rocksdb/universal_compaction.h | 5 +--
 include/rocksdb/utilities/env_librados.h | 4 +--
 include/rocksdb/wal_filter.h | 1 +
 include/rocksdb/write_batch.h | 5 +--
 monitoring/histogram_windowing.cc | 4 +--
 port/dirent.h | 5 +--
 port/likely.h | 5 +--
 port/port_example.h | 5 +--
 port/sys_time.h | 5 +--
 port/util_logger.h | 5 +--
 port/win/env_win.cc | 6 ++--
 port/win/io_win.cc | 24 ++++++------
 port/win/port_win.h | 5 +--
 table/block_based_table_reader.cc | 2 +-
 table/cuckoo_table_builder.cc | 36 +++++++++----------
 table/cuckoo_table_reader.cc | 4 +--
 table/plain_table_reader.cc | 2 +-
 third-party/fbson/FbsonDocument.h | 5 +--
 third-party/fbson/FbsonJsonParser.h | 5 +--
 third-party/fbson/FbsonStream.h | 5 +--
 third-party/fbson/FbsonUtil.h | 5 +--
 third-party/fbson/FbsonWriter.h | 5 +--
 util/channel.h | 4 +--
 util/crc32c_ppc.h | 5 +--
 util/crc32c_ppc_constants.h | 7 ++--
 util/fault_injection_test_env.h | 5 +--
 util/file_reader_writer.cc | 18 +++++-----
 util/ppc-opcode.h | 5 +--
 util/testutil.h | 6 ++--
 util/transaction_test_util.cc | 1 +
 utilities/backupable/backupable_db.cc | 8 ++---
 utilities/blob_db/blob_db_impl.cc | 6 ++--
 .../blob_db/blob_db_impl_filesnapshot.cc | 2 +-
 utilities/blob_db/blob_dump_tool.cc | 8 ++---
 utilities/blob_db/blob_log_reader.cc | 4 +--
 utilities/col_buf_decoder.cc | 4 +--
 utilities/column_aware_encoding_util.cc | 6 ++--
 utilities/document/document_db.cc | 4 +--
 utilities/merge_operators/bytesxor.h | 5 +--
 utilities/persistent_cache/block_cache_tier.h | 4 +--
 utilities/spatialdb/spatial_db.cc | 8 ++---
 .../transactions/transaction_lock_mgr.cc | 4 +--
 .../transactions/write_prepared_txn_db.cc | 6 ++--
 72 files changed, 149 insertions(+), 254 deletions(-)

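For context before the per-file diffs: both idioms this PR touches fit in a short self-contained C++ sketch. Everything below is illustrative only (example.h, ToBufferSize and the cited warning numbers are this note's assumptions, not code from the diff); C4244/C4267 are the usual MSVC "possible loss of data" diagnostics that an explicit static_cast is commonly used to silence.

    // example.h - hypothetical header, not a file in this patch.
    //
    // Include-guard style, removed by this patch:
    //   #ifndef EXAMPLE_H_
    //   #define EXAMPLE_H_
    //   ...declarations...
    //   #endif  // EXAMPLE_H_
    //
    // "pragma once" style, adopted everywhere instead:
    #pragma once

    #include <cstddef>
    #include <cstdint>

    // A 64-bit length implicitly narrowed to size_t warns on 32-bit MSVC
    // ("conversion from 'uint64_t' to 'size_t', possible loss of data").
    // The patch makes each such narrowing explicit:
    inline size_t ToBufferSize(uint64_t file_size) {
      return static_cast<size_t>(file_size);  // intentional, warning-free
    }

Note that #pragma once is non-standard but, as the summary says, supported by effectively every mainstream compiler; the benefit is one line instead of three and no guard macro that must stay unique across the tree.
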
diff --git a/db/c.cc b/db/c.cc
index 4beb719885..f0af307074 100644
--- a/db/c.cc
+++ b/db/c.cc
@@ -2402,7 +2402,7 @@ void rocksdb_options_set_bytes_per_sync(
 void rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t* opt,
                                                        uint64_t v) {
-  opt->rep.writable_file_max_buffer_size = v;
+  opt->rep.writable_file_max_buffer_size = static_cast<size_t>(v);
 }
 
 void rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t* opt,
diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc
index b8f566afef..00e660a07e 100644
--- a/db/compaction_picker.cc
+++ b/db/compaction_picker.cc
@@ -49,7 +49,7 @@ bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
   // increasing.
   size_t new_compact_bytes_per_del_file = 0;
   for (span_len = 1; span_len < level_files.size(); ++span_len) {
-    compact_bytes += level_files[span_len]->fd.file_size;
+    compact_bytes += static_cast<size_t>(level_files[span_len]->fd.file_size);
     new_compact_bytes_per_del_file = compact_bytes / span_len;
     if (level_files[span_len]->being_compacted ||
         new_compact_bytes_per_del_file > compact_bytes_per_del_file) {
diff --git a/db/db_impl.cc b/db/db_impl.cc
index c015c6f9f4..7a9e49cc2e 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -2413,7 +2413,7 @@ Status DBImpl::GetDbIdentity(std::string& identity) const {
   if (!s.ok()) {
     return s;
   }
-  char* buffer = reinterpret_cast<char*>(alloca(file_size));
+  char* buffer = reinterpret_cast<char*>(alloca(static_cast<size_t>(file_size)));
   Slice id;
   s = id_file_reader->Read(static_cast<size_t>(file_size), &id, buffer);
   if (!s.ok()) {
diff --git a/db/external_sst_file_ingestion_job.cc b/db/external_sst_file_ingestion_job.cc
index c31a5d6f52..e142bfbeac 100644
--- a/db/external_sst_file_ingestion_job.cc
+++ b/db/external_sst_file_ingestion_job.cc
@@ -344,7 +344,7 @@ Status ExternalSstFileIngestionJob::GetIngestedFileInfo(
       file_to_ingest->global_seqno_offset = 0;
       return Status::Corruption("Was not able to find file global seqno field");
     }
-    file_to_ingest->global_seqno_offset = offsets_iter->second;
+    file_to_ingest->global_seqno_offset = static_cast<size_t>(offsets_iter->second);
   } else if (file_to_ingest->version == 1) {
     // SST file V1 should not have global seqno field
     assert(seqno_iter == uprops.end());
diff --git a/db/merge_helper.h b/db/merge_helper.h
index abb1e17563..993bbe3e9d 100644
--- a/db/merge_helper.h
+++ b/db/merge_helper.h
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 //
-#ifndef MERGE_HELPER_H
-#define MERGE_HELPER_H
+#pragma once
 
 #include 
 #include 
@@ -193,5 +192,3 @@ class MergeOutputIterator {
 };
 
 }  // namespace rocksdb
-
-#endif
diff --git a/db/transaction_log_impl.cc b/db/transaction_log_impl.cc
index d0b95543d6..36feba9799 100644
--- a/db/transaction_log_impl.cc
+++ b/db/transaction_log_impl.cc
@@ -104,7 +104,7 @@ void TransactionLogIteratorImpl::SeekToStartSequence(
   if (files_->size() <= startFileIndex) {
     return;
   }
-  Status s = OpenLogReader(files_->at(startFileIndex).get());
+  Status s = OpenLogReader(files_->at(static_cast<size_t>(startFileIndex)).get());
   if (!s.ok()) {
     currentStatus_ = s;
     reporter_.Info(currentStatus_.ToString().c_str());
diff --git a/db/version_set.cc b/db/version_set.cc
index 67a0246301..31af554ae0 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -895,13 +895,16 @@ void Version::GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta) {
       assert(!ioptions->cf_paths.empty());
       file_path = ioptions->cf_paths.back().path;
     }
-    files.emplace_back(
-        MakeTableFileName("", file->fd.GetNumber()), file_path,
-        file->fd.GetFileSize(), file->fd.smallest_seqno,
-        file->fd.largest_seqno, file->smallest.user_key().ToString(),
+    files.emplace_back(SstFileMetaData{
+        MakeTableFileName("", file->fd.GetNumber()),
+        file_path,
+        static_cast<size_t>(file->fd.GetFileSize()),
+        file->fd.smallest_seqno,
+        file->fd.largest_seqno,
+        file->smallest.user_key().ToString(),
         file->largest.user_key().ToString(),
         file->stats.num_reads_sampled.load(std::memory_order_relaxed),
-        file->being_compacted);
+        file->being_compacted});
     level_size += file->fd.GetFileSize();
   }
   cf_meta->levels.emplace_back(
@@ -4338,7 +4341,7 @@ void VersionSet::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
     }
     filemetadata.name = MakeTableFileName("", file->fd.GetNumber());
     filemetadata.level = level;
-    filemetadata.size = file->fd.GetFileSize();
+    filemetadata.size = static_cast<size_t>(file->fd.GetFileSize());
     filemetadata.smallestkey = file->smallest.user_key().ToString();
     filemetadata.largestkey = file->largest.user_key().ToString();
     filemetadata.smallest_seqno = file->fd.smallest_seqno;
diff --git a/db/wal_manager.cc b/db/wal_manager.cc
index 3b0ef7ca58..c1ce7d8327 100644
--- a/db/wal_manager.cc
+++ b/db/wal_manager.cc
@@ -237,7 +237,7 @@ void WalManager::PurgeObsoleteWALFiles() {
   }
 
   size_t const files_keep_num =
-      db_options_.wal_size_limit_mb * 1024 * 1024 / log_file_size;
+      static_cast<size_t>(db_options_.wal_size_limit_mb * 1024 * 1024 / log_file_size);
   if (log_files_num <= files_keep_num) {
     return;
   }
@@ -352,7 +352,7 @@ Status WalManager::RetainProbableWalFiles(VectorLogPtr& all_logs,
   // Binary Search. avoid opening all files.
   while (end >= start) {
     int64_t mid = start + (end - start) / 2;  // Avoid overflow.
-    SequenceNumber current_seq_num = all_logs.at(mid)->StartSequence();
+    SequenceNumber current_seq_num = all_logs.at(static_cast<size_t>(mid))->StartSequence();
     if (current_seq_num == target) {
       end = mid;
       break;
@@ -363,7 +363,7 @@ Status WalManager::RetainProbableWalFiles(VectorLogPtr& all_logs,
     }
   }
   // end could be -ve.
-  size_t start_index = std::max(static_cast<int64_t>(0), end);
+  size_t start_index = static_cast<size_t>(std::max(static_cast<int64_t>(0), end));
   // The last wal file is always included
   all_logs.erase(all_logs.begin(), all_logs.begin() + start_index);
   return Status::OK();
diff --git a/env/mock_env.cc b/env/mock_env.cc
index 9b019260dd..12c096cefb 100644
--- a/env/mock_env.cc
+++ b/env/mock_env.cc
@@ -201,7 +201,7 @@ class MockSequentialFile : public SequentialFile {
     if (n > available) {
       n = available;
     }
-    pos_ += n;
+    pos_ += static_cast<size_t>(n);
     return Status::OK();
   }
 
diff --git a/include/rocksdb/c.h b/include/rocksdb/c.h
index d86b9737d9..0552c1e055 100644
--- a/include/rocksdb/c.h
+++ b/include/rocksdb/c.h
@@ -42,9 +42,6 @@
   (5) All of the pointer arguments must be non-NULL.
 */
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_C_H_
-#define STORAGE_ROCKSDB_INCLUDE_C_H_
-
 #pragma once
 
 #ifdef _WIN32
@@ -1678,5 +1675,3 @@ extern ROCKSDB_LIBRARY_API const char* rocksdb_pinnableslice_value(
 #ifdef __cplusplus
 } /* end extern "C" */
 #endif
-
-#endif /* STORAGE_ROCKSDB_INCLUDE_C_H_ */
diff --git a/include/rocksdb/cleanable.h b/include/rocksdb/cleanable.h
index ee4ee44241..6dba8d9531 100644
--- a/include/rocksdb/cleanable.h
+++ b/include/rocksdb/cleanable.h
@@ -16,8 +16,7 @@
 // non-const method, all threads accessing the same Iterator must use
 // external synchronization.
 
-#ifndef INCLUDE_ROCKSDB_CLEANABLE_H_
-#define INCLUDE_ROCKSDB_CLEANABLE_H_
+#pragma once
 
 namespace rocksdb {
 
@@ -78,5 +77,3 @@ class Cleanable {
 };
 
 }  // namespace rocksdb
-
-#endif  // INCLUDE_ROCKSDB_CLEANABLE_H_
diff --git a/include/rocksdb/compaction_filter.h b/include/rocksdb/compaction_filter.h
index 29b7e50771..98f86c2812 100644
--- a/include/rocksdb/compaction_filter.h
+++ b/include/rocksdb/compaction_filter.h
@@ -6,8 +6,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_
-#define STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_
+#pragma once
 
 #include 
 #include 
@@ -206,5 +205,3 @@ class CompactionFilterFactory {
 };
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_
diff --git a/include/rocksdb/comparator.h b/include/rocksdb/comparator.h
index b048ebaf42..12e05ffee7 100644
--- a/include/rocksdb/comparator.h
+++ b/include/rocksdb/comparator.h
@@ -6,8 +6,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_
+#pragma once
 
 #include 
 
@@ -92,5 +91,3 @@ extern const Comparator* BytewiseComparator();
 extern const Comparator* ReverseBytewiseComparator();
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_
diff --git a/include/rocksdb/db.h b/include/rocksdb/db.h
index 6efa980aac..f1430bce83 100644
--- a/include/rocksdb/db.h
+++ b/include/rocksdb/db.h
@@ -6,8 +6,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
-#ifndef STORAGE_ROCKSDB_INCLUDE_DB_H_
-#define STORAGE_ROCKSDB_INCLUDE_DB_H_
+#pragma once
 
 #include 
 #include 
@@ -1221,5 +1220,3 @@ Status RepairDB(const std::string& dbname, const Options& options);
 #endif
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_DB_H_
diff --git a/include/rocksdb/env.h b/include/rocksdb/env.h
index c6ca725c52..99127f7661 100644
--- a/include/rocksdb/env.h
+++ b/include/rocksdb/env.h
@@ -14,8 +14,7 @@
 // All Env implementations are safe for concurrent access from
 // multiple threads without any external synchronization.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_ENV_H_
-#define STORAGE_ROCKSDB_INCLUDE_ENV_H_
+#pragma once
 
 #include 
 #include 
@@ -1267,5 +1266,3 @@ Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname);
 Env* NewTimedEnv(Env* base_env);
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_ENV_H_
diff --git a/include/rocksdb/filter_policy.h b/include/rocksdb/filter_policy.h
index 4706f38c30..4e1dc3bfc9 100644
--- a/include/rocksdb/filter_policy.h
+++ b/include/rocksdb/filter_policy.h
@@ -17,8 +17,7 @@
 // Most people will want to use the builtin bloom filter support (see
 // NewBloomFilterPolicy() below).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
-#define STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
+#pragma once
 
 #include 
 #include 
@@ -149,5 +148,3 @@ class FilterPolicy {
 extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key,
     bool use_block_based_builder = true);
 }
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
diff --git a/include/rocksdb/iterator.h b/include/rocksdb/iterator.h
index 4475eb396f..e99b434a01 100644
--- a/include/rocksdb/iterator.h
+++ b/include/rocksdb/iterator.h
@@ -16,8 +16,7 @@
 // non-const method, all threads accessing the same Iterator must use
 // external synchronization.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
+#pragma once
 
 #include 
 #include "rocksdb/cleanable.h"
@@ -119,5 +118,3 @@ extern Iterator* NewEmptyIterator();
 extern Iterator* NewErrorIterator(const Status& status);
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
diff --git a/include/rocksdb/merge_operator.h b/include/rocksdb/merge_operator.h
index 8406d4a74f..b90f3d72f1 100644
--- a/include/rocksdb/merge_operator.h
+++ b/include/rocksdb/merge_operator.h
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_
+#pragma once
 
 #include 
 #include 
@@ -241,5 +240,3 @@ class AssociativeMergeOperator : public MergeOperator {
 };
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_
diff --git a/include/rocksdb/metadata.h b/include/rocksdb/metadata.h
index f6be889ba9..a9773bf40c 100644
--- a/include/rocksdb/metadata.h
+++ b/include/rocksdb/metadata.h
@@ -65,7 +65,7 @@ struct SstFileMetaData {
         num_reads_sampled(0),
         being_compacted(false) {}
   SstFileMetaData(const std::string& _file_name, const std::string& _path,
-                  uint64_t _size, SequenceNumber _smallest_seqno,
+                  size_t _size, SequenceNumber _smallest_seqno,
                   SequenceNumber _largest_seqno,
                   const std::string& _smallestkey,
                   const std::string& _largestkey, uint64_t _num_reads_sampled,
@@ -81,7 +81,7 @@ struct SstFileMetaData {
         being_compacted(_being_compacted) {}
 
   // File size in bytes.
-  uint64_t size;
+  size_t size;
   // The name of the file.
   std::string name;
   // The full path where the file locates.
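An editorial aside on the metadata.h hunk just above, sketched under assumptions (the struct below is a hypothetical stand-in, not the real SstFileMetaData): the field changes from uint64_t to size_t, which is lossless on 64-bit platforms but would truncate files over 4 GiB on 32-bit targets, and that is why producers of the value now cast explicitly rather than assign silently.

    #include <cstddef>
    #include <cstdint>
    #include <string>

    // Hypothetical mirror of the changed members, for illustration only.
    struct FileMetaSketch {
      size_t size;  // was uint64_t before a change like the hunk above
      std::string name;
    };

    // Producers narrow explicitly, mirroring
    // filemetadata.size = static_cast<size_t>(file->fd.GetFileSize())
    // later in this patch.
    inline FileMetaSketch MakeMeta(const std::string& name, uint64_t bytes) {
      return {static_cast<size_t>(bytes), name};
    }
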
diff --git a/include/rocksdb/options.h b/include/rocksdb/options.h
index 687871dc58..3b851874fb 100644
--- a/include/rocksdb/options.h
+++ b/include/rocksdb/options.h
@@ -6,8 +6,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
-#define STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
+#pragma once
 
 #include 
 #include 
@@ -1289,5 +1288,3 @@ struct IngestExternalFileOptions {
 struct TraceOptions {};
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
diff --git a/include/rocksdb/perf_context.h b/include/rocksdb/perf_context.h
index c3d61a3f62..d3771d3f08 100644
--- a/include/rocksdb/perf_context.h
+++ b/include/rocksdb/perf_context.h
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
-#define STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
+#pragma once
 
 #include 
 #include 
@@ -176,5 +175,3 @@ struct PerfContext {
 PerfContext* get_perf_context();
 
 }
-
-#endif
diff --git a/include/rocksdb/perf_level.h b/include/rocksdb/perf_level.h
index 84a331c355..218c6015f8 100644
--- a/include/rocksdb/perf_level.h
+++ b/include/rocksdb/perf_level.h
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef INCLUDE_ROCKSDB_PERF_LEVEL_H_
-#define INCLUDE_ROCKSDB_PERF_LEVEL_H_
+#pragma once
 
 #include 
 #include 
@@ -29,5 +28,3 @@ void SetPerfLevel(PerfLevel level);
 PerfLevel GetPerfLevel();
 
 }  // namespace rocksdb
-
-#endif  // INCLUDE_ROCKSDB_PERF_LEVEL_H_
diff --git a/include/rocksdb/slice.h b/include/rocksdb/slice.h
index b2db059d3a..9ccbdc51e5 100644
--- a/include/rocksdb/slice.h
+++ b/include/rocksdb/slice.h
@@ -16,8 +16,7 @@
 // non-const method, all threads accessing the same Slice must use
 // external synchronization.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_SLICE_H_
-#define STORAGE_ROCKSDB_INCLUDE_SLICE_H_
+#pragma once
 
 #include 
 #include 
@@ -256,6 +255,4 @@ inline size_t Slice::difference_offset(const Slice& b) const {
   return off;
 }
 
-}  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_SLICE_H_
+}  // namespace rocksdb
\ No newline at end of file
diff --git a/include/rocksdb/slice_transform.h b/include/rocksdb/slice_transform.h
index 5a461b7769..2bbe06153a 100644
--- a/include/rocksdb/slice_transform.h
+++ b/include/rocksdb/slice_transform.h
@@ -12,8 +12,7 @@
 // define InDomain and InRange to determine which slices are in either
 // of these sets respectively.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
-#define STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
+#pragma once
 
 #include 
 
@@ -100,5 +99,3 @@ extern const SliceTransform* NewCappedPrefixTransform(size_t cap_len);
 extern const SliceTransform* NewNoopTransform();
 
 }
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
diff --git a/include/rocksdb/statistics.h b/include/rocksdb/statistics.h
index 30e79b0998..c493a18240 100644
--- a/include/rocksdb/statistics.h
+++ b/include/rocksdb/statistics.h
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
-#ifndef STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
-#define STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
+#pragma once
 
 #include 
 #include 
@@ -673,5 +672,3 @@ class Statistics {
 std::shared_ptr<Statistics> CreateDBStatistics();
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
diff --git a/include/rocksdb/status.h b/include/rocksdb/status.h
index db41c3efdb..40b374ecf6 100644
--- a/include/rocksdb/status.h
+++ b/include/rocksdb/status.h
@@ -14,8 +14,7 @@
 // non-const method, all threads accessing the same Status must use
 // external synchronization.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_STATUS_H_
-#define STORAGE_ROCKSDB_INCLUDE_STATUS_H_
+#pragma once
 
 #include 
 #include "rocksdb/slice.h"
@@ -348,5 +347,3 @@ inline bool Status::operator!=(const Status& rhs) const {
 }
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_STATUS_H_
diff --git a/include/rocksdb/table.h b/include/rocksdb/table.h
index 701fff8b4a..1be2aebe01 100644
--- a/include/rocksdb/table.h
+++ b/include/rocksdb/table.h
@@ -16,6 +16,7 @@
 // https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats#wiki-examples
 
 #pragma once
+
 #include 
 #include 
 #include 
diff --git a/include/rocksdb/transaction_log.h b/include/rocksdb/transaction_log.h
index 7fc46ae264..1d8ef91861 100644
--- a/include/rocksdb/transaction_log.h
+++ b/include/rocksdb/transaction_log.h
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
-#define STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
+#pragma once
 
 #include "rocksdb/status.h"
 #include "rocksdb/types.h"
@@ -121,5 +120,3 @@ class TransactionLogIterator {
   };
 };
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_
diff --git a/include/rocksdb/types.h b/include/rocksdb/types.h
index 3a73b7d96c..0868a74157 100644
--- a/include/rocksdb/types.h
+++ b/include/rocksdb/types.h
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_TYPES_H_
-#define STORAGE_ROCKSDB_INCLUDE_TYPES_H_
+#pragma once
 
 #include 
 #include "rocksdb/slice.h"
@@ -53,5 +52,3 @@ struct FullKey {
 bool ParseFullKey(const Slice& internal_key, FullKey* result);
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_TYPES_H_
diff --git a/include/rocksdb/universal_compaction.h b/include/rocksdb/universal_compaction.h
index ed2220873c..04e2c849f9 100644
--- a/include/rocksdb/universal_compaction.h
+++ b/include/rocksdb/universal_compaction.h
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
-#define STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
+#pragma once
 
 #include 
 #include 
@@ -86,5 +85,3 @@ class CompactionOptionsUniversal {
 };
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H
diff --git a/include/rocksdb/utilities/env_librados.h b/include/rocksdb/utilities/env_librados.h
index 272365f0c6..c872809754 100644
--- a/include/rocksdb/utilities/env_librados.h
+++ b/include/rocksdb/utilities/env_librados.h
@@ -2,8 +2,8 @@
 // This source code is licensed under both the GPLv2 (found in the
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
-#ifndef ROCKSDB_UTILITIES_ENV_LIBRADOS_H
-#define ROCKSDB_UTILITIES_ENV_LIBRADOS_H
+
+#pragma once
 
 #include 
 #include 
diff --git a/include/rocksdb/wal_filter.h b/include/rocksdb/wal_filter.h
index a22dca9237..b8be77b232 100644
--- a/include/rocksdb/wal_filter.h
+++ b/include/rocksdb/wal_filter.h
@@ -4,6 +4,7 @@
 // (found in the LICENSE.Apache file in the root directory).
 
 #pragma once
+
 #include 
 #include 
diff --git a/include/rocksdb/write_batch.h b/include/rocksdb/write_batch.h
index 7db177f866..c40c448fdd 100644
--- a/include/rocksdb/write_batch.h
+++ b/include/rocksdb/write_batch.h
@@ -22,8 +22,7 @@
 // non-const method, all threads accessing the same WriteBatch must use
 // external synchronization.
 
-#ifndef STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
-#define STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
+#pragma once
 
 #include 
 #include 
@@ -367,5 +366,3 @@ class WriteBatch : public WriteBatchBase {
 };
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
diff --git a/monitoring/histogram_windowing.cc b/monitoring/histogram_windowing.cc
index 5c49fcd16b..ecd6f090a5 100644
--- a/monitoring/histogram_windowing.cc
+++ b/monitoring/histogram_windowing.cc
@@ -17,7 +17,7 @@ namespace rocksdb {
 
 HistogramWindowingImpl::HistogramWindowingImpl() {
   env_ = Env::Default();
-  window_stats_.reset(new HistogramStat[num_windows_]);
+  window_stats_.reset(new HistogramStat[static_cast<size_t>(num_windows_)]);
   Clear();
 }
 
@@ -29,7 +29,7 @@ HistogramWindowingImpl::HistogramWindowingImpl(
     micros_per_window_(micros_per_window),
     min_num_per_window_(min_num_per_window) {
   env_ = Env::Default();
-  window_stats_.reset(new HistogramStat[num_windows_]);
+  window_stats_.reset(new HistogramStat[static_cast<size_t>(num_windows_)]);
   Clear();
 }
diff --git a/port/dirent.h b/port/dirent.h
index 7bcc356978..cb1adbe129 100644
--- a/port/dirent.h
+++ b/port/dirent.h
@@ -9,8 +9,7 @@
 //
 // See port_example.h for documentation for the following types/functions.
 
-#ifndef STORAGE_LEVELDB_PORT_DIRENT_H_
-#define STORAGE_LEVELDB_PORT_DIRENT_H_
+#pragma once
 
 #ifdef ROCKSDB_PLATFORM_POSIX
 #include 
@@ -43,5 +42,3 @@ using port::closedir;
 }  // namespace rocksdb
 
 #endif  // OS_WIN
-
-#endif  // STORAGE_LEVELDB_PORT_DIRENT_H_
diff --git a/port/likely.h b/port/likely.h
index e5ef786f2e..397d757133 100644
--- a/port/likely.h
+++ b/port/likely.h
@@ -7,8 +7,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef PORT_LIKELY_H_
-#define PORT_LIKELY_H_
+#pragma once
 
 #if defined(__GNUC__) && __GNUC__ >= 4
 #define LIKELY(x) (__builtin_expect((x), 1))
@@ -17,5 +16,3 @@
 #define LIKELY(x) (x)
 #define UNLIKELY(x) (x)
 #endif
-
-#endif  // PORT_LIKELY_H_
diff --git a/port/port_example.h b/port/port_example.h
index 05b3240669..a94dc93c26 100644
--- a/port/port_example.h
+++ b/port/port_example.h
@@ -12,8 +12,7 @@
 // specific port_.h file. Use this file as a reference for
 // how to port this package to a new platform.
-#ifndef STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
-#define STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
+#pragma once
 
 namespace rocksdb {
 namespace port {
@@ -100,5 +99,3 @@ extern bool Snappy_Uncompress(const char* input_data, size_t input_length,
 
 }  // namespace port
 }  // namespace rocksdb
-
-#endif  // STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
diff --git a/port/sys_time.h b/port/sys_time.h
index 1e2ad0f5d6..2f83da8b3e 100644
--- a/port/sys_time.h
+++ b/port/sys_time.h
@@ -10,8 +10,7 @@
 // This file is a portable substitute for sys/time.h which does not exist on
 // Windows
 
-#ifndef STORAGE_LEVELDB_PORT_SYS_TIME_H_
-#define STORAGE_LEVELDB_PORT_SYS_TIME_H_
+#pragma once
 
 #if defined(OS_WIN) && defined(_MSC_VER)
 
@@ -44,5 +43,3 @@ using port::localtime_r;
 #include 
 #include 
 #endif
-
-#endif  // STORAGE_LEVELDB_PORT_SYS_TIME_H_
diff --git a/port/util_logger.h b/port/util_logger.h
index a8255ad6d6..ba424705b2 100644
--- a/port/util_logger.h
+++ b/port/util_logger.h
@@ -7,8 +7,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.
 
-#ifndef STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_
-#define STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_
+#pragma once
 
 // Include the appropriate platform specific file below. If you are
 // porting to a new platform, see "port_example.h" for documentation
@@ -19,5 +18,3 @@
 #elif defined(OS_WIN)
 #include "port/win/win_logger.h"
 #endif
-
-#endif  // STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_
diff --git a/port/win/env_win.cc b/port/win/env_win.cc
index 814207e3c3..34b3de788e 100644
--- a/port/win/env_win.cc
+++ b/port/win/env_win.cc
@@ -235,7 +235,7 @@ Status WinEnvIO::NewRandomAccessFile(const std::string& fname,
         MapViewOfFileEx(hMap, FILE_MAP_READ,
                         0,  // High DWORD of access start
                         0,  // Low DWORD
-                        fileSize,
+                        static_cast<SIZE_T>(fileSize),
                         NULL);  // Let the OS choose the mapping
 
     if (!mapped_region) {
@@ -246,7 +246,7 @@ Status WinEnvIO::NewRandomAccessFile(const std::string& fname,
     }
 
     result->reset(new WinMmapReadableFile(fname, hFile, hMap, mapped_region,
-                                          fileSize));
+                                          static_cast<size_t>(fileSize)));
 
     mapGuard.release();
     fileGuard.release();
@@ -448,7 +448,7 @@ Status WinEnvIO::NewMemoryMappedFileBuffer(const std::string & fname,
   void* base = MapViewOfFileEx(hMap, FILE_MAP_WRITE,
                                0,  // High DWORD of access start
                                0,  // Low DWORD
-                               fileSize,
+                               static_cast<SIZE_T>(fileSize),
                                NULL);  // Let the OS choose the mapping
 
   if (!base) {
diff --git a/port/win/io_win.cc b/port/win/io_win.cc
index 66fe8a11e6..128cb60b9f 100644
--- a/port/win/io_win.cc
+++ b/port/win/io_win.cc
@@ -260,7 +260,7 @@ Status WinMmapReadableFile::Read(uint64_t offset, size_t n, Slice* result,
     *result = Slice();
     return IOError(filename_, EINVAL);
   } else if (offset + n > length_) {
-    n = length_ - offset;
+    n = length_ - static_cast<size_t>(offset);
   }
   *result = Slice(reinterpret_cast<const char*>(mapped_region_) + offset, n);
@@ -317,7 +317,7 @@ Status WinMmapFile::MapNewRegion() {
 
   assert(mapped_begin_ == nullptr);
 
-  size_t minDiskSize = file_offset_ + view_size_;
+  size_t minDiskSize = static_cast<size_t>(file_offset_) + view_size_;
 
   if (minDiskSize > reserved_size_) {
     status = Allocate(file_offset_, view_size_);
@@ -579,7 +579,7 @@ Status WinMmapFile::Allocate(uint64_t offset, uint64_t len) {
   // Make sure that we reserve an aligned amount of space
   // since the reservation block size is driven outside so we want
   // to check if we are ok with reservation here
-  size_t spaceToReserve = Roundup(offset + len, view_size_);
+  size_t spaceToReserve = Roundup(static_cast<size_t>(offset + len), view_size_);
   // Nothing to do
   if (spaceToReserve <= reserved_size_) {
     return status;
   }
@@ -656,14 +656,14 @@ Status WinSequentialFile::PositionedRead(uint64_t offset, size_t n, Slice* resul
     return Status::NotSupported("This function is only used for direct_io");
   }
 
-  if (!IsSectorAligned(offset) ||
+  if (!IsSectorAligned(static_cast<size_t>(offset)) ||
       !IsSectorAligned(n)) {
     return Status::InvalidArgument(
       "WinSequentialFile::PositionedRead: offset is not properly aligned");
   }
 
   size_t bytes_read = 0;  // out param
-  s = PositionedReadInternal(scratch, n, offset, bytes_read);
+  s = PositionedReadInternal(scratch, static_cast<size_t>(n), offset, bytes_read);
   *result = Slice(scratch, bytes_read);
   return s;
 }
@@ -721,7 +721,7 @@ Status WinRandomAccessImpl::ReadImpl(uint64_t offset, size_t n, Slice* result,
 
   // Check buffer alignment
   if (file_base_->use_direct_io()) {
-    if (!IsSectorAligned(offset) ||
+    if (!IsSectorAligned(static_cast<size_t>(offset)) ||
         !IsAligned(alignment_, scratch)) {
       return Status::InvalidArgument(
         "WinRandomAccessImpl::ReadImpl: offset or scratch is not properly aligned");
@@ -818,7 +818,7 @@ Status WinWritableImpl::AppendImpl(const Slice& data) {
     // to the end of the file
     assert(IsSectorAligned(next_write_offset_));
     if (!IsSectorAligned(data.size()) ||
-        !IsAligned(GetAlignement(), data.data())) {
+        !IsAligned(static_cast<size_t>(GetAlignement()), data.data())) {
       s = Status::InvalidArgument(
         "WriteData must be page aligned, size must be sector aligned");
     } else {
@@ -857,9 +857,9 @@ inline Status WinWritableImpl::PositionedAppendImpl(const Slice& data,
                                                     uint64_t offset) {
 
   if(file_data_->use_direct_io()) {
-    if (!IsSectorAligned(offset) ||
+    if (!IsSectorAligned(static_cast<size_t>(offset)) ||
         !IsSectorAligned(data.size()) ||
-        !IsAligned(GetAlignement(), data.data())) {
+        !IsAligned(static_cast<size_t>(GetAlignement()), data.data())) {
       return Status::InvalidArgument(
         "Data and offset must be page aligned, size must be sector aligned");
     }
@@ -944,7 +944,7 @@ Status WinWritableImpl::AllocateImpl(uint64_t offset, uint64_t len) {
   // Make sure that we reserve an aligned amount of space
   // since the reservation block size is driven outside so we want
   // to check if we are ok with reservation here
-  size_t spaceToReserve = Roundup(offset + len, alignment_);
+  size_t spaceToReserve = Roundup(static_cast<size_t>(offset + len), static_cast<size_t>(alignment_));
   // Nothing to do
   if (spaceToReserve <= reservedsize_) {
     return status;
@@ -977,7 +977,7 @@ WinWritableFile::~WinWritableFile() {
 
 bool WinWritableFile::use_direct_io() const { return WinFileData::use_direct_io(); }
 
 size_t WinWritableFile::GetRequiredBufferAlignment() const {
-  return GetAlignement();
+  return static_cast<size_t>(GetAlignement());
 }
 
 Status WinWritableFile::Append(const Slice& data) {
@@ -1037,7 +1037,7 @@ WinRandomRWFile::WinRandomRWFile(const std::string& fname, HANDLE hFile,
 
 bool WinRandomRWFile::use_direct_io() const { return WinFileData::use_direct_io(); }
 
 size_t WinRandomRWFile::GetRequiredBufferAlignment() const {
-  return GetAlignement();
+  return static_cast<size_t>(GetAlignement());
 }
 
 Status WinRandomRWFile::Write(uint64_t offset, const Slice & data) {
diff --git a/port/win/port_win.h b/port/win/port_win.h
index 8a6c5aad17..41ccea68d4 100644
--- a/port/win/port_win.h
+++ b/port/win/port_win.h
@@ -9,8 +9,7 @@
 //
 // See port_example.h for documentation for the following types/functions.
-#ifndef STORAGE_LEVELDB_PORT_PORT_WIN_H_
-#define STORAGE_LEVELDB_PORT_PORT_WIN_H_
+#pragma once
 
 // Always want minimum headers
 #ifndef WIN32_LEAN_AND_MEAN
@@ -341,5 +340,3 @@ using port::pthread_getspecific;
 using port::truncate;
 
 }  // namespace rocksdb
-
-#endif  // STORAGE_LEVELDB_PORT_PORT_WIN_H_
diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc
index db89789957..dc8a46cdc1 100644
--- a/table/block_based_table_reader.cc
+++ b/table/block_based_table_reader.cc
@@ -1094,7 +1094,7 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
     if (tail_prefetch_stats != nullptr) {
       assert(prefetch_buffer->min_offset_read() < file_size);
       tail_prefetch_stats->RecordEffectiveSize(
-          file_size - prefetch_buffer->min_offset_read());
+          static_cast<size_t>(file_size) - prefetch_buffer->min_offset_read());
     }
     *table_reader = std::move(new_table);
   }
diff --git a/table/cuckoo_table_builder.cc b/table/cuckoo_table_builder.cc
index 0da4d84ddc..7d9842a95f 100644
--- a/table/cuckoo_table_builder.cc
+++ b/table/cuckoo_table_builder.cc
@@ -164,9 +164,9 @@ bool CuckooTableBuilder::IsDeletedKey(uint64_t idx) const {
 Slice CuckooTableBuilder::GetKey(uint64_t idx) const {
   assert(closed_);
   if (IsDeletedKey(idx)) {
-    return Slice(&deleted_keys_[(idx - num_values_) * key_size_], key_size_);
+    return Slice(&deleted_keys_[static_cast<size_t>((idx - num_values_) * key_size_)], static_cast<size_t>(key_size_));
   }
-  return Slice(&kvs_[idx * (key_size_ + value_size_)], key_size_);
+  return Slice(&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_))], static_cast<size_t>(key_size_));
 }
 
 Slice CuckooTableBuilder::GetUserKey(uint64_t idx) const {
@@ -177,14 +177,14 @@ Slice CuckooTableBuilder::GetUserKey(uint64_t idx) const {
 Slice CuckooTableBuilder::GetValue(uint64_t idx) const {
   assert(closed_);
   if (IsDeletedKey(idx)) {
-    static std::string empty_value(value_size_, 'a');
+    static std::string empty_value(static_cast<size_t>(value_size_), 'a');
     return Slice(empty_value);
   }
-  return Slice(&kvs_[idx * (key_size_ + value_size_) + key_size_], value_size_);
+  return Slice(&kvs_[static_cast<size_t>(idx * (key_size_ + value_size_) + key_size_)], static_cast<size_t>(value_size_));
 }
 
 Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
-  buckets->resize(hash_table_size_ + cuckoo_block_size_ - 1);
+  buckets->resize(static_cast<size_t>(hash_table_size_ + cuckoo_block_size_ - 1));
   uint32_t make_space_for_key_call_id = 0;
   for (uint32_t vector_idx = 0; vector_idx < num_entries_; vector_idx++) {
     uint64_t bucket_id = 0;
@@ -200,13 +200,13 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
       // stop searching and proceed for next hash function.
       for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
            ++block_idx, ++hash_val) {
-        if ((*buckets)[hash_val].vector_idx == kMaxVectorIdx) {
+        if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx == kMaxVectorIdx) {
           bucket_id = hash_val;
           bucket_found = true;
           break;
         } else {
           if (ucomp_->Compare(user_key,
-                  GetUserKey((*buckets)[hash_val].vector_idx)) == 0) {
+                  GetUserKey((*buckets)[static_cast<size_t>(hash_val)].vector_idx)) == 0) {
             return Status::NotSupported("Same key is being inserted again.");
           }
           hash_vals.push_back(hash_val);
@@ -226,7 +226,7 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
       ++num_hash_func_;
       for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
            ++block_idx, ++hash_val) {
-        if ((*buckets)[hash_val].vector_idx == kMaxVectorIdx) {
+        if ((*buckets)[static_cast<size_t>(hash_val)].vector_idx == kMaxVectorIdx) {
          bucket_found = true;
          bucket_id = hash_val;
          break;
@@ -235,7 +235,7 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
        }
      }
    }
-    (*buckets)[bucket_id].vector_idx = vector_idx;
+    (*buckets)[static_cast<size_t>(bucket_id)].vector_idx = vector_idx;
   }
   return Status::OK();
 }
@@ -295,7 +295,7 @@ Status CuckooTableBuilder::Finish() {
       reinterpret_cast<const char*>(&value_size_), sizeof(value_size_));
 
   uint64_t bucket_size = key_size_ + value_size_;
-  unused_bucket.resize(bucket_size, 'a');
+  unused_bucket.resize(static_cast<size_t>(bucket_size), 'a');
   // Write the table.
   uint32_t num_added = 0;
   for (auto& bucket : buckets) {
@@ -320,7 +320,7 @@ Status CuckooTableBuilder::Finish() {
   uint64_t offset = buckets.size() * bucket_size;
   properties_.data_size = offset;
-  unused_bucket.resize(properties_.fixed_key_len);
+  unused_bucket.resize(static_cast<size_t>(properties_.fixed_key_len));
   properties_.user_collected_properties[
     CuckooTablePropertyNames::kEmptyKey] = unused_bucket;
   properties_.user_collected_properties[
@@ -456,7 +456,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(
   // no. of times this will be called is <= max_num_hash_func_ + num_entries_.
   for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
     uint64_t bid = hash_vals[hash_cnt];
-    (*buckets)[bid].make_space_for_key_call_id = make_space_for_key_call_id;
+    (*buckets)[static_cast<size_t>(bid)].make_space_for_key_call_id = make_space_for_key_call_id;
     tree.push_back(CuckooNode(bid, 0, 0));
   }
   bool null_found = false;
@@ -467,7 +467,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(
     if (curr_depth >= max_search_depth_) {
       break;
     }
-    CuckooBucket& curr_bucket = (*buckets)[curr_node.bucket_id];
+    CuckooBucket& curr_bucket = (*buckets)[static_cast<size_t>(curr_node.bucket_id)];
     for (uint32_t hash_cnt = 0;
          hash_cnt < num_hash_func_ && !null_found; ++hash_cnt) {
       uint64_t child_bucket_id = CuckooHash(GetUserKey(curr_bucket.vector_idx),
@@ -476,15 +476,15 @@ bool CuckooTableBuilder::MakeSpaceForKey(
       // Iterate inside Cuckoo Block.
       for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
            ++block_idx, ++child_bucket_id) {
-        if ((*buckets)[child_bucket_id].make_space_for_key_call_id ==
+        if ((*buckets)[static_cast<size_t>(child_bucket_id)].make_space_for_key_call_id ==
             make_space_for_key_call_id) {
           continue;
         }
-        (*buckets)[child_bucket_id].make_space_for_key_call_id =
+        (*buckets)[static_cast<size_t>(child_bucket_id)].make_space_for_key_call_id =
             make_space_for_key_call_id;
         tree.push_back(CuckooNode(child_bucket_id, curr_depth + 1,
               curr_pos));
-        if ((*buckets)[child_bucket_id].vector_idx == kMaxVectorIdx) {
+        if ((*buckets)[static_cast<size_t>(child_bucket_id)].vector_idx == kMaxVectorIdx) {
           null_found = true;
           break;
         }
@@ -502,8 +502,8 @@ bool CuckooTableBuilder::MakeSpaceForKey(
     uint32_t bucket_to_replace_pos = static_cast<uint32_t>(tree.size()) - 1;
     while (bucket_to_replace_pos >= num_hash_func_) {
       CuckooNode& curr_node = tree[bucket_to_replace_pos];
-      (*buckets)[curr_node.bucket_id] =
-          (*buckets)[tree[curr_node.parent_pos].bucket_id];
+      (*buckets)[static_cast<size_t>(curr_node.bucket_id)] =
+          (*buckets)[static_cast<size_t>(tree[curr_node.parent_pos].bucket_id)];
       bucket_to_replace_pos = curr_node.parent_pos;
     }
     *bucket_id = tree[bucket_to_replace_pos].bucket_id;
diff --git a/table/cuckoo_table_reader.cc b/table/cuckoo_table_reader.cc
index fb14b17595..be7b1ffa9d 100644
--- a/table/cuckoo_table_reader.cc
+++ b/table/cuckoo_table_reader.cc
@@ -136,7 +136,7 @@ CuckooTableReader::CuckooTableReader(
   cuckoo_block_size_ = *reinterpret_cast<const uint32_t*>(
       cuckoo_block_size->second.data());
   cuckoo_block_bytes_minus_one_ = cuckoo_block_size_ * bucket_length_ - 1;
-  status_ = file_->Read(0, file_size, &file_data_, nullptr);
+  status_ = file_->Read(0, static_cast<size_t>(file_size), &file_data_, nullptr);
 }
 
 Status CuckooTableReader::Get(const ReadOptions& /*readOptions*/,
@@ -268,7 +268,7 @@ void CuckooTableIterator::InitIfNeeded() {
   if (initialized_) {
     return;
   }
-  sorted_bucket_ids_.reserve(reader_->GetTableProperties()->num_entries);
+  sorted_bucket_ids_.reserve(static_cast<size_t>(reader_->GetTableProperties()->num_entries));
   uint64_t num_buckets = reader_->table_size_ + reader_->cuckoo_block_size_ - 1;
   assert(num_buckets < kInvalidIndex);
   const char* bucket = reader_->file_data_.data();
diff --git a/table/plain_table_reader.cc b/table/plain_table_reader.cc
index 1143eb1cd2..4f6c99f94a 100644
--- a/table/plain_table_reader.cc
+++ b/table/plain_table_reader.cc
@@ -277,7 +277,7 @@ void PlainTableReader::FillBloom(vector<uint32_t>* prefix_hashes) {
 
 Status PlainTableReader::MmapDataIfNeeded() {
   if (file_info_.is_mmap_mode) {
     // Get mmapped memory.
-    return file_info_.file->Read(0, file_size_, &file_info_.file_data, nullptr);
+    return file_info_.file->Read(0, static_cast<size_t>(file_size_), &file_info_.file_data, nullptr);
   }
   return Status::OK();
 }
diff --git a/third-party/fbson/FbsonDocument.h b/third-party/fbson/FbsonDocument.h
index 11b6fe28e4..c69fcb45f5 100644
--- a/third-party/fbson/FbsonDocument.h
+++ b/third-party/fbson/FbsonDocument.h
@@ -55,8 +55,7 @@
  * @author Tian Xia
  */
 
-#ifndef FBSON_FBSONDOCUMENT_H
-#define FBSON_FBSONDOCUMENT_H
+#pragma once
 
 #include 
 #include 
@@ -889,5 +888,3 @@ inline FbsonValue* FbsonValue::findPath(const char* key_path,
 #pragma pack(pop)
 
 }  // namespace fbson
-
-#endif  // FBSON_FBSONDOCUMENT_H
diff --git a/third-party/fbson/FbsonJsonParser.h b/third-party/fbson/FbsonJsonParser.h
index 47bff77fe5..f4b8ed2515 100644
--- a/third-party/fbson/FbsonJsonParser.h
+++ b/third-party/fbson/FbsonJsonParser.h
@@ -47,8 +47,7 @@
  * @author Tian Xia
 */
 
-#ifndef FBSON_FBSONPARSER_H
-#define FBSON_FBSONPARSER_H
+#pragma once
 
 #include 
 #include 
@@ -741,5 +740,3 @@ class FbsonJsonParserT {
 typedef FbsonJsonParserT<FbsonOutStream> FbsonJsonParser;
 
 }  // namespace fbson
-
-#endif  // FBSON_FBSONPARSER_H
diff --git a/third-party/fbson/FbsonStream.h b/third-party/fbson/FbsonStream.h
index 12723ea30e..b20cb1c3bf 100644
--- a/third-party/fbson/FbsonStream.h
+++ b/third-party/fbson/FbsonStream.h
@@ -18,8 +18,7 @@
  * @author Tian Xia
 */
 
-#ifndef FBSON_FBSONSTREAM_H
-#define FBSON_FBSONSTREAM_H
+#pragma once
 
 #ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
@@ -178,5 +177,3 @@ class FbsonOutStream : public std::ostream {
 };
 
 }  // namespace fbson
-
-#endif  // FBSON_FBSONSTREAM_H
diff --git a/third-party/fbson/FbsonUtil.h b/third-party/fbson/FbsonUtil.h
index 2b6d6f5c97..70ac6cb2ba 100644
--- a/third-party/fbson/FbsonUtil.h
+++ b/third-party/fbson/FbsonUtil.h
@@ -9,8 +9,7 @@
  * @author Tian Xia
*/
 
-#ifndef FBSON_FBSONUTIL_H
-#define FBSON_FBSONUTIL_H
+#pragma once
 
 #include 
 #include "FbsonDocument.h"
@@ -159,5 +158,3 @@ class FbsonToJson {
 };
 
 }  // namespace fbson
-
-#endif  // FBSON_FBSONUTIL_H
diff --git a/third-party/fbson/FbsonWriter.h b/third-party/fbson/FbsonWriter.h
index 2b94ef0a01..e5010fade2 100644
--- a/third-party/fbson/FbsonWriter.h
+++ b/third-party/fbson/FbsonWriter.h
@@ -25,8 +25,7 @@
  * @author Tian Xia
*/
 
-#ifndef FBSON_FBSONWRITER_H
-#define FBSON_FBSONWRITER_H
+#pragma once
 
 #include 
 #include "FbsonDocument.h"
@@ -433,5 +432,3 @@ class FbsonWriterT {
 typedef FbsonWriterT<FbsonOutStream> FbsonWriter;
 
 }  // namespace fbson
-
-#endif  // FBSON_FBSONWRITER_H
diff --git a/util/channel.h b/util/channel.h
index 1b030192cf..0225482c00 100644
--- a/util/channel.h
+++ b/util/channel.h
@@ -3,13 +3,13 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
+#pragma once
+
 #include 
 #include 
 #include 
 #include 
 
-#pragma once
-
 namespace rocksdb {
 
 template <class T>
diff --git a/util/crc32c_ppc.h b/util/crc32c_ppc.h
index b52ad9b2a4..3bcaecfe82 100644
--- a/util/crc32c_ppc.h
+++ b/util/crc32c_ppc.h
@@ -6,8 +6,7 @@
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
-#ifndef CRC32C_PPC_H
-#define CRC32C_PPC_H
+#pragma once
 
 #ifdef __cplusplus
 extern "C" {
@@ -19,5 +18,3 @@ extern uint32_t crc32c_ppc(uint32_t crc, unsigned char const *buffer,
 #ifdef __cplusplus
 }
 #endif
-
-#endif
diff --git a/util/crc32c_ppc_constants.h b/util/crc32c_ppc_constants.h
index 1206a957a7..57d6630322 100644
--- a/util/crc32c_ppc_constants.h
+++ b/util/crc32c_ppc_constants.h
@@ -5,8 +5,9 @@
 // of patent rights can be found in the PATENTS file in the same directory.
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
-#ifndef CRC32C_PPC_CONST_H
-#define CRC32C_PPC_CONST_H
+
+#pragma once
+
 #define CRC 0x1edc6f41
 #define REFLECT
 #define CRC_XOR
@@ -898,5 +899,3 @@ static const unsigned int crc_table[] = {
 /* 33 bit reflected Barrett constant n */
 .octa 0x00000000000000000000000105ec76f1
 #endif
-
-#endif
diff --git a/util/fault_injection_test_env.h b/util/fault_injection_test_env.h
index 1a62c619e8..f2d4aaad2b 100644
--- a/util/fault_injection_test_env.h
+++ b/util/fault_injection_test_env.h
@@ -11,8 +11,7 @@
 // the last "sync". It then checks for data loss errors by purposely dropping
 // file data (or entire files) not protected by a "sync".
 
-#ifndef UTIL_FAULT_INJECTION_TEST_ENV_H_
-#define UTIL_FAULT_INJECTION_TEST_ENV_H_
+#pragma once
 
 #include 
 #include 
@@ -171,5 +170,3 @@ class FaultInjectionTestEnv : public EnvWrapper {
 };
 
 }  // namespace rocksdb
-
-#endif  // UTIL_FAULT_INJECTION_TEST_ENV_H_
diff --git a/util/file_reader_writer.cc b/util/file_reader_writer.cc
index f0bbb52829..cd09f71225 100644
--- a/util/file_reader_writer.cc
+++ b/util/file_reader_writer.cc
@@ -62,7 +62,7 @@ Status SequentialFileReader::Read(size_t n, Slice* result, char* scratch) {
 Status SequentialFileReader::Skip(uint64_t n) {
 #ifndef ROCKSDB_LITE
   if (use_direct_io()) {
-    offset_ += n;
+    offset_ += static_cast<size_t>(n);
     return Status::OK();
   }
 #endif  // !ROCKSDB_LITE
@@ -81,9 +81,9 @@ Status RandomAccessFileReader::Read(uint64_t offset, size_t n, Slice* result,
   if (use_direct_io()) {
 #ifndef ROCKSDB_LITE
     size_t alignment = file_->GetRequiredBufferAlignment();
-    size_t aligned_offset = TruncateToPageBoundary(alignment, offset);
-    size_t offset_advance = offset - aligned_offset;
-    size_t read_size = Roundup(offset + n, alignment) - aligned_offset;
+    size_t aligned_offset = TruncateToPageBoundary(alignment, static_cast<size_t>(offset));
+    size_t offset_advance = static_cast<size_t>(offset) - aligned_offset;
+    size_t read_size = Roundup(static_cast<size_t>(offset + n), alignment) - aligned_offset;
     AlignedBuffer buf;
     buf.Alignment(alignment);
     buf.AllocateNewBuffer(read_size);
@@ -673,7 +673,7 @@ Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
     // Only a few requested bytes are in the buffer. memmove those chunk of
     // bytes to the beginning, and memcpy them back into the new buffer if a
     // new buffer is created.
-    chunk_offset_in_buffer = Rounddown(offset - buffer_offset_, alignment);
+    chunk_offset_in_buffer = Rounddown(static_cast<size_t>(offset - buffer_offset_), alignment);
    chunk_len = buffer_.CurrentSize() - chunk_offset_in_buffer;
    assert(chunk_offset_in_buffer % alignment == 0);
    assert(chunk_len % alignment == 0);
@@ -694,11 +694,11 @@ Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
     buffer_.Alignment(alignment);
     buffer_.AllocateNewBuffer(static_cast<size_t>(roundup_len),
                               copy_data_to_new_buffer, chunk_offset_in_buffer,
-                              chunk_len);
+                              static_cast<size_t>(chunk_len));
   } else if (chunk_len > 0) {
     // New buffer not needed. But memmove bytes from tail to the beginning
     // since chunk_len is greater than 0.
-    buffer_.RefitTail(chunk_offset_in_buffer, chunk_len);
+    buffer_.RefitTail(static_cast<size_t>(chunk_offset_in_buffer), static_cast<size_t>(chunk_len));
   }
 
   Slice result;
@@ -707,7 +707,7 @@ Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
                     buffer_.BufferStart() + chunk_len);
   if (s.ok()) {
     buffer_offset_ = rounddown_offset;
-    buffer_.Size(chunk_len + result.size());
+    buffer_.Size(static_cast<size_t>(chunk_len) + result.size());
   }
   return s;
 }
@@ -715,7 +715,7 @@ bool FilePrefetchBuffer::TryReadFromCache(uint64_t offset, size_t n,
                                           Slice* result) {
   if (track_min_offset_ && offset < min_offset_read_) {
-    min_offset_read_ = offset;
+    min_offset_read_ = static_cast<size_t>(offset);
   }
   if (!enable_ || offset < buffer_offset_) {
     return false;
diff --git a/util/ppc-opcode.h b/util/ppc-opcode.h
index eeb0ae08ff..554fa50a89 100644
--- a/util/ppc-opcode.h
+++ b/util/ppc-opcode.h
@@ -6,8 +6,7 @@
 // This source code is also licensed under the GPLv2 license found in the
 // COPYING file in the root directory of this source tree.
 
-#ifndef __OPCODES_H
-#define __OPCODES_H
+#pragma once
 
 #define __PPC_RA(a) (((a)&0x1f) << 16)
 #define __PPC_RB(b) (((b)&0x1f) << 11)
@@ -27,5 +26,3 @@
 #define VPMSUMD(t, a, b) .long PPC_INST_VPMSUMD | VSX_XX3((t), a, b)
 #define MFVRD(a, t) .long PPC_INST_MFVSRD | VSX_XX1((t) + 32, a, 0)
 #define MTVRD(t, a) .long PPC_INST_MTVSRD | VSX_XX1((t) + 32, a, 0)
-
-#endif
diff --git a/util/testutil.h b/util/testutil.h
index 7b43da6c73..c16c0cbe50 100644
--- a/util/testutil.h
+++ b/util/testutil.h
@@ -250,7 +250,7 @@ class RandomRWStringSink : public RandomRWFile {
 
   Status Write(uint64_t offset, const Slice& data) override {
     if (offset + data.size() > ss_->contents_.size()) {
-      ss_->contents_.resize(offset + data.size(), '\0');
+      ss_->contents_.resize(static_cast<size_t>(offset) + data.size(), '\0');
     }
 
     char* pos = const_cast<char*>(ss_->contents_.data() + offset);
@@ -518,7 +518,7 @@ class StringEnv : public EnvWrapper {
             "Attemp to read when it already reached eof.");
       }
       // TODO(yhchiang): Currently doesn't handle the overflow case.
-      offset_ += n;
+      offset_ += static_cast<size_t>(n);
       return Status::OK();
     }
 
@@ -532,7 +532,7 @@ class StringEnv : public EnvWrapper {
     explicit StringSink(std::string* contents)
         : WritableFile(), contents_(contents) {}
     virtual Status Truncate(uint64_t size) override {
-      contents_->resize(size);
+      contents_->resize(static_cast<size_t>(size));
       return Status::OK();
     }
     virtual Status Close() override { return Status::OK(); }
diff --git a/util/transaction_test_util.cc b/util/transaction_test_util.cc
index 19d27b1a14..6333918917 100644
--- a/util/transaction_test_util.cc
+++ b/util/transaction_test_util.cc
@@ -137,6 +137,7 @@ bool RandomTransactionInserter::DoInsert(DB* db, Transaction* txn,
   std::iota(set_vec.begin(), set_vec.end(), static_cast<uint16_t>(0));
   std::random_shuffle(set_vec.begin(), set_vec.end(),
                       [&](uint64_t r) { return rand_->Uniform(r); });
+
   // For each set, pick a key at random and increment it
   for (uint16_t set_i : set_vec) {
     uint64_t int_value = 0;
diff --git a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc
index 73ca39b707..3a880cbe9d 100644
--- a/utilities/backupable/backupable_db.cc
+++ b/utilities/backupable/backupable_db.cc
@@ -781,7 +781,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
 
   RateLimiter* rate_limiter = options_.backup_rate_limiter.get();
   if (rate_limiter) {
-    copy_file_buffer_size_ = rate_limiter->GetSingleBurstBytes();
+    copy_file_buffer_size_ = static_cast<size_t>(rate_limiter->GetSingleBurstBytes());
   }
 
   // A set into which we will insert the dst_paths that are calculated for live
@@ -1078,7 +1078,7 @@ Status BackupEngineImpl::RestoreDBFromBackup(
 
   RateLimiter* rate_limiter = options_.restore_rate_limiter.get();
   if (rate_limiter) {
-    copy_file_buffer_size_ = rate_limiter->GetSingleBurstBytes();
+    copy_file_buffer_size_ = static_cast<size_t>(rate_limiter->GetSingleBurstBytes());
   }
   Status s;
   std::vector<RestoreAfterCopyOrCreateWorkItem> restore_items_to_finish;
@@ -1231,7 +1231,7 @@ Status BackupEngineImpl::CopyOrCreateFile(
     if (!src.empty()) {
       size_t buffer_to_read = (copy_file_buffer_size_ < size_limit)
                                   ? copy_file_buffer_size_
-                                  : size_limit;
+                                  : static_cast<size_t>(size_limit);
       s = src_reader->Read(buffer_to_read, &data, buf.get());
       processed_buffer_size += buffer_to_read;
     } else {
@@ -1426,7 +1426,7 @@ Status BackupEngineImpl::CalculateChecksum(const std::string& src, Env* src_env,
       return Status::Incomplete("Backup stopped");
     }
     size_t buffer_to_read = (copy_file_buffer_size_ < size_limit) ?
-      copy_file_buffer_size_ : size_limit;
+      copy_file_buffer_size_ : static_cast<size_t>(size_limit);
     s = src_reader->Read(buffer_to_read, &data, buf.get());
 
     if (!s.ok()) {
diff --git a/utilities/blob_db/blob_db_impl.cc b/utilities/blob_db/blob_db_impl.cc
index 068f8cfe8e..867b83a219 100644
--- a/utilities/blob_db/blob_db_impl.cc
+++ b/utilities/blob_db/blob_db_impl.cc
@@ -1008,14 +1008,14 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry,
 
   uint64_t record_size = sizeof(uint32_t) + key.size() + blob_index.size();
 
   // Allocate the buffer. This is safe in C++11
-  std::string buffer_str(record_size, static_cast<char>(0));
+  std::string buffer_str(static_cast<size_t>(record_size), static_cast<char>(0));
   char* buffer = &buffer_str[0];
 
   // A partial blob record contain checksum, key and value.
   Slice blob_record;
 
   {
     StopWatch read_sw(env_, statistics_, BLOB_DB_BLOB_FILE_READ_MICROS);
-    s = reader->Read(record_offset, record_size, &blob_record, buffer);
+    s = reader->Read(record_offset, static_cast<size_t>(record_size), &blob_record, buffer);
     RecordTick(statistics_, BLOB_DB_BLOB_FILE_BYTES_READ, blob_record.size());
   }
   if (!s.ok()) {
@@ -1041,7 +1041,7 @@ Status BlobDBImpl::GetBlobValue(const Slice& key, const Slice& index_entry,
   }
   Slice crc_slice(blob_record.data(), sizeof(uint32_t));
   Slice blob_value(blob_record.data() + sizeof(uint32_t) + key.size(),
-                   blob_index.size());
+                   static_cast<size_t>(blob_index.size()));
   uint32_t crc_exp;
   if (!GetFixed32(&crc_slice, &crc_exp)) {
     ROCKS_LOG_DEBUG(db_options_.info_log,
diff --git a/utilities/blob_db/blob_db_impl_filesnapshot.cc b/utilities/blob_db/blob_db_impl_filesnapshot.cc
index c89efb0d6a..8effe88c0a 100644
--- a/utilities/blob_db/blob_db_impl_filesnapshot.cc
+++ b/utilities/blob_db/blob_db_impl_filesnapshot.cc
@@ -93,7 +93,7 @@ void BlobDBImpl::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
   for (auto bfile_pair : blob_files_) {
     auto blob_file = bfile_pair.second;
     LiveFileMetaData filemetadata;
-    filemetadata.size = blob_file->GetFileSize();
+    filemetadata.size = static_cast<size_t>(blob_file->GetFileSize());
     // Path should be relative to db_name, but begin with slash.
     filemetadata.name =
         BlobFileName("", bdb_options_.blob_dir, blob_file->BlobFileNumber());
diff --git a/utilities/blob_db/blob_dump_tool.cc b/utilities/blob_db/blob_dump_tool.cc
index b50b76052e..e5ab07c47d 100644
--- a/utilities/blob_db/blob_dump_tool.cc
+++ b/utilities/blob_db/blob_dump_tool.cc
@@ -199,7 +199,7 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob,
     fprintf(stdout, "  expiration : %" PRIu64 "\n", record.expiration);
   }
   *offset += BlobLogRecord::kHeaderSize;
-  s = Read(*offset, key_size + value_size, &slice);
+  s = Read(*offset, static_cast<size_t>(key_size + value_size), &slice);
   if (!s.ok()) {
     return s;
   }
@@ -212,7 +212,7 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob,
     UncompressionInfo info(context, CompressionDict::GetEmptyDict(),
                            compression);
     s = UncompressBlockContentsForCompressionType(
-        info, slice.data() + key_size, value_size, &contents,
+        info, slice.data() + key_size, static_cast<size_t>(value_size), &contents,
         2 /*compress_format_version*/, ImmutableCFOptions(Options()));
     if (!s.ok()) {
       return s;
@@ -221,10 +221,10 @@ Status BlobDumpTool::DumpRecord(DisplayType show_key, DisplayType show_blob,
   }
   if (show_key != DisplayType::kNone) {
     fprintf(stdout, "  key        : ");
-    DumpSlice(Slice(slice.data(), key_size), show_key);
+    DumpSlice(Slice(slice.data(), static_cast<size_t>(key_size)), show_key);
     if (show_blob != DisplayType::kNone) {
       fprintf(stdout, "  blob       : ");
-      DumpSlice(Slice(slice.data() + key_size, value_size), show_blob);
+      DumpSlice(Slice(slice.data() + static_cast<size_t>(key_size), static_cast<size_t>(value_size)), show_blob);
     }
     if (show_uncompressed_blob != DisplayType::kNone) {
       fprintf(stdout, "  raw blob   : ");
diff --git a/utilities/blob_db/blob_log_reader.cc b/utilities/blob_db/blob_log_reader.cc
index c2ba214571..4996d987b6 100644
--- a/utilities/blob_db/blob_log_reader.cc
+++ b/utilities/blob_db/blob_log_reader.cc
@@ -26,8 +26,8 @@ Reader::Reader(unique_ptr<RandomAccessFileReader>&& file_reader, Env* env,
 
 Status Reader::ReadSlice(uint64_t size, Slice* slice, std::string* buf) {
   StopWatch read_sw(env_, statistics_, BLOB_DB_BLOB_FILE_READ_MICROS);
-  buf->reserve(size);
-  Status s = file_->Read(next_byte_, size, slice, &(*buf)[0]);
+  buf->reserve(static_cast<size_t>(size));
+  Status s = file_->Read(next_byte_, static_cast<size_t>(size), slice, &(*buf)[0]);
   next_byte_ += size;
   if (!s.ok()) {
     return s;
diff --git a/utilities/col_buf_decoder.cc b/utilities/col_buf_decoder.cc
index 3fb31794f7..8f9fa74abd 100644
--- a/utilities/col_buf_decoder.cc
+++ b/utilities/col_buf_decoder.cc
@@ -147,7 +147,7 @@ size_t FixedLengthColBufDecoder::Decode(const char* src, char** dest) {
       col_compression_type_ == kColDict) {
     uint64_t dict_val = read_val;
     assert(dict_val < dict_vec_.size());
-    write_val = dict_vec_[dict_val];
+    write_val = dict_vec_[static_cast<size_t>(dict_val)];
   }
 
   // dest->append(reinterpret_cast<char*>(&write_val), size_);
@@ -222,7 +222,7 @@ size_t VariableChunkColBufDecoder::Decode(const char* src, char** dest) {
     uint64_t dict_val;
     ReadVarint64(&src, &dict_val);
     assert(dict_val < dict_vec_.size());
-    chunk_buf = dict_vec_[dict_val];
+    chunk_buf = dict_vec_[static_cast<size_t>(dict_val)];
   } else {
     memcpy(&chunk_buf, src, chunk_size);
     src += chunk_size;
diff --git a/utilities/column_aware_encoding_util.cc b/utilities/column_aware_encoding_util.cc
index 1ce6ebd0ba..fca4fea9ea 100644
--- a/utilities/column_aware_encoding_util.cc
+++ b/utilities/column_aware_encoding_util.cc
@@ -101,7 +101,7 @@ void ColumnAwareEncodingReader::DecodeBlocks(
 
   size_t num_kv_pairs;
   const char* header_content_ptr = content_ptr;
-  num_kv_pairs = DecodeFixed64(header_content_ptr);
+  num_kv_pairs = static_cast<size_t>(DecodeFixed64(header_content_ptr));
   header_content_ptr += sizeof(size_t);
 
   size_t num_key_columns = key_col_bufs.size();
@@ -119,7 +119,7 @@ void ColumnAwareEncodingReader::DecodeBlocks(
     key_content_ptr[i] = col_content_ptr;
     key_content_ptr[i] += key_col_bufs[i]->Init(key_content_ptr[i]);
     size_t offset;
-    offset = DecodeFixed64(header_content_ptr);
+    offset = static_cast<size_t>(DecodeFixed64(header_content_ptr));
     header_content_ptr += sizeof(size_t);
     col_content_ptr += offset;
   }
@@ -127,7 +127,7 @@ void ColumnAwareEncodingReader::DecodeBlocks(
     value_content_ptr[i] = col_content_ptr;
     value_content_ptr[i] += value_col_bufs[i]->Init(value_content_ptr[i]);
     size_t offset;
-    offset = DecodeFixed64(header_content_ptr);
+    offset = static_cast<size_t>(DecodeFixed64(header_content_ptr));
     header_content_ptr += sizeof(size_t);
     col_content_ptr += offset;
   }
diff --git a/utilities/document/document_db.cc b/utilities/document/document_db.cc
index 939327ed1b..279e4cb4da 100644
--- a/utilities/document/document_db.cc
+++ b/utilities/document/document_db.cc
@@ -1155,10 +1155,10 @@ Options GetRocksDBOptionsFromOptions(const DocumentDBOptions& options) {
   Options rocksdb_options;
   rocksdb_options.max_background_compactions = options.background_threads - 1;
   rocksdb_options.max_background_flushes = 1;
-  rocksdb_options.write_buffer_size = options.memtable_size;
+  rocksdb_options.write_buffer_size = static_cast<size_t>(options.memtable_size);
   rocksdb_options.max_write_buffer_number = 6;
   BlockBasedTableOptions table_options;
-  table_options.block_cache = NewLRUCache(options.cache_size);
+  table_options.block_cache = NewLRUCache(static_cast<size_t>(options.cache_size));
   rocksdb_options.table_factory.reset(NewBlockBasedTableFactory(table_options));
   return rocksdb_options;
 }
diff --git a/utilities/merge_operators/bytesxor.h b/utilities/merge_operators/bytesxor.h
index 1562ca852a..762e372445 100644
--- a/utilities/merge_operators/bytesxor.h
+++ b/utilities/merge_operators/bytesxor.h
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
diff --git a/utilities/merge_operators/bytesxor.h b/utilities/merge_operators/bytesxor.h
index 1562ca852a..762e372445 100644
--- a/utilities/merge_operators/bytesxor.h
+++ b/utilities/merge_operators/bytesxor.h
@@ -3,8 +3,7 @@
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
-#ifndef UTILITIES_MERGE_OPERATORS_BYTESXOR_H_
-#define UTILITIES_MERGE_OPERATORS_BYTESXOR_H_
+#pragma once
 
 #include <algorithm>
 #include <string>
@@ -38,5 +37,3 @@ class BytesXOROperator : public AssociativeMergeOperator {
 };
 
 }  // namespace rocksdb
-
-#endif  // UTILITIES_MERGE_OPERATORS_BYTESXOR_H_
diff --git a/utilities/persistent_cache/block_cache_tier.h b/utilities/persistent_cache/block_cache_tier.h
index dcb97258b4..2b2c0ef4f1 100644
--- a/utilities/persistent_cache/block_cache_tier.h
+++ b/utilities/persistent_cache/block_cache_tier.h
@@ -44,9 +44,9 @@ class BlockCacheTier : public PersistentCacheTier {
  public:
   explicit BlockCacheTier(const PersistentCacheConfig& opt)
       : opt_(opt),
-        insert_ops_(opt_.max_write_pipeline_backlog_size),
+        insert_ops_(static_cast<size_t>(opt_.max_write_pipeline_backlog_size)),
         buffer_allocator_(opt.write_buffer_size, opt.write_buffer_count()),
-        writer_(this, opt_.writer_qdepth, opt_.writer_dispatch_size) {
+        writer_(this, opt_.writer_qdepth, static_cast<size_t>(opt_.writer_dispatch_size)) {
     Info(opt_.log, "Initializing allocator. size=%d B count=%d",
          opt_.write_buffer_size, opt.write_buffer_count());
   }
diff --git a/utilities/spatialdb/spatial_db.cc b/utilities/spatialdb/spatial_db.cc
index a9b990ee20..627eb9de6e 100644
--- a/utilities/spatialdb/spatial_db.cc
+++ b/utilities/spatialdb/spatial_db.cc
@@ -354,8 +354,8 @@ class SpatialIndexCursor : public Cursor {
       : value_getter_(value_getter), valid_(true) {
     // calculate quad keys we'll need to query
     std::vector<std::string> quad_keys;
-    quad_keys.reserve((tile_bbox.max_x - tile_bbox.min_x + 1) *
-                      (tile_bbox.max_y - tile_bbox.min_y + 1));
+    quad_keys.reserve(static_cast<size_t>((tile_bbox.max_x - tile_bbox.min_x + 1) *
+                                          (tile_bbox.max_y - tile_bbox.min_y + 1)));
     for (uint64_t x = tile_bbox.min_x; x <= tile_bbox.max_x; ++x) {
       for (uint64_t y = tile_bbox.min_y; y <= tile_bbox.max_y; ++y) {
         quad_keys.push_back(GetQuadKeyFromTile(x, y, tile_bits));
@@ -791,7 +791,7 @@ Status SpatialDB::Create(
   db_options.create_missing_column_families = true;
   db_options.error_if_exists = true;
 
-  auto block_cache = NewLRUCache(options.cache_size);
+  auto block_cache = NewLRUCache(static_cast<size_t>(options.cache_size));
   ColumnFamilyOptions column_family_options =
       GetColumnFamilyOptions(options, block_cache);
@@ -832,7 +832,7 @@ Status SpatialDB::Create(
 Status SpatialDB::Open(const SpatialDBOptions& options, const std::string& name,
                        SpatialDB** db, bool read_only) {
   DBOptions db_options = GetDBOptionsFromSpatialDBOptions(options);
-  auto block_cache = NewLRUCache(options.cache_size);
+  auto block_cache = NewLRUCache(static_cast<size_t>(options.cache_size));
   ColumnFamilyOptions column_family_options =
       GetColumnFamilyOptions(options, block_cache);
diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc
index 19321de485..d285fd30ed 100644
--- a/utilities/transactions/transaction_lock_mgr.cc
+++ b/utilities/transactions/transaction_lock_mgr.cc
@@ -446,8 +446,8 @@ bool TransactionLockMgr::IncrementWaiters(
     const autovector<TransactionID>& wait_ids, const std::string& key,
     const uint32_t& cf_id, const bool& exclusive, Env* const env) {
   auto id = txn->GetID();
-  std::vector<int> queue_parents(txn->GetDeadlockDetectDepth());
-  std::vector<TransactionID> queue_values(txn->GetDeadlockDetectDepth());
+  std::vector<int> queue_parents(static_cast<size_t>(txn->GetDeadlockDetectDepth()));
+  std::vector<TransactionID> queue_values(static_cast<size_t>(txn->GetDeadlockDetectDepth()));
   std::lock_guard<std::mutex> lock(wait_txn_map_mutex_);
   assert(!wait_txn_map_.Contains(id));
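The bytesxor.h hunk above is the mechanical transformation this patch applies to every header: the guard macro pair collapses into a single directive and the trailing #endif disappears, so there is no guard name to keep unique and in sync. Schematically (SOME_HEADER_H_ is an illustrative guard name):

    // Before: classic include guard
    #ifndef SOME_HEADER_H_
    #define SOME_HEADER_H_
    // ... declarations ...
    #endif  // SOME_HEADER_H_

    // After: one directive at the top of the file
    #pragma once
    // ... declarations ...

#pragma once is not in the C++ standard, but every compiler rocksdb builds with supports it.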
diff --git a/utilities/transactions/write_prepared_txn_db.cc b/utilities/transactions/write_prepared_txn_db.cc
index 34ed04aa67..2d8e4fcee1 100644
--- a/utilities/transactions/write_prepared_txn_db.cc
+++ b/utilities/transactions/write_prepared_txn_db.cc
@@ -460,7 +460,7 @@ void WritePreparedTxnDB::RemovePrepared(const uint64_t prepare_seq,
 bool WritePreparedTxnDB::GetCommitEntry(const uint64_t indexed_seq,
                                         CommitEntry64b* entry_64b,
                                         CommitEntry* entry) const {
-  *entry_64b = commit_cache_[indexed_seq].load(std::memory_order_acquire);
+  *entry_64b = commit_cache_[static_cast<size_t>(indexed_seq)].load(std::memory_order_acquire);
   bool valid = entry_64b->Parse(indexed_seq, entry, FORMAT);
   return valid;
 }
@@ -469,7 +469,7 @@ bool WritePreparedTxnDB::AddCommitEntry(const uint64_t indexed_seq,
                                         const CommitEntry& new_entry,
                                         CommitEntry* evicted_entry) {
   CommitEntry64b new_entry_64b(new_entry, FORMAT);
-  CommitEntry64b evicted_entry_64b = commit_cache_[indexed_seq].exchange(
+  CommitEntry64b evicted_entry_64b = commit_cache_[static_cast<size_t>(indexed_seq)].exchange(
       new_entry_64b, std::memory_order_acq_rel);
   bool valid = evicted_entry_64b.Parse(indexed_seq, evicted_entry, FORMAT);
   return valid;
@@ -478,7 +478,7 @@ bool WritePreparedTxnDB::AddCommitEntry(const uint64_t indexed_seq,
 bool WritePreparedTxnDB::ExchangeCommitEntry(const uint64_t indexed_seq,
                                              CommitEntry64b& expected_entry_64b,
                                              const CommitEntry& new_entry) {
-  auto& atomic_entry = commit_cache_[indexed_seq];
+  auto& atomic_entry = commit_cache_[static_cast<size_t>(indexed_seq)];
   CommitEntry64b new_entry_64b(new_entry, FORMAT);
   bool succ = atomic_entry.compare_exchange_strong(
       expected_entry_64b, new_entry_64b, std::memory_order_acq_rel,