From 49628c9a83d337ea7b01f7ad3bdffa340e013913 Mon Sep 17 00:00:00 2001
From: sdong
Date: Thu, 5 May 2022 13:08:21 -0700
Subject: [PATCH] Use std::numeric_limits<> (#9954)

Summary:
We still don't fully use std::numeric_limits, relying instead on hand-written constants in port::, originally added to support VS 2013. Since we now only support VS 2017 and up, that workaround is no longer needed. The code comment claims that MinGW still needs the constants, but we have no CI running MinGW, so that claim is hard to validate; given that we now require C++17, it is hard to imagine a MinGW toolchain that can build RocksDB yet lacks std::numeric_limits<>.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9954

Test Plan: See CI runs.

Reviewed By: riversand963

Differential Revision: D36173954

fbshipit-source-id: a35a73af17cdcae20e258cdef57fcf29a50b49e0
---
 db/column_family.cc                           |  7 +--
 db/compaction/compaction.cc                   |  4 +-
 db/compaction/compaction_job.cc               |  5 +-
 db/compaction/compaction_picker.cc            |  4 +-
 db/compaction/compaction_picker_level.cc      |  2 +-
 db/compaction/compaction_picker_test.cc       |  4 +-
 db/compaction/compaction_picker_universal.cc  |  2 +-
 db/db_compaction_test.cc                      |  3 +-
 db/db_filesnapshot.cc                         |  2 +-
 db/db_flush_test.cc                           |  2 +-
 db/db_impl/db_impl.cc                         |  2 +-
 db/db_impl/db_impl.h                          |  2 +-
 db/db_impl/db_impl_compaction_flush.cc        |  9 ++--
 db/db_impl/db_impl_debug.cc                   |  9 ++--
 db/db_impl/db_impl_files.cc                   |  4 +-
 db/db_impl/db_impl_secondary.cc               |  7 +--
 db/db_kv_checksum_test.cc                     |  4 +-
 db/db_memtable_test.cc                        |  2 +-
 db/db_range_del_test.cc                       |  3 +-
 db/db_wal_test.cc                             |  2 +-
 db/dbformat.h                                 |  3 +-
 db/external_sst_file_test.cc                  |  2 +-
 db/file_indexer.h                             |  5 +-
 db/flush_job_test.cc                          | 46 ++++++++++---------
 db/memtable.cc                                |  4 +-
 db/memtable_list_test.cc                      | 38 +++++++++------
 db/version_builder.cc                         |  2 +-
 db/version_set.cc                             |  2 +-
 db/version_set.h                              |  6 +--
 db/wal_edit.h                                 |  3 +-
 db/write_batch.cc                             | 12 ++---
 db_stress_tool/db_stress_test_base.cc         |  9 ++--
 file/file_prefetch_buffer.h                   |  2 +-
 monitoring/histogram.cc                       |  3 +-
 monitoring/persistent_stats_history.cc        |  4 +-
 options/cf_options.cc                         |  7 +--
 options/options_test.cc                       |  5 +-
 port/port_posix.h                             | 10 ----
 port/win/port_win.h                           | 26 -----------
 table/block_based/block.cc                    |  2 +-
 .../block_based/block_based_table_factory.cc  |  2 +-
 table/cuckoo/cuckoo_table_builder.h           |  2 +-
 table/meta_blocks.cc                          |  4 +-
 table/table_properties.cc                     |  2 +-
 .../block_cache_trace_analyzer.cc             | 20 ++++----
 .../block_cache_trace_analyzer_test.cc        |  6 +--
 tools/db_bench_tool.cc                        |  3 +-
 tools/sst_dump_tool.cc                        |  4 +-
 tools/trace_analyzer_tool.cc                  |  2 +-
 trace_replay/block_cache_tracer.h             |  2 +-
 util/heap.h                                   |  8 ++--
 util/rate_limiter.cc                          | 17 ++++---
 util/rate_limiter_test.cc                     |  2 +-
 util/string_util.cc                           |  3 +-
 utilities/backup/backup_engine.cc             | 10 ++--
 utilities/backup/backup_engine_test.cc        |  3 +-
 .../write_batch_with_index_internal.h         |  4 +-
 57 files changed, 182 insertions(+), 182 deletions(-)

diff --git a/db/column_family.cc b/db/column_family.cc
index 4c38546eb7..3eb4aab8e2 100644
--- a/db/column_family.cc
+++ b/db/column_family.cc
@@ -501,7 +501,8 @@ std::vector<DbPath> ColumnFamilyData::GetDbPaths() const {
   return paths;
 }
 
-const uint32_t ColumnFamilyData::kDummyColumnFamilyDataId = port::kMaxUint32;
+const uint32_t ColumnFamilyData::kDummyColumnFamilyDataId =
+    std::numeric_limits<uint32_t>::max();
 
 ColumnFamilyData::ColumnFamilyData(
     uint32_t id, const std::string& name, Version* _dummy_versions,
@@ -826,8 +827,8 @@ int GetL0ThresholdSpeedupCompaction(int level0_file_num_compaction_trigger,
   // condition.
   // Or twice as compaction trigger, if it is smaller.
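  // Clamp the 64-bit result to the int range before the narrowing cast
  // below, so the conversion cannot overflow.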
  int64_t res = std::min(twice_level0_trigger, one_fourth_trigger_slowdown);
-  if (res >= port::kMaxInt32) {
-    return port::kMaxInt32;
+  if (res >= std::numeric_limits<int>::max()) {
+    return std::numeric_limits<int>::max();
   } else {
     // res fits in int
     return static_cast<int>(res);
diff --git a/db/compaction/compaction.cc b/db/compaction/compaction.cc
index edda2fe71b..4d52454436 100644
--- a/db/compaction/compaction.cc
+++ b/db/compaction/compaction.cc
@@ -518,7 +518,7 @@ uint64_t Compaction::OutputFilePreallocationSize() const {
     }
   }
 
-  if (max_output_file_size_ != port::kMaxUint64 &&
+  if (max_output_file_size_ != std::numeric_limits<uint64_t>::max() &&
       (immutable_options_.compaction_style == kCompactionStyleLevel ||
        output_level() > 0)) {
     preallocation_size = std::min(max_output_file_size_, preallocation_size);
@@ -616,7 +616,7 @@ bool Compaction::DoesInputReferenceBlobFiles() const {
 
 uint64_t Compaction::MinInputFileOldestAncesterTime(
     const InternalKey* start, const InternalKey* end) const {
-  uint64_t min_oldest_ancester_time = port::kMaxUint64;
+  uint64_t min_oldest_ancester_time = std::numeric_limits<uint64_t>::max();
   const InternalKeyComparator& icmp =
       column_family_data()->internal_comparator();
   for (const auto& level_files : inputs_) {
diff --git a/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc
index 4cd8ee802d..e839146477 100644
--- a/db/compaction/compaction_job.cc
+++ b/db/compaction/compaction_job.cc
@@ -1974,7 +1974,8 @@ Status CompactionJob::FinishCompactionOutputFile(
       refined_oldest_ancester_time =
           sub_compact->compaction->MinInputFileOldestAncesterTime(
               &(meta->smallest), &(meta->largest));
-      if (refined_oldest_ancester_time != port::kMaxUint64) {
+      if (refined_oldest_ancester_time !=
+          std::numeric_limits<uint64_t>::max()) {
         meta->oldest_ancester_time = refined_oldest_ancester_time;
       }
     }
@@ -2264,7 +2265,7 @@ Status CompactionJob::OpenCompactionOutputFile(
       sub_compact->compaction->MinInputFileOldestAncesterTime(
           (sub_compact->start != nullptr) ? &tmp_start : nullptr,
           (sub_compact->end != nullptr) ? &tmp_end : nullptr);
-  if (oldest_ancester_time == port::kMaxUint64) {
+  if (oldest_ancester_time == std::numeric_limits<uint64_t>::max()) {
     oldest_ancester_time = current_time;
   }
 
diff --git a/db/compaction/compaction_picker.cc b/db/compaction/compaction_picker.cc
index ae86d78940..e7e7e125b3 100644
--- a/db/compaction/compaction_picker.cc
+++ b/db/compaction/compaction_picker.cc
@@ -65,7 +65,7 @@ bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
   size_t compact_bytes = static_cast<size_t>(level_files[start]->fd.file_size);
   uint64_t compensated_compact_bytes =
       level_files[start]->compensated_file_size;
-  size_t compact_bytes_per_del_file = port::kMaxSizet;
+  size_t compact_bytes_per_del_file = std::numeric_limits<size_t>::max();
   // Compaction range will be [start, limit).
   size_t limit;
   // Pull in files until the amount of compaction work per deleted file begins
@@ -717,7 +717,7 @@ Compaction* CompactionPicker::CompactRange(
       // files that are created during the current compaction.
       if (compact_range_options.bottommost_level_compaction ==
               BottommostLevelCompaction::kForceOptimized &&
-          max_file_num_to_ignore != port::kMaxUint64) {
+          max_file_num_to_ignore != std::numeric_limits<uint64_t>::max()) {
         assert(input_level == output_level);
         // inputs_shrunk holds a continuous subset of input files which were all
         // created before the current manual compaction
diff --git a/db/compaction/compaction_picker_level.cc b/db/compaction/compaction_picker_level.cc
index 31b76fb69c..87d1e8e63d 100644
--- a/db/compaction/compaction_picker_level.cc
+++ b/db/compaction/compaction_picker_level.cc
@@ -504,7 +504,7 @@ bool LevelCompactionBuilder::PickIntraL0Compaction() {
     return false;
   }
   return FindIntraL0Compaction(level_files, kMinFilesForIntraL0Compaction,
-                               port::kMaxUint64,
+                               std::numeric_limits<uint64_t>::max(),
                                mutable_cf_options_.max_compaction_bytes,
                                &start_level_inputs_, earliest_mem_seqno_);
 }
diff --git a/db/compaction/compaction_picker_test.cc b/db/compaction/compaction_picker_test.cc
index 695d730fb3..03eccca8e1 100644
--- a/db/compaction/compaction_picker_test.cc
+++ b/db/compaction/compaction_picker_test.cc
@@ -2653,8 +2653,8 @@ TEST_F(CompactionPickerTest, UniversalMarkedManualCompaction) {
       universal_compaction_picker.CompactRange(
           cf_name_, mutable_cf_options_, mutable_db_options_, vstorage_.get(),
           ColumnFamilyData::kCompactAllLevels, 6, CompactRangeOptions(),
-          nullptr, nullptr, &manual_end, &manual_conflict, port::kMaxUint64,
-          ""));
+          nullptr, nullptr, &manual_end, &manual_conflict,
+          std::numeric_limits<uint64_t>::max(), ""));
 
   ASSERT_TRUE(compaction);
 
diff --git a/db/compaction/compaction_picker_universal.cc b/db/compaction/compaction_picker_universal.cc
index 5ca2c41ead..c5c043c0fd 100644
--- a/db/compaction/compaction_picker_universal.cc
+++ b/db/compaction/compaction_picker_universal.cc
@@ -1371,7 +1371,7 @@ Compaction* UniversalCompactionBuilder::PickPeriodicCompaction() {
 
 uint64_t UniversalCompactionBuilder::GetMaxOverlappingBytes() const {
   if (!mutable_cf_options_.compaction_options_universal.incremental) {
-    return port::kMaxUint64;
+    return std::numeric_limits<uint64_t>::max();
   } else {
     // Try to align cutting boundary with files at the next level if the
     // file isn't end up with 1/2 of target size, or it would overlap
diff --git a/db/db_compaction_test.cc b/db/db_compaction_test.cc
index 0d99e87799..29180f224b 100644
--- a/db/db_compaction_test.cc
+++ b/db/db_compaction_test.cc
@@ -4404,7 +4404,8 @@ TEST_F(DBCompactionTest, LevelPeriodicCompactionWithCompactionFilters) {
   for (CompactionFilterType comp_filter_type :
        {kUseCompactionFilter, kUseCompactionFilterFactory}) {
     // Assert that periodic compactions are not enabled.
-    ASSERT_EQ(port::kMaxUint64 - 1, options.periodic_compaction_seconds);
+    ASSERT_EQ(std::numeric_limits<uint64_t>::max() - 1,
+              options.periodic_compaction_seconds);
 
     if (comp_filter_type == kUseCompactionFilter) {
       options.compaction_filter = &test_compaction_filter;
diff --git a/db/db_filesnapshot.cc b/db/db_filesnapshot.cc
index a3ec9fb8f4..e30071341a 100644
--- a/db/db_filesnapshot.cc
+++ b/db/db_filesnapshot.cc
@@ -177,7 +177,7 @@ Status DBImpl::GetLiveFilesStorageInfo(
   VectorLogPtr live_wal_files;
   bool flush_memtable = true;
   if (!immutable_db_options_.allow_2pc) {
-    if (opts.wal_size_for_flush == port::kMaxUint64) {
+    if (opts.wal_size_for_flush == std::numeric_limits<uint64_t>::max()) {
       flush_memtable = false;
     } else if (opts.wal_size_for_flush > 0) {
       // If the outstanding log files are small, we skip the flush.
diff --git a/db/db_flush_test.cc b/db/db_flush_test.cc
index 76442086d6..e661d74ea8 100644
--- a/db/db_flush_test.cc
+++ b/db/db_flush_test.cc
@@ -2356,7 +2356,7 @@ TEST_P(DBAtomicFlushTest, PrecomputeMinLogNumberToKeepNon2PC) {
     ASSERT_OK(Flush(cf_ids));
     uint64_t log_num_after_flush = dbfull()->TEST_GetCurrentLogNumber();
 
-    uint64_t min_log_number_to_keep = port::kMaxUint64;
+    uint64_t min_log_number_to_keep = std::numeric_limits<uint64_t>::max();
     autovector<ColumnFamilyData*> flushed_cfds;
     autovector<autovector<VersionEdit*>> flush_edits;
     for (size_t i = 0; i != num_cfs; ++i) {
diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc
index c446121008..aa2cdac512 100644
--- a/db/db_impl/db_impl.cc
+++ b/db/db_impl/db_impl.cc
@@ -5338,7 +5338,7 @@ Status DBImpl::ReserveFileNumbersBeforeIngestion(
 
 Status DBImpl::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
   if (mutable_db_options_.max_open_files == -1) {
-    uint64_t oldest_time = port::kMaxUint64;
+    uint64_t oldest_time = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *versions_->GetColumnFamilySet()) {
       if (!cfd->IsDropped()) {
         uint64_t ctime;
diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h
index 97e3d1b8a9..339399bbbf 100644
--- a/db/db_impl/db_impl.h
+++ b/db/db_impl/db_impl.h
@@ -2299,7 +2299,7 @@ class DBImpl : public DB {
   static const int KEEP_LOG_FILE_NUM = 1000;
   // MSVC version 1800 still does not have constexpr for ::max()
-  static const uint64_t kNoTimeOut = port::kMaxUint64;
+  static const uint64_t kNoTimeOut = std::numeric_limits<uint64_t>::max();
 
   std::string db_absolute_path_;
diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index 5b508c00d2..92043350ff 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -188,7 +188,7 @@ Status DBImpl::FlushMemTableToOutputFile(
   // a memtable without knowing such snapshot(s).
   uint64_t max_memtable_id = needs_to_sync_closed_wals
                                  ? cfd->imm()->GetLatestMemTableID()
-                                 : port::kMaxUint64;
+                                 : std::numeric_limits<uint64_t>::max();
 
   // If needs_to_sync_closed_wals is false, then the flush job will pick ALL
   // existing memtables of the column family when PickMemTable() is called
@@ -1041,7 +1041,8 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
     }
     s = RunManualCompaction(cfd, ColumnFamilyData::kCompactAllLevels,
                             final_output_level, options, begin, end, exclusive,
-                            false, port::kMaxUint64, trim_ts);
+                            false, std::numeric_limits<uint64_t>::max(),
+                            trim_ts);
   } else {
     int first_overlapped_level = kInvalidLevel;
     int max_overlapped_level = kInvalidLevel;
@@ -1078,7 +1079,7 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
     if (s.ok() && first_overlapped_level != kInvalidLevel) {
       // max_file_num_to_ignore can be used to filter out newly created SST
       // files, useful for bottom level compaction in a manual compaction
-      uint64_t max_file_num_to_ignore = port::kMaxUint64;
+      uint64_t max_file_num_to_ignore = std::numeric_limits<uint64_t>::max();
       uint64_t next_file_number = versions_->current_next_file_number();
       final_output_level = max_overlapped_level;
       int output_level;
@@ -2015,7 +2016,7 @@ Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
       // be created and scheduled, status::OK() will be returned.
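      // Passing the 64-bit maximum as flush_memtable_id below means the
      // flush is not capped at any particular memtable: every immutable
      // memtable of this column family is eligible to be picked.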
      s = SwitchMemtable(cfd, &context);
    }
 
-    const uint64_t flush_memtable_id = port::kMaxUint64;
+    const uint64_t flush_memtable_id = std::numeric_limits<uint64_t>::max();
     if (s.ok()) {
       if (cfd->imm()->NumNotFlushed() != 0 || !cfd->mem()->IsEmpty() ||
           !cached_recoverable_state_empty_.load()) {
diff --git a/db/db_impl/db_impl_debug.cc b/db/db_impl/db_impl_debug.cc
index 7bbd207d98..5647aa26f2 100644
--- a/db/db_impl/db_impl_debug.cc
+++ b/db/db_impl/db_impl_debug.cc
@@ -118,10 +118,11 @@ Status DBImpl::TEST_CompactRange(int level, const Slice* begin,
        cfd->ioptions()->compaction_style == kCompactionStyleFIFO)
           ? level
           : level + 1;
-  return RunManualCompaction(cfd, level, output_level, CompactRangeOptions(),
-                             begin, end, true, disallow_trivial_move,
-                             port::kMaxUint64 /*max_file_num_to_ignore*/,
-                             "" /*trim_ts*/);
+  return RunManualCompaction(
+      cfd, level, output_level, CompactRangeOptions(), begin, end, true,
+      disallow_trivial_move,
+      std::numeric_limits<uint64_t>::max() /*max_file_num_to_ignore*/,
+      "" /*trim_ts*/);
 }
 
 Status DBImpl::TEST_SwitchMemtable(ColumnFamilyData* cfd) {
diff --git a/db/db_impl/db_impl_files.cc b/db/db_impl/db_impl_files.cc
index 1790ed836f..86a7808b28 100644
--- a/db/db_impl/db_impl_files.cc
+++ b/db/db_impl/db_impl_files.cc
@@ -761,7 +761,7 @@ uint64_t PrecomputeMinLogNumberToKeepNon2PC(
   assert(!cfds_to_flush.empty());
   assert(cfds_to_flush.size() == edit_lists.size());
 
-  uint64_t min_log_number_to_keep = port::kMaxUint64;
+  uint64_t min_log_number_to_keep = std::numeric_limits<uint64_t>::max();
   for (const auto& edit_list : edit_lists) {
     uint64_t log = 0;
     for (const auto& e : edit_list) {
@@ -773,7 +773,7 @@ uint64_t PrecomputeMinLogNumberToKeepNon2PC(
       min_log_number_to_keep = std::min(min_log_number_to_keep, log);
     }
   }
 
-  if (min_log_number_to_keep == port::kMaxUint64) {
+  if (min_log_number_to_keep == std::numeric_limits<uint64_t>::max()) {
     min_log_number_to_keep = cfds_to_flush[0]->GetLogNumber();
     for (size_t i = 1; i < cfds_to_flush.size(); i++) {
       min_log_number_to_keep =
diff --git a/db/db_impl/db_impl_secondary.cc b/db/db_impl/db_impl_secondary.cc
index 1e3c9f2ac5..fb93a4408a 100644
--- a/db/db_impl/db_impl_secondary.cc
+++ b/db/db_impl/db_impl_secondary.cc
@@ -247,15 +247,16 @@ Status DBImplSecondary::RecoverLogFiles(
         if (seq_of_batch <= seq) {
           continue;
         }
-        auto curr_log_num = port::kMaxUint64;
+        auto curr_log_num = std::numeric_limits<uint64_t>::max();
         if (cfd_to_current_log_.count(cfd) > 0) {
           curr_log_num = cfd_to_current_log_[cfd];
         }
         // If the active memtable contains records added by replaying an
         // earlier WAL, then we need to seal the memtable, add it to the
         // immutable memtable list and create a new active memtable.
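        // curr_log_num keeps the 64-bit-max sentinel when no WAL has been
        // recorded for this column family yet; a non-empty memtable is
        // sealed in that case too.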
-        if (!cfd->mem()->IsEmpty() && (curr_log_num == port::kMaxUint64 ||
-                                       curr_log_num != log_number)) {
+        if (!cfd->mem()->IsEmpty() &&
+            (curr_log_num == std::numeric_limits<uint64_t>::max() ||
+             curr_log_num != log_number)) {
           const MutableCFOptions mutable_cf_options =
               *cfd->GetLatestMutableCFOptions();
           MemTable* new_mem =
diff --git a/db/db_kv_checksum_test.cc b/db/db_kv_checksum_test.cc
index b50681e5dc..44ee56786f 100644
--- a/db/db_kv_checksum_test.cc
+++ b/db/db_kv_checksum_test.cc
@@ -79,7 +79,7 @@ class DbKvChecksumTest
 
   void CorruptNextByteCallBack(void* arg) {
     Slice encoded = *static_cast<Slice*>(arg);
-    if (entry_len_ == port::kMaxSizet) {
+    if (entry_len_ == std::numeric_limits<size_t>::max()) {
       // We learn the entry size on the first attempt
       entry_len_ = encoded.size();
     }
@@ -96,7 +96,7 @@ class DbKvChecksumTest
   WriteBatchOpType op_type_;
   char corrupt_byte_addend_;
   size_t corrupt_byte_offset_ = 0;
-  size_t entry_len_ = port::kMaxSizet;
+  size_t entry_len_ = std::numeric_limits<size_t>::max();
 };
 
 std::string GetTestNameSuffix(
diff --git a/db/db_memtable_test.cc b/db/db_memtable_test.cc
index 62b50b60a8..13736daacc 100644
--- a/db/db_memtable_test.cc
+++ b/db/db_memtable_test.cc
@@ -97,7 +97,7 @@ class MockMemTableRepFactory : public MemTableRepFactory {
  private:
   MockMemTableRep* mock_rep_;
-  // workaround since there's no port::kMaxUint32 yet.
+  // workaround since there's no std::numeric_limits<uint32_t>::max() yet.
   uint32_t last_column_family_id_ = static_cast<uint32_t>(-1);
 };
 
diff --git a/db/db_range_del_test.cc b/db/db_range_del_test.cc
index 8451143392..2b4fa3ba2c 100644
--- a/db/db_range_del_test.cc
+++ b/db/db_range_del_test.cc
@@ -500,7 +500,8 @@ TEST_F(DBRangeDelTest, ValidUniversalSubcompactionBoundaries) {
       1 /* input_level */, 2 /* output_level */, CompactRangeOptions(),
       nullptr /* begin */, nullptr /* end */, true /* exclusive */,
       true /* disallow_trivial_move */,
-      port::kMaxUint64 /* max_file_num_to_ignore */, "" /*trim_ts*/));
+      std::numeric_limits<uint64_t>::max() /* max_file_num_to_ignore */,
+      "" /*trim_ts*/));
 }
 #endif  // ROCKSDB_LITE
 
diff --git a/db/db_wal_test.cc b/db/db_wal_test.cc
index 9a953a178a..5483fcad76 100644
--- a/db/db_wal_test.cc
+++ b/db/db_wal_test.cc
@@ -1009,7 +1009,7 @@ TEST_F(DBWALTest, RecoveryWithLogDataForSomeCFs) {
       if (log_files.size() > 0) {
         earliest_log_nums[i] = log_files[0]->LogNumber();
       } else {
-        earliest_log_nums[i] = port::kMaxUint64;
+        earliest_log_nums[i] = std::numeric_limits<uint64_t>::max();
       }
     }
     // Check at least the first WAL was cleaned up during the recovery.
diff --git a/db/dbformat.h b/db/dbformat.h
index ee9c27e761..670c188c7b 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -90,7 +90,8 @@ inline bool IsExtendedValueType(ValueType t) {
 // can be packed together into 64-bits.
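// kDisableGlobalSequenceNumber is the full 64-bit maximum, deliberately
// larger than kMaxSequenceNumber (2^56 - 1), so it can never collide with a
// valid packed sequence number.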
 static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);
 
-static const SequenceNumber kDisableGlobalSequenceNumber = port::kMaxUint64;
+static const SequenceNumber kDisableGlobalSequenceNumber =
+    std::numeric_limits<uint64_t>::max();
 
 constexpr uint64_t kNumInternalBytes = 8;
 
diff --git a/db/external_sst_file_test.cc b/db/external_sst_file_test.cc
index 1f47d2ab6c..0341bdcc32 100644
--- a/db/external_sst_file_test.cc
+++ b/db/external_sst_file_test.cc
@@ -2405,7 +2405,7 @@ TEST_P(ExternalSSTBlockChecksumTest, DISABLED_HugeBlockChecksum) {
     SstFileWriter sst_file_writer(EnvOptions(), options);
 
     // 2^32 - 1, will lead to data block with more than 2^32 bytes
-    size_t huge_size = port::kMaxUint32;
+    size_t huge_size = std::numeric_limits<uint32_t>::max();
 
     std::string f = sst_files_dir_ + "f.sst";
     ASSERT_OK(sst_file_writer.Open(f));
diff --git a/db/file_indexer.h b/db/file_indexer.h
index ad7553f2c4..fd889b0314 100644
--- a/db/file_indexer.h
+++ b/db/file_indexer.h
@@ -58,10 +58,7 @@ class FileIndexer {
   void UpdateIndex(Arena* arena, const size_t num_levels,
                    std::vector<FileMetaData*>* const files);
 
-  enum {
-    // MSVC version 1800 still does not have constexpr for ::max()
-    kLevelMaxIndex = ROCKSDB_NAMESPACE::port::kMaxInt32
-  };
+  enum { kLevelMaxIndex = std::numeric_limits<int32_t>::max() };
 
  private:
   size_t num_levels_;
diff --git a/db/flush_job_test.cc b/db/flush_job_test.cc
index b8ef21fc6d..e276ba836c 100644
--- a/db/flush_job_test.cc
+++ b/db/flush_job_test.cc
@@ -164,12 +164,12 @@ TEST_F(FlushJobTest, Empty) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relavant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, {},
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, nullptr, &event_logger, false,
-      true /* sync_output_directory */, true /* write_manifest */,
-      Env::Priority::USER, nullptr /*IOTracer*/);
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      nullptr, &event_logger, false, true /* sync_output_directory */,
+      true /* write_manifest */, Env::Priority::USER, nullptr /*IOTracer*/);
   {
     InstrumentedMutexLock l(&mutex_);
     flush_job.PickMemTable();
@@ -248,11 +248,12 @@ TEST_F(FlushJobTest, NonEmpty) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relavant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, {},
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
-      true, true /* sync_output_directory */, true /* write_manifest */,
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, {}, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      db_options_.statistics.get(), &event_logger, true,
+      true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/);
 
   HistogramData hist;
@@ -509,11 +510,12 @@ TEST_F(FlushJobTest, Snapshots) {
   SnapshotChecker* snapshot_checker = nullptr;  // not relavant
   FlushJob flush_job(
       dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
-      *cfd->GetLatestMutableCFOptions(), port::kMaxUint64 /* memtable_id */,
-      env_options_, versions_.get(), &mutex_, &shutting_down_, snapshots,
-      kMaxSequenceNumber, snapshot_checker, &job_context, nullptr, nullptr,
-      nullptr, kNoCompression, db_options_.statistics.get(), &event_logger,
-      true, true /* sync_output_directory */, true /* write_manifest */,
+      *cfd->GetLatestMutableCFOptions(),
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      db_options_.statistics.get(), &event_logger, true,
+      true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/);
   mutex_.Lock();
   flush_job.PickMemTable();
@@ -577,9 +579,9 @@ TEST_F(FlushJobTimestampTest, AllKeysExpired) {
   PutFixed64(&full_history_ts_low, std::numeric_limits<uint64_t>::max());
   FlushJob flush_job(
       dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
-      port::kMaxUint64 /* memtable_id */, env_options_, versions_.get(),
-      &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker,
-      &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
       db_options_.statistics.get(), &event_logger, true,
       true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/, /*db_id=*/"",
@@ -628,9 +630,9 @@ TEST_F(FlushJobTimestampTest, NoKeyExpired) {
   PutFixed64(&full_history_ts_low, 0);
   FlushJob flush_job(
       dbname_, cfd, db_options_, *cfd->GetLatestMutableCFOptions(),
-      port::kMaxUint64 /* memtable_id */, env_options_, versions_.get(),
-      &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber, snapshot_checker,
-      &job_context, nullptr, nullptr, nullptr, kNoCompression,
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshots, kMaxSequenceNumber,
+      snapshot_checker, &job_context, nullptr, nullptr, nullptr, kNoCompression,
       db_options_.statistics.get(), &event_logger, true,
       true /* sync_output_directory */, true /* write_manifest */,
       Env::Priority::USER, nullptr /*IOTracer*/, /*db_id=*/"",
diff --git a/db/memtable.cc b/db/memtable.cc
index 3ce44ea1d5..6a4d2e127c 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -140,8 +140,8 @@ size_t MemTable::ApproximateMemoryUsage() {
   for (size_t usage : usages) {
     // If usage + total_usage >= kMaxSizet, return kMaxSizet.
     // the following variation is to avoid numeric overflow.
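    // Comparing usage against max() - total_usage, rather than computing
    // usage + total_usage directly, keeps the addition from wrapping.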
-    if (usage >= port::kMaxSizet - total_usage) {
-      return port::kMaxSizet;
+    if (usage >= std::numeric_limits<size_t>::max() - total_usage) {
+      return std::numeric_limits<size_t>::max();
     }
     total_usage += usage;
   }
diff --git a/db/memtable_list_test.cc b/db/memtable_list_test.cc
index df1694c212..29de3b6625 100644
--- a/db/memtable_list_test.cc
+++ b/db/memtable_list_test.cc
@@ -209,7 +209,8 @@ TEST_F(MemTableListTest, Empty) {
   ASSERT_FALSE(list.IsFlushPending());
 
   autovector<MemTable*> mems;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &mems);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &mems);
   ASSERT_EQ(0, mems.size());
 
   autovector<MemTable*> to_delete;
@@ -418,7 +419,8 @@ TEST_F(MemTableListTest, GetFromHistoryTest) {
   // Flush this memtable from the list.
   // (It will then be a part of the memtable history).
   autovector<MemTable*> to_flush;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(1, to_flush.size());
 
   MutableCFOptions mutable_cf_options(options);
@@ -472,7 +474,8 @@ TEST_F(MemTableListTest, GetFromHistoryTest) {
   ASSERT_EQ(0, to_delete.size());
 
   to_flush.clear();
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(1, to_flush.size());
 
   // Flush second memtable
@@ -593,7 +596,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_FALSE(list.IsFlushPending());
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
   autovector<MemTable*> to_flush;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(0, to_flush.size());
 
   // Request a flush even though there is nothing to flush
@@ -602,7 +606,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
 
   // Attempt to 'flush' to clear request for flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(0, to_flush.size());
   ASSERT_FALSE(list.IsFlushPending());
   ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
@@ -626,7 +631,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
 
   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(2, to_flush.size());
   ASSERT_EQ(2, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -647,7 +653,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_EQ(0, to_delete.size());
 
   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   ASSERT_EQ(3, to_flush.size());
   ASSERT_EQ(3, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
 
   // Pick tables to flush again
   autovector<MemTable*> to_flush2;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush2);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush2);
   ASSERT_EQ(0, to_flush2.size());
   ASSERT_EQ(3, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -673,7 +681,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
 
   // Pick tables to flush again
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush2);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush2);
   ASSERT_EQ(1, to_flush2.size());
   ASSERT_EQ(4, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -694,7 +703,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
   ASSERT_EQ(0, to_delete.size());
 
   // Pick tables to flush
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush);
   // Should pick 4 of 5 since 1 table has been picked in to_flush2
   ASSERT_EQ(4, to_flush.size());
   ASSERT_EQ(5, list.NumNotFlushed());
@@ -703,7 +713,8 @@ TEST_F(MemTableListTest, FlushPendingTest) {
 
   // Pick tables to flush again
   autovector<MemTable*> to_flush3;
-  list.PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */, &to_flush3);
+  list.PickMemtablesToFlush(
+      std::numeric_limits<uint64_t>::max() /* memtable_id */, &to_flush3);
   ASSERT_EQ(0, to_flush3.size());  // nothing not in progress of being flushed
   ASSERT_EQ(5, list.NumNotFlushed());
   ASSERT_FALSE(list.IsFlushPending());
@@ -872,8 +883,9 @@ TEST_F(MemTableListTest, AtomicFlusTest) {
     auto* list = lists[i];
     ASSERT_FALSE(list->IsFlushPending());
     ASSERT_FALSE(list->imm_flush_needed.load(std::memory_order_acquire));
-    list->PickMemtablesToFlush(port::kMaxUint64 /* memtable_id */,
-                               &flush_candidates[i]);
+    list->PickMemtablesToFlush(
+        std::numeric_limits<uint64_t>::max() /* memtable_id */,
+        &flush_candidates[i]);
     ASSERT_EQ(0, flush_candidates[i].size());
   }
   // Request flush even though there is nothing to flush
diff --git a/db/version_builder.cc b/db/version_builder.cc
index e76985687c..b785adfdd6 100644
--- a/db/version_builder.cc
+++ b/db/version_builder.cc
@@ -1144,7 +1144,7 @@ class VersionBuilder::Rep {
     size_t table_cache_capacity = table_cache_->get_cache()->GetCapacity();
     bool always_load = (table_cache_capacity == TableCache::kInfiniteCapacity);
-    size_t max_load = port::kMaxSizet;
+    size_t max_load = std::numeric_limits<size_t>::max();
 
     if (!always_load) {
       // If it is initial loading and not set to always loading all the
diff --git a/db/version_set.cc b/db/version_set.cc
index 81d254f2b7..b0e7080bd2 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -1517,7 +1517,7 @@ uint64_t Version::GetSstFilesSize() {
 }
 
 void Version::GetCreationTimeOfOldestFile(uint64_t* creation_time) {
-  uint64_t oldest_time = port::kMaxUint64;
+  uint64_t oldest_time = std::numeric_limits<uint64_t>::max();
   for (int level = 0; level < storage_info_.num_non_empty_levels_; level++) {
     for (FileMetaData* meta : storage_info_.LevelFiles(level)) {
       assert(meta->fd.table_reader != nullptr);
diff --git a/db/version_set.h b/db/version_set.h
index abb4046c7c..5afd1202f2 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -1213,7 +1213,7 @@ class VersionSet {
   // new_log_number_for_empty_cf.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       uint64_t new_log_number_for_empty_cf) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       // It's safe to ignore dropped column families here:
       // cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
@@ -1229,7 +1229,7 @@ class VersionSet {
   // file, except data from `cfd_to_skip`.
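  // min_log_num starts at the 64-bit maximum, the identity element for the
  // std::min accumulation over the column families below.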
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       const ColumnFamilyData* cfd_to_skip) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       if (cfd == cfd_to_skip) {
         continue;
@@ -1246,7 +1246,7 @@ class VersionSet {
   // file, except data from `cfds_to_skip`.
   uint64_t PreComputeMinLogNumberWithUnflushedData(
       const std::unordered_set<const ColumnFamilyData*>& cfds_to_skip) const {
-    uint64_t min_log_num = port::kMaxUint64;
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
     for (auto cfd : *column_family_set_) {
       if (cfds_to_skip.count(cfd)) {
         continue;
diff --git a/db/wal_edit.h b/db/wal_edit.h
index 7e1f9a5762..23dc589051 100644
--- a/db/wal_edit.h
+++ b/db/wal_edit.h
@@ -44,7 +44,8 @@ class WalMetadata {
  private:
   // The size of WAL is unknown, used when the WAL is not synced yet or is
   // empty.
-  constexpr static uint64_t kUnknownWalSize = port::kMaxUint64;
+  constexpr static uint64_t kUnknownWalSize =
+      std::numeric_limits<uint64_t>::max();
 
   // Size of the most recently synced WAL in bytes.
   uint64_t synced_size_bytes_ = kUnknownWalSize;
diff --git a/db/write_batch.cc b/db/write_batch.cc
index 77e91504eb..788b9bae49 100644
--- a/db/write_batch.cc
+++ b/db/write_batch.cc
@@ -745,10 +745,10 @@ Status CheckColumnFamilyTimestampSize(ColumnFamilyHandle* column_family,
 
 Status WriteBatchInternal::Put(WriteBatch* b, uint32_t column_family_id,
                                const Slice& key, const Slice& value) {
-  if (key.size() > size_t{port::kMaxUint32}) {
+  if (key.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
-  if (value.size() > size_t{port::kMaxUint32}) {
+  if (value.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }
 
@@ -825,7 +825,7 @@ Status WriteBatchInternal::CheckSlicePartsLength(const SliceParts& key,
   for (int i = 0; i < key.num_parts; ++i) {
     total_key_bytes += key.parts[i].size();
   }
-  if (total_key_bytes >= size_t{port::kMaxUint32}) {
+  if (total_key_bytes >= size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
 
@@ -833,7 +833,7 @@ Status WriteBatchInternal::CheckSlicePartsLength(const SliceParts& key,
   for (int i = 0; i < value.num_parts; ++i) {
     total_value_bytes += value.parts[i].size();
   }
-  if (total_value_bytes >= size_t{port::kMaxUint32}) {
+  if (total_value_bytes >= size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }
   return Status::OK();
@@ -1292,10 +1292,10 @@ Status WriteBatch::DeleteRange(ColumnFamilyHandle* column_family,
 
 Status WriteBatchInternal::Merge(WriteBatch* b, uint32_t column_family_id,
                                  const Slice& key, const Slice& value) {
-  if (key.size() > size_t{port::kMaxUint32}) {
+  if (key.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("key is too large");
   }
-  if (value.size() > size_t{port::kMaxUint32}) {
+  if (value.size() > size_t{std::numeric_limits<uint32_t>::max()}) {
     return Status::InvalidArgument("value is too large");
   }
 
diff --git a/db_stress_tool/db_stress_test_base.cc b/db_stress_tool/db_stress_test_base.cc
index 37835bb212..e18234eecc 100644
--- a/db_stress_tool/db_stress_test_base.cc
+++ b/db_stress_tool/db_stress_test_base.cc
@@ -2029,11 +2029,11 @@ void StressTest::TestAcquireSnapshot(ThreadState* thread,
     if (FLAGS_long_running_snapshots) {
       // Hold 10% of snapshots for 10x more
       if (thread->rand.OneIn(10)) {
-        assert(hold_for < port::kMaxInt64 / 10);
+        assert(hold_for < std::numeric_limits<int64_t>::max() / 10);
         hold_for *= 10;
        // Hold 1% of snapshots for 100x more
        if (thread->rand.OneIn(10)) {
-          assert(hold_for < port::kMaxInt64 / 10);
+          assert(hold_for < std::numeric_limits<int64_t>::max() / 10);
           hold_for *= 10;
         }
       }
@@ -2065,8 +2065,9 @@ void StressTest::TestCompactRange(ThreadState* thread, int64_t rand_key,
                                   const Slice& start_key,
                                   ColumnFamilyHandle* column_family) {
   int64_t end_key_num;
-  if (port::kMaxInt64 - rand_key < FLAGS_compact_range_width) {
-    end_key_num = port::kMaxInt64;
+  if (std::numeric_limits<int64_t>::max() - rand_key <
+      FLAGS_compact_range_width) {
+    end_key_num = std::numeric_limits<int64_t>::max();
   } else {
     end_key_num = FLAGS_compact_range_width + rand_key;
   }
diff --git a/file/file_prefetch_buffer.h b/file/file_prefetch_buffer.h
index 94d09bba43..88b350cebc 100644
--- a/file/file_prefetch_buffer.h
+++ b/file/file_prefetch_buffer.h
@@ -71,7 +71,7 @@ class FilePrefetchBuffer {
         readahead_size_(readahead_size),
         initial_auto_readahead_size_(readahead_size),
         max_readahead_size_(max_readahead_size),
-        min_offset_read_(port::kMaxSizet),
+        min_offset_read_(std::numeric_limits<size_t>::max()),
         enable_(enable),
         track_min_offset_(track_min_offset),
         implicit_auto_readahead_(implicit_auto_readahead),
diff --git a/monitoring/histogram.cc b/monitoring/histogram.cc
index c6ef856b00..323a08efb6 100644
--- a/monitoring/histogram.cc
+++ b/monitoring/histogram.cc
@@ -26,7 +26,8 @@ HistogramBucketMapper::HistogramBucketMapper() {
   // size of array buckets_ in HistogramImpl
   bucketValues_ = {1, 2};
   double bucket_val = static_cast<double>(bucketValues_.back());
-  while ((bucket_val = 1.5 * bucket_val) <= static_cast<double>(port::kMaxUint64)) {
+  while ((bucket_val = 1.5 * bucket_val) <=
+         static_cast<double>(std::numeric_limits<uint64_t>::max())) {
     bucketValues_.push_back(static_cast<uint64_t>(bucket_val));
     // Extracts two most significant digits to make histogram buckets more
     // human-readable. E.g., 172 becomes 170.
diff --git a/monitoring/persistent_stats_history.cc b/monitoring/persistent_stats_history.cc
index 86fe98f1fc..9bde38b3ab 100644
--- a/monitoring/persistent_stats_history.cc
+++ b/monitoring/persistent_stats_history.cc
@@ -98,13 +98,13 @@ std::pair<uint64_t, std::string> parseKey(const Slice& key,
   std::string::size_type pos = key_str.find("#");
   // TODO(Zhongyi): add counters to track parse failures?
   if (pos == std::string::npos) {
-    result.first = port::kMaxUint64;
+    result.first = std::numeric_limits<uint64_t>::max();
     result.second.clear();
   } else {
     uint64_t parsed_time = ParseUint64(key_str.substr(0, pos));
     // skip entries with timestamp smaller than start_time
     if (parsed_time < start_time) {
-      result.first = port::kMaxUint64;
+      result.first = std::numeric_limits<uint64_t>::max();
       result.second = "";
     } else {
       result.first = parsed_time;
diff --git a/options/cf_options.cc b/options/cf_options.cc
index d1e6f13d5c..8c927fff5f 100644
--- a/options/cf_options.cc
+++ b/options/cf_options.cc
@@ -886,7 +886,7 @@ uint64_t MultiplyCheckOverflow(uint64_t op1, double op2) {
   if (op1 == 0 || op2 <= 0) {
     return 0;
   }
-  if (port::kMaxUint64 / op1 < op2) {
+  if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
     return op1;
   }
   return static_cast<uint64_t>(op1 * op2);
@@ -915,8 +915,9 @@ size_t MaxFileSizeForL0MetaPin(const MutableCFOptions& cf_options) {
   // or a former larger `write_buffer_size` value to avoid surprising users with
   // pinned memory usage. We use a factor of 1.5 to account for overhead
   // introduced during flush in most cases.
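  // Rearranged overflow guard: if write_buffer_size / 2 * 3 would exceed
  // size_t, saturate at the maximum instead of wrapping.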
-  if (port::kMaxSizet / 3 < cf_options.write_buffer_size / 2) {
-    return port::kMaxSizet;
+  if (std::numeric_limits<size_t>::max() / 3 <
+      cf_options.write_buffer_size / 2) {
+    return std::numeric_limits<size_t>::max();
   }
   return cf_options.write_buffer_size / 2 * 3;
 }
diff --git a/options/options_test.cc b/options/options_test.cc
index 58070b3ff2..3ff230eff1 100644
--- a/options/options_test.cc
+++ b/options/options_test.cc
@@ -4082,9 +4082,10 @@ TEST_F(OptionsParserTest, IntegerParsing) {
   ASSERT_EQ(ParseUint32("4294967295"), 4294967295U);
   ASSERT_EQ(ParseSizeT("18446744073709551615"), 18446744073709551615U);
   ASSERT_EQ(ParseInt64("9223372036854775807"), 9223372036854775807);
-  ASSERT_EQ(ParseInt64("-9223372036854775808"), port::kMinInt64);
+  ASSERT_EQ(ParseInt64("-9223372036854775808"),
+            std::numeric_limits<int64_t>::min());
   ASSERT_EQ(ParseInt32("2147483647"), 2147483647);
-  ASSERT_EQ(ParseInt32("-2147483648"), port::kMinInt32);
+  ASSERT_EQ(ParseInt32("-2147483648"), std::numeric_limits<int32_t>::min());
   ASSERT_EQ(ParseInt("-32767"), -32767);
   ASSERT_EQ(ParseDouble("-1.234567"), -1.234567);
 }
diff --git a/port/port_posix.h b/port/port_posix.h
index 1bb0841fdb..e23b828239 100644
--- a/port/port_posix.h
+++ b/port/port_posix.h
@@ -95,16 +95,6 @@ namespace ROCKSDB_NAMESPACE {
 extern const bool kDefaultToAdaptiveMutex;
 
 namespace port {
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = std::numeric_limits<uint32_t>::max();
-const int kMaxInt32 = std::numeric_limits<int>::max();
-const int kMinInt32 = std::numeric_limits<int>::min();
-const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
-const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
-const int64_t kMinInt64 = std::numeric_limits<int64_t>::min();
-const size_t kMaxSizet = std::numeric_limits<size_t>::max();
-
 constexpr bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN;
 #undef PLATFORM_IS_LITTLE_ENDIAN
 
diff --git a/port/win/port_win.h b/port/win/port_win.h
index a1d8e02bf0..6894758d36 100644
--- a/port/win/port_win.h
+++ b/port/win/port_win.h
@@ -82,37 +82,11 @@ namespace port {
 #define snprintf _snprintf
 
 #define ROCKSDB_NOEXCEPT
-// std::numeric_limits<size_t>::max() is not constexpr just yet
-// therefore, use the same limits
-
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = UINT32_MAX;
-const int kMaxInt32 = INT32_MAX;
-const int kMinInt32 = INT32_MIN;
-const int64_t kMaxInt64 = INT64_MAX;
-const int64_t kMinInt64 = INT64_MIN;
-const uint64_t kMaxUint64 = UINT64_MAX;
-
-#ifdef _WIN64
-const size_t kMaxSizet = UINT64_MAX;
-#else
-const size_t kMaxSizet = UINT_MAX;
-#endif
 
 #else  // VS >= 2015 or MinGW
 
 #define ROCKSDB_NOEXCEPT noexcept
 
-// For use at db/file_indexer.h kLevelMaxIndex
-const uint32_t kMaxUint32 = std::numeric_limits<uint32_t>::max();
-const int kMaxInt32 = std::numeric_limits<int>::max();
-const int kMinInt32 = std::numeric_limits<int>::min();
-const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
-const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
-const int64_t kMinInt64 = std::numeric_limits<int64_t>::min();
-
-const size_t kMaxSizet = std::numeric_limits<size_t>::max();
-
 #endif  //_MSC_VER
 
 // "Windows is designed to run on little-endian computer architectures."
diff --git a/table/block_based/block.cc b/table/block_based/block.cc
index ba1489f937..ef02bc869c 100644
--- a/table/block_based/block.cc
+++ b/table/block_based/block.cc
@@ -721,7 +721,7 @@ void BlockIter<TValue>::FindKeyAfterBinarySeek(const Slice& target,
   } else {
     // We are in the last restart interval. The while-loop will terminate by
     // `Valid()` returning false upon advancing past the block's last key.
-    max_offset = port::kMaxUint32;
+    max_offset = std::numeric_limits<uint32_t>::max();
   }
   while (true) {
     NextImpl();
diff --git a/table/block_based/block_based_table_factory.cc b/table/block_based/block_based_table_factory.cc
index 2a2258a40b..db2858b190 100644
--- a/table/block_based/block_based_table_factory.cc
+++ b/table/block_based/block_based_table_factory.cc
@@ -658,7 +658,7 @@ Status BlockBasedTableFactory::ValidateOptions(
     return Status::InvalidArgument(
         "Block alignment requested but block size is not a power of 2");
   }
-  if (table_options_.block_size > port::kMaxUint32) {
+  if (table_options_.block_size > std::numeric_limits<uint32_t>::max()) {
     return Status::InvalidArgument(
         "block size exceeds maximum number (4GiB) allowed");
   }
diff --git a/table/cuckoo/cuckoo_table_builder.h b/table/cuckoo/cuckoo_table_builder.h
index a72d5183aa..20ed71bfc2 100644
--- a/table/cuckoo/cuckoo_table_builder.h
+++ b/table/cuckoo/cuckoo_table_builder.h
@@ -85,7 +85,7 @@ class CuckooTableBuilder: public TableBuilder {
     // We assume number of items is <= 2^32.
     uint32_t make_space_for_key_call_id;
   };
-  static const uint32_t kMaxVectorIdx = port::kMaxInt32;
+  static const uint32_t kMaxVectorIdx = std::numeric_limits<int32_t>::max();
 
   bool MakeSpaceForKey(const autovector<uint64_t>& hash_vals,
                        const uint32_t call_id,
diff --git a/table/meta_blocks.cc b/table/meta_blocks.cc
index 6ffa4a14f8..13ecf87143 100644
--- a/table/meta_blocks.cc
+++ b/table/meta_blocks.cc
@@ -53,8 +53,8 @@ Slice MetaIndexBuilder::Finish() {
 // object, so there's no need for restart points. Thus we set the restart
 // interval to infinity to save space.
 PropertyBlockBuilder::PropertyBlockBuilder()
-    : properties_block_(
-          new BlockBuilder(port::kMaxInt32 /* restart interval */)) {}
+    : properties_block_(new BlockBuilder(
+          std::numeric_limits<int32_t>::max() /* restart interval */)) {}
 
 void PropertyBlockBuilder::Add(const std::string& name,
                                const std::string& val) {
diff --git a/table/table_properties.cc b/table/table_properties.cc
index 8af0315ba1..49b474758b 100644
--- a/table/table_properties.cc
+++ b/table/table_properties.cc
@@ -17,7 +17,7 @@ namespace ROCKSDB_NAMESPACE {
 
 const uint32_t TablePropertiesCollectorFactory::Context::kUnknownColumnFamily =
-    port::kMaxInt32;
+    std::numeric_limits<int32_t>::max();
 
 namespace {
   void AppendProperty(
diff --git a/tools/block_cache_analyzer/block_cache_trace_analyzer.cc b/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
index ff618f4b52..59ad7004b7 100644
--- a/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
+++ b/tools/block_cache_analyzer/block_cache_trace_analyzer.cc
@@ -412,7 +412,7 @@ void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
   }
   std::map<uint64_t, std::map<std::string, std::map<uint64_t, double>>>
       cs_name_timeline;
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   const std::map<uint64_t, uint64_t>& trace_num_misses =
       adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
@@ -427,7 +427,8 @@ void BlockCacheTraceAnalyzer::WriteMissRatioTimeline(uint64_t time_unit) const {
     auto it = trace_num_accesses.find(time);
     assert(it != trace_num_accesses.end());
     uint64_t access = it->second;
-    cs_name_timeline[port::kMaxUint64]["trace"][time] = percent(miss, access);
+    cs_name_timeline[std::numeric_limits<uint64_t>::max()]["trace"][time] =
+        percent(miss, access);
   }
   for (auto const& config_caches : cache_simulator_->sim_caches()) {
     const CacheConfiguration& config = config_caches.first;
@@ -492,7 +493,7 @@ void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
   }
   std::map<uint64_t, std::map<std::string, std::map<uint64_t, uint64_t>>>
       cs_name_timeline;
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   const std::map<uint64_t, uint64_t>& trace_num_misses =
       adjust_time_unit(miss_ratio_stats_.num_misses_timeline(), time_unit);
@@ -501,7 +502,8 @@ void BlockCacheTraceAnalyzer::WriteMissTimeline(uint64_t time_unit) const {
     start_time = std::min(start_time, time);
     end_time = std::max(end_time, time);
     uint64_t miss = num_miss.second;
-    cs_name_timeline[port::kMaxUint64]["trace"][time] = miss;
+    cs_name_timeline[std::numeric_limits<uint64_t>::max()]["trace"][time] =
+        miss;
   }
   for (auto const& config_caches : cache_simulator_->sim_caches()) {
     const CacheConfiguration& config = config_caches.first;
@@ -589,7 +591,7 @@ void BlockCacheTraceAnalyzer::WriteSkewness(
   for (auto const& percent : percent_buckets) {
     label_bucket_naccesses[label_str][percent] = 0;
     size_t end_index = 0;
-    if (percent == port::kMaxUint64) {
+    if (percent == std::numeric_limits<uint64_t>::max()) {
       end_index = label_naccesses.size();
     } else {
       end_index = percent * label_naccesses.size() / 100;
@@ -856,7 +858,7 @@ void BlockCacheTraceAnalyzer::WriteAccessTimeline(const std::string& label_str,
                                                   uint64_t time_unit,
                                                   bool user_access_only) const {
   std::set<std::string> labels = ParseLabelStr(label_str);
-  uint64_t start_time = port::kMaxUint64;
+  uint64_t start_time = std::numeric_limits<uint64_t>::max();
   uint64_t end_time = 0;
   std::map<std::string, std::map<uint64_t, uint64_t>> label_access_timeline;
   std::map<uint64_t, std::vector<std::string>> access_count_block_id_map;
@@ -1091,7 +1093,7 @@ void BlockCacheTraceAnalyzer::WriteReuseInterval(
                              kMicrosInSecond) /
                             block.num_accesses;
     } else {
-      avg_reuse_interval = port::kMaxUint64 - 1;
+      avg_reuse_interval = std::numeric_limits<uint64_t>::max() - 1;
     }
     if (labels.find(kGroupbyCaller) != labels.end()) {
       for (auto const& timeline : block.caller_num_accesses_timeline) {
@@ -1152,7 +1154,7 @@ void BlockCacheTraceAnalyzer::WriteReuseLifetime(
       lifetime =
           (block.last_access_time - block.first_access_time) / kMicrosInSecond;
     } else {
-      lifetime = port::kMaxUint64 - 1;
+      lifetime = std::numeric_limits<uint64_t>::max() - 1;
     }
     const std::string label = BuildLabel(
         labels, cf_name, fd, level, type,
@@ -2103,7 +2105,7 @@ std::vector<uint64_t> parse_buckets(const std::string& bucket_str) {
     getline(ss, bucket, ',');
     buckets.push_back(ParseUint64(bucket));
   }
-  buckets.push_back(port::kMaxUint64);
+  buckets.push_back(std::numeric_limits<uint64_t>::max());
   return buckets;
 }
diff --git a/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc b/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc
index 896a6ced16..5b8300a81c 100644
--- a/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc
+++ b/tools/block_cache_analyzer/block_cache_trace_analyzer_test.cc
@@ -277,7 +277,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
     ASSERT_OK(env_->DeleteFile(mrc_path));
 
     const std::vector<std::string> time_units{"1", "60", "3600"};
-    expected_capacities.push_back(port::kMaxUint64);
+    expected_capacities.push_back(std::numeric_limits<uint64_t>::max());
    for (auto const& expected_capacity : expected_capacities) {
      for (auto const& time_unit : time_units) {
        const std::string miss_ratio_timeline_path =
@@ -293,7 +293,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
          std::string substr;
          getline(ss, substr, ',');
          if (!read_header) {
-            if (expected_capacity == port::kMaxUint64) {
+            if (expected_capacity == std::numeric_limits<uint64_t>::max()) {
              ASSERT_EQ("trace", substr);
            } else {
              ASSERT_EQ("lru-1-0", substr);
@@ -321,7 +321,7 @@ TEST_F(BlockCacheTracerTest, BlockCacheAnalyzer) {
          std::string substr;
          getline(ss, substr, ',');
          if (num_misses == 0) {
-            if (expected_capacity == port::kMaxUint64) {
+            if (expected_capacity == std::numeric_limits<uint64_t>::max()) {
              ASSERT_EQ("trace", substr);
            } else {
              ASSERT_EQ("lru-1-0", substr);
diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc
index b63b0a9a47..f04eff4f1b 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -8073,7 +8073,8 @@ class Benchmark {
     }
 
     std::unique_ptr<StatsHistoryIterator> shi;
-    Status s = db->GetStatsHistory(0, port::kMaxUint64, &shi);
+    Status s =
+        db->GetStatsHistory(0, std::numeric_limits<uint64_t>::max(), &shi);
     if (!s.ok()) {
       fprintf(stdout, "%s\n", s.ToString().c_str());
       return;
diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc
index 622344e88d..1b27cc33cf 100644
--- a/tools/sst_dump_tool.cc
+++ b/tools/sst_dump_tool.cc
@@ -282,7 +282,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
     } else if (ParseIntArg(argv[i], "--compression_max_dict_bytes=",
                            "compression_max_dict_bytes must be numeric",
                            &tmp_val)) {
-      if (tmp_val < 0 || tmp_val > port::kMaxUint32) {
+      if (tmp_val < 0 || tmp_val > std::numeric_limits<uint32_t>::max()) {
         fprintf(stderr, "compression_max_dict_bytes must be a uint32_t: '%s'\n",
                 argv[i]);
         print_help(/*to_stderr*/ true);
@@ -292,7 +292,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) {
     } else if (ParseIntArg(argv[i], "--compression_zstd_max_train_bytes=",
                            "compression_zstd_max_train_bytes must be numeric",
                            &tmp_val)) {
-      if (tmp_val < 0 || tmp_val > port::kMaxUint32) {
+      if (tmp_val < 0 || tmp_val > std::numeric_limits<uint32_t>::max()) {
        fprintf(stderr,
                "compression_zstd_max_train_bytes must be a uint32_t: '%s'\n",
                argv[i]);
diff --git a/tools/trace_analyzer_tool.cc b/tools/trace_analyzer_tool.cc
index 972eff8634..6423352cdc 100644
--- a/tools/trace_analyzer_tool.cc
+++ b/tools/trace_analyzer_tool.cc
@@ -190,7 +190,7 @@ uint64_t MultiplyCheckOverflow(uint64_t op1, uint64_t op2) {
   if (op1 == 0 || op2 == 0) {
     return 0;
   }
-  if (port::kMaxUint64 / op1 < op2) {
+  if (std::numeric_limits<uint64_t>::max() / op1 < op2) {
     return op1;
   }
   return (op1 * op2);
diff --git a/trace_replay/block_cache_tracer.h b/trace_replay/block_cache_tracer.h
index 23672e1df7..feea5ad517 100644
--- a/trace_replay/block_cache_tracer.h
+++ b/trace_replay/block_cache_tracer.h
@@ -281,7 +281,7 @@ class BlockCacheTracer {
                         const Slice& block_key, const Slice& cf_name,
                         const Slice& referenced_key);
 
-  // GetId cycles from 1 to port::kMaxUint64.
+  // GetId cycles from 1 to std::numeric_limits<uint64_t>::max().
   uint64_t NextGetId();
 
  private:
diff --git a/util/heap.h b/util/heap.h
index e0737581e5..3f4cddeb90 100644
--- a/util/heap.h
+++ b/util/heap.h
@@ -101,7 +101,9 @@ class BinaryHeap {
 
   size_t size() const { return data_.size(); }
 
-  void reset_root_cmp_cache() { root_cmp_cache_ = port::kMaxSizet; }
+  void reset_root_cmp_cache() {
+    root_cmp_cache_ = std::numeric_limits<size_t>::max();
+  }
 
  private:
  static inline size_t get_root() { return 0; }
@@ -126,7 +128,7 @@ class BinaryHeap {
 
   void downheap(size_t index) {
     T v = std::move(data_[index]);
-    size_t picked_child = port::kMaxSizet;
+    size_t picked_child = std::numeric_limits<size_t>::max();
     while (1) {
       const size_t left_child = get_left(index);
       if (get_left(index) >= data_.size()) {
@@ -165,7 +167,7 @@ class BinaryHeap {
   Compare cmp_;
   autovector<T> data_;
   // Used to reduce number of cmp_ calls in downheap()
-  size_t root_cmp_cache_ = port::kMaxSizet;
+  size_t root_cmp_cache_ = std::numeric_limits<size_t>::max();
 };
 
 }  // namespace ROCKSDB_NAMESPACE
diff --git a/util/rate_limiter.cc b/util/rate_limiter.cc
index 2260a91118..f369e3220b 100644
--- a/util/rate_limiter.cc
+++ b/util/rate_limiter.cc
@@ -347,10 +347,11 @@ void GenericRateLimiter::RefillBytesAndGrantRequests() {
 
 int64_t GenericRateLimiter::CalculateRefillBytesPerPeriod(
     int64_t rate_bytes_per_sec) {
-  if (port::kMaxInt64 / rate_bytes_per_sec < options_.refill_period_us) {
+  if (std::numeric_limits<int64_t>::max() / rate_bytes_per_sec <
+      options_.refill_period_us) {
     // Avoid unexpected result in the overflow case. The result now is still
     // inaccurate but is a number that is large enough.
-    return port::kMaxInt64 / 1000000;
+    return std::numeric_limits<int64_t>::max() / 1000000;
   } else {
     return rate_bytes_per_sec * options_.refill_period_us / 1000000;
   }
@@ -374,7 +375,7 @@ Status GenericRateLimiter::Tune() {
       std::chrono::microseconds(options_.refill_period_us);
   // We tune every kRefillsPerTune intervals, so the overflow and division-by-
   // zero conditions should never happen.
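  // The assert bounds num_drains_ so that num_drains_ * 100 below cannot
  // overflow int64_t.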
-  assert(num_drains_ <= port::kMaxInt64 / 100);
+  assert(num_drains_ <= std::numeric_limits<int64_t>::max() / 100);
   assert(elapsed_intervals > 0);
   int64_t drained_pct = num_drains_ * 100 / elapsed_intervals;
 
@@ -385,14 +386,15 @@ Status GenericRateLimiter::Tune() {
   } else if (drained_pct < kLowWatermarkPct) {
     // sanitize to prevent overflow
     int64_t sanitized_prev_bytes_per_sec =
-        std::min(prev_bytes_per_sec, port::kMaxInt64 / 100);
+        std::min(prev_bytes_per_sec, std::numeric_limits<int64_t>::max() / 100);
     new_bytes_per_sec =
         std::max(options_.max_bytes_per_sec / kAllowedRangeFactor,
                  sanitized_prev_bytes_per_sec * 100 / (100 + kAdjustFactorPct));
   } else if (drained_pct > kHighWatermarkPct) {
     // sanitize to prevent overflow
-    int64_t sanitized_prev_bytes_per_sec = std::min(
-        prev_bytes_per_sec, port::kMaxInt64 / (100 + kAdjustFactorPct));
+    int64_t sanitized_prev_bytes_per_sec =
+        std::min(prev_bytes_per_sec, std::numeric_limits<int64_t>::max() /
+                                         (100 + kAdjustFactorPct));
     new_bytes_per_sec =
         std::min(options_.max_bytes_per_sec,
                  sanitized_prev_bytes_per_sec * (100 + kAdjustFactorPct) / 100);
@@ -433,7 +435,8 @@ static int RegisterBuiltinRateLimiters(ObjectLibrary& library,
       GenericRateLimiter::kClassName(),
       [](const std::string& /*uri*/, std::unique_ptr<RateLimiter>* guard,
         std::string* /*errmsg*/) {
-        guard->reset(new GenericRateLimiter(port::kMaxInt64));
+        guard->reset(
+            new GenericRateLimiter(std::numeric_limits<int64_t>::max()));
        return guard->get();
      });
  size_t num_types;
diff --git a/util/rate_limiter_test.cc b/util/rate_limiter_test.cc
index ad44d5736c..cd809d183f 100644
--- a/util/rate_limiter_test.cc
+++ b/util/rate_limiter_test.cc
@@ -36,7 +36,7 @@ class RateLimiterTest : public testing::Test {
 };
 
 TEST_F(RateLimiterTest, OverflowRate) {
-  GenericRateLimiter limiter(port::kMaxInt64, 1000, 10,
+  GenericRateLimiter limiter(std::numeric_limits<int64_t>::max(), 1000, 10,
                              RateLimiter::Mode::kWritesOnly,
                              SystemClock::Default(), false /* auto_tuned */);
   ASSERT_GT(limiter.GetSingleBurstBytes(), 1000000000ll);
diff --git a/util/string_util.cc b/util/string_util.cc
index 03bf5a40c6..24b70ba10d 100644
--- a/util/string_util.cc
+++ b/util/string_util.cc
@@ -315,7 +315,8 @@ uint32_t ParseUint32(const std::string& value) {
 
 int32_t ParseInt32(const std::string& value) {
   int64_t num = ParseInt64(value);
-  if (num <= port::kMaxInt32 && num >= port::kMinInt32) {
+  if (num <= std::numeric_limits<int32_t>::max() &&
+      num >= std::numeric_limits<int32_t>::min()) {
     return static_cast<int32_t>(num);
   } else {
     throw std::out_of_range(value);
diff --git a/utilities/backup/backup_engine.cc b/utilities/backup/backup_engine.cc
index af633bff4c..1c6a2cb0c4 100644
--- a/utilities/backup/backup_engine.cc
+++ b/utilities/backup/backup_engine.cc
@@ -1012,8 +1012,9 @@ IOStatus BackupEngineImpl::Initialize() {
     // we might need to clean up from previous crash or I/O errors
     might_need_garbage_collect_ = true;
 
-    if (options_.max_valid_backups_to_open != port::kMaxInt32) {
-      options_.max_valid_backups_to_open = port::kMaxInt32;
+    if (options_.max_valid_backups_to_open !=
+        std::numeric_limits<int32_t>::max()) {
+      options_.max_valid_backups_to_open = std::numeric_limits<int32_t>::max();
       ROCKS_LOG_WARN(
           options_.info_log,
          "`max_valid_backups_to_open` is not set to the default value. Ignoring "
@@ -1434,7 +1435,8 @@ IOStatus BackupEngineImpl::CreateNewBackupWithMetadata(
                contents.size(), db_options.statistics.get(), 0 /* size_limit */,
                false /* shared_checksum */, options.progress_callback,
                contents);
          } /* create_file_cb */,
-          &sequence_number, options.flush_before_backup ? 0 : port::kMaxUint64,
+          &sequence_number,
+          options.flush_before_backup ? 0 : std::numeric_limits<uint64_t>::max(),
           compare_checksum));
   if (io_s.ok()) {
     new_backup->SetSequenceNumber(sequence_number);
@@ -2171,7 +2173,7 @@ IOStatus BackupEngineImpl::AddBackupFileWorkItem(
       return io_s;
     }
   }
-  if (size_bytes == port::kMaxUint64) {
+  if (size_bytes == std::numeric_limits<uint64_t>::max()) {
     return IOStatus::NotFound("File missing: " + src_path);
   }
   // dst_relative depends on the following conditions:
diff --git a/utilities/backup/backup_engine_test.cc b/utilities/backup/backup_engine_test.cc
index 8585dbf121..00b71cfa0c 100644
--- a/utilities/backup/backup_engine_test.cc
+++ b/utilities/backup/backup_engine_test.cc
@@ -3756,7 +3756,8 @@ TEST_F(BackupEngineTest, WriteOnlyEngineNoSharedFileDeletion) {
     }
     CloseDBAndBackupEngine();
 
-    engine_options_->max_valid_backups_to_open = port::kMaxInt32;
+    engine_options_->max_valid_backups_to_open =
+        std::numeric_limits<int32_t>::max();
     AssertBackupConsistency(i + 1, 0, (i + 1) * kNumKeys);
   }
 }
diff --git a/utilities/write_batch_with_index/write_batch_with_index_internal.h b/utilities/write_batch_with_index/write_batch_with_index_internal.h
index cf8c46e5c0..edabc95bcd 100644
--- a/utilities/write_batch_with_index/write_batch_with_index_internal.h
+++ b/utilities/write_batch_with_index/write_batch_with_index_internal.h
@@ -95,7 +95,7 @@ struct WriteBatchIndexEntry {
                        bool is_forward_direction, bool is_seek_to_first)
       // For SeekForPrev(), we need to make the dummy entry larger than any
       // entry who has the same search key. Otherwise, we'll miss those entries.
-      : offset(is_forward_direction ? 0 : port::kMaxSizet),
+      : offset(is_forward_direction ? 0 : std::numeric_limits<size_t>::max()),
        column_family(_column_family),
        key_offset(0),
        key_size(is_seek_to_first ? kFlagMinInCf : 0),
@@ -105,7 +105,7 @@ struct WriteBatchIndexEntry {
 
   // If this flag appears in the key_size, it indicates a
   // key that is smaller than any other entry for the same column family.
-  static const size_t kFlagMinInCf = port::kMaxSizet;
+  static const size_t kFlagMinInCf = std::numeric_limits<size_t>::max();
 
   bool is_min_in_cf() const {
     assert(key_size != kFlagMinInCf ||