diff --git a/CMakeLists.txt b/CMakeLists.txt
index f8e1264d9c..8b2ae7b71f 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -145,7 +145,7 @@ target_include_directories(build_version PRIVATE
   ${CMAKE_CURRENT_SOURCE_DIR}/util)
 if(MSVC)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /wd4127 /wd4800 /wd4996 /wd4351")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W4 /wd4127 /wd4800 /wd4996 /wd4351 /wd4100 /wd4204 /wd4324")
 else()
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -W -Wextra -Wall")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wsign-compare -Wshadow -Wno-unused-parameter -Wno-unused-variable -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers -Wno-strict-aliasing")
@@ -560,9 +560,11 @@ set(SOURCES
         $<TARGET_OBJECTS:build_version>)
 
 if(HAVE_SSE42 AND NOT FORCE_SSE42)
+  if(NOT MSVC)
   set_source_files_properties(
     util/crc32c.cc
-    PROPERTIES COMPILE_FLAGS "-msse4.2")
+      PROPERTIES COMPILE_FLAGS "-msse4.2")
+  endif()
 endif()
 
 if(WIN32)
diff --git a/db/cuckoo_table_db_test.cc b/db/cuckoo_table_db_test.cc
index e7c2d279a4..41b3e3c6b1 100644
--- a/db/cuckoo_table_db_test.cc
+++ b/db/cuckoo_table_db_test.cc
@@ -241,7 +241,7 @@ TEST_F(CuckooTableDBTest, CompactionIntoMultipleFiles) {
 
   // Write 28 values, each 10016 B ~ 10KB
   for (int idx = 0; idx < 28; ++idx) {
-    ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + idx)));
+    ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + char(idx))));
   }
   dbfull()->TEST_WaitForFlushMemTable();
   ASSERT_EQ("1", FilesPerLevel());
@@ -250,7 +250,7 @@ TEST_F(CuckooTableDBTest, CompactionIntoMultipleFiles) {
                               true /* disallow trivial move */);
   ASSERT_EQ("0,2", FilesPerLevel());
   for (int idx = 0; idx < 28; ++idx) {
-    ASSERT_EQ(std::string(10000, 'a' + idx), Get(Key(idx)));
+    ASSERT_EQ(std::string(10000, 'a' + char(idx)), Get(Key(idx)));
   }
 }
 
@@ -271,14 +271,14 @@ TEST_F(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) {
 
   // Generate one more file in level-0, and should trigger level-0 compaction
   for (int idx = 0; idx < 11; ++idx) {
-    ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + idx)));
+    ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + char(idx))));
   }
   dbfull()->TEST_WaitForFlushMemTable();
   dbfull()->TEST_CompactRange(0, nullptr, nullptr);
 
   ASSERT_EQ("0,1", FilesPerLevel());
   for (int idx = 0; idx < 11; ++idx) {
-    ASSERT_EQ(std::string(10000, 'a' + idx), Get(Key(idx)));
+    ASSERT_EQ(std::string(10000, 'a' + char(idx)), Get(Key(idx)));
   }
 }
 
diff --git a/db/db_basic_test.cc b/db/db_basic_test.cc
index 654a457ef5..6fe33a193a 100644
--- a/db/db_basic_test.cc
+++ b/db/db_basic_test.cc
@@ -41,7 +41,7 @@ TEST_F(DBBasicTest, ReadOnlyDB) {
   Close();
 
   auto options = CurrentOptions();
-  assert(options.env = env_);
+  assert(options.env == env_);
   ASSERT_OK(ReadOnlyReopen(options));
   ASSERT_EQ("v3", Get("foo"));
   ASSERT_EQ("v2", Get("bar"));
diff --git a/db/db_compaction_filter_test.cc b/db/db_compaction_filter_test.cc
index 9f751f059f..2be006ba68 100644
--- a/db/db_compaction_filter_test.cc
+++ b/db/db_compaction_filter_test.cc
@@ -308,7 +308,6 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
     ASSERT_OK(iter->status());
     while (iter->Valid()) {
       ParsedInternalKey ikey(Slice(), 0, kTypeValue);
-      ikey.sequence = -1;
       ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
       total++;
       if (ikey.sequence != 0) {
@@ -617,7 +616,6 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) {
     ASSERT_OK(iter->status());
     while (iter->Valid()) {
       ParsedInternalKey ikey(Slice(), 0, kTypeValue);
-      ikey.sequence = -1;
       ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
       total++;
       if (ikey.sequence != 0) {
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 061e1caec9..dc8597f81e 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -1411,6 +1411,7 @@ bool DBImpl::KeyMayExist(const ReadOptions& read_options,
 
 Iterator* DBImpl::NewIterator(const ReadOptions& read_options,
                               ColumnFamilyHandle* column_family) {
+  Iterator* result = nullptr;
   if (read_options.read_tier == kPersistedTier) {
     return NewErrorIterator(Status::NotSupported(
         "ReadTier::kPersistedData is not yet supported in iterators."));
@@ -1421,25 +1422,27 @@ Iterator* DBImpl::NewIterator(const ReadOptions& read_options,
   if (read_options.managed) {
 #ifdef ROCKSDB_LITE
     // not supported in lite version
-    return NewErrorIterator(Status::InvalidArgument(
+    result = NewErrorIterator(Status::InvalidArgument(
         "Managed Iterators not supported in RocksDBLite."));
 #else
     if ((read_options.tailing) || (read_options.snapshot != nullptr) ||
         (is_snapshot_supported_)) {
-      return new ManagedIterator(this, read_options, cfd);
-    }
-    // Managed iter not supported
-    return NewErrorIterator(Status::InvalidArgument(
+      result = new ManagedIterator(this, read_options, cfd);
+    } else {
+      // Managed iter not supported
+      result = NewErrorIterator(Status::InvalidArgument(
         "Managed Iterators not supported without snapshots."));
+    }
 #endif
   } else if (read_options.tailing) {
 #ifdef ROCKSDB_LITE
     // not supported in lite version
-    return nullptr;
+    result = nullptr;
+
 #else
     SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_);
     auto iter = new ForwardIterator(this, read_options, cfd, sv);
-    return NewDBIterator(
+    result = NewDBIterator(
         env_, read_options, *cfd->ioptions(), cfd->user_comparator(), iter,
         kMaxSequenceNumber,
         sv->mutable_cf_options.max_sequential_skip_in_iterations,
@@ -1449,10 +1452,9 @@ Iterator* DBImpl::NewIterator(const ReadOptions& read_options,
     auto snapshot = read_options.snapshot != nullptr
                         ? read_options.snapshot->GetSequenceNumber()
                         : versions_->LastSequence();
-    return NewIteratorImpl(read_options, cfd, snapshot, read_callback);
+    result = NewIteratorImpl(read_options, cfd, snapshot, read_callback);
   }
-  // To stop compiler from complaining
-  return nullptr;
+  return result;
 }
 
 ArenaWrappedDBIter* DBImpl::NewIteratorImpl(const ReadOptions& read_options,
diff --git a/db/db_impl_compaction_flush.cc b/db/db_impl_compaction_flush.cc
index cbd860b3e3..5f3239720f 100644
--- a/db/db_impl_compaction_flush.cc
+++ b/db/db_impl_compaction_flush.cc
@@ -894,15 +894,15 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level,
   while (!manual.done) {
     assert(HasPendingManualCompaction());
     manual_conflict = false;
-    Compaction* compaction;
+    Compaction* compaction = nullptr;
    if (ShouldntRunManualCompaction(&manual) || (manual.in_progress == true) ||
        scheduled ||
-        ((manual.manual_end = &manual.tmp_storage1) &&
-         ((compaction = manual.cfd->CompactRange(
-               *manual.cfd->GetLatestMutableCFOptions(), manual.input_level,
-               manual.output_level, manual.output_path_id, manual.begin,
-               manual.end, &manual.manual_end, &manual_conflict)) == nullptr) &&
-         manual_conflict)) {
+        (((manual.manual_end = &manual.tmp_storage1) != nullptr) &&
+         ((compaction = manual.cfd->CompactRange(
+               *manual.cfd->GetLatestMutableCFOptions(), manual.input_level,
+               manual.output_level, manual.output_path_id, manual.begin,
+               manual.end, &manual.manual_end, &manual_conflict)) == nullptr &&
+          manual_conflict))) {
       // exclusive manual compactions should not see a conflict during
       // CompactRange
       assert(!exclusive || !manual_conflict);
diff --git a/db/db_iter.cc b/db/db_iter.cc
index e9632246a2..541598834c 100644
--- a/db/db_iter.cc
+++ b/db/db_iter.cc
@@ -139,7 +139,9 @@ class DBIter final: public Iterator {
     if (pinned_iters_mgr_.PinningEnabled()) {
       pinned_iters_mgr_.ReleasePinnedData();
     }
-    RecordTick(statistics_, NO_ITERATORS, -1);
+    // Compiler warning issue filed:
+    // https://github.com/facebook/rocksdb/issues/3013
+    RecordTick(statistics_, NO_ITERATORS, uint64_t(-1));
     local_stats_.BumpGlobalStatistics(statistics_);
     if (!arena_mode_) {
       delete iter_;
diff --git a/db/db_test_util.cc b/db/db_test_util.cc
index 79212353a6..5282d1f534 100644
--- a/db/db_test_util.cc
+++ b/db/db_test_util.cc
@@ -1169,7 +1169,7 @@ void DBTestBase::validateNumberOfEntries(int numValues, int cf) {
   int seq = numValues;
   while (iter->Valid()) {
     ParsedInternalKey ikey;
-    ikey.sequence = -1;
+    ikey.clear();
     ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
 
     // checks sequence number for updates
diff --git a/db/log_test.cc b/db/log_test.cc
index 651a1d0eee..24187e0484 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -195,7 +195,7 @@ class LogTest : public ::testing::TestWithParam<int> {
     }
   }
 
-  void IncrementByte(int offset, int delta) {
+  void IncrementByte(int offset, char delta) {
     dest_contents()[offset] += delta;
   }
 
@@ -487,7 +487,7 @@ TEST_P(LogTest, ChecksumMismatch) {
 
 TEST_P(LogTest, UnexpectedMiddleType) {
   Write("foo");
-  SetByte(6, GetParam() ? kRecyclableMiddleType : kMiddleType);
+  SetByte(6, static_cast<char>(GetParam() ? kRecyclableMiddleType : kMiddleType));
   FixChecksum(0, 3, !!GetParam());
   ASSERT_EQ("EOF", Read());
   ASSERT_EQ(3U, DroppedBytes());
@@ -496,7 +496,7 @@ TEST_P(LogTest, UnexpectedMiddleType) {
 
 TEST_P(LogTest, UnexpectedLastType) {
   Write("foo");
-  SetByte(6, GetParam() ? kRecyclableLastType : kLastType);
+  SetByte(6, static_cast<char>(GetParam() ? kRecyclableLastType : kLastType));
   FixChecksum(0, 3, !!GetParam());
   ASSERT_EQ("EOF", Read());
   ASSERT_EQ(3U, DroppedBytes());
@@ -506,7 +506,7 @@ TEST_P(LogTest, UnexpectedLastType) {
 TEST_P(LogTest, UnexpectedFullType) {
   Write("foo");
   Write("bar");
-  SetByte(6, GetParam() ? kRecyclableFirstType : kFirstType);
+  SetByte(6, static_cast<char>(GetParam() ? kRecyclableFirstType : kFirstType));
   FixChecksum(0, 3, !!GetParam());
   ASSERT_EQ("bar", Read());
   ASSERT_EQ("EOF", Read());
@@ -517,7 +517,7 @@ TEST_P(LogTest, UnexpectedFullType) {
 TEST_P(LogTest, UnexpectedFirstType) {
   Write("foo");
   Write(BigString("bar", 100000));
-  SetByte(6, GetParam() ? kRecyclableFirstType : kFirstType);
+  SetByte(6, static_cast<char>(GetParam() ? kRecyclableFirstType : kFirstType));
   FixChecksum(0, 3, !!GetParam());
   ASSERT_EQ(BigString("bar", 100000), Read());
   ASSERT_EQ("EOF", Read());
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index a3722045f6..4511f015b9 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -418,7 +418,7 @@ TEST_F(WriteBatchTest, PrepareCommit) {
 TEST_F(WriteBatchTest, DISABLED_ManyUpdates) {
   // Insert key and value of 3GB and push total batch size to 12GB.
   static const size_t kKeyValueSize = 4u;
-  static const uint32_t kNumUpdates = 3 << 30;
+  static const uint32_t kNumUpdates = uint32_t(3 << 30);
   std::string raw(kKeyValueSize, 'A');
   WriteBatch batch(kNumUpdates * (4 + kKeyValueSize * 2) + 1024u);
   char c = 'A';
diff --git a/env/env_test.cc b/env/env_test.cc
index 9ec2f142ed..01f441ccb7 100644
--- a/env/env_test.cc
+++ b/env/env_test.cc
@@ -73,7 +73,7 @@ struct Deleter {
 std::unique_ptr<char, Deleter> NewAligned(const size_t size, const char ch) {
   char* ptr = nullptr;
 #ifdef OS_WIN
-  if (!(ptr = reinterpret_cast<char*>(_aligned_malloc(size, kPageSize)))) {
+  if (nullptr == (ptr = reinterpret_cast<char*>(_aligned_malloc(size, kPageSize)))) {
     return std::unique_ptr<char, Deleter>(nullptr, Deleter(_aligned_free));
   }
   std::unique_ptr<char, Deleter> uptr(ptr, Deleter(_aligned_free));
@@ -701,7 +701,6 @@ TEST_F(EnvPosixTest, PositionedAppend) {
   IoctlFriendlyTmpdir ift;
   ASSERT_OK(env_->NewWritableFile(ift.name() + "/f", &writable_file, options));
   const size_t kBlockSize = 4096;
-  const size_t kPageSize = 4096;
   const size_t kDataSize = kPageSize;
   // Write a page worth of 'a'
   auto data_ptr = NewAligned(kDataSize, 'a');
diff --git a/include/rocksdb/filter_policy.h b/include/rocksdb/filter_policy.h
index 8add48e496..452d1913ec 100644
--- a/include/rocksdb/filter_policy.h
+++ b/include/rocksdb/filter_policy.h
@@ -46,6 +46,10 @@ class FilterBitsBuilder {
   virtual Slice Finish(std::unique_ptr<const char[]>* buf) = 0;
 
   // Calculate num of entries fit into a space.
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4702) // unreachable code
+#endif
   virtual int CalculateNumEntry(const uint32_t space) {
 #ifndef ROCKSDB_LITE
     throw std::runtime_error("CalculateNumEntry not Implemented");
@@ -54,6 +58,9 @@ class FilterBitsBuilder {
 #endif
     return 0;
   }
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
 };
 
 // A class that checks if a key can be in filter
diff --git a/include/rocksdb/utilities/env_mirror.h b/include/rocksdb/utilities/env_mirror.h
index ffd175ae5e..bc27cdc488 100644
--- a/include/rocksdb/utilities/env_mirror.h
+++ b/include/rocksdb/utilities/env_mirror.h
@@ -73,6 +73,11 @@ class EnvMirror : public EnvWrapper {
     assert(as == bs);
     return as;
   }
+#if defined(_MSC_VER)
+#pragma warning(push)
+// logical operation on address of string constant
+#pragma warning(disable : 4130)
+#endif
   Status GetChildren(const std::string& dir,
                      std::vector<std::string>* r) override {
     std::vector<std::string> ar, br;
@@ -87,6 +92,9 @@ class EnvMirror : public EnvWrapper {
     *r = ar;
     return as;
   }
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
   Status DeleteFile(const std::string& f) override {
     Status as = a_->DeleteFile(f);
     Status bs = b_->DeleteFile(f);
diff --git a/memtable/inlineskiplist.h b/memtable/inlineskiplist.h
index 6262fc6913..6ab79e6680 100644
--- a/memtable/inlineskiplist.h
+++ b/memtable/inlineskiplist.h
@@ -565,8 +565,8 @@ InlineSkipList<Comparator>::InlineSkipList(const Comparator cmp,
                                            Allocator* allocator,
                                            int32_t max_height,
                                            int32_t branching_factor)
-    : kMaxHeight_(max_height),
-      kBranching_(branching_factor),
+    : kMaxHeight_(static_cast<uint16_t>(max_height)),
+      kBranching_(static_cast<uint16_t>(branching_factor)),
       kScaledInverseBranching_((Random::kMaxNext + 1) / kBranching_),
       compare_(cmp),
       allocator_(allocator),
diff --git a/memtable/skiplist.h b/memtable/skiplist.h
index 58996be3e1..47a89034eb 100644
--- a/memtable/skiplist.h
+++ b/memtable/skiplist.h
@@ -409,8 +409,8 @@ template <typename Key, class Comparator>
 SkipList<Key, Comparator>::SkipList(const Comparator cmp, Allocator* allocator,
                                     int32_t max_height,
                                     int32_t branching_factor)
-    : kMaxHeight_(max_height),
-      kBranching_(branching_factor),
+    : kMaxHeight_(static_cast<uint16_t>(max_height)),
+      kBranching_(static_cast<uint16_t>(branching_factor)),
       kScaledInverseBranching_((Random::kMaxNext + 1) / kBranching_),
       compare_(cmp),
       allocator_(allocator),
diff --git a/port/win/io_win.cc b/port/win/io_win.cc
index 3d2533a2ef..d68c412a32 100644
--- a/port/win/io_win.cc
+++ b/port/win/io_win.cc
@@ -675,7 +675,6 @@ Status WinRandomAccessImpl::ReadImpl(uint64_t offset, size_t n, Slice* result,
   }
 
   size_t left = n;
-  char* dest = scratch;
 
   SSIZE_T r = PositionedReadInternal(scratch, left, offset);
   if (r > 0) {
diff --git a/table/cuckoo_table_builder.cc b/table/cuckoo_table_builder.cc
index e3ed314b36..0da4d84ddc 100644
--- a/table/cuckoo_table_builder.cc
+++ b/table/cuckoo_table_builder.cc
@@ -187,7 +187,7 @@ Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
   buckets->resize(hash_table_size_ + cuckoo_block_size_ - 1);
   uint32_t make_space_for_key_call_id = 0;
   for (uint32_t vector_idx = 0; vector_idx < num_entries_; vector_idx++) {
-    uint64_t bucket_id;
+    uint64_t bucket_id = 0;
     bool bucket_found = false;
     autovector hash_vals;
     Slice user_key = GetUserKey(vector_idx);
diff --git a/table/index_builder.cc b/table/index_builder.cc
index cdf20aee92..08ce56e120 100644
--- a/table/index_builder.cc
+++ b/table/index_builder.cc
@@ -28,26 +28,28 @@ IndexBuilder* IndexBuilder::CreateIndexBuilder(
     const InternalKeyComparator* comparator,
     const InternalKeySliceTransform* int_key_slice_transform,
     const BlockBasedTableOptions& table_opt) {
+  IndexBuilder* result = nullptr;
   switch (index_type) {
     case BlockBasedTableOptions::kBinarySearch: {
-      return new ShortenedIndexBuilder(comparator,
+      result = new ShortenedIndexBuilder(comparator,
                                        table_opt.index_block_restart_interval);
     }
+    break;
     case BlockBasedTableOptions::kHashSearch: {
-      return new HashIndexBuilder(comparator, int_key_slice_transform,
+      result = new HashIndexBuilder(comparator, int_key_slice_transform,
                                   table_opt.index_block_restart_interval);
     }
+    break;
     case BlockBasedTableOptions::kTwoLevelIndexSearch: {
-      return PartitionedIndexBuilder::CreateIndexBuilder(comparator, table_opt);
+      result = PartitionedIndexBuilder::CreateIndexBuilder(comparator, table_opt);
     }
+    break;
     default: {
       assert(!"Do not recognize the index type ");
-      return nullptr;
     }
+    break;
   }
-  // impossible.
-  assert(false);
-  return nullptr;
+  return result;
 }
 
 PartitionedIndexBuilder* PartitionedIndexBuilder::CreateIndexBuilder(
diff --git a/table/plain_table_factory.h b/table/plain_table_factory.h
index 6c9ca44f30..e86f6dc8e0 100644
--- a/table/plain_table_factory.h
+++ b/table/plain_table_factory.h
@@ -161,7 +161,7 @@ class PlainTableFactory : public TableFactory {
 
   const PlainTableOptions& table_options() const;
 
-  static const char kValueTypeSeqId0 = char(0xFF);
+  static const char kValueTypeSeqId0 = char(~0);
 
   // Sanitizes the specified DB Options.
   Status SanitizeOptions(const DBOptions& db_opts,
diff --git a/third-party/fbson/FbsonWriter.h b/third-party/fbson/FbsonWriter.h
index a254e9bbf8..2b94ef0a01 100644
--- a/third-party/fbson/FbsonWriter.h
+++ b/third-party/fbson/FbsonWriter.h
@@ -32,6 +32,13 @@
 #include "FbsonDocument.h"
 #include "FbsonStream.h"
 
+// conversion' conversion from 'type1' to 'type2', possible loss of data
+// Can not restore at the header end as the warnings are emitted at the point of
+// template instantiation
+#if defined(_MSC_VER)
+#pragma warning(disable : 4244)
+#endif
+
 namespace fbson {
 
 template <class OS_TYPE>
diff --git a/tools/db_bench_tool.cc b/tools/db_bench_tool.cc
index c36596b95e..7fd44c15f9 100644
--- a/tools/db_bench_tool.cc
+++ b/tools/db_bench_tool.cc
@@ -3365,7 +3365,7 @@ void VerifyDBFromDB(std::string& truth_db_name) {
         db->db = db->opt_txn_db->GetBaseDB();
       }
     } else if (FLAGS_transaction_db) {
-      TransactionDB* ptr;
+      TransactionDB* ptr = nullptr;
       TransactionDBOptions txn_db_options;
       s = CreateLoggerFromOptions(db_name, options, &options.info_log);
       if (s.ok()) {
@@ -3376,7 +3376,7 @@ void VerifyDBFromDB(std::string& truth_db_name) {
       }
     } else if (FLAGS_use_blob_db) {
       blob_db::BlobDBOptions blob_db_options;
-      blob_db::BlobDB* ptr;
+      blob_db::BlobDB* ptr = nullptr;
       s = blob_db::BlobDB::Open(options, blob_db_options, db_name, &ptr);
       if (s.ok()) {
         db->db = ptr;
diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc
index 4c4b596c72..c7c7cbd1cf 100644
--- a/tools/ldb_cmd.cc
+++ b/tools/ldb_cmd.cc
@@ -2806,7 +2806,7 @@ void DumpSstFile(std::string filename, bool output_hex, bool show_properties) {
   }
   // no verification
   rocksdb::SstFileReader reader(filename, false, output_hex);
-  Status st = reader.ReadSequential(true, -1, false,  // has_from
+  Status st = reader.ReadSequential(true, std::numeric_limits<uint64_t>::max(), false,  // has_from
                                     from_key, false,  // has_to
                                     to_key);
   if (!st.ok()) {
diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc
index db78268b47..29d8e42926 100644
--- a/tools/sst_dump_tool.cc
+++ b/tools/sst_dump_tool.cc
@@ -81,7 +81,7 @@ Status SstFileReader::GetTableReader(const std::string& file_path) {
   Footer footer;
 
   unique_ptr<RandomAccessFile> file;
-  uint64_t file_size;
+  uint64_t file_size = 0;
   Status s = options_.env->NewRandomAccessFile(file_path, &file, soptions_);
   if (s.ok()) {
     s = options_.env->GetFileSize(file_path, &file_size);
@@ -411,7 +411,7 @@ void print_help() {
 
 int SSTDumpTool::Run(int argc, char** argv) {
   const char* dir_or_file = nullptr;
-  uint64_t read_num = -1;
+  uint64_t read_num = std::numeric_limits<uint64_t>::max();
   std::string command;
 
   char junk;
@@ -428,7 +428,7 @@ int SSTDumpTool::Run(int argc, char** argv) {
   std::string from_key;
   std::string to_key;
   std::string block_size_str;
-  size_t block_size;
+  size_t block_size = 0;
   std::vector<std::pair<CompressionType, const char*>> compression_types;
   uint64_t total_num_files = 0;
   uint64_t total_num_data_blocks = 0;
diff --git a/tools/write_stress.cc b/tools/write_stress.cc
index e5e4204a8d..e8427be470 100644
--- a/tools/write_stress.cc
+++ b/tools/write_stress.cc
@@ -163,7 +163,7 @@ class WriteStress {
     std::uniform_int_distribution char_dist('a', 'z');
     std::string ret;
     for (int i = 0; i < len; ++i) {
-      ret += char_dist(r);
+      ret += static_cast<char>(char_dist(r));
     }
     return ret;
   };
@@ -210,13 +210,13 @@ class WriteStress {
             FLAGS_prefix_mutate_period_sec * 1000 * 1000LL));
 
       if (dist(rng) < FLAGS_first_char_mutate_probability) {
-        key_prefix_[0].store(char_dist(rng), std::memory_order_relaxed);
+        key_prefix_[0].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed);
       }
      if (dist(rng) < FLAGS_second_char_mutate_probability) {
-        key_prefix_[1].store(char_dist(rng), std::memory_order_relaxed);
+        key_prefix_[1].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed);
      }
      if (dist(rng) < FLAGS_third_char_mutate_probability) {
-        key_prefix_[2].store(char_dist(rng), std::memory_order_relaxed);
+        key_prefix_[2].store(static_cast<char>(char_dist(rng)), std::memory_order_relaxed);
      }
    }
  }
diff --git a/util/coding.cc b/util/coding.cc
index 3b58e3f1fa..b5cfac869a 100644
--- a/util/coding.cc
+++ b/util/coding.cc
@@ -15,6 +15,11 @@
 
 namespace rocksdb {
 
+// conversion' conversion from 'type1' to 'type2', possible loss of data
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4244)
+#endif
 char* EncodeVarint32(char* dst, uint32_t v) {
   // Operate on characters as unsigneds
   unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
@@ -42,6 +47,9 @@ char* EncodeVarint32(char* dst, uint32_t v) {
   }
   return reinterpret_cast<char*>(ptr);
 }
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
 
 const char* GetVarint32PtrFallback(const char* p, const char* limit,
                                    uint32_t* value) {
diff --git a/util/compression.h b/util/compression.h
index 468b961fbf..48278d6be5 100644
--- a/util/compression.h
+++ b/util/compression.h
@@ -46,8 +46,9 @@ namespace rocksdb {
 inline bool Snappy_Supported() {
 #ifdef SNAPPY
   return true;
-#endif
+#else
   return false;
+#endif
 }
 
 inline bool Zlib_Supported() {
@@ -60,37 +61,42 @@ inline bool Zlib_Supported() {
 inline bool BZip2_Supported() {
 #ifdef BZIP2
   return true;
-#endif
+#else
   return false;
+#endif
 }
 
 inline bool LZ4_Supported() {
 #ifdef LZ4
   return true;
-#endif
+#else
   return false;
+#endif
 }
 
 inline bool XPRESS_Supported() {
 #ifdef XPRESS
   return true;
-#endif
+#else
   return false;
+#endif
 }
 
 inline bool ZSTD_Supported() {
 #ifdef ZSTD
   // ZSTD format is finalized since version 0.8.0.
   return (ZSTD_versionNumber() >= 800);
-#endif
+#else
   return false;
+#endif
 }
 
 inline bool ZSTDNotFinal_Supported() {
 #ifdef ZSTD
   return true;
-#endif
+#else
   return false;
+#endif
 }
 
 inline bool CompressionTypeSupported(CompressionType compression_type) {
@@ -159,9 +165,9 @@ inline bool Snappy_Compress(const CompressionOptions& opts, const char* input,
   snappy::RawCompress(input, length, &(*output)[0], &outlen);
   output->resize(outlen);
   return true;
-#endif
-
+#else
   return false;
+#endif
 }
 
 inline bool Snappy_GetUncompressedLength(const char* input, size_t length,
@@ -708,16 +714,18 @@ inline bool LZ4HC_Compress(const CompressionOptions& opts,
 inline bool XPRESS_Compress(const char* input, size_t length,
                             std::string* output) {
 #ifdef XPRESS
   return port::xpress::Compress(input, length, output);
-#endif
+#else
   return false;
+#endif
 }
 
 inline char* XPRESS_Uncompress(const char* input_data, size_t input_length,
                                int* decompress_size) {
 #ifdef XPRESS
   return port::xpress::Decompress(input_data, input_length, decompress_size);
-#endif
+#else
   return nullptr;
+#endif
 }
diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc
index 306194e9c1..7ea98fb43b 100644
--- a/util/crc32c_test.cc
+++ b/util/crc32c_test.cc
@@ -26,12 +26,12 @@ TEST(CRC, StandardResults) {
   ASSERT_EQ(0x62a8ab43U, Value(buf, sizeof(buf)));
 
   for (int i = 0; i < 32; i++) {
-    buf[i] = i;
+    buf[i] = static_cast<char>(i);
   }
   ASSERT_EQ(0x46dd794eU, Value(buf, sizeof(buf)));
 
   for (int i = 0; i < 32; i++) {
-    buf[i] = 31 - i;
+    buf[i] = static_cast<char>(31 - i);
   }
   ASSERT_EQ(0x113fdb5cU, Value(buf, sizeof(buf)));
diff --git a/util/dynamic_bloom.h b/util/dynamic_bloom.h
index 17325dd390..398222b1d8 100644
--- a/util/dynamic_bloom.h
+++ b/util/dynamic_bloom.h
@@ -125,12 +125,20 @@ inline bool DynamicBloom::MayContain(const Slice& key) const {
   return (MayContainHash(hash_func_(key)));
 }
 
+#if defined(_MSC_VER)
+#pragma warning(push)
+// local variable is initialized but not referenced
+#pragma warning(disable : 4189)
+#endif
 inline void DynamicBloom::Prefetch(uint32_t h) {
   if (kNumBlocks != 0) {
     uint32_t b = ((h >> 11 | (h << 21)) % kNumBlocks) * (CACHE_LINE_SIZE * 8);
     PREFETCH(&(data_[b / 8]), 0, 3);
   }
 }
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
 
 inline bool DynamicBloom::MayContainHash(uint32_t h) const {
   assert(IsInitialized());
diff --git a/util/slice.cc b/util/slice.cc
index 8d95a8ae19..10b19080b2 100644
--- a/util/slice.cc
+++ b/util/slice.cc
@@ -182,7 +182,7 @@ bool Slice::DecodeHex(std::string* result) const {
     if (h2 < 0) {
       return false;
     }
-    result->push_back((h1 << 4) | h2);
+    result->push_back(static_cast<char>((h1 << 4) | h2));
   }
   return true;
 }
diff --git a/util/thread_local.cc b/util/thread_local.cc
index 5361951a99..41f656b611 100644
--- a/util/thread_local.cc
+++ b/util/thread_local.cc
@@ -174,7 +174,7 @@ namespace wintlscleanup {
 
 // This is set to OnThreadExit in StaticMeta singleton constructor
 UnrefHandler thread_local_inclass_routine = nullptr;
-pthread_key_t thread_local_key = -1;
+pthread_key_t thread_local_key = pthread_key_t (-1);
 
 // Static callback function to call with each thread termination.
 void NTAPI WinOnThreadExit(PVOID module, DWORD reason, PVOID reserved) {
diff --git a/utilities/blob_db/blob_log_writer.cc b/utilities/blob_db/blob_log_writer.cc
index f4fcaeb90f..f92df8faec 100644
--- a/utilities/blob_db/blob_log_writer.cc
+++ b/utilities/blob_db/blob_log_writer.cc
@@ -87,7 +87,7 @@ Status Writer::AddRecord(const Slice& key, const Slice& val,
   assert(last_elem_type_ == kEtFileHdr || last_elem_type_ == kEtRecord);
 
   std::string buf;
-  ConstructBlobHeader(&buf, key, val, -1, -1);
+  ConstructBlobHeader(&buf, key, val, 0, -1);
 
   Status s = EmitPhysicalRecord(buf, key, val, key_offset, blob_offset);
   return s;
diff --git a/utilities/document/document_db.cc b/utilities/document/document_db.cc
index f7b5b3b2f3..0dadc99a05 100644
--- a/utilities/document/document_db.cc
+++ b/utilities/document/document_db.cc
@@ -431,6 +431,10 @@ class SimpleSortedIndex : public Index {
     return direction;
   }
   // REQUIRES: UsefulIndex(filter) == true
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4702) // Unreachable code
+#endif
   virtual bool ShouldContinueLooking(
       const Filter& filter, const Slice& secondary_key,
       Index::Direction direction) const override {
@@ -483,7 +487,9 @@ class SimpleSortedIndex : public Index {
     // this is here just so compiler doesn't complain
     return false;
   }
-
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
  private:
   std::string field_;
   std::string name_;
@@ -1060,6 +1066,10 @@ class DocumentDBImpl : public DocumentDB {
   }
 
  private:
+#if defined(_MSC_VER)
+#pragma warning(push)
+#pragma warning(disable : 4702) // unreachable code
+#endif
   Cursor* ConstructFilterCursor(ReadOptions read_options, Cursor* cursor,
                                 const JSONDocument& query) {
     std::unique_ptr filter(Filter::ParseFilter(query));
@@ -1113,6 +1123,9 @@ class DocumentDBImpl : public DocumentDB {
     assert(false);
     return nullptr;
   }
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
 
   // currently, we lock and serialize all writes to rocksdb. reads are not
   // locked and always get consistent view of the database. we should optimize
diff --git a/utilities/persistent_cache/hash_table_evictable.h b/utilities/persistent_cache/hash_table_evictable.h
index 6557eb440e..40b693cec5 100644
--- a/utilities/persistent_cache/hash_table_evictable.h
+++ b/utilities/persistent_cache/hash_table_evictable.h
@@ -88,7 +88,7 @@ class EvictableHashTable : private HashTable {
     WriteLock _(&hash_table::locks_[idx]);
     LRUListType& lru = lru_lists_[idx];
 
-    if (!lru.IsEmpty() && (t = lru.Pop())) {
+    if (!lru.IsEmpty() && (t = lru.Pop()) != nullptr) {
      assert(!t->refs_);
       // We got an item to evict, erase from the bucket
       const uint64_t h = Hash()(t);
diff --git a/utilities/redis/redis_lists_test.cc b/utilities/redis/redis_lists_test.cc
index 22acdff644..efb4c02751 100644
--- a/utilities/redis/redis_lists_test.cc
+++ b/utilities/redis/redis_lists_test.cc
@@ -747,7 +747,7 @@ namespace {
 void MakeUpper(std::string* const s) {
   int len = static_cast<int>(s->length());
   for (int i = 0; i < len; ++i) {
-    (*s)[i] = toupper((*s)[i]);  // C-version defined in <ctype.h>
+    (*s)[i] = static_cast<char>(toupper((*s)[i]));  // C-version defined in <ctype.h>
   }
 }
 
diff --git a/utilities/spatialdb/spatial_db_test.cc b/utilities/spatialdb/spatial_db_test.cc
index 7e0d67489f..55de1d0db7 100644
--- a/utilities/spatialdb/spatial_db_test.cc
+++ b/utilities/spatialdb/spatial_db_test.cc
@@ -224,7 +224,7 @@ namespace {
 std::string RandomStr(Random* rnd) {
   std::string r;
   for (int k = 0; k < 10; ++k) {
-    r.push_back(rnd->Uniform(26) + 'a');
+    r.push_back(static_cast<char>(rnd->Uniform(26)) + 'a');
   }
   return r;
 }
diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc
index 0c73c422b1..e0a8310907 100644
--- a/utilities/transactions/transaction_test.cc
+++ b/utilities/transactions/transaction_test.cc
@@ -1153,11 +1153,11 @@ TEST_P(TransactionTest, DISABLED_TwoPhaseMultiThreadTest) {
     if (id % 2 == 0) {
       txn_options.expiration = 1000000;
     }
-    TransactionName name("xid_" + std::string(1, 'A' + id));
+    TransactionName name("xid_" + std::string(1, 'A' + static_cast<char>(id)));
     Transaction* txn = db->BeginTransaction(write_options, txn_options);
     ASSERT_OK(txn->SetName(name));
     for (int i = 0; i < 10; i++) {
-      std::string key(name + "_" + std::string(1, 'A' + i));
+      std::string key(name + "_" + std::string(1, static_cast<char>('A' + i)));
       ASSERT_OK(txn->Put(key, "val"));
     }
     ASSERT_OK(txn->Prepare());
@@ -1208,9 +1208,9 @@ TEST_P(TransactionTest, DISABLED_TwoPhaseMultiThreadTest) {
   std::string value;
   Status s;
   for (uint32_t t = 0; t < NUM_TXN_THREADS; t++) {
-    TransactionName name("xid_" + std::string(1, 'A' + t));
+    TransactionName name("xid_" + std::string(1, 'A' + static_cast<char>(t)));
     for (int i = 0; i < 10; i++) {
-      std::string key(name + "_" + std::string(1, 'A' + i));
+      std::string key(name + "_" + std::string(1, static_cast<char>('A' + i)));
       s = db->Get(read_options, key, &value);
       ASSERT_OK(s);
       ASSERT_EQ(value, "val");