diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0dde8f3..7d9f542 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -37,6 +37,49 @@ if(NOT CMAKE_CXX_STANDARD)
   set(CMAKE_CXX_EXTENSIONS OFF)
 endif(NOT CMAKE_CXX_STANDARD)
 
+# https://github.com/izenecloud/cmake/blob/master/SetCompilerWarningAll.cmake
+if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+  # Use the highest warning level for Visual Studio.
+  set(CMAKE_CXX_WARNING_LEVEL 4)
+  if(CMAKE_CXX_FLAGS MATCHES "/W[0-4]")
+    string(REGEX REPLACE "/W[0-4]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+  else(CMAKE_CXX_FLAGS MATCHES "/W[0-4]")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
+  endif(CMAKE_CXX_FLAGS MATCHES "/W[0-4]")
+
+  # Disable C++ exceptions.
+  string(REGEX REPLACE "/EH[a-z]+" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHs-c-")
+  add_definitions(-D_HAS_EXCEPTIONS=0)
+
+  # Disable RTTI.
+  string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-")
+else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+  # Use -Wall for clang and gcc.
+  if(NOT CMAKE_CXX_FLAGS MATCHES "-Wall")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
+  endif(NOT CMAKE_CXX_FLAGS MATCHES "-Wall")
+
+  # Use -Wextra for clang and gcc.
+  if(NOT CMAKE_CXX_FLAGS MATCHES "-Wextra")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
+  endif(NOT CMAKE_CXX_FLAGS MATCHES "-Wextra")
+
+  # Use -Werror for clang and gcc.
+  if(NOT CMAKE_CXX_FLAGS MATCHES "-Werror")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror")
+  endif(NOT CMAKE_CXX_FLAGS MATCHES "-Werror")
+
+  # Disable C++ exceptions.
+  string(REGEX REPLACE "-fexceptions" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
+
+  # Disable RTTI.
+  string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti")
+endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+
 # BUILD_SHARED_LIBS is a standard CMake variable, but we declare it here to make
 # it prominent in the GUI.
 option(BUILD_SHARED_LIBS "Build shared libraries(DLLs)." OFF)
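
The build flags above make every warning fatal on gcc and clang (-Wall -Wextra -Werror) and raise MSVC to /W4, so an unused function parameter is now a hard build error; that is what most of the C++ changes below are silencing. A minimal sketch of the two idioms in play, with hypothetical function names that are not part of the patch:

#include <cstddef>

// C++11-compatible idiom used throughout this patch: evaluate the parameter
// and discard the result so -Wunused-parameter stays quiet under -Werror.
char* GetBuffer(std::size_t length, char* scratch) {
  (void)length;
  return scratch;
}

// C++17 alternative that the TODO comments in the patch point at: the
// attribute documents the intent directly in the signature.
char* GetBuffer17([[maybe_unused]] std::size_t length, char* scratch) {
  return scratch;
}
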
diff --git a/snappy-sinksource.cc b/snappy-sinksource.cc
index 605a2cb..8214964 100644
--- a/snappy-sinksource.cc
+++ b/snappy-sinksource.cc
@@ -33,17 +33,24 @@
 
 namespace snappy {
 
-Source::~Source() { }
+Source::~Source() = default;
 
-Sink::~Sink() { }
+Sink::~Sink() = default;
 
 char* Sink::GetAppendBuffer(size_t length, char* scratch) {
+  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+  (void)length;
+
   return scratch;
 }
 
 char* Sink::GetAppendBufferVariable(
     size_t min_size, size_t desired_size_hint, char* scratch,
     size_t scratch_size, size_t* allocated_size) {
+  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+  (void)min_size;
+  (void)desired_size_hint;
+
   *allocated_size = scratch_size;
   return scratch;
 }
@@ -56,7 +63,7 @@ void Sink::AppendAndTakeOwnership(
   (*deleter)(deleter_arg, bytes, n);
 }
 
-ByteArraySource::~ByteArraySource() { }
+ByteArraySource::~ByteArraySource() = default;
 
 size_t ByteArraySource::Available() const { return left_; }
 
@@ -81,16 +88,20 @@ void UncheckedByteArraySink::Append(const char* data, size_t n) {
 }
 
 char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
+  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+  (void)len;
+  (void)scratch;
+
   return dest_;
 }
 
 void UncheckedByteArraySink::AppendAndTakeOwnership(
-    char* data, size_t n,
+    char* bytes, size_t n,
     void (*deleter)(void*, const char*, size_t),
     void *deleter_arg) {
-  if (data != dest_) {
-    std::memcpy(dest_, data, n);
-    (*deleter)(deleter_arg, data, n);
+  if (bytes != dest_) {
+    std::memcpy(dest_, bytes, n);
+    (*deleter)(deleter_arg, bytes, n);
   }
   dest_ += n;
 }
@@ -98,6 +109,11 @@ void UncheckedByteArraySink::AppendAndTakeOwnership(
 char* UncheckedByteArraySink::GetAppendBufferVariable(
     size_t min_size, size_t desired_size_hint, char* scratch,
     size_t scratch_size, size_t* allocated_size) {
+  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+  (void)min_size;
+  (void)scratch;
+  (void)scratch_size;
+
   *allocated_size = desired_size_hint;
   return dest_;
 }
diff --git a/snappy-stubs-internal.h b/snappy-stubs-internal.h
index 0e62e73..7a44848 100644
--- a/snappy-stubs-internal.h
+++ b/snappy-stubs-internal.h
@@ -95,7 +95,7 @@
 #ifdef ARRAYSIZE
 #undef ARRAYSIZE
 #endif
-#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+#define ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}
 
 // Static prediction hints.
 #ifdef HAVE_BUILTIN_EXPECT
@@ -439,28 +439,28 @@ inline const char* Varint::Parse32WithLimit(const char* p,
 
 inline char* Varint::Encode32(char* sptr, uint32_t v) {
   // Operate on characters as unsigneds
-  unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
-  static const int B = 128;
-  if (v < (1<<7)) {
-    *(ptr++) = v;
-  } else if (v < (1<<14)) {
-    *(ptr++) = v | B;
-    *(ptr++) = v>>7;
-  } else if (v < (1<<21)) {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = v>>14;
-  } else if (v < (1<<28)) {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = v>>21;
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(sptr);
+  static const uint8_t B = 128;
+  if (v < (1 << 7)) {
+    *(ptr++) = static_cast<uint8_t>(v);
+  } else if (v < (1 << 14)) {
+    *(ptr++) = static_cast<uint8_t>(v | B);
+    *(ptr++) = static_cast<uint8_t>(v >> 7);
+  } else if (v < (1 << 21)) {
+    *(ptr++) = static_cast<uint8_t>(v | B);
+    *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+    *(ptr++) = static_cast<uint8_t>(v >> 14);
+  } else if (v < (1 << 28)) {
+    *(ptr++) = static_cast<uint8_t>(v | B);
+    *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+    *(ptr++) = static_cast<uint8_t>((v >> 14) | B);
+    *(ptr++) = static_cast<uint8_t>(v >> 21);
   } else {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = (v>>21) | B;
-    *(ptr++) = v>>28;
+    *(ptr++) = static_cast<uint8_t>(v | B);
+    *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+    *(ptr++) = static_cast<uint8_t>((v >> 14) | B);
+    *(ptr++) = static_cast<uint8_t>((v >> 21) | B);
+    *(ptr++) = static_cast<uint8_t>(v >> 28);
   }
   return reinterpret_cast<char*>(ptr);
 }
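
Two notes on the header changes above. The ARRAYSIZE rewrite uses brace initialization, so a count that cannot be represented as int becomes a compile-time narrowing error instead of a silent truncation. In Varint::Encode32 the encoding itself is unchanged: it is the usual little-endian base-128 varint (seven payload bits per byte, high bit set on every byte except the last, hence at most five bytes for a uint32_t); the new static_casts only make the intended truncation to one byte explicit, which MSVC's /W4 conversion warnings demand. For orientation, a self-contained sketch of the matching decoder, written fresh here rather than copied from snappy's Parse32WithLimit:

#include <cstdint>

// Decodes one little-endian base-128 varint from [p, end). Returns the
// position one past the last byte consumed, or nullptr on truncated or
// over-long input.
inline const char* DecodeVarint32(const char* p, const char* end,
                                  uint32_t* value) {
  uint32_t result = 0;
  for (int shift = 0; shift <= 28; shift += 7) {
    if (p == end) return nullptr;
    const uint8_t byte = static_cast<uint8_t>(*p++);
    result |= static_cast<uint32_t>(byte & 0x7f) << shift;
    if ((byte & 0x80) == 0) {  // High bit clear: final byte of this value.
      *value = result;
      return p;
    }
  }
  return nullptr;  // A 32-bit value never needs more than five bytes.
}
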
diff --git a/snappy-test.h b/snappy-test.h
index c9a777d..b5a53b5 100644
--- a/snappy-test.h
+++ b/snappy-test.h
@@ -118,7 +118,7 @@ namespace file {
   };
 
   DummyStatus GetContents(
-      const std::string& filename, std::string* data, int unused) {
+      const std::string& filename, std::string* data, int /*unused*/) {
     FILE* fp = std::fopen(filename.c_str(), "rb");
     if (fp == NULL) {
       std::perror(filename.c_str());
@@ -142,7 +142,7 @@ namespace file {
   }
 
   inline DummyStatus SetContents(
-      const std::string& filename, const std::string& str, int unused) {
+      const std::string& filename, const std::string& str, int /*unused*/) {
     FILE* fp = std::fopen(filename.c_str(), "wb");
     if (fp == NULL) {
       std::perror(filename.c_str());
diff --git a/snappy.cc b/snappy.cc
index e378c2a..90bdd48 100644
--- a/snappy.cc
+++ b/snappy.cc
@@ -96,7 +96,7 @@ static inline uint32_t HashBytes(uint32_t bytes, int shift) {
   return (bytes * kMul) >> shift;
 }
 
-size_t MaxCompressedLength(size_t source_len) {
+size_t MaxCompressedLength(size_t source_bytes) {
   // Compressed data can be defined as:
   //    compressed := item* literal*
   //    item       := literal* copy
@@ -117,7 +117,7 @@ size_t MaxCompressedLength(size_t source_len) {
   // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
   //
   // This last factor dominates the blowup, so the final estimate is:
-  return 32 + source_len + source_len/6;
+  return 32 + source_bytes + source_bytes / 6;
 }
 
 namespace {
@@ -563,7 +563,7 @@ char* CompressFragment(const char* input,
       // These for-loops are meant to be unrolled. So we can freely
       // special case the first iteration to use the value already
       // loaded in preload.
-      uint32_t dword = i == 0 ? preload : data;
+      uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data);
       assert(dword == LittleEndian::Load32(ip + i));
       uint32_t hash = HashBytes(dword, shift);
       candidate = base_ip + table[hash];
@@ -680,7 +680,12 @@ char* CompressFragment(const char* input,
 
 // Called back at every compression call to trace parameters and sizes.
 static inline void Report(const char *algorithm, size_t compressed_size,
-                          size_t uncompressed_size) {}
+                          size_t uncompressed_size) {
+  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+  (void)algorithm;
+  (void)compressed_size;
+  (void)uncompressed_size;
+}
 
 // Signature of output types needed by decompression code.
 // The decompression code is templatized on a type that obeys this
@@ -1170,7 +1175,10 @@ class SnappyIOVecWriter {
   }
 
   char* GetOutputPtr() { return nullptr; }
-  void SetOutputPtr(char* op) {}
+  void SetOutputPtr(char* op) {
+    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+    (void)op;
+  }
 
   inline bool AppendNoCheck(const char* ip, size_t len) {
     while (len > 0) {
@@ -1360,10 +1368,13 @@ class SnappyArrayWriter {
   SNAPPY_ATTRIBUTE_ALWAYS_INLINE
   inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
     char* const op = *op_p;
+    assert(op >= base_);
     char* const op_end = op + len;
 
     // Check if we try to append from before the start of the buffer.
-    if (SNAPPY_PREDICT_FALSE(op - base_ < offset)) return false;
+    if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - base_) < offset))
+      return false;
+
     if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
                              op >= op_limit_min_slop_ || offset < len)) {
       if (op_end > op_limit_ || offset == 0) return false;
@@ -1381,8 +1392,9 @@ class SnappyArrayWriter {
   inline void Flush() {}
 };
 
-bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
-  ByteArraySource reader(compressed, n);
+bool RawUncompress(const char* compressed, size_t compressed_length,
+                   char* uncompressed) {
+  ByteArraySource reader(compressed, compressed_length);
   return RawUncompress(&reader, uncompressed);
 }
 
@@ -1391,9 +1403,10 @@ bool RawUncompress(Source* compressed, char* uncompressed) {
   return InternalUncompress(compressed, &output);
 }
 
-bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) {
+bool Uncompress(const char* compressed, size_t compressed_length,
+                std::string* uncompressed) {
   size_t ulength;
-  if (!GetUncompressedLength(compressed, n, &ulength)) {
+  if (!GetUncompressedLength(compressed, compressed_length, &ulength)) {
     return false;
   }
   // On 32-bit builds: max_size() < kuint32max.  Check for that instead
@@ -1402,7 +1415,8 @@ bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) {
     return false;
   }
   STLStringResizeUninitialized(uncompressed, ulength);
-  return RawUncompress(compressed, n, string_as_array(uncompressed));
+  return RawUncompress(compressed, compressed_length,
+                       string_as_array(uncompressed));
 }
 
 // A Writer that drops everything on the floor and just does validation
@@ -1422,11 +1436,20 @@ class SnappyDecompressionValidator {
     return expected_ == produced_;
   }
   inline bool Append(const char* ip, size_t len, size_t* produced) {
+    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+    (void)ip;
+
     *produced += len;
     return *produced <= expected_;
   }
   inline bool TryFastAppend(const char* ip, size_t available, size_t length,
                             size_t* produced) {
+    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+    (void)ip;
+    (void)available;
+    (void)length;
+    (void)produced;
+
     return false;
   }
   inline bool AppendFromSelf(size_t offset, size_t len, size_t* produced) {
@@ -1439,8 +1462,8 @@ class SnappyDecompressionValidator {
   inline void Flush() {}
 };
 
-bool IsValidCompressedBuffer(const char* compressed, size_t n) {
-  ByteArraySource reader(compressed, n);
+bool IsValidCompressedBuffer(const char* compressed, size_t compressed_length) {
+  ByteArraySource reader(compressed, compressed_length);
   SnappyDecompressionValidator writer;
   return InternalUncompress(&reader, &writer);
 }
@@ -1567,13 +1590,15 @@ class SnappyScatteredWriter {
 
   inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
     char* op = *op_p;
+    assert(op >= op_base_);
     // Check if we try to append from before the start of the buffer.
     if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
-                             op - op_base_ < offset ||
-                             op >= op_limit_min_slop_ || offset < len)) {
+                             static_cast<size_t>(op - op_base_) < offset ||
+                             op >= op_limit_min_slop_ || offset < len)) {
       if (offset == 0) return false;
       char* const op_end = op + len;
-      if (SNAPPY_PREDICT_FALSE(op - op_base_ < offset || op_end > op_limit_)) {
+      if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - op_base_) < offset ||
+                               op_end > op_limit_)) {
         op_ptr_ = op;
         bool res = SlowAppendFromSelf(offset, len);
         *op_p = op_ptr_;
@@ -1692,6 +1717,10 @@ class SnappySinkAllocator {
   };
 
   static void Deleter(void* arg, const char* bytes, size_t size) {
+    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+    (void)arg;
+    (void)size;
+
     delete[] bytes;
   }
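
MaxCompressedLength keeps the same worst-case bound under its renamed parameter: 32 + n + n/6. For a 6,000,000-byte incompressible input, for instance, the bound is 32 + 6,000,000 + 1,000,000 = 7,000,032 bytes. A short usage sketch against snappy's public API, sizing the output buffer with that bound before calling RawCompress (the helper name is illustrative, not part of the patch):

#include <cstddef>
#include <string>
#include <vector>

#include "snappy.h"

// Illustrative helper: compress into a caller-owned buffer sized by the
// worst-case bound, then shrink to the size RawCompress actually produced.
std::string CompressWithRawApi(const std::string& input) {
  std::vector<char> buffer(snappy::MaxCompressedLength(input.size()));
  std::size_t compressed_length = 0;
  snappy::RawCompress(input.data(), input.size(), buffer.data(),
                      &compressed_length);
  return std::string(buffer.data(), compressed_length);
}

Because RawCompress never writes more than MaxCompressedLength(input.size()) bytes, the fixed-size buffer is safe by construction.
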
diff --git a/snappy_unittest.cc b/snappy_unittest.cc
index d39be43..d14c56b 100644
--- a/snappy_unittest.cc
+++ b/snappy_unittest.cc
@@ -415,7 +415,7 @@ static void VerifyIOVec(const std::string& input) {
     num = input.size();
   }
   struct iovec* iov = new iovec[num];
-  int used_so_far = 0;
+  size_t used_so_far = 0;
   std::bernoulli_distribution one_in_five(1.0 / 5);
   for (size_t i = 0; i < num; ++i) {
     assert(used_so_far < input.size());
@@ -742,7 +742,7 @@ TEST(Snappy, FourByteOffset) {
   // How many times each fragment is emitted.
   const int n1 = 2;
   const int n2 = 100000 / fragment2.size();
-  const int length = n1 * fragment1.size() + n2 * fragment2.size();
+  const size_t length = n1 * fragment1.size() + n2 * fragment2.size();
 
   std::string compressed;
   Varint::Append32(&compressed, length);
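
The two hunks above and the three that follow are all the same fix: counters and byte counts compared against container sizes move from int to size_t, so -Wsign-compare (fatal under the new -Werror) no longer fires. A hypothetical reduction of the pattern, not taken from the test suite:

#include <cstddef>
#include <string>

// With -Wall -Werror, the int version of this loop fails to compile on
// gcc and clang: `i < input.size()` compares a signed int against an
// unsigned size_t. Matching the container's size type fixes it.
std::size_t CountNonZeroBytes(const std::string& input) {
  std::size_t count = 0;  // was `int count` in the warning-producing version
  for (std::size_t i = 0; i < input.size(); ++i) {
    if (input[i] != '\0') ++count;
  }
  return count;
}
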
@@ -1078,12 +1078,12 @@ TEST(Snappy, FindMatchLengthRandom) {
     }
     DataEndingAtUnreadablePage u(s);
     DataEndingAtUnreadablePage v(t);
-    int matched = TestFindMatchLength(u.data(), v.data(), t.size());
+    size_t matched = TestFindMatchLength(u.data(), v.data(), t.size());
     if (matched == t.size()) {
       EXPECT_EQ(s, t);
     } else {
       EXPECT_NE(s[matched], t[matched]);
-      for (int j = 0; j < matched; ++j) {
+      for (size_t j = 0; j < matched; ++j) {
         EXPECT_EQ(s[j], t[j]);
       }
     }
@@ -1323,7 +1323,7 @@ static void BM_UIOVec(int iters, int arg) {
   const int kNumEntries = 10;
   struct iovec iov[kNumEntries];
   char *dst = new char[contents.size()];
-  int used_so_far = 0;
+  size_t used_so_far = 0;
   for (int i = 0; i < kNumEntries; ++i) {
     iov[i].iov_base = dst + used_so_far;
     if (used_so_far == contents.size()) {
@@ -1475,7 +1475,7 @@ static void BM_ZFlatIncreasingTableSize(int iters, int arg) {
   SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) * total_contents_size);
   StartBenchmarkTiming();
   while (iters-- > 0) {
-    for (int i = 0; i < contents.size(); ++i) {
+    for (size_t i = 0; i < contents.size(); ++i) {
       snappy::RawCompress(contents[i].data(), contents[i].size(), dst[i],
                           &zsize);
     }
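
None of the public-signature edits in this patch change behavior; Uncompress, RawUncompress, IsValidCompressedBuffer, and MaxCompressedLength only rename parameters, so existing callers compile unchanged. A minimal end-to-end sanity sketch against the public snappy API, written here as an illustration rather than taken from the test suite:

#include <cassert>
#include <string>

#include "snappy.h"

int main() {
  const std::string original(1000, 'a');

  std::string compressed;
  snappy::Compress(original.data(), original.size(), &compressed);

  const bool valid =
      snappy::IsValidCompressedBuffer(compressed.data(), compressed.size());

  std::string uncompressed;
  const bool ok =
      snappy::Uncompress(compressed.data(), compressed.size(), &uncompressed);

  assert(valid && ok && uncompressed == original);
  (void)valid;  // Keep the variables "used" when NDEBUG elides assert(),
  (void)ok;     // mirroring the patch's own idiom.
  return 0;
}
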