diff --git a/BUILD.bazel b/BUILD.bazel
index d72ae867..15d83699 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1,5 +1,22 @@
 licenses(["notice"])
 
+COPTS = [
+    "-pedantic",
+    "-pedantic-errors",
+    "-std=c++11",
+    "-Wall",
+    "-Wconversion",
+    "-Wextra",
+    "-Wshadow",
+    # "-Wshorten-64-to-32",
+    "-Wfloat-equal",
+    "-fstrict-aliasing",
+    ## assert() are used a lot in tests upstream, which may be optimised out leading to
+    ## unused-variable warning.
+    "-Wno-unused-variable",
+    "-Werror=old-style-cast",
+]
+
 config_setting(
     name = "qnx",
     constraint_values = ["@platforms//os:qnx"],
@@ -47,7 +64,7 @@ cc_library(
     ],
     copts = select({
         ":windows": [],
-        "//conditions:default": ["-Werror=old-style-cast"],
+        "//conditions:default": COPTS,
     }),
     defines = [
         "BENCHMARK_STATIC_DEFINE",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d9bcc6a4..23b519c2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -190,6 +190,7 @@ else()
   add_cxx_compiler_flag(-Wshadow)
   add_cxx_compiler_flag(-Wfloat-equal)
   add_cxx_compiler_flag(-Wold-style-cast)
+  add_cxx_compiler_flag(-Wconversion)
   if(BENCHMARK_ENABLE_WERROR)
     add_cxx_compiler_flag(-Werror)
   endif()
diff --git a/src/benchmark.cc b/src/benchmark.cc
index 563c4438..1f2f6cc2 100644
--- a/src/benchmark.cc
+++ b/src/benchmark.cc
@@ -407,7 +407,8 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
     benchmarks_with_threads += (benchmark.threads() > 1);
     runners.emplace_back(benchmark, &perfcounters, reports_for_family);
     int num_repeats_of_this_instance = runners.back().GetNumRepeats();
-    num_repetitions_total += num_repeats_of_this_instance;
+    num_repetitions_total +=
+        static_cast<size_t>(num_repeats_of_this_instance);
     if (reports_for_family)
       reports_for_family->num_runs_total += num_repeats_of_this_instance;
   }
diff --git a/src/benchmark_register.cc b/src/benchmark_register.cc
index e447c9a2..8ade0482 100644
--- a/src/benchmark_register.cc
+++ b/src/benchmark_register.cc
@@ -482,8 +482,9 @@ int Benchmark::ArgsCnt() const {
 
 const char* Benchmark::GetArgName(int arg) const {
   BM_CHECK_GE(arg, 0);
-  BM_CHECK_LT(arg, static_cast<int>(arg_names_.size()));
-  return arg_names_[arg].c_str();
+  size_t uarg = static_cast<size_t>(arg);
+  BM_CHECK_LT(uarg, arg_names_.size());
+  return arg_names_[uarg].c_str();
 }
 
 TimeUnit Benchmark::GetTimeUnit() const {
diff --git a/src/benchmark_register.h b/src/benchmark_register.h
index 53367c70..be50265f 100644
--- a/src/benchmark_register.h
+++ b/src/benchmark_register.h
@@ -24,7 +24,7 @@ typename std::vector<T>::iterator AddPowers(std::vector<T>* dst, T lo, T hi,
   static const T kmax = std::numeric_limits<T>::max();
 
   // Space out the values in multiples of "mult"
-  for (T i = static_cast<T>(1); i <= hi; i *= static_cast<T>(mult)) {
+  for (T i = static_cast<T>(1); i <= hi; i = static_cast<T>(i * mult)) {
     if (i >= lo) {
       dst->push_back(i);
     }
@@ -52,7 +52,7 @@ void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
 
   const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
 
-  std::for_each(it, dst->end(), [](T& t) { t *= -1; });
+  std::for_each(it, dst->end(), [](T& t) { t = static_cast<T>(t * -1); });
   std::reverse(it, dst->end());
 }
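Note on src/benchmark_register.h: the switch from `i *= static_cast<T>(mult)` to `i = static_cast<T>(i * mult)` is what actually silences -Wconversion for narrow types. With `T = int8_t`, integer promotion evaluates `i * mult` in `int`, and the compound assignment then narrows back to `int8_t` implicitly. A minimal sketch of the rule (the helper `next_power` is illustrative, not library code):

    // sketch.cc -- illustrative only; next_power() is not part of the library.
    #include <cstdint>

    int8_t next_power(int8_t i, int mult) {
      // i *= mult;  // -Wconversion: i and mult are promoted to int,
      //             // and the result is implicitly narrowed to int8_t.
      return static_cast<int8_t>(i * mult);  // explicit, warning-free
    }
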
diff --git a/src/benchmark_runner.cc b/src/benchmark_runner.cc
index dcddb437..a74bdadd 100644
--- a/src/benchmark_runner.cc
+++ b/src/benchmark_runner.cc
@@ -235,7 +235,7 @@ BenchmarkRunner::BenchmarkRunner(
       has_explicit_iteration_count(b.iterations() != 0 ||
                                    parsed_benchtime_flag.tag ==
                                        BenchTimeType::ITERS),
-      pool(b.threads() - 1),
+      pool(static_cast<size_t>(b.threads() - 1)),
       iters(has_explicit_iteration_count
                 ? ComputeIters(b_, parsed_benchtime_flag)
                 : 1),
diff --git a/src/cycleclock.h b/src/cycleclock.h
index eff563e7..91abcf9d 100644
--- a/src/cycleclock.h
+++ b/src/cycleclock.h
@@ -70,7 +70,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
   // frequency scaling).  Also note that when the Mac sleeps, this
   // counter pauses; it does not continue counting, nor does it
   // reset to zero.
-  return mach_absolute_time();
+  return static_cast<int64_t>(mach_absolute_time());
 #elif defined(BENCHMARK_OS_EMSCRIPTEN)
   // this goes above x86-specific code because old versions of Emscripten
   // define __x86_64__, although they have nothing to do with it.
@@ -82,7 +82,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
 #elif defined(__x86_64__) || defined(__amd64__)
   uint64_t low, high;
   __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
-  return (high << 32) | low;
+  return static_cast<int64_t>((high << 32) | low);
 #elif defined(__powerpc__) || defined(__ppc__)
   // This returns a time-base, which is not always precisely a cycle-count.
 #if defined(__powerpc64__) || defined(__ppc64__)
diff --git a/src/statistics.cc b/src/statistics.cc
index 261dcb29..16b60261 100644
--- a/src/statistics.cc
+++ b/src/statistics.cc
@@ -97,7 +97,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   auto error_count =
       std::count_if(reports.begin(), reports.end(),
                     [](Run const& run) { return run.skipped; });
 
-  if (reports.size() - error_count < 2) {
+  if (reports.size() - static_cast<size_t>(error_count) < 2) {
     // We don't report aggregated data if there was a single run.
     return results;
   }
@@ -179,7 +179,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
     // Similarly, if there are N repetitions with 1 iterations each,
     // an aggregate will be computed over N measurements, not 1.
     // Thus it is best to simply use the count of separate reports.
-    data.iterations = reports.size();
+    data.iterations = static_cast<IterationCount>(reports.size());
 
     data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
     data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
diff --git a/src/string_util.cc b/src/string_util.cc
index c69e40a8..9ba63a70 100644
--- a/src/string_util.cc
+++ b/src/string_util.cc
@@ -56,7 +56,7 @@ void ToExponentAndMantissa(double val, int precision, double one_k,
       scaled /= one_k;
       if (scaled <= big_threshold) {
         mantissa_stream << scaled;
-        *exponent = i + 1;
+        *exponent = static_cast<int64_t>(i + 1);
         *mantissa = mantissa_stream.str();
         return;
       }
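Note on src/statistics.cc: `std::count_if` returns the iterator's signed `difference_type`, while `reports.size()` is unsigned, so `reports.size() - error_count` mixes signedness and the signed operand is converted implicitly, which -Wconversion reports. A minimal sketch of the same shape, with illustrative names:

    // sketch.cc -- illustrative only.
    #include <algorithm>
    #include <cstddef>
    #include <vector>

    bool enough_good_runs(const std::vector<bool>& skipped) {
      std::ptrdiff_t errors =
          std::count_if(skipped.begin(), skipped.end(),
                        [](bool s) { return s; });
      // Casting the signed count keeps the subtraction purely unsigned.
      return skipped.size() - static_cast<std::size_t>(errors) >= 2;
    }
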
diff --git a/src/sysinfo.cc b/src/sysinfo.cc
index daeb98b0..57a23e7b 100644
--- a/src/sysinfo.cc
+++ b/src/sysinfo.cc
@@ -350,7 +350,7 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
     CPUInfo::CacheInfo C;
     C.num_sharing = static_cast<int>(b.count());
     C.level = cache.Level;
-    C.size = cache.Size;
+    C.size = static_cast<int>(cache.Size);
     C.type = "Unknown";
     switch (cache.Type) {
       case CacheUnified:
@@ -485,9 +485,8 @@ int GetNumCPUsImpl() {
   // positives.
   std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
   GetSystemInfo(&sysinfo);
-  return sysinfo.dwNumberOfProcessors;  // number of logical
-                                        // processors in the current
-                                        // group
+  // number of logical processors in the current group
+  return static_cast<int>(sysinfo.dwNumberOfProcessors);
 #elif defined(BENCHMARK_OS_SOLARIS)
   // Returns -1 in case of a failure.
   long num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
@@ -837,7 +836,7 @@ std::vector<double> GetLoadAvg() {
     !(defined(__ANDROID__) && __ANDROID_API__ < 29)
   static constexpr int kMaxSamples = 3;
   std::vector<double> res(kMaxSamples, 0.0);
-  const int nelem = getloadavg(res.data(), kMaxSamples);
+  const size_t nelem = static_cast<size_t>(getloadavg(res.data(), kMaxSamples));
   if (nelem < 1) {
     res.clear();
   } else {
diff --git a/src/timers.cc b/src/timers.cc
index 667e7b2e..d0821f31 100644
--- a/src/timers.cc
+++ b/src/timers.cc
@@ -245,9 +245,9 @@ std::string LocalDateTimeString() {
       tz_offset_sign = '-';
     }
 
-    tz_len =
+    tz_len = static_cast<size_t>(
         ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
-                   tz_offset_sign, offset_minutes / 100, offset_minutes % 100);
+                   tz_offset_sign, offset_minutes / 100, offset_minutes % 100));
     BM_CHECK(tz_len == kTzOffsetLen);
     ((void)tz_len);  // Prevent unused variable warning in optimized build.
   } else {
diff --git a/test/BUILD b/test/BUILD
index e43b8023..b245fa76 100644
--- a/test/BUILD
+++ b/test/BUILD
@@ -21,6 +21,7 @@ TEST_COPTS = [
     ## assert() are used a lot in tests upstream, which may be optimised out leading to
     ## unused-variable warning.
     "-Wno-unused-variable",
+    "-Werror=old-style-cast",
 ]
 
 # Some of the issues with DoNotOptimize only occur when optimization is enabled
diff --git a/test/benchmark_gtest.cc b/test/benchmark_gtest.cc
index 2c9e555d..0aa2552c 100644
--- a/test/benchmark_gtest.cc
+++ b/test/benchmark_gtest.cc
@@ -38,7 +38,7 @@ TEST(AddRangeTest, Advanced64) {
 
 TEST(AddRangeTest, FullRange8) {
   std::vector<int8_t> dst;
-  AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), int8_t{8});
+  AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
   EXPECT_THAT(
       dst, testing::ElementsAre(int8_t{1}, int8_t{8}, int8_t{64}, int8_t{127}));
 }
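Note on src/timers.cc: `::snprintf` returns `int`, while `tz_len` is compared against a `size_t` length, so the result is converted once at the assignment instead of at every comparison. A sketch of the pattern (helper name and buffer size are assumptions, not library code; it assumes the call succeeds, since a negative error return would wrap when cast):

    // sketch.cc -- illustrative only.
    #include <cstddef>
    #include <cstdio>

    std::size_t format_tz(char (&buf)[8], char sign, long minutes) {
      // snprintf returns int; convert once here so later size_t
      // comparisons need no further casts.
      return static_cast<std::size_t>(
          ::snprintf(buf, sizeof(buf), "%c%02li:%02li", sign,
                     minutes / 100, minutes % 100));
    }
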
diff --git a/test/complexity_test.cc b/test/complexity_test.cc
index 0c159cd2..0729d15a 100644
--- a/test/complexity_test.cc
+++ b/test/complexity_test.cc
@@ -71,11 +71,11 @@ void BM_Complexity_O1(benchmark::State &state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
     benchmark::DoNotOptimize(state.iterations());
-    double tmp = state.iterations();
+    double tmp = static_cast<double>(state.iterations());
     benchmark::DoNotOptimize(tmp);
     for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
       benchmark::DoNotOptimize(state.iterations());
-      tmp *= state.iterations();
+      tmp *= static_cast<double>(state.iterations());
       benchmark::DoNotOptimize(tmp);
     }
 
@@ -120,16 +120,16 @@ void BM_Complexity_O_N(benchmark::State &state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
     benchmark::DoNotOptimize(state.iterations());
-    double tmp = state.iterations();
+    double tmp = static_cast<double>(state.iterations());
     benchmark::DoNotOptimize(tmp);
     for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
       benchmark::DoNotOptimize(state.iterations());
-      tmp *= state.iterations();
+      tmp *= static_cast<double>(state.iterations());
       benchmark::DoNotOptimize(tmp);
     }
 
     // 1ns per iteration per entry
-    state.SetIterationTime(state.range(0) * 42 * 1e-9);
+    state.SetIterationTime(static_cast<double>(state.range(0)) * 42 * 1e-9);
   }
   state.SetComplexityN(state.range(0));
 }
@@ -178,16 +178,16 @@ static void BM_Complexity_O_N_log_N(benchmark::State &state) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
     benchmark::DoNotOptimize(state.iterations());
-    double tmp = state.iterations();
+    double tmp = static_cast<double>(state.iterations());
     benchmark::DoNotOptimize(tmp);
     for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
       benchmark::DoNotOptimize(state.iterations());
-      tmp *= state.iterations();
+      tmp *= static_cast<double>(state.iterations());
      benchmark::DoNotOptimize(tmp);
     }
 
-    state.SetIterationTime(state.range(0) * kLog2E * std::log(state.range(0)) *
-                           42 * 1e-9);
+    state.SetIterationTime(static_cast<double>(state.range(0)) * kLog2E *
+                           std::log(state.range(0)) * 42 * 1e-9);
   }
   state.SetComplexityN(state.range(0));
 }
@@ -238,15 +238,15 @@ void BM_ComplexityCaptureArgs(benchmark::State &state, int n) {
   for (auto _ : state) {
     // This test requires a non-zero CPU time to avoid divide-by-zero
     benchmark::DoNotOptimize(state.iterations());
-    double tmp = state.iterations();
+    double tmp = static_cast<double>(state.iterations());
     benchmark::DoNotOptimize(tmp);
     for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
       benchmark::DoNotOptimize(state.iterations());
-      tmp *= state.iterations();
+      tmp *= static_cast<double>(state.iterations());
       benchmark::DoNotOptimize(tmp);
     }
 
-    state.SetIterationTime(state.range(0) * 42 * 1e-9);
+    state.SetIterationTime(static_cast<double>(state.range(0)) * 42 * 1e-9);
   }
   state.SetComplexityN(n);
 }
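Note on test/complexity_test.cc: `State::iterations()` and `State::range()` return 64-bit integer types, and an implicit `int64_t` to `double` conversion may change the value above 2^53, which is exactly what -Wconversion flags; the casts keep the timing model deliberately in `double`. A minimal sketch (function name illustrative, not test code):

    // sketch.cc -- illustrative only.
    #include <cstdint>

    double model_iteration_time_ns(std::int64_t range) {
      // Mirrors the tests' "42ns per iteration per entry" model; the
      // cast makes the int64_t -> double conversion explicit.
      return static_cast<double>(range) * 42 * 1e-9;
    }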