mitigate clang build warnings -Wconversion (#1763)

* mitigate clang build warnings -Wconversion

* ensure we have warnings set everywhere and fix some
This commit is contained in:
dominic 2024-03-07 12:19:56 +00:00 committed by GitHub
parent 654d8d6cf3
commit c64b144f42
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 52 additions and 32 deletions

View File

@@ -1,5 +1,22 @@
licenses(["notice"]) licenses(["notice"])
COPTS = [
"-pedantic",
"-pedantic-errors",
"-std=c++11",
"-Wall",
"-Wconversion",
"-Wextra",
"-Wshadow",
# "-Wshorten-64-to-32",
"-Wfloat-equal",
"-fstrict-aliasing",
## assert() are used a lot in tests upstream, which may be optimised out leading to
## unused-variable warning.
"-Wno-unused-variable",
"-Werror=old-style-cast",
]
config_setting( config_setting(
name = "qnx", name = "qnx",
constraint_values = ["@platforms//os:qnx"], constraint_values = ["@platforms//os:qnx"],
@@ -47,7 +64,7 @@ cc_library(
], ],
copts = select({ copts = select({
":windows": [], ":windows": [],
"//conditions:default": ["-Werror=old-style-cast"], "//conditions:default": COPTS,
}), }),
defines = [ defines = [
"BENCHMARK_STATIC_DEFINE", "BENCHMARK_STATIC_DEFINE",

View File

@@ -190,6 +190,7 @@ else()
add_cxx_compiler_flag(-Wshadow) add_cxx_compiler_flag(-Wshadow)
add_cxx_compiler_flag(-Wfloat-equal) add_cxx_compiler_flag(-Wfloat-equal)
add_cxx_compiler_flag(-Wold-style-cast) add_cxx_compiler_flag(-Wold-style-cast)
add_cxx_compiler_flag(-Wconversion)
if(BENCHMARK_ENABLE_WERROR) if(BENCHMARK_ENABLE_WERROR)
add_cxx_compiler_flag(-Werror) add_cxx_compiler_flag(-Werror)
endif() endif()

View File

@@ -407,7 +407,8 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
benchmarks_with_threads += (benchmark.threads() > 1); benchmarks_with_threads += (benchmark.threads() > 1);
runners.emplace_back(benchmark, &perfcounters, reports_for_family); runners.emplace_back(benchmark, &perfcounters, reports_for_family);
int num_repeats_of_this_instance = runners.back().GetNumRepeats(); int num_repeats_of_this_instance = runners.back().GetNumRepeats();
num_repetitions_total += num_repeats_of_this_instance; num_repetitions_total +=
static_cast<size_t>(num_repeats_of_this_instance);
if (reports_for_family) if (reports_for_family)
reports_for_family->num_runs_total += num_repeats_of_this_instance; reports_for_family->num_runs_total += num_repeats_of_this_instance;
} }

View File

@@ -482,8 +482,9 @@ int Benchmark::ArgsCnt() const {
const char* Benchmark::GetArgName(int arg) const { const char* Benchmark::GetArgName(int arg) const {
BM_CHECK_GE(arg, 0); BM_CHECK_GE(arg, 0);
BM_CHECK_LT(arg, static_cast<int>(arg_names_.size())); size_t uarg = static_cast<size_t>(arg);
return arg_names_[arg].c_str(); BM_CHECK_LT(uarg, arg_names_.size());
return arg_names_[uarg].c_str();
} }
TimeUnit Benchmark::GetTimeUnit() const { TimeUnit Benchmark::GetTimeUnit() const {

View File

@@ -24,7 +24,7 @@ typename std::vector<T>::iterator AddPowers(std::vector<T>* dst, T lo, T hi,
static const T kmax = std::numeric_limits<T>::max(); static const T kmax = std::numeric_limits<T>::max();
// Space out the values in multiples of "mult" // Space out the values in multiples of "mult"
for (T i = static_cast<T>(1); i <= hi; i *= static_cast<T>(mult)) { for (T i = static_cast<T>(1); i <= hi; i = static_cast<T>(i * mult)) {
if (i >= lo) { if (i >= lo) {
dst->push_back(i); dst->push_back(i);
} }
@@ -52,7 +52,7 @@ void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
const auto it = AddPowers(dst, hi_complement, lo_complement, mult); const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
std::for_each(it, dst->end(), [](T& t) { t *= -1; }); std::for_each(it, dst->end(), [](T& t) { t = static_cast<T>(t * -1); });
std::reverse(it, dst->end()); std::reverse(it, dst->end());
} }

View File

@@ -235,7 +235,7 @@ BenchmarkRunner::BenchmarkRunner(
has_explicit_iteration_count(b.iterations() != 0 || has_explicit_iteration_count(b.iterations() != 0 ||
parsed_benchtime_flag.tag == parsed_benchtime_flag.tag ==
BenchTimeType::ITERS), BenchTimeType::ITERS),
pool(b.threads() - 1), pool(static_cast<size_t>(b.threads() - 1)),
iters(has_explicit_iteration_count iters(has_explicit_iteration_count
? ComputeIters(b_, parsed_benchtime_flag) ? ComputeIters(b_, parsed_benchtime_flag)
: 1), : 1),

View File

@@ -70,7 +70,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
// frequency scaling). Also note that when the Mac sleeps, this // frequency scaling). Also note that when the Mac sleeps, this
// counter pauses; it does not continue counting, nor does it // counter pauses; it does not continue counting, nor does it
// reset to zero. // reset to zero.
return mach_absolute_time(); return static_cast<int64_t>(mach_absolute_time());
#elif defined(BENCHMARK_OS_EMSCRIPTEN) #elif defined(BENCHMARK_OS_EMSCRIPTEN)
// this goes above x86-specific code because old versions of Emscripten // this goes above x86-specific code because old versions of Emscripten
// define __x86_64__, although they have nothing to do with it. // define __x86_64__, although they have nothing to do with it.
@@ -82,7 +82,7 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#elif defined(__x86_64__) || defined(__amd64__) #elif defined(__x86_64__) || defined(__amd64__)
uint64_t low, high; uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low; return static_cast<int64_t>((high << 32) | low);
#elif defined(__powerpc__) || defined(__ppc__) #elif defined(__powerpc__) || defined(__ppc__)
// This returns a time-base, which is not always precisely a cycle-count. // This returns a time-base, which is not always precisely a cycle-count.
#if defined(__powerpc64__) || defined(__ppc64__) #if defined(__powerpc64__) || defined(__ppc64__)

View File

@@ -97,7 +97,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
auto error_count = std::count_if(reports.begin(), reports.end(), auto error_count = std::count_if(reports.begin(), reports.end(),
[](Run const& run) { return run.skipped; }); [](Run const& run) { return run.skipped; });
if (reports.size() - error_count < 2) { if (reports.size() - static_cast<size_t>(error_count) < 2) {
// We don't report aggregated data if there was a single run. // We don't report aggregated data if there was a single run.
return results; return results;
} }
@@ -179,7 +179,7 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
// Similarly, if there are N repetitions with 1 iterations each, // Similarly, if there are N repetitions with 1 iterations each,
// an aggregate will be computed over N measurements, not 1. // an aggregate will be computed over N measurements, not 1.
// Thus it is best to simply use the count of separate reports. // Thus it is best to simply use the count of separate reports.
data.iterations = reports.size(); data.iterations = static_cast<IterationCount>(reports.size());
data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat); data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat); data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);

View File

@@ -56,7 +56,7 @@ void ToExponentAndMantissa(double val, int precision, double one_k,
scaled /= one_k; scaled /= one_k;
if (scaled <= big_threshold) { if (scaled <= big_threshold) {
mantissa_stream << scaled; mantissa_stream << scaled;
*exponent = i + 1; *exponent = static_cast<int64_t>(i + 1);
*mantissa = mantissa_stream.str(); *mantissa = mantissa_stream.str();
return; return;
} }

View File

@@ -350,7 +350,7 @@ std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows() {
CPUInfo::CacheInfo C; CPUInfo::CacheInfo C;
C.num_sharing = static_cast<int>(b.count()); C.num_sharing = static_cast<int>(b.count());
C.level = cache.Level; C.level = cache.Level;
C.size = cache.Size; C.size = static_cast<int>(cache.Size);
C.type = "Unknown"; C.type = "Unknown";
switch (cache.Type) { switch (cache.Type) {
case CacheUnified: case CacheUnified:
@@ -485,9 +485,8 @@ int GetNumCPUsImpl() {
// positives. // positives.
std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO)); std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
GetSystemInfo(&sysinfo); GetSystemInfo(&sysinfo);
return sysinfo.dwNumberOfProcessors; // number of logical // number of logical processors in the current group
// processors in the current return static_cast<int>(sysinfo.dwNumberOfProcessors);
// group
#elif defined(BENCHMARK_OS_SOLARIS) #elif defined(BENCHMARK_OS_SOLARIS)
// Returns -1 in case of a failure. // Returns -1 in case of a failure.
long num_cpu = sysconf(_SC_NPROCESSORS_ONLN); long num_cpu = sysconf(_SC_NPROCESSORS_ONLN);
@@ -837,7 +836,7 @@ std::vector<double> GetLoadAvg() {
!(defined(__ANDROID__) && __ANDROID_API__ < 29) !(defined(__ANDROID__) && __ANDROID_API__ < 29)
static constexpr int kMaxSamples = 3; static constexpr int kMaxSamples = 3;
std::vector<double> res(kMaxSamples, 0.0); std::vector<double> res(kMaxSamples, 0.0);
const int nelem = getloadavg(res.data(), kMaxSamples); const size_t nelem = static_cast<size_t>(getloadavg(res.data(), kMaxSamples));
if (nelem < 1) { if (nelem < 1) {
res.clear(); res.clear();
} else { } else {

View File

@@ -245,9 +245,9 @@ std::string LocalDateTimeString() {
tz_offset_sign = '-'; tz_offset_sign = '-';
} }
tz_len = tz_len = static_cast<size_t>(
::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li", ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
tz_offset_sign, offset_minutes / 100, offset_minutes % 100); tz_offset_sign, offset_minutes / 100, offset_minutes % 100));
BM_CHECK(tz_len == kTzOffsetLen); BM_CHECK(tz_len == kTzOffsetLen);
((void)tz_len); // Prevent unused variable warning in optimized build. ((void)tz_len); // Prevent unused variable warning in optimized build.
} else { } else {

View File

@@ -21,6 +21,7 @@ TEST_COPTS = [
## assert() are used a lot in tests upstream, which may be optimised out leading to ## assert() are used a lot in tests upstream, which may be optimised out leading to
## unused-variable warning. ## unused-variable warning.
"-Wno-unused-variable", "-Wno-unused-variable",
"-Werror=old-style-cast",
] ]
# Some of the issues with DoNotOptimize only occur when optimization is enabled # Some of the issues with DoNotOptimize only occur when optimization is enabled

View File

@@ -38,7 +38,7 @@ TEST(AddRangeTest, Advanced64) {
TEST(AddRangeTest, FullRange8) { TEST(AddRangeTest, FullRange8) {
std::vector<int8_t> dst; std::vector<int8_t> dst;
AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), int8_t{8}); AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
EXPECT_THAT( EXPECT_THAT(
dst, testing::ElementsAre(int8_t{1}, int8_t{8}, int8_t{64}, int8_t{127})); dst, testing::ElementsAre(int8_t{1}, int8_t{8}, int8_t{64}, int8_t{127}));
} }

View File

@@ -71,11 +71,11 @@ void BM_Complexity_O1(benchmark::State &state) {
for (auto _ : state) { for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations(); double tmp = static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp); benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations(); tmp *= static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp); benchmark::DoNotOptimize(tmp);
} }
@@ -120,16 +120,16 @@ void BM_Complexity_O_N(benchmark::State &state) {
for (auto _ : state) { for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations(); double tmp = static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp); benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations(); tmp *= static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp); benchmark::DoNotOptimize(tmp);
} }
// 1ns per iteration per entry // 1ns per iteration per entry
state.SetIterationTime(state.range(0) * 42 * 1e-9); state.SetIterationTime(static_cast<double>(state.range(0)) * 42 * 1e-9);
} }
state.SetComplexityN(state.range(0)); state.SetComplexityN(state.range(0));
} }
@@ -178,16 +178,16 @@ static void BM_Complexity_O_N_log_N(benchmark::State &state) {
for (auto _ : state) { for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations(); double tmp = static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp); benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations(); tmp *= static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp); benchmark::DoNotOptimize(tmp);
} }
state.SetIterationTime(state.range(0) * kLog2E * std::log(state.range(0)) * state.SetIterationTime(static_cast<double>(state.range(0)) * kLog2E *
42 * 1e-9); std::log(state.range(0)) * 42 * 1e-9);
} }
state.SetComplexityN(state.range(0)); state.SetComplexityN(state.range(0));
} }
@@ -238,15 +238,15 @@ void BM_ComplexityCaptureArgs(benchmark::State &state, int n) {
for (auto _ : state) { for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero // This test requires a non-zero CPU time to avoid divide-by-zero
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
double tmp = state.iterations(); double tmp = static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp); benchmark::DoNotOptimize(tmp);
for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) { for (benchmark::IterationCount i = 0; i < state.iterations(); ++i) {
benchmark::DoNotOptimize(state.iterations()); benchmark::DoNotOptimize(state.iterations());
tmp *= state.iterations(); tmp *= static_cast<double>(state.iterations());
benchmark::DoNotOptimize(tmp); benchmark::DoNotOptimize(tmp);
} }
state.SetIterationTime(state.range(0) * 42 * 1e-9); state.SetIterationTime(static_cast<double>(state.range(0)) * 42 * 1e-9);
} }
state.SetComplexityN(n); state.SetComplexityN(n);
} }