prefix macros to avoid clashes (#1186)

Dominic Hamon, 2021-06-24 18:21:59 +01:00, committed by GitHub
parent 5da5660429
commit 6a5bf081d3
24 changed files with 157 additions and 153 deletions


@@ -58,71 +58,71 @@
 namespace benchmark {

 // Print a list of benchmarks. This option overrides all other options.
-DEFINE_bool(benchmark_list_tests, false);
+BM_DEFINE_bool(benchmark_list_tests, false);

 // A regular expression that specifies the set of benchmarks to execute. If
 // this flag is empty, or if this flag is the string \"all\", all benchmarks
 // linked into the binary are run.
-DEFINE_string(benchmark_filter, ".");
+BM_DEFINE_string(benchmark_filter, ".");

 // Minimum number of seconds we should run benchmark before results are
 // considered significant. For cpu-time based tests, this is the lower bound
 // on the total cpu time used by all threads that make up the test. For
 // real-time based tests, this is the lower bound on the elapsed time of the
 // benchmark execution, regardless of number of threads.
-DEFINE_double(benchmark_min_time, 0.5);
+BM_DEFINE_double(benchmark_min_time, 0.5);

 // The number of runs of each benchmark. If greater than 1, the mean and
 // standard deviation of the runs will be reported.
-DEFINE_int32(benchmark_repetitions, 1);
+BM_DEFINE_int32(benchmark_repetitions, 1);

 // If set, enable random interleaving of repetitions of all benchmarks.
 // See http://github.com/google/benchmark/issues/1051 for details.
-DEFINE_bool(benchmark_enable_random_interleaving, false);
+BM_DEFINE_bool(benchmark_enable_random_interleaving, false);

 // Report the result of each benchmark repetition. When 'true' is specified,
 // only the mean, standard deviation, and other statistics are reported for
 // repeated benchmarks. Affects all reporters.
-DEFINE_bool(benchmark_report_aggregates_only, false);
+BM_DEFINE_bool(benchmark_report_aggregates_only, false);

 // Display the result of each benchmark repetition. When 'true' is specified,
 // only the mean, standard deviation, and other statistics are displayed for
 // repeated benchmarks. Unlike benchmark_report_aggregates_only, only affects
 // the display reporter, but *NOT* the file reporter, which will still contain
 // all the output.
-DEFINE_bool(benchmark_display_aggregates_only, false);
+BM_DEFINE_bool(benchmark_display_aggregates_only, false);

 // The format to use for console output.
 // Valid values are 'console', 'json', or 'csv'.
-DEFINE_string(benchmark_format, "console");
+BM_DEFINE_string(benchmark_format, "console");

 // The format to use for file output.
 // Valid values are 'console', 'json', or 'csv'.
-DEFINE_string(benchmark_out_format, "json");
+BM_DEFINE_string(benchmark_out_format, "json");

 // The file to write additional output to.
-DEFINE_string(benchmark_out, "");
+BM_DEFINE_string(benchmark_out, "");

 // Whether to use colors in the output. Valid values:
 // 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if
 // the output is being sent to a terminal and the TERM environment variable is
 // set to a terminal type that supports colors.
-DEFINE_string(benchmark_color, "auto");
+BM_DEFINE_string(benchmark_color, "auto");

 // Whether to use tabular format when printing user counters to the console.
 // Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false.
-DEFINE_bool(benchmark_counters_tabular, false);
+BM_DEFINE_bool(benchmark_counters_tabular, false);

 // List of additional perf counters to collect, in libpfm format. For more
 // information about libpfm: https://man7.org/linux/man-pages/man3/libpfm.3.html
-DEFINE_string(benchmark_perf_counters, "");
+BM_DEFINE_string(benchmark_perf_counters, "");

 // Extra context to include in the output, formatted as comma-separated
 // key-value pairs. Kept internal as it's only used for parsing from the
 // env/command line.
-DEFINE_kvpairs(benchmark_context, {});
+BM_DEFINE_kvpairs(benchmark_context, {});

 // The level of verbose logging to output.
-DEFINE_int32(v, 0);
+BM_DEFINE_int32(v, 0);

 namespace internal {
@@ -151,8 +151,9 @@ State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
       timer_(timer),
       manager_(manager),
       perf_counters_measurement_(perf_counters_measurement) {
-  CHECK(max_iterations != 0) << "At least one iteration must be run";
-  CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
+  BM_CHECK(max_iterations != 0) << "At least one iteration must be run";
+  BM_CHECK_LT(thread_index, threads)
+      << "thread_index must be less than threads";

   // Note: The use of offsetof below is technically undefined until C++17
   // because State is not a standard layout type. However, all compilers
@@ -181,21 +182,21 @@ State::State(IterationCount max_iters, const std::vector<int64_t>& ranges,
 void State::PauseTiming() {
   // Add in time accumulated so far
-  CHECK(started_ && !finished_ && !error_occurred_);
+  BM_CHECK(started_ && !finished_ && !error_occurred_);
   timer_->StopTimer();
   if (perf_counters_measurement_) {
     auto measurements = perf_counters_measurement_->StopAndGetMeasurements();
     for (const auto& name_and_measurement : measurements) {
       auto name = name_and_measurement.first;
       auto measurement = name_and_measurement.second;
-      CHECK_EQ(counters[name], 0.0);
+      BM_CHECK_EQ(counters[name], 0.0);
       counters[name] = Counter(measurement, Counter::kAvgIterations);
     }
   }
 }

 void State::ResumeTiming() {
-  CHECK(started_ && !finished_ && !error_occurred_);
+  BM_CHECK(started_ && !finished_ && !error_occurred_);
   timer_->StartTimer();
   if (perf_counters_measurement_) {
     perf_counters_measurement_->Start();
@@ -203,7 +204,7 @@ void State::ResumeTiming() {
 }

 void State::SkipWithError(const char* msg) {
-  CHECK(msg);
+  BM_CHECK(msg);
   error_occurred_ = true;
   {
     MutexLock l(manager_->GetBenchmarkMutex());
@@ -226,7 +227,7 @@ void State::SetLabel(const char* label) {
 }

 void State::StartKeepRunning() {
-  CHECK(!started_ && !finished_);
+  BM_CHECK(!started_ && !finished_);
   started_ = true;
   total_iterations_ = error_occurred_ ? 0 : max_iterations;
   manager_->StartStopBarrier();
@@ -234,7 +235,7 @@ void State::StartKeepRunning() {
 }

 void State::FinishKeepRunning() {
-  CHECK(started_ && (!finished_ || error_occurred_));
+  BM_CHECK(started_ && (!finished_ || error_occurred_));
   if (!error_occurred_) {
     PauseTiming();
   }
@@ -282,7 +283,7 @@ void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks,
                    BenchmarkReporter* display_reporter,
                    BenchmarkReporter* file_reporter) {
   // Note the file_reporter can be null.
-  CHECK(display_reporter != nullptr);
+  BM_CHECK(display_reporter != nullptr);

   // Determine the width of the name field using a minimum width of 10.
   bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
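
For context, State::SkipWithError (guarded above by BM_CHECK(msg)) is the public way to abort a run; the caller still has to leave the benchmark loop itself. A minimal usage sketch, not part of this commit, with Device, OpenDevice and ReadOnce as hypothetical helpers:

static void BM_DeviceRead(benchmark::State& state) {
  Device* dev = OpenDevice();  // hypothetical, fallible setup
  if (dev == nullptr) {
    state.SkipWithError("device unavailable");
    return;  // SkipWithError does not return from the benchmark for you
  }
  for (auto _ : state) {
    ReadOnce(dev);  // hypothetical work under measurement
  }
}
BENCHMARK(BM_DeviceRead);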


@@ -111,7 +111,7 @@ void BenchmarkFamilies::ClearBenchmarks() {
 bool BenchmarkFamilies::FindBenchmarks(
     std::string spec, std::vector<BenchmarkInstance>* benchmarks,
     std::ostream* ErrStream) {
-  CHECK(ErrStream);
+  BM_CHECK(ErrStream);
   auto& Err = *ErrStream;
   // Make regular expression out of command-line flag
   std::string error_msg;
@@ -225,7 +225,7 @@ Benchmark* Benchmark::Name(const std::string& name) {
 }

 Benchmark* Benchmark::Arg(int64_t x) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
   args_.push_back({x});
   return this;
 }
@@ -236,7 +236,7 @@ Benchmark* Benchmark::Unit(TimeUnit unit) {
 }

 Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
   std::vector<int64_t> arglist;
   AddRange(&arglist, start, limit, range_multiplier_);
@@ -248,7 +248,7 @@ Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
 Benchmark* Benchmark::Ranges(
     const std::vector<std::pair<int64_t, int64_t>>& ranges) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
   std::vector<std::vector<int64_t>> arglists(ranges.size());
   for (std::size_t i = 0; i < ranges.size(); i++) {
     AddRange(&arglists[i], ranges[i].first, ranges[i].second,
@@ -262,7 +262,7 @@ Benchmark* Benchmark::Ranges(
 Benchmark* Benchmark::ArgsProduct(
     const std::vector<std::vector<int64_t>>& arglists) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(arglists.size()));
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(arglists.size()));
   std::vector<std::size_t> indices(arglists.size());
   const std::size_t total = std::accumulate(
@@ -289,20 +289,20 @@ Benchmark* Benchmark::ArgsProduct(
 }

 Benchmark* Benchmark::ArgName(const std::string& name) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
   arg_names_ = {name};
   return this;
 }

 Benchmark* Benchmark::ArgNames(const std::vector<std::string>& names) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
   arg_names_ = names;
   return this;
 }

 Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
-  CHECK_LE(start, limit);
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+  BM_CHECK_LE(start, limit);
   for (int64_t arg = start; arg <= limit; arg += step) {
     args_.push_back({arg});
   }
@@ -310,7 +310,7 @@ Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) {
 }

 Benchmark* Benchmark::Args(const std::vector<int64_t>& args) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
   args_.push_back(args);
   return this;
 }
@@ -321,27 +321,27 @@ Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) {
 }

 Benchmark* Benchmark::RangeMultiplier(int multiplier) {
-  CHECK(multiplier > 1);
+  BM_CHECK(multiplier > 1);
   range_multiplier_ = multiplier;
   return this;
 }

 Benchmark* Benchmark::MinTime(double t) {
-  CHECK(t > 0.0);
-  CHECK(iterations_ == 0);
+  BM_CHECK(t > 0.0);
+  BM_CHECK(iterations_ == 0);
   min_time_ = t;
   return this;
 }

 Benchmark* Benchmark::Iterations(IterationCount n) {
-  CHECK(n > 0);
-  CHECK(IsZero(min_time_));
+  BM_CHECK(n > 0);
+  BM_CHECK(IsZero(min_time_));
   iterations_ = n;
   return this;
 }

 Benchmark* Benchmark::Repetitions(int n) {
-  CHECK(n > 0);
+  BM_CHECK(n > 0);
   repetitions_ = n;
   return this;
 }
@@ -374,14 +374,14 @@ Benchmark* Benchmark::MeasureProcessCPUTime() {
 }

 Benchmark* Benchmark::UseRealTime() {
-  CHECK(!use_manual_time_)
+  BM_CHECK(!use_manual_time_)
       << "Cannot set UseRealTime and UseManualTime simultaneously.";
   use_real_time_ = true;
   return this;
 }

 Benchmark* Benchmark::UseManualTime() {
-  CHECK(!use_real_time_)
+  BM_CHECK(!use_real_time_)
       << "Cannot set UseRealTime and UseManualTime simultaneously.";
   use_manual_time_ = true;
   return this;
@@ -405,14 +405,14 @@ Benchmark* Benchmark::ComputeStatistics(std::string name,
 }

 Benchmark* Benchmark::Threads(int t) {
-  CHECK_GT(t, 0);
+  BM_CHECK_GT(t, 0);
   thread_counts_.push_back(t);
   return this;
 }

 Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
-  CHECK_GT(min_threads, 0);
-  CHECK_GE(max_threads, min_threads);
+  BM_CHECK_GT(min_threads, 0);
+  BM_CHECK_GE(max_threads, min_threads);
   AddRange(&thread_counts_, min_threads, max_threads, 2);
   return this;
@@ -420,9 +420,9 @@ Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) {
 Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads,
                                        int stride) {
-  CHECK_GT(min_threads, 0);
-  CHECK_GE(max_threads, min_threads);
-  CHECK_GE(stride, 1);
+  BM_CHECK_GT(min_threads, 0);
+  BM_CHECK_GE(max_threads, min_threads);
+  BM_CHECK_GE(stride, 1);

   for (auto i = min_threads; i < max_threads; i += stride) {
     thread_counts_.push_back(i);
@@ -466,7 +466,7 @@ std::vector<int64_t> CreateRange(int64_t lo, int64_t hi, int multi) {
 std::vector<int64_t> CreateDenseRange(int64_t start, int64_t limit,
                                       int step) {
-  CHECK_LE(start, limit);
+  BM_CHECK_LE(start, limit);
   std::vector<int64_t> args;
   for (int64_t arg = start; arg <= limit; arg += step) {
     args.push_back(arg);
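
Side note (illustrative, not in the diff): the loop above makes the dense range inclusive at both ends, so a call such as

std::vector<int64_t> v = benchmark::CreateDenseRange(0, 10, /*step=*/2);
// v == {0, 2, 4, 6, 8, 10}

yields every step value up to and including the limit.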


@@ -14,9 +14,9 @@ namespace internal {
 template <typename T>
 typename std::vector<T>::iterator
 AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
-  CHECK_GE(lo, 0);
-  CHECK_GE(hi, lo);
-  CHECK_GE(mult, 2);
+  BM_CHECK_GE(lo, 0);
+  BM_CHECK_GE(hi, lo);
+  BM_CHECK_GE(mult, 2);

   const size_t start_offset = dst->size();
@@ -38,10 +38,10 @@ AddPowers(std::vector<T>* dst, T lo, T hi, int mult) {
 template <typename T>
 void AddNegatedPowers(std::vector<T>* dst, T lo, T hi, int mult) {
   // We negate lo and hi so we require that they cannot be equal to 'min'.
-  CHECK_GT(lo, std::numeric_limits<T>::min());
-  CHECK_GT(hi, std::numeric_limits<T>::min());
-  CHECK_GE(hi, lo);
-  CHECK_LE(hi, 0);
+  BM_CHECK_GT(lo, std::numeric_limits<T>::min());
+  BM_CHECK_GT(hi, std::numeric_limits<T>::min());
+  BM_CHECK_GE(hi, lo);
+  BM_CHECK_LE(hi, 0);

   // Add positive powers, then negate and reverse.
   // Casts necessary since small integers get promoted
@@ -60,8 +60,8 @@ void AddRange(std::vector<T>* dst, T lo, T hi, int mult) {
   static_assert(std::is_integral<T>::value && std::is_signed<T>::value,
                 "Args type must be a signed integer");
-  CHECK_GE(hi, lo);
-  CHECK_GE(mult, 2);
+  BM_CHECK_GE(hi, lo);
+  BM_CHECK_GE(mult, 2);

   // Add "lo"
   dst->push_back(lo);
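
For orientation, AddRange is what backs Benchmark::Range: it keeps both endpoints and fills in powers of the multiplier between them. An illustrative use with the library's default multiplier of 8 (BM_MyBench is a hypothetical benchmark):

BENCHMARK(BM_MyBench)->Range(8, 8 << 10);
// Argument sequence: 8, 64, 512, 4096, 8192
// (the endpoints plus the intermediate powers of 8).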


@@ -124,7 +124,7 @@ void RunInThread(const BenchmarkInstance* b, IterationCount iters,
                                : internal::ThreadTimer::Create());
   State st =
       b->Run(iters, thread_id, &timer, manager, perf_counters_measurement);
-  CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
+  BM_CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
       << "Benchmark returned before State::KeepRunning() returned false!";
   {
     MutexLock l(manager->GetBenchmarkMutex());
@@ -168,8 +168,8 @@ BenchmarkRunner::BenchmarkRunner(
         internal::ARM_DisplayReportAggregatesOnly);
     run_results.file_report_aggregates_only =
         (b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly);
-    CHECK(FLAGS_benchmark_perf_counters.empty() ||
-          perf_counters_measurement.IsValid())
+    BM_CHECK(FLAGS_benchmark_perf_counters.empty() ||
+             perf_counters_measurement.IsValid())
        << "Perf counters were requested but could not be set up.";
   }
 }


@@ -25,11 +25,11 @@
 namespace benchmark {

-DECLARE_double(benchmark_min_time);
-DECLARE_int32(benchmark_repetitions);
-DECLARE_bool(benchmark_report_aggregates_only);
-DECLARE_bool(benchmark_display_aggregates_only);
-DECLARE_string(benchmark_perf_counters);
+BM_DECLARE_double(benchmark_min_time);
+BM_DECLARE_int32(benchmark_repetitions);
+BM_DECLARE_bool(benchmark_report_aggregates_only);
+BM_DECLARE_bool(benchmark_display_aggregates_only);
+BM_DECLARE_string(benchmark_perf_counters);

 namespace internal {


@@ -23,8 +23,9 @@ BENCHMARK_NORETURN inline void CallAbortHandler() {
   std::abort();  // fallback to enforce noreturn
 }

-// CheckHandler is the class constructed by failing CHECK macros. CheckHandler
-// will log information about the failures and abort when it is destructed.
+// CheckHandler is the class constructed by failing BM_CHECK macros.
+// CheckHandler will log information about the failures and abort when it is
+// destructed.
 class CheckHandler {
  public:
   CheckHandler(const char* check, const char* file, const char* func, int line)
@@ -51,32 +52,32 @@ class CheckHandler {
 }  // end namespace internal
 }  // end namespace benchmark

-// The CHECK macro returns a std::ostream object that can have extra information
-// written to it.
+// The BM_CHECK macro returns a std::ostream object that can have extra
+// information written to it.
 #ifndef NDEBUG
-#define CHECK(b) \
+#define BM_CHECK(b) \
   (b ? ::benchmark::internal::GetNullLogInstance() \
      : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \
           .GetLog())
 #else
-#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
+#define BM_CHECK(b) ::benchmark::internal::GetNullLogInstance()
 #endif

 // clang-format off
 // preserve whitespacing between operators for alignment
-#define CHECK_EQ(a, b) CHECK((a) == (b))
-#define CHECK_NE(a, b) CHECK((a) != (b))
-#define CHECK_GE(a, b) CHECK((a) >= (b))
-#define CHECK_LE(a, b) CHECK((a) <= (b))
-#define CHECK_GT(a, b) CHECK((a) > (b))
-#define CHECK_LT(a, b) CHECK((a) < (b))
+#define BM_CHECK_EQ(a, b) BM_CHECK((a) == (b))
+#define BM_CHECK_NE(a, b) BM_CHECK((a) != (b))
+#define BM_CHECK_GE(a, b) BM_CHECK((a) >= (b))
+#define BM_CHECK_LE(a, b) BM_CHECK((a) <= (b))
+#define BM_CHECK_GT(a, b) BM_CHECK((a) > (b))
+#define BM_CHECK_LT(a, b) BM_CHECK((a) < (b))

-#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps))
-#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps))
-#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps))
-#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps))
-#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps))
-#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps))
+#define BM_CHECK_FLOAT_EQ(a, b, eps) BM_CHECK(std::fabs((a) - (b)) < (eps))
+#define BM_CHECK_FLOAT_NE(a, b, eps) BM_CHECK(std::fabs((a) - (b)) >= (eps))
+#define BM_CHECK_FLOAT_GE(a, b, eps) BM_CHECK((a) - (b) > -(eps))
+#define BM_CHECK_FLOAT_LE(a, b, eps) BM_CHECK((b) - (a) > -(eps))
+#define BM_CHECK_FLOAT_GT(a, b, eps) BM_CHECK((a) - (b) > (eps))
+#define BM_CHECK_FLOAT_LT(a, b, eps) BM_CHECK((b) - (a) > (eps))
 // clang-format on

 #endif  // CHECK_H_
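
Illustrative usage of the renamed macros (not part of the commit; Frobnicate is a hypothetical function): in debug builds a failing BM_CHECK streams its message through CheckHandler and aborts, while under NDEBUG the whole expression collapses to the null log stream.

#include "check.h"  // assumed include path for these internal macros

void Frobnicate(int* p, int n) {
  BM_CHECK(p != nullptr) << "null input buffer";
  BM_CHECK_GT(n, 0) << "n must be positive, got " << n;
  // ... safe to use p[0..n) here ...
}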


@@ -94,7 +94,7 @@ std::string FormatString(const char* msg, va_list args) {
   va_end(args_cp);

   // Currently there is no error handling for failure, so this is a hack.
-  CHECK(ret >= 0);
+  BM_CHECK(ret >= 0);

   if (ret == 0)  // handle empty expansion
     return {};
@@ -105,7 +105,7 @@ std::string FormatString(const char* msg, va_list args) {
     size = (size_t)ret + 1;  // + 1 for the null byte
     std::unique_ptr<char[]> buff(new char[size]);
     ret = vsnprintf(buff.get(), size, msg, args);
-    CHECK(ret > 0 && ((size_t)ret) < size);
+    BM_CHECK(ret > 0 && ((size_t)ret) < size);
     return buff.get();
   }
 }
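
For readers new to the idiom above: FormatString sizes the buffer with one vsnprintf pass and writes with a second. A standalone sketch of the same two-pass pattern (plain C++11, independent of the library):

#include <cstdarg>
#include <cstdio>
#include <memory>
#include <string>

std::string Format(const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  va_list copy;
  va_copy(copy, args);  // the sizing pass must consume a copy
  const int needed = vsnprintf(nullptr, 0, fmt, copy);
  va_end(copy);
  std::string out;
  if (needed > 0) {
    std::unique_ptr<char[]> buf(new char[needed + 1]);
    vsnprintf(buf.get(), static_cast<size_t>(needed) + 1, fmt, args);
    out.assign(buf.get(), static_cast<size_t>(needed));
  }
  va_end(args);
  return out;
}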


@@ -9,23 +9,23 @@
 #define FLAG(name) FLAGS_##name

 // Macros for declaring flags.
-#define DECLARE_bool(name) extern bool FLAG(name)
-#define DECLARE_int32(name) extern int32_t FLAG(name)
-#define DECLARE_double(name) extern double FLAG(name)
-#define DECLARE_string(name) extern std::string FLAG(name)
-#define DECLARE_kvpairs(name) \
+#define BM_DECLARE_bool(name) extern bool FLAG(name)
+#define BM_DECLARE_int32(name) extern int32_t FLAG(name)
+#define BM_DECLARE_double(name) extern double FLAG(name)
+#define BM_DECLARE_string(name) extern std::string FLAG(name)
+#define BM_DECLARE_kvpairs(name) \
   extern std::map<std::string, std::string> FLAG(name)

 // Macros for defining flags.
-#define DEFINE_bool(name, default_val) \
+#define BM_DEFINE_bool(name, default_val) \
   bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
-#define DEFINE_int32(name, default_val) \
+#define BM_DEFINE_int32(name, default_val) \
   int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val)
-#define DEFINE_double(name, default_val) \
+#define BM_DEFINE_double(name, default_val) \
   double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val)
-#define DEFINE_string(name, default_val) \
+#define BM_DEFINE_string(name, default_val) \
   std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val)
-#define DEFINE_kvpairs(name, default_val) \
+#define BM_DEFINE_kvpairs(name, default_val) \
   std::map<std::string, std::string> FLAG(name) = \
       benchmark::KvPairsFromEnv(#name, default_val)
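
To make the renaming concrete, here is what one definition expands to (illustrative; FLAG(name) pastes the FLAGS_ prefix and the *FromEnv helpers read the matching environment variable, as shown above):

BM_DEFINE_bool(benchmark_list_tests, false);
// expands to:
// bool FLAGS_benchmark_list_tests =
//     benchmark::BoolFromEnv("benchmark_list_tests", false);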


@@ -123,10 +123,10 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
 // fitting curve.
 LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                        const std::vector<double>& time, const BigO complexity) {
-  CHECK_EQ(n.size(), time.size());
-  CHECK_GE(n.size(), 2);  // Do not compute fitting curve if less than two
-                          // benchmark runs are given
-  CHECK_NE(complexity, oNone);
+  BM_CHECK_EQ(n.size(), time.size());
+  BM_CHECK_GE(n.size(), 2);  // Do not compute fitting curve if less than two
+                             // benchmark runs are given
+  BM_CHECK_NE(complexity, oNone);

   LeastSq best_fit;
@ -167,7 +167,8 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
// Populate the accumulators.
for (const Run& run : reports) {
CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
BM_CHECK_GT(run.complexity_n, 0)
<< "Did you forget to call SetComplexityN?";
n.push_back(run.complexity_n);
real_time.push_back(run.real_accumulated_time / run.iterations);
cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
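
Aside, for readers of MinimalLeastSq (my summary, not part of the commit): for a candidate complexity curve f(n) with a single free scaling coefficient, the ordinary least-squares fit over runs (n_i, t_i) minimizes \sum_i \bigl(t_i - c\,f(n_i)\bigr)^2, which has the closed form

\hat{c} = \frac{\sum_i t_i \, f(n_i)}{\sum_i f(n_i)^2}

and this is why the BM_CHECK_GE(n.size(), 2) above insists on at least two runs before a fit is meaningful.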


@@ -85,7 +85,8 @@ void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
     for (const auto& cnt : run.counters) {
       if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
         continue;
-      CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
+      BM_CHECK(user_counter_names_.find(cnt.first) !=
+               user_counter_names_.end())
          << "All counters must be present in each run. "
          << "Counter named \"" << cnt.first
          << "\" was not in a run after being added to the header";


@@ -130,7 +130,7 @@ class Barrier {
   // entered the barrier. Returns true iff this is the last thread to
   // enter the barrier.
   bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
-    CHECK_LT(entered_, running_threads_);
+    BM_CHECK_LT(entered_, running_threads_);
     entered_++;
     if (entered_ < running_threads_) {
       // Wait for all threads to enter


@@ -42,7 +42,7 @@ namespace internal {
 class PerfCounterValues {
  public:
   explicit PerfCounterValues(size_t nr_counters) : nr_counters_(nr_counters) {
-    CHECK_LE(nr_counters_, kMaxCounters);
+    BM_CHECK_LE(nr_counters_, kMaxCounters);
   }

   uint64_t operator[](size_t pos) const { return values_[kPadding + pos]; }


@@ -126,7 +126,7 @@ inline bool Regex::Init(const std::string& spec, std::string* error) {
       // regerror returns the number of bytes necessary to null terminate
      // the string, so we drop that byte when assigning to error.
-      CHECK_NE(needed, 0);
+      BM_CHECK_NE(needed, 0);
       error->assign(errbuf, needed - 1);

       delete[] errbuf;


@@ -38,7 +38,7 @@ BenchmarkReporter::~BenchmarkReporter() {}
 void BenchmarkReporter::PrintBasicContext(std::ostream *out,
                                           Context const &context) {
-  CHECK(out) << "cannot be null";
+  BM_CHECK(out) << "cannot be null";
   auto &Out = *out;

   Out << LocalDateTimeString() << "\n";


@@ -112,22 +112,22 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
         it = counter_stats.find(cnt.first);
         it->second.s.reserve(reports.size());
       } else {
-        CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
+        BM_CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
       }
     }
   }

   // Populate the accumulators.
   for (Run const& run : reports) {
-    CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
-    CHECK_EQ(run_iterations, run.iterations);
+    BM_CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
+    BM_CHECK_EQ(run_iterations, run.iterations);
     if (run.error_occurred) continue;
     real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
     cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
     // user counters
     for (auto const& cnt : run.counters) {
       auto it = counter_stats.find(cnt.first);
-      CHECK_NE(it, counter_stats.end());
+      BM_CHECK_NE(it, counter_stats.end());
       it->second.s.emplace_back(cnt.second);
     }
   }


@@ -135,7 +135,7 @@ struct ValueUnion {
   template <class T, int N>
   std::array<T, N> GetAsArray() {
     const int ArrSize = sizeof(T) * N;
-    CHECK_LE(ArrSize, Size);
+    BM_CHECK_LE(ArrSize, Size);
     std::array<T, N> Arr;
     std::memcpy(Arr.data(), data(), ArrSize);
     return Arr;


@@ -28,7 +28,7 @@ class ThreadTimer {
   // Called by each thread
   void StopTimer() {
-    CHECK(running_);
+    BM_CHECK(running_);
     running_ = false;
     real_time_used_ += ChronoClockNow() - start_real_time_;
     // Floating point error can result in the subtraction producing a negative
@@ -44,19 +44,19 @@
   // REQUIRES: timer is not running
   double real_time_used() const {
-    CHECK(!running_);
+    BM_CHECK(!running_);
     return real_time_used_;
   }

   // REQUIRES: timer is not running
   double cpu_time_used() const {
-    CHECK(!running_);
+    BM_CHECK(!running_);
     return cpu_time_used_;
   }

   // REQUIRES: timer is not running
   double manual_time_used() const {
-    CHECK(!running_);
+    BM_CHECK(!running_);
     return manual_time_used_;
   }


@@ -225,7 +225,7 @@ std::string LocalDateTimeString() {
     tz_len = ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li",
                         tz_offset_sign, offset_minutes / 100, offset_minutes % 100);
-    CHECK(tz_len == kTzOffsetLen);
+    BM_CHECK(tz_len == kTzOffsetLen);
     ((void)tz_len);  // Prevent unused variable warning in optimized build.
   } else {
     // Unknown offset. RFC3339 specifies that unknown local offsets should be
@@ -242,7 +242,7 @@ std::string LocalDateTimeString() {
   timestamp_len = std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S",
                                 timeinfo_p);
-  CHECK(timestamp_len == kTimestampLen);
+  BM_CHECK(timestamp_len == kTimestampLen);
   // Prevent unused variable warning in optimized build.
   ((void)kTimestampLen);


@@ -10,9 +10,9 @@
 namespace benchmark {

-DECLARE_bool(benchmark_enable_random_interleaving);
-DECLARE_string(benchmark_filter);
-DECLARE_int32(benchmark_repetitions);
+BM_DECLARE_bool(benchmark_enable_random_interleaving);
+BM_DECLARE_string(benchmark_filter);
+BM_DECLARE_int32(benchmark_repetitions);

 namespace internal {
 namespace {


@@ -143,12 +143,12 @@ struct Results {
 template <class T>
 T Results::GetAs(const char* entry_name) const {
   auto* sv = Get(entry_name);
-  CHECK(sv != nullptr && !sv->empty());
+  BM_CHECK(sv != nullptr && !sv->empty());
   std::stringstream ss;
   ss << *sv;
   T out;
   ss >> out;
-  CHECK(!ss.fail());
+  BM_CHECK(!ss.fail());
   return out;
 }
@@ -159,7 +159,7 @@ T Results::GetAs(const char* entry_name) const {
 // clang-format off
 #define CHECK_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value) \
-    CONCAT(CHECK_, relationship) \
+    CONCAT(BM_CHECK_, relationship) \
     (entry.getfn< var_type >(var_name), (value)) << "\n" \
     << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \
     << __FILE__ << ":" << __LINE__ << ": " \
@@ -170,7 +170,7 @@ T Results::GetAs(const char* entry_name) const {
 // check with tolerance. eps_factor is the tolerance window, which is
 // interpreted relative to value (eg, 0.1 means 10% of value).
 #define CHECK_FLOAT_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value, eps_factor) \
-    CONCAT(CHECK_FLOAT_, relationship) \
+    CONCAT(BM_CHECK_FLOAT_, relationship) \
     (entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \
     << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \
     << __FILE__ << ":" << __LINE__ << ": " \
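
To see why only the CONCAT argument changes: the helper pastes the relationship token onto the new prefix. An illustrative expansion (assuming CONCAT is the usual token-pasting macro):

// CHECK_RESULT_VALUE_IMPL(e, GetAs, double, "real_time", EQ, 1.5)
// pastes CONCAT(BM_CHECK_, EQ) into BM_CHECK_EQ, yielding roughly:
//   BM_CHECK_EQ(e.GetAs<double>("real_time"), (1.5))
//       << ...file/line diagnostics as above...;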


@@ -94,27 +94,27 @@ void CheckCase(std::stringstream& remaining_output, TestCase const& TC,
   bool on_first = true;
   std::string line;
   while (remaining_output.eof() == false) {
-    CHECK(remaining_output.good());
+    BM_CHECK(remaining_output.good());
     std::getline(remaining_output, line);
     if (on_first) {
       first_line = line;
       on_first = false;
     }
     for (const auto& NC : not_checks) {
-      CHECK(!NC.regex->Match(line))
+      BM_CHECK(!NC.regex->Match(line))
          << "Unexpected match for line \"" << line << "\" for MR_Not regex \""
          << NC.regex_str << "\""
          << "\n actual regex string \"" << TC.substituted_regex << "\""
          << "\n started matching near: " << first_line;
     }
     if (TC.regex->Match(line)) return;
-    CHECK(TC.match_rule != MR_Next)
+    BM_CHECK(TC.match_rule != MR_Next)
        << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str
        << "\""
        << "\n actual regex string \"" << TC.substituted_regex << "\""
        << "\n started matching near: " << first_line;
   }
-  CHECK(remaining_output.eof() == false)
+  BM_CHECK(remaining_output.eof() == false)
      << "End of output reached before match for regex \"" << TC.regex_str
      << "\" was found"
      << "\n actual regex string \"" << TC.substituted_regex << "\""
@@ -144,7 +144,7 @@ class TestReporter : public benchmark::BenchmarkReporter {
     bool first = true;
     for (auto rep : reporters_) {
       bool new_ret = rep->ReportContext(context);
-      CHECK(first || new_ret == last_ret)
+      BM_CHECK(first || new_ret == last_ret)
          << "Reports return different values for ReportContext";
       first = false;
       last_ret = new_ret;
@@ -226,7 +226,7 @@ void ResultsChecker::CheckResults(std::stringstream& output) {
   std::string line;
   bool on_first = true;
   while (output.eof() == false) {
-    CHECK(output.good());
+    BM_CHECK(output.good());
     std::getline(output, line);
     if (on_first) {
       SetHeader_(line);  // this is important
@@ -261,9 +261,9 @@ void ResultsChecker::SetHeader_(const std::string& csv_header) {
 // set the values for a benchmark
 void ResultsChecker::SetValues_(const std::string& entry_csv_line) {
   if (entry_csv_line.empty()) return;  // some lines are empty
-  CHECK(!field_names.empty());
+  BM_CHECK(!field_names.empty());
   auto vals = SplitCsv_(entry_csv_line);
-  CHECK_EQ(vals.size(), field_names.size());
+  BM_CHECK_EQ(vals.size(), field_names.size());
   results.emplace_back(vals[0]);  // vals[0] is the benchmark name
   auto& entry = results.back();
   for (size_t i = 1, e = vals.size(); i < e; ++i) {
@@ -278,7 +278,7 @@ std::vector<std::string> ResultsChecker::SplitCsv_(const std::string& line) {
   if (!field_names.empty()) out.reserve(field_names.size());
   size_t prev = 0, pos = line.find_first_of(','), curr = pos;
   while (pos != line.npos) {
-    CHECK(curr > 0);
+    BM_CHECK(curr > 0);
     if (line[prev] == '"') ++prev;
     if (line[curr - 1] == '"') --curr;
     out.push_back(line.substr(prev, curr - prev));
@@ -309,7 +309,7 @@ int Results::NumThreads() const {
   ss << name.substr(pos + 9, end);
   int num = 1;
   ss >> num;
-  CHECK(!ss.fail());
+  BM_CHECK(!ss.fail());
   return num;
 }
@@ -318,11 +318,11 @@
 }

 double Results::GetTime(BenchmarkTime which) const {
-  CHECK(which == kCpuTime || which == kRealTime);
+  BM_CHECK(which == kCpuTime || which == kRealTime);
   const char* which_str = which == kCpuTime ? "cpu_time" : "real_time";
   double val = GetAs<double>(which_str);
   auto unit = Get("time_unit");
-  CHECK(unit);
+  BM_CHECK(unit);
   if (*unit == "ns") {
     return val * 1.e-9;
   } else if (*unit == "us") {
@@ -332,7 +332,7 @@ double Results::GetTime(BenchmarkTime which) const {
   } else if (*unit == "s") {
     return val;
   } else {
-    CHECK(1 == 0) << "unknown time unit: " << *unit;
+    BM_CHECK(1 == 0) << "unknown time unit: " << *unit;
     return 0;
   }
 }
@@ -348,10 +348,10 @@ TestCase::TestCase(std::string re, int rule)
       regex(std::make_shared<benchmark::Regex>()) {
   std::string err_str;
   regex->Init(substituted_regex, &err_str);
-  CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex
-                         << "\""
-                         << "\n originally \"" << regex_str << "\""
-                         << "\n got error: " << err_str;
+  BM_CHECK(err_str.empty())
+      << "Could not construct regex \"" << substituted_regex << "\""
+      << "\n originally \"" << regex_str << "\""
+      << "\n got error: " << err_str;
 }
int AddCases(TestCaseID ID, std::initializer_list<TestCase> il) {
@ -438,7 +438,7 @@ void RunOutputTests(int argc, char* argv[]) {
// the checks to subscribees.
auto& csv = TestCases[2];
// would use == but gcc spits a warning
CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
BM_CHECK(std::strcmp(csv.name, "CSVReporter") == 0);
internal::GetResultsChecker().CheckResults(csv.out_stream);
}


@@ -103,10 +103,10 @@ size_t do_work() {
 void measure(size_t threadcount, PerfCounterValues* values1,
              PerfCounterValues* values2) {
-  CHECK_NE(values1, nullptr);
-  CHECK_NE(values2, nullptr);
+  BM_CHECK_NE(values1, nullptr);
+  BM_CHECK_NE(values2, nullptr);
   std::vector<std::thread> threads(threadcount);
-  auto work = [&]() { CHECK(do_work() > 1000); };
+  auto work = [&]() { BM_CHECK(do_work() > 1000); };

   // We need to first set up the counters, then start the threads, so the
   // threads would inherit the counters. But later, we need to first destroy the


@@ -30,13 +30,13 @@ struct TestCase {
   void CheckRun(Run const& run) const {
     // clang-format off
-    CHECK(name == run.benchmark_name()) << "expected " << name << " got "
+    BM_CHECK(name == run.benchmark_name()) << "expected " << name << " got "
                                         << run.benchmark_name();
     if (label) {
-      CHECK(run.report_label == label) << "expected " << label << " got "
+      BM_CHECK(run.report_label == label) << "expected " << label << " got "
                                        << run.report_label;
     } else {
-      CHECK(run.report_label == "");
+      BM_CHECK(run.report_label == "");
     }
     // clang-format on
   }


@@ -33,14 +33,14 @@ struct TestCase {
   typedef benchmark::BenchmarkReporter::Run Run;

   void CheckRun(Run const& run) const {
-    CHECK(name == run.benchmark_name())
+    BM_CHECK(name == run.benchmark_name())
        << "expected " << name << " got " << run.benchmark_name();
-    CHECK(error_occurred == run.error_occurred);
-    CHECK(error_message == run.error_message);
+    BM_CHECK(error_occurred == run.error_occurred);
+    BM_CHECK(error_message == run.error_message);
     if (error_occurred) {
-      // CHECK(run.iterations == 0);
+      // BM_CHECK(run.iterations == 0);
     } else {
-      CHECK(run.iterations != 0);
+      BM_CHECK(run.iterations != 0);
     }
   }
 };