2017-04-27 18:25:20 +00:00
|
|
|
|
|
|
|
#undef NDEBUG
|
|
|
|
|
|
|
|
#include "benchmark/benchmark.h"
|
|
|
|
#include "output_test.h"
|
|
|
|
|
|
|
|
// ========================================================================= //
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //
|
|
|
|
|
2018-06-01 10:14:19 +00:00
|
|
|
// clang-format off
|
|
|
|
|
2017-04-27 18:25:20 +00:00
|
|
|
ADD_CASES(TC_ConsoleOut,
|
|
|
|
{{"^[-]+$", MR_Next},
|
|
|
|
{"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next},
|
|
|
|
{"^[-]+$", MR_Next}});
|
|
|
|
ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
|
|
|
|
|
2018-06-01 10:14:19 +00:00
|
|
|
// clang-format on
|
|
|
|
|
2017-04-27 18:25:20 +00:00
|
|
|
// ========================================================================= //
// ------------------------- Simple Counters Output ------------------------ //
// ========================================================================= //
|
|
|
|
|
|
|
|
// Reports two plain user counters: "foo" is a constant, "bar" scales with
// the number of iterations actually run, so the checker below can verify
// iteration-dependent counter values.
void BM_Counters_Simple(benchmark::State& state) {
  for (auto _ : state) {
  }
  state.counters["foo"] = 1;
  state.counters["bar"] = 2 * (double)state.iterations();
}
|
2017-04-29 17:26:30 +00:00
|
|
|
BENCHMARK(BM_Counters_Simple);
// Expected console/JSON/CSV output for BM_Counters_Simple. Counters appear
// alphabetically ("bar" before "foo") in every reporter.
ADD_CASES(TC_ConsoleOut,
          {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"},
                       {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}});
|
2017-04-29 21:27:55 +00:00
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckSimple(Results const& e) {
|
2018-06-27 14:45:30 +00:00
|
|
|
double its = e.NumIterations();
|
2017-04-28 14:02:27 +00:00
|
|
|
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
|
2017-04-28 19:45:30 +00:00
|
|
|
// check that the value of bar is within 0.1% of the expected value
|
2018-06-01 10:14:19 +00:00
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
|
2017-04-29 21:27:55 +00:00
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
|
2017-04-27 18:25:20 +00:00
|
|
|
|
|
|
|
// ========================================================================= //
// --------------------- Counters+Items+Bytes/s Output --------------------- //
// ========================================================================= //
|
|
|
|
|
2018-06-01 10:14:19 +00:00
|
|
|
namespace {
// Counts invocations of BM_Counters_WithBytesAndItemsPSec so the result
// checker can verify the value of the "bar" counter afterwards.
int num_calls1 = 0;
}  // namespace
|
2017-04-27 18:25:20 +00:00
|
|
|
// Combines user counters with SetBytesProcessed()/SetItemsProcessed() so the
// derived bytes_per_second/items_per_second values appear alongside them.
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  state.counters["foo"] = 1;
  state.counters["bar"] = ++num_calls1;
  state.SetBytesProcessed(364);
  state.SetItemsProcessed(150);
}
|
2017-04-29 17:26:30 +00:00
|
|
|
BENCHMARK(BM_Counters_WithBytesAndItemsPSec);
// Expected output: user counters and the derived throughput values are
// interleaved alphabetically (bar, bytes_per_second, foo, items_per_second).
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report "
                           "bar=%hrfloat bytes_per_second=%hrfloat/s "
                           "foo=%hrfloat items_per_second=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"},
           {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 0,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"bytes_per_second\": %float,$", MR_Next},
           {"\"foo\": %float,$", MR_Next},
           {"\"items_per_second\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\","
                       "%csv_bytes_items_report,%float,%float$"}});
|
2017-04-29 21:27:55 +00:00
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckBytesAndItemsPSec(Results const& e) {
|
2018-06-01 10:14:19 +00:00
|
|
|
double t = e.DurationCPUTime(); // this (and not real time) is the time used
|
2017-04-28 14:02:27 +00:00
|
|
|
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1);
|
2017-04-28 19:45:30 +00:00
|
|
|
// check that the values are within 0.1% of the expected values
|
2018-06-01 10:14:19 +00:00
|
|
|
CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001);
|
|
|
|
CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001);
|
2017-04-29 21:27:55 +00:00
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
|
|
|
|
&CheckBytesAndItemsPSec);
|
2017-04-27 18:25:20 +00:00
|
|
|
|
2017-04-27 21:11:40 +00:00
|
|
|
// ========================================================================= //
// ------------------------- Rate Counters Output -------------------------- //
// ========================================================================= //
|
|
|
|
|
|
|
|
// Reports two counters flagged kIsRate: their values are divided by the
// benchmark's CPU time, so the reporters append a "/s" suffix.
void BM_Counters_Rate(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate};
}
|
2017-04-29 17:26:30 +00:00
|
|
|
BENCHMARK(BM_Counters_Rate);
// Expected output: rate counters carry a "/s" suffix on the console; JSON
// and CSV report the raw (already time-divided) float values.
ADD_CASES(
    TC_ConsoleOut,
    {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"},
                       {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 0,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bar\": %float,$", MR_Next},
                       {"\"foo\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}});
|
2017-04-29 21:27:55 +00:00
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckRate(Results const& e) {
|
2018-06-01 10:14:19 +00:00
|
|
|
double t = e.DurationCPUTime(); // this (and not real time) is the time used
|
2017-04-29 17:26:30 +00:00
|
|
|
// check that the values are within 0.1% of the expected values
|
2018-06-01 10:14:19 +00:00
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / t, 0.001);
|
2017-04-29 21:27:55 +00:00
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
|
2017-04-27 21:11:40 +00:00
|
|
|
|
|
|
|
// ========================================================================= //
// ------------------------- Thread Counters Output ------------------------ //
// ========================================================================= //
|
|
|
|
|
|
|
|
// Each thread sets plain (unflagged) counters; without an averaging flag the
// per-thread values are summed, which CheckThreads() verifies below.
void BM_Counters_Threads(benchmark::State& state) {
  for (auto _ : state) {
  }
  state.counters["foo"] = 1;
  state.counters["bar"] = 2;
}
|
|
|
|
BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8);
// Expected output for each thread count in the range.
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"},
           {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 0,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(
    TC_CSVOut,
    {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}});
|
2017-04-29 21:27:55 +00:00
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckThreads(Results const& e) {
|
2017-04-29 18:26:34 +00:00
|
|
|
CHECK_COUNTER_VALUE(e, int, "foo", EQ, e.NumThreads());
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2 * e.NumThreads());
|
2017-04-29 21:27:55 +00:00
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
|
2017-04-27 21:11:40 +00:00
|
|
|
|
|
|
|
// ========================================================================= //
// ---------------------- ThreadAvg Counters Output ------------------------ //
// ========================================================================= //
|
|
|
|
|
|
|
|
// Counters flagged kAvgThreads are averaged over the thread count instead of
// summed, so the reported values stay 1 and 2 regardless of thread count.
void BM_Counters_AvgThreads(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads};
}
|
|
|
|
BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8);
// Expected output for each thread count in the range.
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int "
                           "%console_report bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"},
           {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 0,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(
    TC_CSVOut,
    {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}});
|
2017-04-29 21:27:55 +00:00
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckAvgThreads(Results const& e) {
|
2017-04-29 18:26:34 +00:00
|
|
|
CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1);
|
|
|
|
CHECK_COUNTER_VALUE(e, int, "bar", EQ, 2);
|
2017-04-29 21:27:55 +00:00
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
|
|
|
|
&CheckAvgThreads);
|
2017-04-27 21:11:40 +00:00
|
|
|
|
|
|
|
// ========================================================================= //
// -------------------- ThreadAvgRate Counters Output ---------------------- //
// ========================================================================= //
|
|
|
|
|
|
|
|
// Counters flagged kAvgThreadsRate combine thread-averaging with division by
// CPU time, so the reporters append a "/s" suffix.
void BM_Counters_AvgThreadsRate(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate};
}
|
|
|
|
BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8);
// Expected output for each thread count in the range.
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"},
           {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 0,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/"
                       "threads:%int\",%csv_report,%float,%float$"}});
|
2017-04-29 21:27:55 +00:00
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckAvgThreadsRate(Results const& e) {
|
2018-06-01 10:14:19 +00:00
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / e.DurationCPUTime(), 0.001);
|
2017-04-29 21:27:55 +00:00
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int",
|
|
|
|
&CheckAvgThreadsRate);
|
2017-04-27 21:11:40 +00:00
|
|
|
|
2018-06-27 14:45:30 +00:00
|
|
|
// ========================================================================= //
// ------------------- IterationInvariant Counters Output ------------------ //
// ========================================================================= //
|
|
|
|
|
|
|
|
// Counters flagged kIsIterationInvariant are multiplied by the iteration
// count before being reported.
void BM_Counters_IterationInvariant(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant};
  state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant};
}
|
|
|
|
BENCHMARK(BM_Counters_IterationInvariant);
// Expected output for BM_Counters_IterationInvariant.
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_IterationInvariant\",$"},
           {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 0,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}});
|
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckIterationInvariant(Results const& e) {
|
|
|
|
double its = e.NumIterations();
|
|
|
|
// check that the values are within 0.1% of the expected value
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001);
|
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant",
|
|
|
|
&CheckIterationInvariant);
|
|
|
|
|
|
|
|
// ========================================================================= //
|
|
|
|
// ----------------- IterationInvariantRate Counters Output ---------------- //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
|
|
|
void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) {
|
|
|
|
for (auto _ : state) {
|
2019-07-27 16:02:31 +00:00
|
|
|
// This test requires a non-zero CPU time to avoid divide-by-zero
|
|
|
|
benchmark::DoNotOptimize(state.iterations());
|
2018-06-27 14:45:30 +00:00
|
|
|
}
|
|
|
|
namespace bm = benchmark;
|
|
|
|
state.counters["foo"] =
|
|
|
|
bm::Counter{1, bm::Counter::kIsIterationInvariantRate};
|
|
|
|
state.counters["bar"] =
|
|
|
|
bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant};
|
|
|
|
}
|
|
|
|
BENCHMARK(BM_Counters_kIsIterationInvariantRate);
|
|
|
|
// Rate counters are rendered with a "/s" suffix in the console report.
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"},
           {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 0,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report,"
                       "%float,%float$"}});
|
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckIsIterationInvariantRate(Results const& e) {
|
|
|
|
double its = e.NumIterations();
|
|
|
|
double t = e.DurationCPUTime(); // this (and not real time) is the time used
|
|
|
|
// check that the values are within 0.1% of the expected values
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. / t, 0.001);
|
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate",
|
|
|
|
&CheckIsIterationInvariantRate);
|
|
|
|
|
|
|
|
// ========================================================================= //
|
|
|
|
// ------------------- AvgIterations Counters Output ------------------ //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
|
|
|
void BM_Counters_AvgIterations(benchmark::State& state) {
|
|
|
|
for (auto _ : state) {
|
|
|
|
}
|
|
|
|
namespace bm = benchmark;
|
|
|
|
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations};
|
|
|
|
state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations};
|
|
|
|
}
|
|
|
|
BENCHMARK(BM_Counters_AvgIterations);
|
|
|
|
// Per-iteration averages are plain values (no "/s" suffix) in the console.
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report "
                           "bar=%hrfloat foo=%hrfloat$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_AvgIterations\",$"},
           {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 0,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}});
|
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckAvgIterations(Results const& e) {
|
|
|
|
double its = e.NumIterations();
|
|
|
|
// check that the values are within 0.1% of the expected value
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its, 0.001);
|
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations);
|
|
|
|
|
|
|
|
// ========================================================================= //
|
|
|
|
// ----------------- AvgIterationsRate Counters Output ---------------- //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
|
|
|
void BM_Counters_kAvgIterationsRate(benchmark::State& state) {
|
|
|
|
for (auto _ : state) {
|
2019-07-27 16:02:31 +00:00
|
|
|
// This test requires a non-zero CPU time to avoid divide-by-zero
|
|
|
|
benchmark::DoNotOptimize(state.iterations());
|
2018-06-27 14:45:30 +00:00
|
|
|
}
|
|
|
|
namespace bm = benchmark;
|
|
|
|
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate};
|
|
|
|
state.counters["bar"] =
|
|
|
|
bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations};
|
|
|
|
}
|
|
|
|
BENCHMARK(BM_Counters_kAvgIterationsRate);
|
|
|
|
// Rate counters are rendered with a "/s" suffix in the console report.
ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate "
                           "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"},
           {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 0,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"bar\": %float,$", MR_Next},
           {"\"foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report,"
                       "%float,%float$"}});
|
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
|
|
|
|
// to CHECK_BENCHMARK_RESULTS()
|
|
|
|
void CheckAvgIterationsRate(Results const& e) {
|
|
|
|
double its = e.NumIterations();
|
|
|
|
double t = e.DurationCPUTime(); // this (and not real time) is the time used
|
|
|
|
// check that the values are within 0.1% of the expected values
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001);
|
|
|
|
CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. / its / t, 0.001);
|
|
|
|
}
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate",
|
|
|
|
&CheckAvgIterationsRate);
|
|
|
|
|
2017-04-27 18:25:20 +00:00
|
|
|
// ========================================================================= //
|
|
|
|
// --------------------------- TEST CASES END ------------------------------ //
|
|
|
|
// ========================================================================= //
|
|
|
|
|
|
|
|
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }
|