#undef NDEBUG

#include "benchmark/benchmark.h"
#include "output_test.h"
// @todo: <jpmag> this checks the full output at once; the rule for
// CounterSet1 was failing because it was not matching "^[-]+$".
// @todo: <jpmag> check that the counters are vertically aligned.
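// Each pattern below is matched against one console line, and MR_Next means
// it must match the line immediately after the previous match, so the block
// pins down the whole tabular report in order. An accepted row would look
// roughly like this (times and iteration count illustrative):
//   BM_Counters_Tabular/repeats:2/threads:1  100 ns  100 ns  1000  2  8  4  1  16  32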
ADD_CASES(TC_ConsoleOut,
          {
              // keeping these lines long improves readability, so:
              // clang-format off
    {"^[-]+$", MR_Next},
    {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
    {"^[-]+$", MR_Next},
    {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
    {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
    {"^BM_Counters_Tabular/repeats:2/threads:1_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
    {"^BM_Counters_Tabular/repeats:2/threads:1_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
{"^BM_Counters_Tabular/repeats:2/threads:1_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
2021-09-06 08:33:27 +00:00
|
|
|
{"^BM_Counters_Tabular/repeats:2/threads:1_cv %console_percentage_report [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*%$", MR_Next},
|
2021-06-02 20:45:41 +00:00
|
|
|
{"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_Counters_Tabular/repeats:2/threads:2_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_Counters_Tabular/repeats:2/threads:2_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
{"^BM_Counters_Tabular/repeats:2/threads:2_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
2021-09-06 08:33:27 +00:00
|
|
|
{"^BM_Counters_Tabular/repeats:2/threads:2_cv %console_percentage_report [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*%$", MR_Next},
|
2017-05-02 22:00:45 +00:00
|
|
|
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
|
|
|
|
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
|
|
|
|
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
|
|
|
|
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
|
|
|
|
{"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
|
|
|
|
{"^[-]+$", MR_Next},
|
|
|
|
{"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Baz %s Foo$", MR_Next},
|
|
|
|
{"^[-]+$", MR_Next},
|
|
|
|
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^[-]+$", MR_Next},
|
|
|
|
{"^Benchmark %s Time %s CPU %s Iterations %s Bat %s Baz %s Foo$", MR_Next},
|
|
|
|
{"^[-]+$", MR_Next},
|
|
|
|
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
|
|
|
|
{"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
|
2021-06-02 20:45:41 +00:00
|
|
|
// clang-format on
|
|
|
|
          });
ADD_CASES(TC_CSVOut, {{"%csv_header,"
                       "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});

// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //

void BM_Counters_Tabular(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    auto iterations = double(state.iterations()) * double(state.iterations());
    benchmark::DoNotOptimize(iterations);
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {1, bm::Counter::kAvgThreads}},
      {"Bar", {2, bm::Counter::kAvgThreads}},
      {"Baz", {4, bm::Counter::kAvgThreads}},
      {"Bat", {8, bm::Counter::kAvgThreads}},
      {"Frob", {16, bm::Counter::kAvgThreads}},
      {"Lob", {32, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 2)->Repetitions(2);
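// Repetitions(2) makes each thread count produce two "iteration" runs plus
// mean/median/stddev/cv aggregate runs; the JSON and CSV cases below verify
// each of those runs in turn.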
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 1,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_mean\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"mean\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_median\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"median\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_stddev\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"stddev\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_cv\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"cv\",$", MR_Next},
           {"\"aggregate_unit\": \"percentage\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});

ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 1,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 2,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 1,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 1,$", MR_Next},
           {"\"threads\": 2,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_median\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 1,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 2,$", MR_Next},
           {"\"aggregate_name\": \"median\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_stddev\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 1,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 2,$", MR_Next},
           {"\"aggregate_name\": \"stddev\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_cv\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 1,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 2,$", MR_Next},
           {"\"aggregate_name\": \"cv\",$", MR_Next},
           {"\"aggregate_unit\": \"percentage\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1_mean\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1_median\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1_stddev\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
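// The _cv rows use %csv_cv_report because the coefficient of variation is
// reported as a percentage (see "aggregate_unit": "percentage" above), not
// as a time.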
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1_cv\",%csv_cv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2_mean\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2_median\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2_stddev\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2_cv\",%csv_cv_report,"
            "%float,%float,%float,%float,%float,%float$"}});

// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabular(Results const& e) {
  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
  CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8);
  CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
  CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:1$",
|
|
|
|
&CheckTabular);
|
|
|
|
CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$",
|
|
|
|
&CheckTabular);
|
2017-05-02 19:33:28 +00:00
|
|
|
|
2017-05-02 19:47:41 +00:00
|
|
|
// ========================================================================= //
// -------------------- Tabular+Rate Counters Output ----------------------- //
// ========================================================================= //

void BM_CounterRates_Tabular(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    auto iterations = double(state.iterations()) * double(state.iterations());
    benchmark::DoNotOptimize(iterations);
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {1, bm::Counter::kAvgThreadsRate}},
      {"Bar", {2, bm::Counter::kAvgThreadsRate}},
      {"Baz", {4, bm::Counter::kAvgThreadsRate}},
      {"Bat", {8, bm::Counter::kAvgThreadsRate}},
      {"Frob", {16, bm::Counter::kAvgThreadsRate}},
      {"Lob", {32, bm::Counter::kAvgThreadsRate}},
  });
}
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
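// ThreadRange(1, 16) runs at 1, 2, 4, 8, and 16 threads, which is why the
// console section above expects five BM_CounterRates_Tabular rows.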
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
           {"\"family_index\": 1,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
|
2017-05-02 19:47:41 +00:00
|
|
|
"%float,%float,%float,%float,%float,%float$"}});
|
|
|
|
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabularRate(Results const& e) {
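  // kAvgThreadsRate counters are averaged over threads and then divided by
  // the run's CPU time, so each expected value is the seed constant over
  // DurationCPUTime(), within the 0.001 tolerance given below.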
  double t = e.DurationCPUTime();
  CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
|
|
|
|
&CheckTabularRate);
|
2017-05-02 19:47:41 +00:00
|
|
|
|
2017-05-02 19:33:28 +00:00
|
|
|
// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //

// set only some of the counters
void BM_CounterSet0_Tabular(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {10, bm::Counter::kAvgThreads}},
      {"Bar", {20, bm::Counter::kAvgThreads}},
      {"Baz", {40, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
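// kAvgThreads reports the average of the counter across all threads, so the
// expected values in CheckSet0 below are independent of the thread count.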
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
           {"\"family_index\": 2,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
|
2017-05-02 21:14:49 +00:00
|
|
|
"%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet0(Results const& e) {
  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);

// again, with a different set of values.
void BM_CounterSet1_Tabular(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {15, bm::Counter::kAvgThreads}},
      {"Bar", {25, bm::Counter::kAvgThreads}},
      {"Baz", {45, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
|
|
|
|
{{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
|
2021-06-02 15:06:45 +00:00
|
|
|
{"\"family_index\": 3,$", MR_Next},
|
2021-06-02 20:45:41 +00:00
|
|
|
{"\"per_family_instance_index\": 0,$", MR_Next},
|
Track two more details about runs - the aggregate name, and run name. (#675)
This is related to @BaaMeow's work in https://github.com/google/benchmark/pull/616 but is not based on it.
Two new fields are tracked, and dumped into JSON:
* If the run is an aggregate, the aggregate's name is stored.
It can be RMS, BigO, mean, median, stddev, or any custom stat name.
* The aggregate-name-less run name is additionally stored.
I.e. not some name of the benchmark function, but the actual
name, but without the 'aggregate name' suffix.
This way one can group/filter all the runs,
and filter by the particular aggregate type.
I *might* need this for further tooling improvement.
Or maybe not.
But this is certainly worthwhile for custom tooling.
2018-09-13 12:08:15 +00:00
|
|
|
{"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
|
|
|
|
{"\"run_type\": \"iteration\",$", MR_Next},
|
2021-06-02 09:34:00 +00:00
|
|
|
{"\"repetitions\": 1,$", MR_Next},
|
2019-03-26 09:53:07 +00:00
|
|
|
{"\"repetition_index\": 0,$", MR_Next},
|
|
|
|
{"\"threads\": 1,$", MR_Next},
|
Track two more details about runs - the aggregate name, and run name. (#675)
This is related to @BaaMeow's work in https://github.com/google/benchmark/pull/616 but is not based on it.
Two new fields are tracked, and dumped into JSON:
* If the run is an aggregate, the aggregate's name is stored.
It can be RMS, BigO, mean, median, stddev, or any custom stat name.
* The aggregate-name-less run name is additionally stored.
I.e. not some name of the benchmark function, but the actual
name, but without the 'aggregate name' suffix.
This way one can group/filter all the runs,
and filter by the particular aggregate type.
I *might* need this for further tooling improvement.
Or maybe not.
But this is certainly worthwhile for custom tooling.
2018-09-13 12:08:15 +00:00
|
|
|
{"\"iterations\": %int,$", MR_Next},
|
|
|
|
{"\"real_time\": %float,$", MR_Next},
|
|
|
|
{"\"cpu_time\": %float,$", MR_Next},
|
|
|
|
{"\"time_unit\": \"ns\",$", MR_Next},
|
|
|
|
{"\"Bar\": %float,$", MR_Next},
|
|
|
|
{"\"Baz\": %float,$", MR_Next},
|
|
|
|
{"\"Foo\": %float$", MR_Next},
|
|
|
|
{"}", MR_Next}});

ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
                       "%float,,%float,%float,,"}});

// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet1(Results const& e) {
  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);

// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //

// set only some of the counters, different set now.
void BM_CounterSet2_Tabular(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {10, bm::Counter::kAvgThreads}},
      {"Bat", {30, bm::Counter::kAvgThreads}},
      {"Baz", {40, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
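
// Benchmark families are numbered in registration order, so CounterSet2,
// the fifth family registered in this file, is expected to report
// "family_index": 4 below.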

ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
           {"\"family_index\": 4,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float$", MR_Next},
           {"}", MR_Next}});

ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
                       ",%float,%float,%float,,"}});

// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet2(Results const& e) {
  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
  CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }