#undef NDEBUG

#include "benchmark/benchmark.h"
#include "output_test.h"

// @todo: <jpmag> this checks the full output at once; the rule for
// CounterSet1 was failing because it was not matching "^[-]+$".
// @todo: <jpmag> check that the counters are vertically aligned.
ADD_CASES(TC_ConsoleOut,
          {
              // keeping these lines long improves readability, so:
              // clang-format off
            {"^[-]+$", MR_Next},
            {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next},
            {"^[-]+$", MR_Next},
            {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_Counters_Tabular/repeats:2/threads:1_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_Counters_Tabular/repeats:2/threads:1_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_Counters_Tabular/repeats:2/threads:1_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_Counters_Tabular/repeats:2/threads:2_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_Counters_Tabular/repeats:2/threads:2_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_Counters_Tabular/repeats:2/threads:2_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
            {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
            {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
            {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
            {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next},
            {"^[-]+$", MR_Next},
            {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Baz %s Foo$", MR_Next},
            {"^[-]+$", MR_Next},
            {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet0_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet1_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^[-]+$", MR_Next},
            {"^Benchmark %s Time %s CPU %s Iterations %s Bat %s Baz %s Foo$", MR_Next},
            {"^[-]+$", MR_Next},
            {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next},
            {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"},
              // clang-format on
          });

ADD_CASES(TC_CSVOut, {{"%csv_header,"
                       "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}});

// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //

void BM_Counters_Tabular(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {1, bm::Counter::kAvgThreads}},
      {"Bar", {2, bm::Counter::kAvgThreads}},
      {"Baz", {4, bm::Counter::kAvgThreads}},
      {"Bat", {8, bm::Counter::kAvgThreads}},
      {"Frob", {16, bm::Counter::kAvgThreads}},
      {"Lob", {32, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 2)->Repetitions(2);
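
// Two repetitions for each of 1 and 2 threads: besides the individual
// repetition runs, the reporters also emit _mean/_median/_stddev aggregate
// rows, which is what the console table above and the cases below check for.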
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 1,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_mean\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"mean\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_median\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"median\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_stddev\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"stddev\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});

ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 1,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 2,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 1,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 1,$", MR_Next},
           {"\"threads\": 2,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_median\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 1,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 2,$", MR_Next},
           {"\"aggregate_name\": \"median\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_stddev\",$"},
           {"\"family_index\": 0,$", MR_Next},
           {"\"per_family_instance_index\": 1,$", MR_Next},
           {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$",
            MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 2,$", MR_Next},
           {"\"aggregate_name\": \"stddev\",$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});

ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1_mean\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1_median\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:1_stddev\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2_mean\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2_median\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});
ADD_CASES(TC_CSVOut,
          {{"^\"BM_Counters_Tabular/repeats:2/threads:2_stddev\",%csv_report,"
            "%float,%float,%float,%float,%float,%float$"}});

// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabular(Results const& e) {
  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 1);
  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 2);
  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 4);
  CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 8);
  CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16);
  CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32);
}
CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:1$",
                        &CheckTabular);
CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$",
                        &CheckTabular);

// ========================================================================= //
// -------------------- Tabular+Rate Counters Output ----------------------- //
// ========================================================================= //

void BM_CounterRates_Tabular(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {1, bm::Counter::kAvgThreadsRate}},
      {"Bar", {2, bm::Counter::kAvgThreadsRate}},
      {"Baz", {4, bm::Counter::kAvgThreadsRate}},
      {"Bat", {8, bm::Counter::kAvgThreadsRate}},
      {"Frob", {16, bm::Counter::kAvgThreadsRate}},
      {"Lob", {32, bm::Counter::kAvgThreadsRate}},
  });
}
BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16);
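
// kAvgThreadsRate reports each counter as a rate, i.e. divided by the CPU
// time of the run, which is why the console patterns above use "%hrfloat/s"
// columns and CheckTabularRate() below compares against value / CPU time.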
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"},
           {"\"family_index\": 1,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$",
            MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float,$", MR_Next},
           {"\"Frob\": %float,$", MR_Next},
           {"\"Lob\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report,"
                       "%float,%float,%float,%float,%float,%float$"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckTabularRate(Results const& e) {
  double t = e.DurationCPUTime();
  CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001);
  CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. / t, 0.001);
}
CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
                        &CheckTabularRate);

// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //

// set only some of the counters
void BM_CounterSet0_Tabular(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {10, bm::Counter::kAvgThreads}},
      {"Bar", {20, bm::Counter::kAvgThreads}},
      {"Baz", {40, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16);
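
// Only Foo, Bar and Baz are set here, so the CSV row checked below leaves the
// Bat, Frob and Lob columns empty, and the JSON object contains just the
// three counters that were set.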
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"},
           {"\"family_index\": 2,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report,"
                       "%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet0(Results const& e) {
  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 20);
  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);

// again, but with a different set of counter values.
void BM_CounterSet1_Tabular(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {15, bm::Counter::kAvgThreads}},
      {"Bar", {25, bm::Counter::kAvgThreads}},
      {"Baz", {45, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16);
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"},
           {"\"family_index\": 3,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bar\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report,"
                       "%float,,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet1(Results const& e) {
  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 15);
  CHECK_COUNTER_VALUE(e, int, "Bar", EQ, 25);
  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 45);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);

// ========================================================================= //
// ------------------------- Tabular Counters Output ----------------------- //
// ========================================================================= //

// set only some of the counters, different set now.
void BM_CounterSet2_Tabular(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"Foo", {10, bm::Counter::kAvgThreads}},
      {"Bat", {30, bm::Counter::kAvgThreads}},
      {"Baz", {40, bm::Counter::kAvgThreads}},
  });
}
BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16);
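
// This set has Bat instead of Bar, so in the CSV row below the leading Bar
// column is empty while Bat, Baz and Foo are populated.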
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"},
           {"\"family_index\": 4,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"Bat\": %float,$", MR_Next},
           {"\"Baz\": %float,$", MR_Next},
           {"\"Foo\": %float$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report,"
                       ",%float,%float,%float,,"}});
// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckSet2(Results const& e) {
  CHECK_COUNTER_VALUE(e, int, "Foo", EQ, 10);
  CHECK_COUNTER_VALUE(e, int, "Bat", EQ, 30);
  CHECK_COUNTER_VALUE(e, int, "Baz", EQ, 40);
}
CHECK_BENCHMARK_RESULTS("BM_CounterSet2_Tabular", &CheckSet2);

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }