#undef NDEBUG

#include "benchmark/benchmark.h"
#include "output_test.h"

// ========================================================================= //
// ------------------------ Thousands Customisation ------------------------ //
// ========================================================================= //
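// BM_Counters_Thousands registers five user counters that all hold roughly
// one million, differing only in how "one K" is interpreted: kIs1000 scales
// by 1000 (SI) and kIs1024 scales by 1024 (binary). The expectations below
// check how each reporter renders the same magnitudes under the two bases.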
void BM_Counters_Thousands(benchmark::State& state) {
  for (auto _ : state) {
  }
  namespace bm = benchmark;
  state.counters.insert({
      {"t0_1000000DefaultBase",
       bm::Counter(1000 * 1000, bm::Counter::kDefaults)},
      {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1000)},
      {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1024)},
      {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1000)},
      {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults,
                                         benchmark::Counter::OneK::kIs1024)},
  });
}
BENCHMARK(BM_Counters_Thousands)->Repetitions(2);
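// The console reporter scales counters and appends a "k" suffix: 1000000 is
// shown as 1000k under a 1000 base but as ~976.56k under a 1024 base, while
// 1048576 is shown as 1048.58k vs. 1024k respectively.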
ADD_CASES(
    TC_ConsoleOut,
    {
        {"^BM_Counters_Thousands/repeats:2 %console_report "
         "t0_1000000DefaultBase=1000k "
         "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
         "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2 %console_report "
         "t0_1000000DefaultBase=1000k "
         "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k "
         "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2_mean %console_report "
         "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
         "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
         "t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2_median %console_report "
         "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k "
         "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k "
         "t4_1048576Base1024=1024k$"},
        {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ "
         "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 "
         "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"},
    });
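// The JSON reporter does not scale counters: every value is emitted verbatim
// (1e+6 or 1.048576e+6 here), so the 1000 vs. 1024 base only affects the
// console output checked above.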
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"repetition_index\": 1,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"mean\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"median\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next},
           {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next},
           {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"},
           {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"stddev\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\",$", MR_Next},
           {"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next},
           {"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(
    TC_CSVOut,
    {{"^\"BM_Counters_Thousands/"
      "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
      "0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/"
      "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+("
      "0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/"
      "repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
      "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/"
      "repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\."
      "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"},
     {"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}});

// VS2013 does not allow this function to be passed as a lambda argument
// to CHECK_BENCHMARK_RESULTS()
void CheckThousands(Results const& e) {
  if (e.name != "BM_Counters_Thousands/repeats:2")
    return;  // Do not check the aggregates!

  // check that the values are within 0.01% of the expected values
  CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000,
                            0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001);
  CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001);
}
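// The "BM_Counters_Thousands" pattern passed below also matches the
// _mean/_median/_stddev aggregate entries, which is why CheckThousands bails
// out early for anything but the plain repetition runs.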
CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands);

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }