#undef NDEBUG

#include <numeric>
#include <utility>

#include "benchmark/benchmark.h"
#include "output_test.h"

// ========================================================================= //
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //

ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
                          {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
                          {"^[-]+$", MR_Next}});
static int AddContextCases() {
  AddCases(TC_ConsoleErr,
           {
               {"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default},
               {"Running .*(/|\\\\)reporter_output_test(\\.exe)?$", MR_Next},
               {"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
           });
  AddCases(TC_JSONOut,
           {{"^\\{", MR_Default},
            {"\"context\":", MR_Next},
            {"\"date\": \"", MR_Next},
            {"\"host_name\":", MR_Next},
            {"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
             MR_Next},
            {"\"num_cpus\": %int,$", MR_Next},
            {"\"mhz_per_cpu\": %float,$", MR_Next},
            {"\"caches\": \\[$", MR_Default}});
  auto const& Info = benchmark::CPUInfo::Get();
  auto const& Caches = Info.caches;
  if (!Caches.empty()) {
    AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
  }
  for (size_t I = 0; I < Caches.size(); ++I) {
    std::string num_caches_str =
        Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
    AddCases(TC_ConsoleErr,
             {{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str,
               MR_Next}});
    AddCases(TC_JSONOut, {{"\\{$", MR_Next},
                          {"\"type\": \"", MR_Next},
                          {"\"level\": %int,$", MR_Next},
                          {"\"size\": %int,$", MR_Next},
                          {"\"num_sharing\": %int$", MR_Next},
                          {"}[,]{0,1}$", MR_Next}});
  }
  AddCases(TC_JSONOut, {{"],$"}});
  auto const& LoadAvg = Info.load_avg;
  if (!LoadAvg.empty()) {
    AddCases(TC_ConsoleErr,
             {{"Load Average: (%float, ){0,2}%float$", MR_Next}});
  }
  AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
  AddCases(TC_JSONOut, {{"\"library_version\": \".*\",$", MR_Next}});
  AddCases(TC_JSONOut, {{"\"library_build_type\": \".*\",$", MR_Next}});
  AddCases(TC_JSONOut, {{"\"json_schema_version\": 1$", MR_Next}});
  return 0;
}
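
// Assigning the result to a global forces AddContextCases() to run during
// static initialization, before the benchmarks themselves execute.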
int dummy_register = AddContextCases();
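
// %csv_header is the harness substitution for the standard CSV header row,
// which is expected to appear before any per-benchmark rows.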
ADD_CASES(TC_CSVOut, {{"%csv_header"}});

// ========================================================================= //
// ------------------------ Testing Basic Output --------------------------- //
// ========================================================================= //

void BM_basic(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_basic);

ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"},
                       {"\"family_index\": 0,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_basic\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\"$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});

// ========================================================================= //
// ------------------------ Testing Bytes per Second Output ---------------- //
// ========================================================================= //

void BM_bytes_per_second(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    auto iterations = double(state.iterations()) * double(state.iterations());
    benchmark::DoNotOptimize(iterations);
  }
  state.SetBytesProcessed(1);
}
BENCHMARK(BM_bytes_per_second);

ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report "
                           "bytes_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
                       {"\"family_index\": 1,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_bytes_per_second\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"bytes_per_second\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});

// ========================================================================= //
// ------------------------ Testing Items per Second Output ---------------- //
// ========================================================================= //

void BM_items_per_second(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    auto iterations = double(state.iterations()) * double(state.iterations());
    benchmark::DoNotOptimize(iterations);
  }
  state.SetItemsProcessed(1);
}
BENCHMARK(BM_items_per_second);

ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report "
                           "items_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
                       {"\"family_index\": 2,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_items_per_second\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"items_per_second\": %float$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});

// ========================================================================= //
// ------------------------ Testing Label Output --------------------------- //
// ========================================================================= //

void BM_label(benchmark::State& state) {
  for (auto _ : state) {
  }
  state.SetLabel("some label");
}
BENCHMARK(BM_label);

ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"},
                       {"\"family_index\": 3,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_label\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\",$", MR_Next},
                       {"\"label\": \"some label\"$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
                       "label\"%csv_label_report_end$"}});

// ========================================================================= //
// ------------------------ Testing Time Label Output ---------------------- //
// ========================================================================= //

void BM_time_label_nanosecond(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_time_label_nanosecond)->Unit(benchmark::kNanosecond);

ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_nanosecond %console_report$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_time_label_nanosecond\",$"},
           {"\"family_index\": 4,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_time_label_nanosecond\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ns\"$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_nanosecond\",%csv_report$"}});

void BM_time_label_microsecond(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_time_label_microsecond)->Unit(benchmark::kMicrosecond);

ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_microsecond %console_us_report$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_time_label_microsecond\",$"},
           {"\"family_index\": 5,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_time_label_microsecond\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"us\"$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_microsecond\",%csv_us_report$"}});

void BM_time_label_millisecond(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_time_label_millisecond)->Unit(benchmark::kMillisecond);

ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_millisecond %console_ms_report$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_time_label_millisecond\",$"},
           {"\"family_index\": 6,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_time_label_millisecond\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"iterations\": %int,$", MR_Next},
           {"\"real_time\": %float,$", MR_Next},
           {"\"cpu_time\": %float,$", MR_Next},
           {"\"time_unit\": \"ms\"$", MR_Next},
           {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_millisecond\",%csv_ms_report$"}});

void BM_time_label_second(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_time_label_second)->Unit(benchmark::kSecond);

ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_second %console_s_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_second\",$"},
                       {"\"family_index\": 7,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_time_label_second\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"s\"$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_second\",%csv_s_report$"}});

// ========================================================================= //
// ------------------------ Testing Error Output --------------------------- //
// ========================================================================= //

void BM_error(benchmark::State& state) {
  state.SkipWithError("message");
  for (auto _ : state) {
  }
}
BENCHMARK(BM_error);
ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"},
                       {"\"family_index\": 8,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_error\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"error_occurred\": true,$", MR_Next},
                       {"\"error_message\": \"message\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});

// ========================================================================= //
// ----------------------- Testing No Arg Name Output ---------------------- //
// ========================================================================= //

void BM_no_arg_name(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_no_arg_name)->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"},
                       {"\"family_index\": 9,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});

// ========================================================================= //
// ------------------------ Testing Arg Name Output ------------------------ //
// ========================================================================= //

void BM_arg_name(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"},
                       {"\"family_index\": 10,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});

// ========================================================================= //
// ------------------------ Testing Arg Names Output ----------------------- //
// ========================================================================= //

void BM_arg_names(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
ADD_CASES(TC_ConsoleOut,
          {{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
ADD_CASES(TC_JSONOut,
          {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
           {"\"family_index\": 11,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
           {"\"run_type\": \"iteration\",$", MR_Next},
           {"\"repetitions\": 1,$", MR_Next},
           {"\"repetition_index\": 0,$", MR_Next},
           {"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});

// ========================================================================= //
// ------------------------ Testing Name Output ---------------------------- //
// ========================================================================= //

void BM_name(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_name)->Name("BM_custom_name");

ADD_CASES(TC_ConsoleOut, {{"^BM_custom_name %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_custom_name\",$"},
                       {"\"family_index\": 12,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_custom_name\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"iterations\": %int,$", MR_Next},
                       {"\"real_time\": %float,$", MR_Next},
                       {"\"cpu_time\": %float,$", MR_Next},
                       {"\"time_unit\": \"ns\"$", MR_Next},
                       {"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_custom_name\",%csv_report$"}});

// ========================================================================= //
// ------------------------ Testing Big Args Output ------------------------ //
// ========================================================================= //

void BM_BigArgs(benchmark::State& state) {
  for (auto _ : state) {
  }
}
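// Range(1U << 30U, 1U << 31U) with RangeMultiplier(2) yields the arguments
// 1073741824 and 2147483648; the second one exceeds INT32_MAX, so this also
// exercises formatting of large argument values in benchmark names.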
BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U);
ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
                          {"^BM_BigArgs/2147483648 %console_report$"}});

// ========================================================================= //
// ----------------------- Testing Complexity Output ----------------------- //
// ========================================================================= //

void BM_Complexity_O1(benchmark::State& state) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    auto iterations = double(state.iterations()) * double(state.iterations());
    benchmark::DoNotOptimize(iterations);
  }
  state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
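
// Complexity() makes the reporters emit synthesized "_BigO" and "_RMS" rows
// after the individual runs; SET_SUBSTITUTIONS below defines the %bigOStr and
// %RMS placeholders that the following cases use to match those rows.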
SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"},
                   {"%RMS", "[ ]*[0-9]+ %"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
                          {"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});

// ========================================================================= //
// ----------------------- Testing Aggregate Output ------------------------ //
// ========================================================================= //

// Test that non-aggregate data is printed by default
void BM_Repeat(benchmark::State& state) {
  for (auto _ : state) {
  }
}
// need two repetitions min to be able to output any aggregate output
BENCHMARK(BM_Repeat)->Repetitions(2);
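
// With two repetitions, the default output contains both individual runs
// followed by the mean, median and stddev aggregates, in that order; the JSON
// cases below walk through the same sequence of entries.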
ADD_CASES(TC_ConsoleOut,
          {{"^BM_Repeat/repeats:2 %console_report$"},
           {"^BM_Repeat/repeats:2 %console_report$"},
           {"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
           {"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
           {"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
                       {"\"family_index\": 15,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"name\": \"BM_Repeat/repeats:2\",$"},
                       {"\"family_index\": 15,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"repetition_index\": 1,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"name\": \"BM_Repeat/repeats:2_mean\",$"},
                       {"\"family_index\": 15,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
                       {"\"run_type\": \"aggregate\",$", MR_Next},
                       {"\"repetitions\": 2,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
|
Track two more details about runs - the aggregate name, and run name. (#675)
This is related to @BaaMeow's work in https://github.com/google/benchmark/pull/616 but is not based on it.
Two new fields are tracked, and dumped into JSON:
* If the run is an aggregate, the aggregate's name is stored.
It can be RMS, BigO, mean, median, stddev, or any custom stat name.
* The aggregate-name-less run name is additionally stored.
I.e. not some name of the benchmark function, but the actual
name, but without the 'aggregate name' suffix.
This way one can group/filter all the runs,
and filter by the particular aggregate type.
I *might* need this for further tooling improvement.
Or maybe not.
But this is certainly worthwhile for custom tooling.
2018-09-13 12:08:15 +00:00
|
|
|
{"\"aggregate_name\": \"mean\",$", MR_Next},
|
Statistics: add support for percentage unit in addition to time (#1219)
* Statistics: add support for percentage unit in addition to time
I think, `stddev` statistic is useful, but confusing.
What does it mean if `stddev` of `1ms` is reported?
Is that good or bad? If the `median` is `1s`,
then that means that the measurements are pretty noise-less.
And what about `stddev` of `100ms` is reported?
If the `median` is `1s` - awful, if the `median` is `10s` - good.
And hurray, there is just the statistic that we need:
https://en.wikipedia.org/wiki/Coefficient_of_variation
But, naturally, that produces a value in percents,
but the statistics are currently hardcoded to produce time.
So this refactors thinkgs a bit, and allows a percentage unit for statistics.
I'm not sure whether or not `benchmark` would be okay
with adding this `RSD` statistic by default,
but regales, that is a separate patch.
Refs. https://github.com/google/benchmark/issues/1146
* Address review notes
2021-09-03 14:36:56 +00:00
|
|
|
{"\"aggregate_unit\": \"time\",$", MR_Next},
|
2018-10-18 14:17:14 +00:00
|
|
|
{"\"iterations\": 2,$", MR_Next},
|
Drop Stat1, refactor statistics to be user-providable, add median. (#428)
* Drop Stat1, refactor statistics to be user-providable, add median.
My main goal was to add median statistic. Since Stat1
calculated the stats incrementally, and did not store
the values themselves, it is was not possible. Thus,
i have replaced Stat1 with simple std::vector<double>,
containing all the values.
Then, i have refactored current mean/stdev to be a
function that is provided with values vector, and
returns the statistic. While there, it seemed to make
sense to deduplicate the code by storing all the
statistics functions in a map, and then simply iterate
over it. And the interface to add new statistics is
intentionally exposed, so they may be added easily.
The notable change is that Iterations are no longer
displayed as 0 for stdev. Is could be changed, but
i'm not sure how to nicely fit that into the API.
Similarly, this dance about sometimes (for some fields,
for some statistics) dividing by run.iterations, and
then multiplying the calculated stastic back is also
dropped, and if you do the math, i fail to see why
it was needed there in the first place.
Since that was the only use of stat.h, it is removed.
* complexity.h: attempt to fix MSVC build
* Update README.md
* Store statistics to compute in a vector, ensures ordering.
* Add a bit more tests for repetitions.
* Partially address review notes.
* Fix gcc build: drop extra ';'
clang, why didn't you warn me?
* Address review comments.
* double() -> 0.0
* early return
2017-08-23 23:44:29 +00:00
|
|
|
{"\"name\": \"BM_Repeat/repeats:2_median\",$"},
|
2021-06-02 15:06:45 +00:00
|
|
|
{"\"family_index\": 15,$", MR_Next},
|
2021-06-02 20:45:41 +00:00
|
|
|
{"\"per_family_instance_index\": 0,$", MR_Next},
|
Track two more details about runs - the aggregate name, and run name. (#675)
This is related to @BaaMeow's work in https://github.com/google/benchmark/pull/616 but is not based on it.
Two new fields are tracked, and dumped into JSON:
* If the run is an aggregate, the aggregate's name is stored.
It can be RMS, BigO, mean, median, stddev, or any custom stat name.
* The aggregate-name-less run name is additionally stored.
I.e. not some name of the benchmark function, but the actual
name, but without the 'aggregate name' suffix.
This way one can group/filter all the runs,
and filter by the particular aggregate type.
I *might* need this for further tooling improvement.
Or maybe not.
But this is certainly worthwhile for custom tooling.
2018-09-13 12:08:15 +00:00
|
|
|
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
|
Track 'type' of the run - is it an actual measurement, or an aggregate. (#658)
This is *only* exposed in the JSON. Not in CSV, which is deprecated.
This *only* supposed to track these two states.
An additional field could later track which aggregate this is,
specifically (statistic name, rms, bigo, ...)
The motivation is that we already have ReportAggregatesOnly,
but it affects the entire reports, both the display,
and the reporters (json files), which isn't ideal.
It would be very useful to have a 'display aggregates only' option,
both in the library's console reporter, and the python tooling,
This will be especially needed for the 'store separate iterations'.
2018-08-28 15:11:36 +00:00
|
|
|
{"\"run_type\": \"aggregate\",$", MR_Next},
|
2019-03-26 09:53:07 +00:00
|
|
|
{"\"repetitions\": 2,$", MR_Next},
|
|
|
|
{"\"threads\": 1,$", MR_Next},
|
Track two more details about runs - the aggregate name, and run name. (#675)
This is related to @BaaMeow's work in https://github.com/google/benchmark/pull/616 but is not based on it.
Two new fields are tracked, and dumped into JSON:
* If the run is an aggregate, the aggregate's name is stored.
It can be RMS, BigO, mean, median, stddev, or any custom stat name.
* The aggregate-name-less run name is additionally stored.
I.e. not some name of the benchmark function, but the actual
name, but without the 'aggregate name' suffix.
This way one can group/filter all the runs,
and filter by the particular aggregate type.
I *might* need this for further tooling improvement.
Or maybe not.
But this is certainly worthwhile for custom tooling.
2018-09-13 12:08:15 +00:00
|
|
|
{"\"aggregate_name\": \"median\",$", MR_Next},
|
Statistics: add support for percentage unit in addition to time (#1219)
* Statistics: add support for percentage unit in addition to time
I think, `stddev` statistic is useful, but confusing.
What does it mean if `stddev` of `1ms` is reported?
Is that good or bad? If the `median` is `1s`,
then that means that the measurements are pretty noise-less.
And what about `stddev` of `100ms` is reported?
If the `median` is `1s` - awful, if the `median` is `10s` - good.
And hurray, there is just the statistic that we need:
https://en.wikipedia.org/wiki/Coefficient_of_variation
But, naturally, that produces a value in percents,
but the statistics are currently hardcoded to produce time.
So this refactors thinkgs a bit, and allows a percentage unit for statistics.
I'm not sure whether or not `benchmark` would be okay
with adding this `RSD` statistic by default,
but regales, that is a separate patch.
Refs. https://github.com/google/benchmark/issues/1146
* Address review notes
2021-09-03 14:36:56 +00:00
|
|
|
{"\"aggregate_unit\": \"time\",$", MR_Next},
|
2018-10-18 14:17:14 +00:00
|
|
|
{"\"iterations\": 2,$", MR_Next},
|
Track 'type' of the run - is it an actual measurement, or an aggregate. (#658)
This is *only* exposed in the JSON. Not in CSV, which is deprecated.
This *only* supposed to track these two states.
An additional field could later track which aggregate this is,
specifically (statistic name, rms, bigo, ...)
The motivation is that we already have ReportAggregatesOnly,
but it affects the entire reports, both the display,
and the reporters (json files), which isn't ideal.
It would be very useful to have a 'display aggregates only' option,
both in the library's console reporter, and the python tooling,
This will be especially needed for the 'store separate iterations'.
2018-08-28 15:11:36 +00:00
|
|
|
{"\"name\": \"BM_Repeat/repeats:2_stddev\",$"},
|
2021-06-02 15:06:45 +00:00
|
|
|
{"\"family_index\": 15,$", MR_Next},
|
2021-06-02 20:45:41 +00:00
|
|
|
{"\"per_family_instance_index\": 0,$", MR_Next},
|
Track two more details about runs - the aggregate name, and run name. (#675)
This is related to @BaaMeow's work in https://github.com/google/benchmark/pull/616 but is not based on it.
Two new fields are tracked, and dumped into JSON:
* If the run is an aggregate, the aggregate's name is stored.
It can be RMS, BigO, mean, median, stddev, or any custom stat name.
* The aggregate-name-less run name is additionally stored.
I.e. not some name of the benchmark function, but the actual
name, but without the 'aggregate name' suffix.
This way one can group/filter all the runs,
and filter by the particular aggregate type.
I *might* need this for further tooling improvement.
Or maybe not.
But this is certainly worthwhile for custom tooling.
2018-09-13 12:08:15 +00:00
|
|
|
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
|
|
|
|
{"\"run_type\": \"aggregate\",$", MR_Next},
|
2019-03-26 09:53:07 +00:00
|
|
|
{"\"repetitions\": 2,$", MR_Next},
|
|
|
|
{"\"threads\": 1,$", MR_Next},
|
2018-10-18 14:17:14 +00:00
|
|
|
{"\"aggregate_name\": \"stddev\",$", MR_Next},
|
Statistics: add support for percentage unit in addition to time (#1219)
* Statistics: add support for percentage unit in addition to time
I think, `stddev` statistic is useful, but confusing.
What does it mean if `stddev` of `1ms` is reported?
Is that good or bad? If the `median` is `1s`,
then that means that the measurements are pretty noise-less.
And what about `stddev` of `100ms` is reported?
If the `median` is `1s` - awful, if the `median` is `10s` - good.
And hurray, there is just the statistic that we need:
https://en.wikipedia.org/wiki/Coefficient_of_variation
But, naturally, that produces a value in percents,
but the statistics are currently hardcoded to produce time.
So this refactors thinkgs a bit, and allows a percentage unit for statistics.
I'm not sure whether or not `benchmark` would be okay
with adding this `RSD` statistic by default,
but regales, that is a separate patch.
Refs. https://github.com/google/benchmark/issues/1146
* Address review notes
2021-09-03 14:36:56 +00:00
|
|
|
{"\"aggregate_unit\": \"time\",$", MR_Next},
|
2018-10-18 14:17:14 +00:00
|
|
|
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_mean\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_median\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
// but for two repetitions, the mean and the median are the same, so let's repeat...
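// A minimal sketch of the median rule assumed by the comment above (an
// illustration, not necessarily the library's exact implementation): sort the
// samples and, for an even-sized sample, average the two middle values.
//
//   double SampleMedian(std::vector<double> v) {
//     std::sort(v.begin(), v.end());
//     const size_t mid = v.size() / 2;
//     return (v.size() % 2 == 0) ? (v[mid - 1] + v[mid]) / 2.0 : v[mid];
//   }
//
// For two repetitions {a, b} this yields (a + b) / 2, i.e. exactly the mean,
// so an odd repetition count is exercised next to tell the two aggregates
// apart.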
BENCHMARK(BM_Repeat)->Repetitions(3);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_mean\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_median\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_stddev\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_median\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
// the median differs between even and odd numbers of repetitions, so just to be sure
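// A small worked example of that difference: for the odd-sized sample
// {1, 2, 4} the median is the middle value, 2, while for the even-sized
// sample {1, 2, 4, 9} it is the mean of the two middle values,
// (2 + 4) / 2 = 3. Running four repetitions below exercises the even case.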
BENCHMARK(BM_Repeat)->Repetitions(4);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_mean\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_median\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
|
Track 'type' of the run - is it an actual measurement, or an aggregate. (#658)
This is *only* exposed in the JSON. Not in CSV, which is deprecated.
This *only* supposed to track these two states.
An additional field could later track which aggregate this is,
specifically (statistic name, rms, bigo, ...)
The motivation is that we already have ReportAggregatesOnly,
but it affects the entire reports, both the display,
and the reporters (json files), which isn't ideal.
It would be very useful to have a 'display aggregates only' option,
both in the library's console reporter, and the python tooling,
This will be especially needed for the 'store separate iterations'.
2018-08-28 15:11:36 +00:00
|
|
|
{"\"run_type\": \"aggregate\",$", MR_Next},
|
2019-03-26 09:53:07 +00:00
|
|
|
{"\"repetitions\": 4,$", MR_Next},
|
|
|
|
{"\"threads\": 1,$", MR_Next},
|
Track two more details about runs - the aggregate name, and run name. (#675)
This is related to @BaaMeow's work in https://github.com/google/benchmark/pull/616 but is not based on it.
Two new fields are tracked, and dumped into JSON:
* If the run is an aggregate, the aggregate's name is stored.
It can be RMS, BigO, mean, median, stddev, or any custom stat name.
* The aggregate-name-less run name is additionally stored.
I.e. not some name of the benchmark function, but the actual
name, but without the 'aggregate name' suffix.
This way one can group/filter all the runs,
and filter by the particular aggregate type.
I *might* need this for further tooling improvement.
Or maybe not.
But this is certainly worthwhile for custom tooling.
2018-09-13 12:08:15 +00:00
|
|
|
{"\"aggregate_name\": \"median\",$", MR_Next},
|
Statistics: add support for percentage unit in addition to time (#1219)
* Statistics: add support for percentage unit in addition to time
I think, `stddev` statistic is useful, but confusing.
What does it mean if `stddev` of `1ms` is reported?
Is that good or bad? If the `median` is `1s`,
then that means that the measurements are pretty noise-less.
And what about `stddev` of `100ms` is reported?
If the `median` is `1s` - awful, if the `median` is `10s` - good.
And hurray, there is just the statistic that we need:
https://en.wikipedia.org/wiki/Coefficient_of_variation
But, naturally, that produces a value in percents,
but the statistics are currently hardcoded to produce time.
So this refactors thinkgs a bit, and allows a percentage unit for statistics.
I'm not sure whether or not `benchmark` would be okay
with adding this `RSD` statistic by default,
but regales, that is a separate patch.
Refs. https://github.com/google/benchmark/issues/1146
* Address review notes
2021-09-03 14:36:56 +00:00
|
|
|
{"\"aggregate_unit\": \"time\",$", MR_Next},
|
2018-10-18 14:17:14 +00:00
|
|
|
{"\"iterations\": 4,$", MR_Next},
|
Track 'type' of the run - is it an actual measurement, or an aggregate. (#658)
This is *only* exposed in the JSON. Not in CSV, which is deprecated.
This *only* supposed to track these two states.
An additional field could later track which aggregate this is,
specifically (statistic name, rms, bigo, ...)
The motivation is that we already have ReportAggregatesOnly,
but it affects the entire reports, both the display,
and the reporters (json files), which isn't ideal.
It would be very useful to have a 'display aggregates only' option,
both in the library's console reporter, and the python tooling,
This will be especially needed for the 'store separate iterations'.
2018-08-28 15:11:36 +00:00
|
|
|
{"\"name\": \"BM_Repeat/repeats:4_stddev\",$"},
|
2021-06-02 15:06:45 +00:00
|
|
|
{"\"family_index\": 17,$", MR_Next},
|
2021-06-02 20:45:41 +00:00
|
|
|
{"\"per_family_instance_index\": 0,$", MR_Next},
|
Track two more details about runs - the aggregate name, and run name. (#675)
This is related to @BaaMeow's work in https://github.com/google/benchmark/pull/616 but is not based on it.
Two new fields are tracked, and dumped into JSON:
* If the run is an aggregate, the aggregate's name is stored.
It can be RMS, BigO, mean, median, stddev, or any custom stat name.
* The aggregate-name-less run name is additionally stored.
I.e. not some name of the benchmark function, but the actual
name, but without the 'aggregate name' suffix.
This way one can group/filter all the runs,
and filter by the particular aggregate type.
I *might* need this for further tooling improvement.
Or maybe not.
But this is certainly worthwhile for custom tooling.
2018-09-13 12:08:15 +00:00
|
|
|
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
|
|
|
|
{"\"run_type\": \"aggregate\",$", MR_Next},
|
2019-03-26 09:53:07 +00:00
|
|
|
{"\"repetitions\": 4,$", MR_Next},
|
|
|
|
{"\"threads\": 1,$", MR_Next},
|
2018-10-18 14:17:14 +00:00
|
|
|
{"\"aggregate_name\": \"stddev\",$", MR_Next},
|
Statistics: add support for percentage unit in addition to time (#1219)
* Statistics: add support for percentage unit in addition to time
I think, `stddev` statistic is useful, but confusing.
What does it mean if `stddev` of `1ms` is reported?
Is that good or bad? If the `median` is `1s`,
then that means that the measurements are pretty noise-less.
And what about `stddev` of `100ms` is reported?
If the `median` is `1s` - awful, if the `median` is `10s` - good.
And hurray, there is just the statistic that we need:
https://en.wikipedia.org/wiki/Coefficient_of_variation
But, naturally, that produces a value in percents,
but the statistics are currently hardcoded to produce time.
So this refactors thinkgs a bit, and allows a percentage unit for statistics.
I'm not sure whether or not `benchmark` would be okay
with adding this `RSD` statistic by default,
but regales, that is a separate patch.
Refs. https://github.com/google/benchmark/issues/1146
* Address review notes
2021-09-03 14:36:56 +00:00
|
|
|
{"\"aggregate_unit\": \"time\",$", MR_Next},
|
2018-10-18 14:17:14 +00:00
|
|
|
{"\"iterations\": 4,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
                      {"^\"BM_Repeat/repeats:4\",%csv_report$"},
                      {"^\"BM_Repeat/repeats:4\",%csv_report$"},
                      {"^\"BM_Repeat/repeats:4\",%csv_report$"},
                      {"^\"BM_Repeat/repeats:4_mean\",%csv_report$"},
                      {"^\"BM_Repeat/repeats:4_median\",%csv_report$"},
                      {"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}});

// Test that a non-repeated test still prints non-aggregate results even when
// only-aggregate reports have been requested
void BM_RepeatOnce(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
                       {"\"family_index\": 18,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});

// Test that non-aggregate data is not reported
void BM_SummaryRepeat(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
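// With ReportAggregatesOnly() and three repetitions, every reporter below is
// expected to omit the individual BM_SummaryRepeat/repeats:3 runs (MR_Not) and
// to emit only the mean/median/stddev aggregates.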
ADD_CASES(
    TC_ConsoleOut,
    {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
     {"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
     {"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
     {"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut,
          {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
           {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
           {"\"family_index\": 19,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 3,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"mean\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 3,$", MR_Next},
           {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
           {"\"family_index\": 19,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 3,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"median\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 3,$", MR_Next},
           {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
           {"\"family_index\": 19,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 3,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"stddev\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
                      {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
                      {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
                      {"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});

// Test that non-aggregate data is not displayed.
// NOTE: this test is kinda bad. we are only testing the display output.
// But we don't check that the file output still contains everything...
void BM_SummaryDisplay(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly();
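// Per the NOTE above, only the display output is really verified here: the
// MR_Not patterns below require a trailing space after the run name, which
// appears in the console output but not in the JSON/CSV files, so those files
// are not actually checked for the absence of per-repetition results.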
ADD_CASES(
    TC_ConsoleOut,
    {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
     {"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
     {"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
     {"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut,
          {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
           {"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
           {"\"family_index\": 20,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"mean\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"},
           {"\"family_index\": 20,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"median\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next},
           {"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"},
           {"\"family_index\": 20,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 2,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"stddev\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut,
          {{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
           {"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
           {"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
           {"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});

// Test repeats with custom time unit.
void BM_RepeatTimeUnit(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_RepeatTimeUnit)
    ->Repetitions(3)
    ->ReportAggregatesOnly()
    ->Unit(benchmark::kMicrosecond);
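// The aggregates inherit the benchmark's microsecond time unit: the console
// checks below use %console_us_time_only_report and the JSON entries are
// expected to carry "time_unit": "us".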
ADD_CASES(
    TC_ConsoleOut,
    {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
     {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
     {"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
      "]*3$"},
     {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
      "]*3$"}});
ADD_CASES(TC_JSONOut,
          {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
           {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
           {"\"family_index\": 21,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 3,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"mean\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 3,$", MR_Next},
           {"\"time_unit\": \"us\",?$"},
           {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
           {"\"family_index\": 21,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 3,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"median\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 3,$", MR_Next},
           {"\"time_unit\": \"us\",?$"},
           {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
           {"\"family_index\": 21,$", MR_Next},
           {"\"per_family_instance_index\": 0,$", MR_Next},
           {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
           {"\"run_type\": \"aggregate\",$", MR_Next},
           {"\"repetitions\": 3,$", MR_Next},
           {"\"threads\": 1,$", MR_Next},
           {"\"aggregate_name\": \"stddev\",$", MR_Next},
           {"\"aggregate_unit\": \"time\",$", MR_Next},
           {"\"iterations\": 3,$", MR_Next},
           {"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut,
          {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
           {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
           {"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
           {"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});

// ========================================================================= //
// -------------------- Testing user-provided statistics ------------------- //
// ========================================================================= //

|
|
|
|
|
|
|
|
const auto UserStatistics = [](const std::vector<double>& v) {
|
|
|
|
return v.back();
|
|
|
|
};
|
|
|
|
void BM_UserStats(benchmark::State& state) {
|
2017-10-17 18:17:02 +00:00
|
|
|
for (auto _ : state) {
|
2018-10-18 14:17:14 +00:00
|
|
|
state.SetIterationTime(150 / 10e8);
|
Drop Stat1, refactor statistics to be user-providable, add median. (#428)
* Drop Stat1, refactor statistics to be user-providable, add median.
My main goal was to add median statistic. Since Stat1
calculated the stats incrementally, and did not store
the values themselves, it is was not possible. Thus,
i have replaced Stat1 with simple std::vector<double>,
containing all the values.
Then, i have refactored current mean/stdev to be a
function that is provided with values vector, and
returns the statistic. While there, it seemed to make
sense to deduplicate the code by storing all the
statistics functions in a map, and then simply iterate
over it. And the interface to add new statistics is
intentionally exposed, so they may be added easily.
The notable change is that Iterations are no longer
displayed as 0 for stdev. Is could be changed, but
i'm not sure how to nicely fit that into the API.
Similarly, this dance about sometimes (for some fields,
for some statistics) dividing by run.iterations, and
then multiplying the calculated stastic back is also
dropped, and if you do the math, i fail to see why
it was needed there in the first place.
Since that was the only use of stat.h, it is removed.
* complexity.h: attempt to fix MSVC build
* Update README.md
* Store statistics to compute in a vector, ensures ordering.
* Add a bit more tests for repetitions.
* Partially address review notes.
* Fix gcc build: drop extra ';'
clang, why didn't you warn me?
* Address review comments.
* double() -> 0.0
* early return
2017-08-23 23:44:29 +00:00
|
|
|
}
|
|
|
|
}
|
2018-06-01 10:14:19 +00:00
|
|
|
// clang-format off
|
Drop Stat1, refactor statistics to be user-providable, add median. (#428)
* Drop Stat1, refactor statistics to be user-providable, add median.
My main goal was to add median statistic. Since Stat1
calculated the stats incrementally, and did not store
the values themselves, it is was not possible. Thus,
i have replaced Stat1 with simple std::vector<double>,
containing all the values.
Then, i have refactored current mean/stdev to be a
function that is provided with values vector, and
returns the statistic. While there, it seemed to make
sense to deduplicate the code by storing all the
statistics functions in a map, and then simply iterate
over it. And the interface to add new statistics is
intentionally exposed, so they may be added easily.
The notable change is that Iterations are no longer
displayed as 0 for stdev. Is could be changed, but
i'm not sure how to nicely fit that into the API.
Similarly, this dance about sometimes (for some fields,
for some statistics) dividing by run.iterations, and
then multiplying the calculated stastic back is also
dropped, and if you do the math, i fail to see why
it was needed there in the first place.
Since that was the only use of stat.h, it is removed.
* complexity.h: attempt to fix MSVC build
* Update README.md
* Store statistics to compute in a vector, ensures ordering.
* Add a bit more tests for repetitions.
* Partially address review notes.
* Fix gcc build: drop extra ';'
clang, why didn't you warn me?
* Address review comments.
* double() -> 0.0
* early return
2017-08-23 23:44:29 +00:00
|
|
|
BENCHMARK(BM_UserStats)
|
2018-06-01 10:14:19 +00:00
|
|
|
->Repetitions(3)
|
2018-10-18 14:17:14 +00:00
|
|
|
->Iterations(5)
|
|
|
|
->UseManualTime()
|
2018-06-01 10:14:19 +00:00
|
|
|
->ComputeStatistics("", UserStatistics);
|
|
|
|
// clang-format on
|
|
|
|
|
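
// Note: as the expectations below suggest, ComputeStatistics() registers an
// additional aggregate computed from the per-repetition results, reported
// under the run name with a "_<statistic name>" suffix; the empty name used
// here therefore shows up as "manual_time_". Purely as an illustration (not
// part of this test; the "max" statistic and its lambda are a hypothetical
// example), a more typical registration might look like this sketch:
#if 0
const auto MaxStatistic = [](const std::vector<double>& v) {
  return *std::max_element(v.begin(), v.end());  // needs <algorithm>
};
BENCHMARK(BM_UserStats)->Repetitions(3)->ComputeStatistics("max", MaxStatistic);
#endif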

// check that the user-provided stats are calculated, and come after the
// default ones; the empty string as the name is intentional, it would sort
// before anything else
ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
                           "]* 150 ns %time [ ]*5$"},
                          {"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
                           "]* 150 ns %time [ ]*5$"},
                          {"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
                           "]* 150 ns %time [ ]*5$"},
                          {"^BM_UserStats/iterations:5/repeats:3/"
                           "manual_time_mean [ ]* 150 ns %time [ ]*3$"},
                          {"^BM_UserStats/iterations:5/repeats:3/"
                           "manual_time_median [ ]* 150 ns %time [ ]*3$"},
                          {"^BM_UserStats/iterations:5/repeats:3/"
                           "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
                          {"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
                           "[ ]* 150 ns %time [ ]*3$"}});
ADD_CASES(
    TC_JSONOut,
    {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
     {"\"family_index\": 22,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"iteration\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"repetition_index\": 0,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"iterations\": 5,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
     {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
     {"\"family_index\": 22,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"iteration\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"repetition_index\": 1,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"iterations\": 5,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
     {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
     {"\"family_index\": 22,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"iteration\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"repetition_index\": 2,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"iterations\": 5,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
     {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
     {"\"family_index\": 22,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"aggregate\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"aggregate_name\": \"mean\",$", MR_Next},
     {"\"aggregate_unit\": \"time\",$", MR_Next},
     {"\"iterations\": 3,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
     {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
     {"\"family_index\": 22,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"aggregate\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"aggregate_name\": \"median\",$", MR_Next},
     {"\"aggregate_unit\": \"time\",$", MR_Next},
     {"\"iterations\": 3,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
     {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
     {"\"family_index\": 22,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"aggregate\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"aggregate_name\": \"stddev\",$", MR_Next},
     {"\"aggregate_unit\": \"time\",$", MR_Next},
     {"\"iterations\": 3,$", MR_Next},
     {"\"real_time\": %float,$", MR_Next},
     {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
     {"\"family_index\": 22,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"aggregate\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"aggregate_name\": \"\",$", MR_Next},
     {"\"aggregate_unit\": \"time\",$", MR_Next},
     {"\"iterations\": 3,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
ADD_CASES(
    TC_CSVOut,
    {{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
     {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
     {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
     {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
     {"^\"BM_UserStats/iterations:5/repeats:3/"
      "manual_time_median\",%csv_report$"},
     {"^\"BM_UserStats/iterations:5/repeats:3/"
      "manual_time_stddev\",%csv_report$"},
     {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});

// ========================================================================= //
// ------------- Testing relative standard deviation statistics ------------ //
// ========================================================================= //

const auto UserPercentStatistics = [](const std::vector<double>&) {
  return 1. / 100.;
};
void BM_UserPercentStats(benchmark::State& state) {
  for (auto _ : state) {
    state.SetIterationTime(150 / 10e8);
  }
}
// clang-format off
BENCHMARK(BM_UserPercentStats)
  ->Repetitions(3)
  ->Iterations(5)
  ->UseManualTime()
  ->Unit(benchmark::TimeUnit::kNanosecond)
  ->ComputeStatistics("", UserPercentStatistics, benchmark::StatisticUnit::kPercentage);
// clang-format on
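
// Purely as an illustration (not part of this test): the same
// StatisticUnit::kPercentage mechanism could carry a real relative standard
// deviation statistic. The "rsd" name and the helper below are hypothetical,
// and <cmath> would be needed for std::sqrt.
#if 0
const auto RelativeStdDev = [](const std::vector<double>& v) -> double {
  if (v.size() < 2) return 0.0;
  const double mean = std::accumulate(v.begin(), v.end(), 0.0) / v.size();
  double sq_sum = 0.0;
  for (double x : v) sq_sum += (x - mean) * (x - mean);
  const double stddev = std::sqrt(sq_sum / (v.size() - 1));
  return mean != 0.0 ? stddev / mean : 0.0;  // e.g. 0.05 is reported as 5 %
};
BENCHMARK(BM_UserPercentStats)
    ->Repetitions(3)
    ->ComputeStatistics("rsd", RelativeStdDev, benchmark::StatisticUnit::kPercentage);
#endif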

// check that the user-provided percent stats are calculated, and come after
// the default ones; the empty string as the name is intentional, it would
// sort before anything else
ADD_CASES(TC_ConsoleOut,
          {{"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ "
            "]* 150 ns %time [ ]*5$"},
           {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ "
            "]* 150 ns %time [ ]*5$"},
           {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ "
            "]* 150 ns %time [ ]*5$"},
           {"^BM_UserPercentStats/iterations:5/repeats:3/"
            "manual_time_mean [ ]* 150 ns %time [ ]*3$"},
           {"^BM_UserPercentStats/iterations:5/repeats:3/"
            "manual_time_median [ ]* 150 ns %time [ ]*3$"},
           {"^BM_UserPercentStats/iterations:5/repeats:3/"
            "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
           {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time_ "
            "[ ]* 1.00 % [ ]* 1.00 %[ ]*3$"}});
ADD_CASES(
    TC_JSONOut,
    {{"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"},
     {"\"family_index\": 23,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"iteration\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"repetition_index\": 0,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"iterations\": 5,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
     {"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"},
     {"\"family_index\": 23,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"iteration\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"repetition_index\": 1,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"iterations\": 5,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
     {"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"},
     {"\"family_index\": 23,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"iteration\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"repetition_index\": 2,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"iterations\": 5,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
     {"\"name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_mean\",$"},
     {"\"family_index\": 23,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"aggregate\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"aggregate_name\": \"mean\",$", MR_Next},
     {"\"aggregate_unit\": \"time\",$", MR_Next},
     {"\"iterations\": 3,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
     {"\"name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_median\",$"},
     {"\"family_index\": 23,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"aggregate\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"aggregate_name\": \"median\",$", MR_Next},
     {"\"aggregate_unit\": \"time\",$", MR_Next},
     {"\"iterations\": 3,$", MR_Next},
     {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
     {"\"name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_stddev\",$"},
     {"\"family_index\": 23,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"aggregate\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"aggregate_name\": \"stddev\",$", MR_Next},
     {"\"aggregate_unit\": \"time\",$", MR_Next},
     {"\"iterations\": 3,$", MR_Next},
     {"\"real_time\": %float,$", MR_Next},
     {"\"name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_\",$"},
     {"\"family_index\": 23,$", MR_Next},
     {"\"per_family_instance_index\": 0,$", MR_Next},
     {"\"run_name\": "
      "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
      MR_Next},
     {"\"run_type\": \"aggregate\",$", MR_Next},
     {"\"repetitions\": 3,$", MR_Next},
     {"\"threads\": 1,$", MR_Next},
     {"\"aggregate_name\": \"\",$", MR_Next},
     {"\"aggregate_unit\": \"percentage\",$", MR_Next},
     {"\"iterations\": 3,$", MR_Next},
     {"\"real_time\": 1\\.(0)*e-(0)*2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
                       "manual_time\",%csv_report$"},
                      {"^\"BM_UserPercentStats/iterations:5/repeats:3/"
                       "manual_time\",%csv_report$"},
                      {"^\"BM_UserPercentStats/iterations:5/repeats:3/"
                       "manual_time\",%csv_report$"},
                      {"^\"BM_UserPercentStats/iterations:5/repeats:3/"
                       "manual_time_mean\",%csv_report$"},
                      {"^\"BM_UserPercentStats/iterations:5/repeats:3/"
                       "manual_time_median\",%csv_report$"},
                      {"^\"BM_UserPercentStats/iterations:5/repeats:3/"
                       "manual_time_stddev\",%csv_report$"},
                      {"^\"BM_UserPercentStats/iterations:5/repeats:3/"
                       "manual_time_\",%csv_cv_report$"}});

// ========================================================================= //
// ------------------------- Testing StrEscape JSON ------------------------ //
// ========================================================================= //

#if 0 // enable when csv testing code correctly handles multi-line fields
void BM_JSON_Format(benchmark::State& state) {
  state.SkipWithError("val\b\f\n\r\t\\\"with\"es,capes");
  for (auto _ : state) {
  }
}
BENCHMARK(BM_JSON_Format);
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"},
                       {"\"family_index\": 23,$", MR_Next},
                       {"\"per_family_instance_index\": 0,$", MR_Next},
                       {"\"run_name\": \"BM_JSON_Format\",$", MR_Next},
                       {"\"run_type\": \"iteration\",$", MR_Next},
                       {"\"repetitions\": 1,$", MR_Next},
                       {"\"repetition_index\": 0,$", MR_Next},
                       {"\"threads\": 1,$", MR_Next},
                       {"\"error_occurred\": true,$", MR_Next},
                       {R"("error_message": "val\\b\\f\\n\\r\\t\\\\\\"with\\"es,capes",$)", MR_Next}});
#endif
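
// The (currently disabled) expectation above asserts how the reporter is
// expected to escape the error message for JSON output. A minimal sketch of
// that escaping (an illustration only, not the library's actual StrEscape
// implementation) would be:
#if 0
std::string JsonEscapeSketch(const std::string& in) {
  std::string out;
  out.reserve(in.size());
  for (char c : in) {
    switch (c) {
      case '\\': out += "\\\\"; break;
      case '"':  out += "\\\""; break;
      case '\b': out += "\\b";  break;
      case '\f': out += "\\f";  break;
      case '\n': out += "\\n";  break;
      case '\r': out += "\\r";  break;
      case '\t': out += "\\t";  break;
      default:   out += c;      break;
    }
  }
  return out;
}
// e.g. a string containing a raw newline and a quote comes out as the
// two-character escape sequences \n and \" respectively.
#endif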

// ========================================================================= //
// -------------------------- Testing CsvEscape ---------------------------- //
// ========================================================================= //

void BM_CSV_Format(benchmark::State& state) {
  state.SkipWithError("\"freedom\"");
  for (auto _ : state) {
  }
}
BENCHMARK(BM_CSV_Format);
ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});

// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //

int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }