// benchmark/test/reporter_output_test.cc


#undef NDEBUG
#include <numeric>
#include <utility>
#include "benchmark/benchmark.h"
#include "output_test.h"
// ========================================================================= //
// ---------------------- Testing Prologue Output -------------------------- //
// ========================================================================= //
ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next},
{"^Benchmark %s Time %s CPU %s Iterations$", MR_Next},
{"^[-]+$", MR_Next}});
static int AddContextCases() {
AddCases(TC_ConsoleErr,
{
{"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default},
{"Running .*(/|\\\\)reporter_output_test(\\.exe)?$", MR_Next},
{"Run on \\(%int X %float MHz CPU s?\\)", MR_Next},
});
AddCases(TC_JSONOut,
{{"^\\{", MR_Default},
{"\"context\":", MR_Next},
{"\"date\": \"", MR_Next},
{"\"host_name\":", MR_Next},
{"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",",
MR_Next},
{"\"num_cpus\": %int,$", MR_Next},
{"\"mhz_per_cpu\": %float,$", MR_Next},
{"\"caches\": \\[$", MR_Default}});
auto const& Info = benchmark::CPUInfo::Get();
auto const& Caches = Info.caches;
if (!Caches.empty()) {
AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}});
}
for (size_t I = 0; I < Caches.size(); ++I) {
std::string num_caches_str =
Caches[I].num_sharing != 0 ? " \\(x%int\\)$" : "$";
AddCases(TC_ConsoleErr,
{{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str,
MR_Next}});
AddCases(TC_JSONOut, {{"\\{$", MR_Next},
{"\"type\": \"", MR_Next},
{"\"level\": %int,$", MR_Next},
{"\"size\": %int,$", MR_Next},
{"\"num_sharing\": %int$", MR_Next},
{"}[,]{0,1}$", MR_Next}});
}
AddCases(TC_JSONOut, {{"],$"}});
auto const& LoadAvg = Info.load_avg;
if (!LoadAvg.empty()) {
AddCases(TC_ConsoleErr,
{{"Load Average: (%float, ){0,2}%float$", MR_Next}});
}
AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}});
AddCases(TC_JSONOut, {{"\"library_version\": \".*\",$", MR_Next}});
AddCases(TC_JSONOut, {{"\"library_build_type\": \".*\",$", MR_Next}});
AddCases(TC_JSONOut, {{"\"json_schema_version\": 1$", MR_Next}});
return 0;
}
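// Evaluating AddContextCases() in a dynamic initializer registers the
// context checks before main() runs any benchmarks.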
int dummy_register = AddContextCases();
ADD_CASES(TC_CSVOut, {{"%csv_header"}});
// ========================================================================= //
// ------------------------ Testing Basic Output --------------------------- //
// ========================================================================= //
void BM_basic(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_basic);
ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"},
{"\"family_index\": 0,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_basic\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Bytes per Second Output ---------------- //
// ========================================================================= //
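// SetBytesProcessed() adds a bytes_per_second counter; the rate is computed
// against CPU time by default, which is why the loop body must accumulate
// measurable CPU time.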
void BM_bytes_per_second(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.SetBytesProcessed(1);
}
BENCHMARK(BM_bytes_per_second);
ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report "
"bytes_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
{"\"family_index\": 1,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_bytes_per_second\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"bytes_per_second\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
// ========================================================================= //
// ------------------------ Testing Items per Second Output ---------------- //
// ========================================================================= //
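// SetItemsProcessed() is the item-count analogue of SetBytesProcessed() and
// yields the items_per_second counter checked below.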
void BM_items_per_second(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.SetItemsProcessed(1);
}
BENCHMARK(BM_items_per_second);
ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report "
"items_per_second=%float[kM]{0,1}/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
{"\"family_index\": 2,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_items_per_second\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"items_per_second\": %float$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
// ========================================================================= //
// ------------------------ Testing Label Output --------------------------- //
// ========================================================================= //
void BM_label(benchmark::State& state) {
for (auto _ : state) {
}
state.SetLabel("some label");
}
BENCHMARK(BM_label);
ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"},
{"\"family_index\": 3,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_label\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\",$", MR_Next},
{"\"label\": \"some label\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
"label\"%csv_label_report_end$"}});
// ========================================================================= //
// ------------------------ Testing Time Label Output ---------------------- //
// ========================================================================= //
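// Unit() changes only how times are reported, not how they are measured: it
// becomes the JSON "time_unit" field and selects the matching console/CSV
// substitutions (%console_us_report, %csv_us_report, and so on).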
void BM_time_label_nanosecond(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_time_label_nanosecond)->Unit(benchmark::kNanosecond);
ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_nanosecond %console_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_time_label_nanosecond\",$"},
{"\"family_index\": 4,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_time_label_nanosecond\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_nanosecond\",%csv_report$"}});
void BM_time_label_microsecond(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_time_label_microsecond)->Unit(benchmark::kMicrosecond);
ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_microsecond %console_us_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_time_label_microsecond\",$"},
{"\"family_index\": 5,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_time_label_microsecond\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"us\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_microsecond\",%csv_us_report$"}});
void BM_time_label_millisecond(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_time_label_millisecond)->Unit(benchmark::kMillisecond);
ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_millisecond %console_ms_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_time_label_millisecond\",$"},
{"\"family_index\": 6,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_time_label_millisecond\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ms\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_millisecond\",%csv_ms_report$"}});
void BM_time_label_second(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_time_label_second)->Unit(benchmark::kSecond);
ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_second %console_s_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_second\",$"},
{"\"family_index\": 7,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_time_label_second\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"s\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_second\",%csv_s_report$"}});
// ========================================================================= //
// ------------------------ Testing Error Output --------------------------- //
// ========================================================================= //
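// SkipWithError() marks the run as failed: the loop that follows executes no
// iterations, and the reporters emit error_occurred/error_message in place
// of timing results.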
void BM_error(benchmark::State& state) {
state.SkipWithError("message");
for (auto _ : state) {
}
}
BENCHMARK(BM_error);
ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"},
{"\"family_index\": 8,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_error\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"error_occurred\": true,$", MR_Next},
{"\"error_message\": \"message\",$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
// ========================================================================= //
// --------------------- Testing No Arg Name Output ------------------------ //
// ========================================================================= //
void BM_no_arg_name(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_no_arg_name)->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"},
{"\"family_index\": 9,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Arg Name Output ------------------------ //
// ========================================================================= //
void BM_arg_name(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"},
{"\"family_index\": 10,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Arg Names Output ----------------------- //
// ========================================================================= //
void BM_arg_names(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
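// An empty entry in ArgNames() leaves that argument unnamed, hence the bare
// "5" between "first:2" and "third:4" in the expected run name.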
ADD_CASES(TC_ConsoleOut,
{{"^BM_arg_names/first:2/5/third:4 %console_report$"}});
ADD_CASES(TC_JSONOut,
{{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"},
{"\"family_index\": 11,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Name Output ---------------------------- //
// ========================================================================= //
void BM_name(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_name)->Name("BM_custom_name");
ADD_CASES(TC_ConsoleOut, {{"^BM_custom_name %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_custom_name\",$"},
{"\"family_index\": 12,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_custom_name\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"cpu_time\": %float,$", MR_Next},
{"\"time_unit\": \"ns\"$", MR_Next},
{"}", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_custom_name\",%csv_report$"}});
// ========================================================================= //
// ------------------------ Testing Big Args Output ------------------------ //
// ========================================================================= //
void BM_BigArgs(benchmark::State& state) {
for (auto _ : state) {
}
}
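// The unsigned literals matter here: 1 << 31 would overflow a signed 32-bit
// int before the values are widened to Range()'s 64-bit parameters.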
BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U);
ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"},
{"^BM_BigArgs/2147483648 %console_report$"}});
// ========================================================================= //
// ----------------------- Testing Complexity Output ----------------------- //
// ========================================================================= //
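// Complexity(benchmark::o1) appends two extra rows per family once all sizes
// have run: <name>_BigO with the fitted coefficient and <name>_RMS with the
// root-mean-square deviation of the fit, in percent; only those two rows are
// checked here.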
void BM_Complexity_O1(benchmark::State& state) {
for (auto _ : state) {
// This test requires a non-zero CPU time to avoid divide-by-zero
auto iterations = double(state.iterations()) * double(state.iterations());
benchmark::DoNotOptimize(iterations);
}
state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
SET_SUBSTITUTIONS({{"%bigOStr", "[ ]* %float \\([0-9]+\\)"},
{"%RMS", "[ ]*[0-9]+ %"}});
ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
{"^BM_Complexity_O1_RMS %RMS %RMS[ ]*$"}});
// ========================================================================= //
// ----------------------- Testing Aggregate Output ------------------------ //
// ========================================================================= //
// Test that non-aggregate data is printed by default
void BM_Repeat(benchmark::State& state) {
for (auto _ : state) {
}
}
// at least two repetitions are needed to produce any aggregate output
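// Each repetition is reported as its own "iteration" run carrying a
// repetition_index; the library then appends "aggregate" runs (mean, median,
// stddev) whose "iterations" field holds the repetition count.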
BENCHMARK(BM_Repeat)->Repetitions(2);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2 %console_report$"},
{"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"},
{"\"family_index\": 15,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2\",$"},
{"\"family_index\": 15,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_mean\",$"},
{"\"family_index\": 15,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_median\",$"},
{"\"family_index\": 15,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:2_stddev\",$"},
{"\"family_index\": 15,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_mean\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_median\",%csv_report$"},
{"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}});
// but with two repetitions the mean and median are the same, so repeat..
BENCHMARK(BM_Repeat)->Repetitions(3);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3 %console_report$"},
{"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_mean\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_median\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:3_stddev\",$"},
{"\"family_index\": 16,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_median\",%csv_report$"},
{"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}});
// the median differs between even and odd repetition counts, so check an
// even count too, just to be sure
BENCHMARK(BM_Repeat)->Repetitions(4);
ADD_CASES(TC_ConsoleOut,
{{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4 %console_report$"},
{"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"},
{"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"repetition_index\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_mean\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_median\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next},
{"\"name\": \"BM_Repeat/repeats:4_stddev\",$"},
{"\"family_index\": 17,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 4,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 4,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4\",%csv_report$"},
{"^\"BM_Repeat/repeats:4_mean\",%csv_report$"},
{"^\"BM_Repeat/repeats:4_median\",%csv_report$"},
{"^\"BM_Repeat/repeats:4_stddev\",%csv_report$"}});
// Test that a non-repeated test still prints non-aggregate results even when
// only-aggregate reports have been requested
void BM_RepeatOnce(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"},
{"\"family_index\": 18,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported
void BM_SummaryRepeat(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
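// The MR_Not entries below check that no per-repetition result line for
// BM_SummaryRepeat appears in the output; only the aggregate lines should.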
ADD_CASES(
TC_ConsoleOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"},
{"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"},
{"\"family_index\": 19,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"},
{"\"family_index\": 19,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"},
{"\"family_index\": 19,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"},
{"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});
// Test that non-aggregate data is not displayed.
// NOTE: this test is incomplete: it only checks the display output. It does
// not verify that the file output still contains all non-aggregate results...
void BM_SummaryDisplay(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly();
ADD_CASES(
TC_ConsoleOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"},
{"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"},
{"\"family_index\": 20,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"},
{"\"family_index\": 20,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next},
{"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"},
{"\"family_index\": 20,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 2,$", MR_Next}});
ADD_CASES(TC_CSVOut,
{{".*BM_SummaryDisplay/repeats:2 ", MR_Not},
{"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"},
{"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}});
// Test repeats with custom time unit.
void BM_RepeatTimeUnit(benchmark::State& state) {
for (auto _ : state) {
}
}
BENCHMARK(BM_RepeatTimeUnit)
->Repetitions(3)
->ReportAggregatesOnly()
->Unit(benchmark::kMicrosecond);
ADD_CASES(
TC_ConsoleOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ "
"]*3$"},
{"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ "
"]*3$"}});
ADD_CASES(TC_JSONOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"},
{"\"family_index\": 21,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"},
{"\"family_index\": 21,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"},
{"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"},
{"\"family_index\": 21,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"time_unit\": \"us\",?$"}});
ADD_CASES(TC_CSVOut,
{{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not},
{"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_median\",%csv_us_report$"},
{"^\"BM_RepeatTimeUnit/repeats:3_stddev\",%csv_us_report$"}});
// ========================================================================= //
// -------------------- Testing user-provided statistics ------------------- //
// ========================================================================= //
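// A deliberately trivial user statistic: it returns the last sample. Since
// every repetition below reports an identical manual time, it is expected to
// match the per-repetition 150 ns in the checks that follow.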
const auto UserStatistics = [](const std::vector<double>& v) {
return v.back();
};
void BM_UserStats(benchmark::State& state) {
for (auto _ : state) {
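    // 150 / 10e8 == 1.5e-7 s, i.e. a manual time of 150 ns per iteration;
    // identical repetitions should make the reported stddev exactly 0.000 ns.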
state.SetIterationTime(150 / 10e8);
}
}
// clang-format off
BENCHMARK(BM_UserStats)
->Repetitions(3)
->Iterations(5)
->UseManualTime()
->ComputeStatistics("", UserStatistics);
// clang-format on
// Check that the user-provided statistic is computed, and that it is reported
// after the default ones. The empty string as its name is intentional: it
// would sort before anything else, so its appearing last shows the statistics
// are not reordered by name.
ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserStats/iterations:5/repeats:3/"
"manual_time_mean [ ]* 150 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/"
"manual_time_median [ ]* 150 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/"
"manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
{"^BM_UserStats/iterations:5/repeats:3/manual_time_ "
"[ ]* 150 ns %time [ ]*3$"}});
ADD_CASES(
TC_JSONOut,
{{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"},
{"\"family_index\": 22,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}});
ADD_CASES(
TC_CSVOut,
{{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_median\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/"
"manual_time_stddev\",%csv_report$"},
{"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}});
// ========================================================================= //
// ------------- Testing relative standard deviation statistics ------------ //
// ========================================================================= //
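// A constant user statistic of 0.01. Combined with
// benchmark::StatisticUnit::kPercentage, it is expected to be rendered as
// "1.00 %" in the console output below.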
const auto UserPercentStatistics = [](const std::vector<double>&) {
return 1. / 100.;
};
void BM_UserPercentStats(benchmark::State& state) {
for (auto _ : state) {
state.SetIterationTime(150 / 10e8);
}
}
// clang-format off
BENCHMARK(BM_UserPercentStats)
->Repetitions(3)
->Iterations(5)
->UseManualTime()
->Unit(benchmark::TimeUnit::kNanosecond)
->ComputeStatistics("", UserPercentStatistics, benchmark::StatisticUnit::kPercentage);
// clang-format on
// Check that the user-provided percentage statistic is computed, and that it
// is reported after the default ones. The empty string as its name is
// intentional: it would sort before anything else.
ADD_CASES(TC_ConsoleOut,
{{"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ "
"]* 150 ns %time [ ]*5$"},
{"^BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time_mean [ ]* 150 ns %time [ ]*3$"},
{"^BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time_median [ ]* 150 ns %time [ ]*3$"},
{"^BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"},
{"^BM_UserPercentStats/iterations:5/repeats:3/manual_time_ "
"[ ]* 1.00 % [ ]* 1.00 %[ ]*3$"}});
ADD_CASES(
TC_JSONOut,
{{"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"},
{"\"family_index\": 23,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"},
{"\"family_index\": 23,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 1,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"},
{"\"family_index\": 23,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"repetition_index\": 2,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"iterations\": 5,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_mean\",$"},
{"\"family_index\": 23,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"mean\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_median\",$"},
{"\"family_index\": 23,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"median\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next},
{"\"name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_stddev\",$"},
{"\"family_index\": 23,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"stddev\",$", MR_Next},
{"\"aggregate_unit\": \"time\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
{"\"name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_\",$"},
{"\"family_index\": 23,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": "
"\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$",
MR_Next},
{"\"run_type\": \"aggregate\",$", MR_Next},
{"\"repetitions\": 3,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"aggregate_name\": \"\",$", MR_Next},
{"\"aggregate_unit\": \"percentage\",$", MR_Next},
{"\"iterations\": 3,$", MR_Next},
{"\"real_time\": 1\\.(0)*e-(0)*2,$", MR_Next}});
ADD_CASES(TC_CSVOut, {{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time\",%csv_report$"},
{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time\",%csv_report$"},
{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time\",%csv_report$"},
{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time_mean\",%csv_report$"},
{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time_median\",%csv_report$"},
{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time_stddev\",%csv_report$"},
{"^\"BM_UserPercentStats/iterations:5/repeats:3/"
"manual_time_\",%csv_cv_report$"}});
// ========================================================================= //
// ------------------------- Testing StrEscape JSON ------------------------ //
// ========================================================================= //
#if 0 // enable when csv testing code correctly handles multi-line fields
void BM_JSON_Format(benchmark::State& state) {
state.SkipWithError("val\b\f\n\r\t\\\"with\"es,capes");
for (auto _ : state) {
}
}
BENCHMARK(BM_JSON_Format);
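// The control characters and embedded quotes in the error message must be
// escaped in the JSON output, which is what the regex below expects.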
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"},
{"\"family_index\": 23,$", MR_Next},
{"\"per_family_instance_index\": 0,$", MR_Next},
{"\"run_name\": \"BM_JSON_Format\",$", MR_Next},
{"\"run_type\": \"iteration\",$", MR_Next},
{"\"repetitions\": 1,$", MR_Next},
{"\"repetition_index\": 0,$", MR_Next},
{"\"threads\": 1,$", MR_Next},
{"\"error_occurred\": true,$", MR_Next},
{R"("error_message": "val\\b\\f\\n\\r\\t\\\\\\"with\\"es,capes",$)", MR_Next}});
#endif
// ========================================================================= //
// -------------------------- Testing CsvEscape ---------------------------- //
// ========================================================================= //
void BM_CSV_Format(benchmark::State& state) {
state.SkipWithError("\"freedom\"");
for (auto _ : state) {
}
}
BENCHMARK(BM_CSV_Format);
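// CSV escaping doubles embedded quotes and wraps the field in quotes, so the
// error message "freedom" (quotes included) is expected as """freedom""".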
ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}});
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char* argv[]) { RunOutputTests(argc, argv); }