#undef NDEBUG
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <vector>
#include "benchmark/benchmark.h"
#include "output_test.h"
namespace {
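
// Expands to a file-scope dummy-int initializer so that AddComplexityTest
// can be invoked outside of any function.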
#define ADD_COMPLEXITY_CASES(...) \
  int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__)
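
// Sets up the name/regex substitutions for one complexity-test family, then
// registers the console, JSON, and CSV output patterns expected for its
// BigO and RMS aggregate rows.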
int AddComplexityTest(std::string test_name, std::string big_o_test_name,
                      std::string rms_test_name, std::string big_o,
                      int family_index) {
  SetSubstitutions({{"%name", test_name},
                    {"%bigo_name", big_o_test_name},
                    {"%rms_name", rms_test_name},
                    {"%bigo_str", "[ ]* %float " + big_o},
                    {"%bigo", big_o},
                    {"%rms", "[ ]*[0-9]+ %"}});
  AddCases(
      TC_ConsoleOut,
      {{"^%bigo_name %bigo_str %bigo_str[ ]*$"},
       {"^%bigo_name", MR_Not},  // Assert we didn't match only the name.
       {"^%rms_name %rms %rms[ ]*$", MR_Next}});
  AddCases(
      TC_JSONOut,
      {{"\"name\": \"%bigo_name\",$"},
       {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next},
       {"\"per_family_instance_index\": 0,$", MR_Next},
       {"\"run_name\": \"%name\",$", MR_Next},
       {"\"run_type\": \"aggregate\",$", MR_Next},
       {"\"repetitions\": %int,$", MR_Next},
       {"\"threads\": 1,$", MR_Next},
       {"\"aggregate_name\": \"BigO\",$", MR_Next},
       {"\"aggregate_unit\": \"time\",$", MR_Next},
       {"\"cpu_coefficient\": %float,$", MR_Next},
       {"\"real_coefficient\": %float,$", MR_Next},
       {"\"big_o\": \"%bigo\",$", MR_Next},
       {"\"time_unit\": \"ns\"$", MR_Next},
       {"}", MR_Next},
       {"\"name\": \"%rms_name\",$"},
       {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next},
       {"\"per_family_instance_index\": 0,$", MR_Next},
       {"\"run_name\": \"%name\",$", MR_Next},
       {"\"run_type\": \"aggregate\",$", MR_Next},
       {"\"repetitions\": %int,$", MR_Next},
       {"\"threads\": 1,$", MR_Next},
       {"\"aggregate_name\": \"RMS\",$", MR_Next},
       {"\"aggregate_unit\": \"percentage\",$", MR_Next},
       {"\"rms\": %float$", MR_Next},
       {"}", MR_Next}});
  AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"},
                       {"^\"%bigo_name\"", MR_Not},
                       {"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}});
  return 0;
}
} // end namespace
// ========================================================================= //
// --------------------------- Testing BigO O(1) --------------------------- //
// ========================================================================= //
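
// Each iteration does a fixed amount of work, so the fitted complexity
// curve should be independent of N.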
void BM_Complexity_O1(benchmark::State &state) {
  for (auto _ : state) {
    for (int i = 0; i < 1024; ++i) {
      benchmark::DoNotOptimize(&i);
    }
  }
  state.SetComplexityN(state.range(0));
}
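
// The same benchmark is registered three times: with an explicit o1 curve,
// with auto-deduced complexity, and with a user-supplied lambda.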
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1);
BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity();
BENCHMARK(BM_Complexity_O1)
    ->Range(1, 1 << 18)
    ->Complexity([](benchmark::IterationCount) { return 1.0; });
const char *one_test_name = "BM_Complexity_O1";
const char *big_o_1_test_name = "BM_Complexity_O1_BigO";
const char *rms_o_1_test_name = "BM_Complexity_O1_RMS";
const char *enum_big_o_1 = "\\([0-9]+\\)";
// FIXME: Tolerate both '(1)' and 'lgN' as output when the complexity is auto
// deduced.
// See https://github.com/google/benchmark/issues/272
const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)";
const char *lambda_big_o_1 = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
                     enum_big_o_1, /*family_index=*/0);
// Add auto enum tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
                     auto_big_o_1, /*family_index=*/1);
// Add lambda tests
ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name,
                     lambda_big_o_1, /*family_index=*/2);
// ========================================================================= //
// --------------------------- Testing BigO O(N) --------------------------- //
// ========================================================================= //
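
// Builds a `size`-element vector of pseudo-random ints in [0, size).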
std::vector<int> ConstructRandomVector(int64_t size) {
  std::vector<int> v;
  v.reserve(static_cast<int>(size));
  for (int i = 0; i < size; ++i) {
    v.push_back(static_cast<int>(std::rand() % size));
  }
  return v;
}
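
// A linear scan with std::find; searching for a value that cannot be present
// keeps every iteration at the worst case of N comparisons.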
void BM_Complexity_O_N(benchmark::State &state) {
  auto v = ConstructRandomVector(state.range(0));
  // Test worst case scenario (item not in vector)
  const int64_t item_not_in_vector = state.range(0) * 2;
  for (auto _ : state) {
    benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
  }
  state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_Complexity_O_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity(benchmark::oN);
BENCHMARK(BM_Complexity_O_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity([](benchmark::IterationCount n) -> double {
      return static_cast<double>(n);
    });
BENCHMARK(BM_Complexity_O_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity();
const char *n_test_name = "BM_Complexity_O_N";
const char *big_o_n_test_name = "BM_Complexity_O_N_BigO";
const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS";
const char *enum_auto_big_o_n = "N";
const char *lambda_big_o_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
                     enum_auto_big_o_n, /*family_index=*/3);
// Add lambda tests
ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name,
                     lambda_big_o_n, /*family_index=*/4);
// ========================================================================= //
// ------------------------- Testing BigO O(N*lgN) ------------------------- //
// ========================================================================= //
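
// std::sort performs O(N*lgN) comparisons, which is the curve this family
// is expected to fit.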
static void BM_Complexity_O_N_log_N(benchmark::State &state) {
  auto v = ConstructRandomVector(state.range(0));
  for (auto _ : state) {
    std::sort(v.begin(), v.end());
  }
  state.SetComplexityN(state.range(0));
}
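
// log2(e), i.e. 1/ln(2): multiplying a natural log by this converts it to
// lg, the base used in the lambda below.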
static const double kLog2E = 1.44269504088896340736;
BENCHMARK(BM_Complexity_O_N_log_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity(benchmark::oNLogN);
BENCHMARK(BM_Complexity_O_N_log_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity([](benchmark::IterationCount n) {
      return kLog2E * n * log(static_cast<double>(n));
    });
BENCHMARK(BM_Complexity_O_N_log_N)
    ->RangeMultiplier(2)
    ->Range(1 << 10, 1 << 16)
    ->Complexity();
const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N";
const char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO";
const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS";
const char *enum_auto_big_o_n_lg_n = "NlgN";
const char *lambda_big_o_n_lg_n = "f\\(N\\)";
// Add enum tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
                     rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n,
                     /*family_index=*/6);
// Add lambda tests
ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name,
                     rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n,
                     /*family_index=*/7);
// ========================================================================= //
// -------- Testing formatting of Complexity with captured args ------------ //
// ========================================================================= //
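
// The captured-arg suffix ("/capture_test") must appear in the run name that
// the BigO and RMS aggregate rows inherit.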
void BM_ComplexityCaptureArgs(benchmark::State &state, int n) {
  for (auto _ : state) {
    // This test requires a non-zero CPU time to avoid divide-by-zero
    benchmark::DoNotOptimize(state.iterations());
  }
  state.SetComplexityN(n);
}
BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100)
    ->Complexity(benchmark::oN)
    ->Ranges({{1, 2}, {3, 4}});
const std::string complexity_capture_name =
    "BM_ComplexityCaptureArgs/capture_test";
ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO",
                     complexity_capture_name + "_RMS", "N", /*family_index=*/9);
// ========================================================================= //
// --------------------------- TEST CASES END ------------------------------ //
// ========================================================================= //
int main(int argc, char *argv[]) { RunOutputTests(argc, argv); }