Refactor most usages of KeepRunning to use the preferred ranged-for. (#459)

Recently the library added a new ranged-for variant of the KeepRunning
loop that is much faster. For this reason it should be preferred in all
new code.

Because a library, its documentation, and its tests should all embody
the best practices of using the library, this patch changes all but a
few usages of KeepRunning() into for (auto _ : state).

The remaining usages in the tests and documentation persist only
to document and test behavior that is different between the two formulations.

Also note that because the range-for loop requires C++11, the KeepRunning
variant has not been deprecated at this time.
This commit is contained in:
Eric 2017-10-17 12:17:02 -06:00 committed by GitHub
parent 22fd1a556e
commit 25acf220a4
16 changed files with 145 additions and 115 deletions

View File

@ -21,7 +21,7 @@ Define a function that executes the code to be measured.
#include <benchmark/benchmark.h>
static void BM_StringCreation(benchmark::State& state) {
while (state.KeepRunning())
for (auto _ : state)
std::string empty_string;
}
// Register the function as a benchmark
@ -30,7 +30,7 @@ BENCHMARK(BM_StringCreation);
// Define another benchmark
static void BM_StringCopy(benchmark::State& state) {
std::string x = "hello";
while (state.KeepRunning())
for (auto _ : state)
std::string copy(x);
}
BENCHMARK(BM_StringCopy);
@ -51,7 +51,7 @@ static void BM_memcpy(benchmark::State& state) {
char* src = new char[state.range(0)];
char* dst = new char[state.range(0)];
memset(src, 'x', state.range(0));
while (state.KeepRunning())
for (auto _ : state)
memcpy(dst, src, state.range(0));
state.SetBytesProcessed(int64_t(state.iterations()) *
int64_t(state.range(0)));
@ -84,7 +84,7 @@ insertion.
```c++
static void BM_SetInsert(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
state.PauseTiming();
std::set<int> data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
@ -135,7 +135,7 @@ running time and the normalized root-mean square error of string comparison.
static void BM_StringCompare(benchmark::State& state) {
std::string s1(state.range(0), '-');
std::string s2(state.range(0), '-');
while (state.KeepRunning()) {
for (auto _ : state) {
benchmark::DoNotOptimize(s1.compare(s2));
}
state.SetComplexityN(state.range(0));
@ -169,7 +169,7 @@ absence of multiprogramming.
template <class Q> int BM_Sequential(benchmark::State& state) {
Q q;
typename Q::value_type v;
while (state.KeepRunning()) {
for (auto _ : state) {
for (int i = state.range(0); i--; )
q.push(v);
for (int e = state.range(0); e--; )
@ -208,7 +208,7 @@ static void BM_Fast(benchmark::State &state) {
BENCHMARK(BM_Fast);
```
The reason the ranged-based for loop is faster than using `KeepRunning`, is
The reason the ranged-for loop is faster than using `KeepRunning`, is
because `KeepRunning` requires a memory load and store of the iteration count
every iteration, whereas the ranged-for variant is able to keep the iteration count
in a register.
@ -247,6 +247,9 @@ Compared to an empty `KeepRunning` loop, which looks like:
.LoopEnd:
```
Unless C++03 compatibility is required, the ranged-for variant of writing
the benchmark loop should be preferred.
## Passing arbitrary arguments to a benchmark
In C++11 it is possible to define a benchmark that takes an arbitrary number
of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
@ -296,9 +299,10 @@ int main(int argc, char** argv) {
### Multithreaded benchmarks
In a multithreaded test (benchmark invoked by multiple threads simultaneously),
it is guaranteed that none of the threads will start until all have called
`KeepRunning`, and all will have finished before `KeepRunning` returns `false`. As
such, any global setup or teardown can be wrapped in a check against the thread
it is guaranteed that none of the threads will start until all have reached
the start of the benchmark loop, and all will have finished before any thread
exits the benchmark loop. (This behavior is also provided by the `KeepRunning()`
API.) As such, any global setup or teardown can be wrapped in a check against the thread
index:
```c++
@ -306,7 +310,7 @@ static void BM_MultiThreaded(benchmark::State& state) {
if (state.thread_index == 0) {
// Setup code here.
}
while (state.KeepRunning()) {
for (auto _ : state) {
// Run the test as normal.
}
if (state.thread_index == 0) {
@ -333,7 +337,7 @@ correct or accurate enough, completely manual timing is supported using
the `UseManualTime` function.
When `UseManualTime` is used, the benchmarked code must call
`SetIterationTime` once per iteration of the `KeepRunning` loop to
`SetIterationTime` once per iteration of the benchmark loop to
report the manually measured time.
An example use case for this is benchmarking GPU execution (e.g. OpenCL
@ -349,7 +353,7 @@ static void BM_ManualTiming(benchmark::State& state) {
static_cast<double>(microseconds)
};
while (state.KeepRunning()) {
for (auto _ : state) {
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(sleep_duration);
@ -372,7 +376,7 @@ functions can be used.
```c++
static void BM_test(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
int x = 0;
for (int i=0; i < 64; ++i) {
benchmark::DoNotOptimize(x += i);
@ -411,7 +415,7 @@ away.
```c++
static void BM_vector_push_back(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
std::vector<int> v;
v.reserve(1);
benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
@ -467,7 +471,7 @@ by a lambda function.
```c++
void BM_spin_empty(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
for (int x = 0; x < state.range(0); ++x) {
benchmark::DoNotOptimize(x);
}
@ -496,13 +500,13 @@ For Example:
class MyFixture : public benchmark::Fixture {};
BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
while (st.KeepRunning()) {
for (auto _ : st) {
...
}
}
BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
while (st.KeepRunning()) {
for (auto _ : st) {
...
}
}
@ -523,13 +527,13 @@ template<typename T>
class MyFixture : public benchmark::Fixture {};
BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
while (st.KeepRunning()) {
for (auto _ : st) {
...
}
}
BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
while (st.KeepRunning()) {
for (auto _ : st) {
...
}
}
@ -545,7 +549,7 @@ will add columns "Foo", "Bar" and "Baz" in its output:
```c++
static void UserCountersExample1(benchmark::State& state) {
double numFoos = 0, numBars = 0, numBazs = 0;
while (state.KeepRunning()) {
for (auto _ : state) {
// ... count Foo,Bar,Baz events
}
state.counters["Foo"] = numFoos;
@ -668,11 +672,12 @@ When errors caused by external influences, such as file I/O and network
communication, occur within a benchmark the
`State::SkipWithError(const char* msg)` function can be used to skip that run
of benchmark and report the error. Note that only future iterations of the
`KeepRunning()` are skipped. Users may explicitly return to exit the
benchmark immediately.
`KeepRunning()` are skipped. For the ranged-for version of the benchmark loop,
users must explicitly exit the loop, otherwise all iterations will be performed.
Users may explicitly return to exit the benchmark immediately.
The `SkipWithError(...)` function may be used at any point within the benchmark,
including before and after the `KeepRunning()` loop.
including before and after the benchmark loop.
For example:
@ -683,7 +688,7 @@ static void BM_test(benchmark::State& state) {
state.SkipWithError("Resource is not good!");
// KeepRunning() loop will not be entered.
}
while (state.KeepRunning()) {
while (state.KeepRunning()) {
auto data = resource.read_data();
if (!resource.good()) {
state.SkipWithError("Failed to read data!");
@ -692,6 +697,14 @@ static void BM_test(benchmark::State& state) {
do_stuff(data);
}
}
static void BM_test_ranged_for(benchmark::State& state) {
state.SkipWithError("test will not be entered");
for (auto _ : state) {
state.SkipWithError("Failed!");
break; // REQUIRED to prevent all further iterations.
}
}
```
## Running a subset of the benchmarks

View File

@ -18,7 +18,7 @@
// Define a function that executes the code to be measured a
// specified number of times:
static void BM_StringCreation(benchmark::State& state) {
while (state.KeepRunning())
for (auto _ : state)
std::string empty_string;
}
@ -28,7 +28,7 @@ BENCHMARK(BM_StringCreation);
// Define another benchmark
static void BM_StringCopy(benchmark::State& state) {
std::string x = "hello";
while (state.KeepRunning())
for (auto _ : state)
std::string copy(x);
}
BENCHMARK(BM_StringCopy);
@ -54,7 +54,7 @@ int main(int argc, char** argv) {
static void BM_memcpy(benchmark::State& state) {
char* src = new char[state.range(0)]; char* dst = new char[state.range(0)];
memset(src, 'x', state.range(0));
while (state.KeepRunning())
for (auto _ : state)
memcpy(dst, src, state.range(0));
state.SetBytesProcessed(int64_t(state.iterations()) *
int64_t(state.range(0)));
@ -72,7 +72,7 @@ BENCHMARK(BM_memcpy)->Range(8, 8<<10);
// example, the following code defines a family of microbenchmarks for
// measuring the speed of set insertion.
static void BM_SetInsert(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
state.PauseTiming();
set<int> data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
@ -114,7 +114,7 @@ BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
template <class Q> int BM_Sequential(benchmark::State& state) {
Q q;
typename Q::value_type v;
while (state.KeepRunning()) {
for (auto _ : state) {
for (int i = state.range(0); i--; )
q.push(v);
for (int e = state.range(0); e--; )
@ -135,15 +135,15 @@ void BM_test(benchmark::State& state) {
BENCHMARK(BM_test)->MinTime(2.0); // Run for at least 2 seconds.
In a multithreaded test, it is guaranteed that none of the threads will start
until all have called KeepRunning, and all will have finished before KeepRunning
returns false. As such, any global setup or teardown you want to do can be
wrapped in a check against the thread index:
until all have reached the loop start, and all will have finished before any
thread exits the loop body. As such, any global setup or teardown you want to
do can be wrapped in a check against the thread index:
static void BM_MultiThreaded(benchmark::State& state) {
if (state.thread_index == 0) {
// Setup code here.
}
while (state.KeepRunning()) {
for (auto _ : state) {
// Run the test as normal.
}
if (state.thread_index == 0) {
@ -442,7 +442,7 @@ class State {
// REQUIRES: timer is running and 'SkipWithError(...)' has not been called
// by the current thread.
// Stop the benchmark timer. If not called, the timer will be
// automatically stopped after KeepRunning() returns false for the first time.
// automatically stopped after the last iteration of the benchmark loop.
//
// For threaded benchmarks the PauseTiming() function only pauses the timing
// for the current thread.
@ -458,7 +458,8 @@ class State {
// REQUIRES: timer is not running and 'SkipWithError(...)' has not been called
// by the current thread.
// Start the benchmark timer. The timer is NOT running on entrance to the
// benchmark function. It begins running after the first call to KeepRunning()
// benchmark function. It begins running after control flow enters the
// benchmark loop.
//
// NOTE: PauseTiming()/ResumeTiming() are relatively
// heavyweight, and so their use should generally be avoided
@ -486,7 +487,7 @@ class State {
// responsibility to exit the scope as needed.
void SkipWithError(const char* msg);
// REQUIRES: called exactly once per iteration of the KeepRunning loop.
// REQUIRES: called exactly once per iteration of the benchmarking loop.
// Set the manually measured time for this benchmark iteration, which
// is used instead of automatically measured time if UseManualTime() was
// specified.
@ -501,7 +502,7 @@ class State {
// value > 0, the report is printed in MB/sec instead of nanoseconds
// per iteration.
//
// REQUIRES: a benchmark has exited its KeepRunning loop.
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
void SetBytesProcessed(size_t bytes) { bytes_processed_ = bytes; }
@ -524,7 +525,7 @@ class State {
// executing benchmark. It is typically called at the end of a processing
// benchmark where a processing items/second output is desired.
//
// REQUIRES: a benchmark has exited its KeepRunning loop.
// REQUIRES: a benchmark has exited its benchmarking loop.
BENCHMARK_ALWAYS_INLINE
void SetItemsProcessed(size_t items) { items_processed_ = items; }
@ -542,7 +543,7 @@ class State {
// Produces output that looks like:
// BM_Compress 50 50 14115038 compress:27.3%
//
// REQUIRES: a benchmark has exited its KeepRunning loop.
// REQUIRES: a benchmark has exited its benchmarking loop.
void SetLabel(const char* label);
void BENCHMARK_ALWAYS_INLINE SetLabel(const std::string& str) {

View File

@ -4,7 +4,7 @@
#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
void BM_empty(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
}
@ -12,7 +12,7 @@ BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();
void BM_spin_empty(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
for (int x = 0; x < state.range(0); ++x) {
benchmark::DoNotOptimize(x);
}
@ -25,7 +25,7 @@ void BM_spin_pause_before(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
while (state.KeepRunning()) {
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
@ -35,7 +35,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
void BM_spin_pause_during(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
state.PauseTiming();
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
@ -50,7 +50,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
void BM_pause_during(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
state.PauseTiming();
state.ResumeTiming();
}
@ -61,7 +61,7 @@ BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
void BM_spin_pause_after(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
@ -77,7 +77,7 @@ void BM_spin_pause_before_and_after(benchmark::State& state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
while (state.KeepRunning()) {
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i) {
benchmark::DoNotOptimize(i);
}
@ -90,7 +90,7 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
void BM_empty_stop_start(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_empty_stop_start);

View File

@ -53,7 +53,7 @@ std::vector<int>* test_vector = nullptr;
static void BM_Factorial(benchmark::State& state) {
int fac_42 = 0;
while (state.KeepRunning()) fac_42 = Factorial(8);
for (auto _ : state) fac_42 = Factorial(8);
// Prevent compiler optimizations
std::stringstream ss;
ss << fac_42;
@ -64,7 +64,7 @@ BENCHMARK(BM_Factorial)->UseRealTime();
static void BM_CalculatePiRange(benchmark::State& state) {
double pi = 0.0;
while (state.KeepRunning()) pi = CalculatePi(state.range(0));
for (auto _ : state) pi = CalculatePi(state.range(0));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
@ -73,7 +73,7 @@ BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
static void BM_CalculatePi(benchmark::State& state) {
static const int depth = 1024;
while (state.KeepRunning()) {
for (auto _ : state) {
benchmark::DoNotOptimize(CalculatePi(depth));
}
}
@ -82,7 +82,7 @@ BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
state.PauseTiming();
std::set<int> data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
@ -97,7 +97,7 @@ template <typename Container,
typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State& state) {
ValueType v = 42;
while (state.KeepRunning()) {
for (auto _ : state) {
Container c;
for (int i = state.range(0); --i;) c.push_back(v);
}
@ -116,7 +116,7 @@ BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
static void BM_StringCompare(benchmark::State& state) {
std::string s1(state.range(0), '-');
std::string s2(state.range(0), '-');
while (state.KeepRunning()) benchmark::DoNotOptimize(s1.compare(s2));
for (auto _ : state) benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
@ -126,7 +126,7 @@ static void BM_SetupTeardown(benchmark::State& state) {
test_vector = new std::vector<int>();
}
int i = 0;
while (state.KeepRunning()) {
for (auto _ : state) {
std::lock_guard<std::mutex> l(test_vector_mu);
if (i % 2 == 0)
test_vector->push_back(i);
@ -142,7 +142,7 @@ BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
static void BM_LongTest(benchmark::State& state) {
double tracker = 0.0;
while (state.KeepRunning()) {
for (auto _ : state) {
for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i);
}
@ -159,7 +159,7 @@ static void BM_ParallelMemset(benchmark::State& state) {
test_vector = new std::vector<int>(size);
}
while (state.KeepRunning()) {
for (auto _ : state) {
for (int i = from; i < to; i++) {
// No need to lock test_vector_mu as ranges
// do not overlap between threads.
@ -179,7 +179,7 @@ static void BM_ManualTiming(benchmark::State& state) {
std::chrono::duration<double, std::micro> sleep_duration{
static_cast<double>(microseconds)};
while (state.KeepRunning()) {
for (auto _ : state) {
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(
@ -201,7 +201,7 @@ BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
template <class... Args>
void BM_with_args(benchmark::State& state, Args&&...) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);

View File

@ -46,7 +46,7 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name,
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
for (int i = 0; i < 1024; ++i) {
benchmark::DoNotOptimize(&i);
}
@ -94,7 +94,7 @@ void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
const int item_not_in_vector =
state.range(0) * 2; // Test worst case scenario (item not in vector)
while (state.KeepRunning()) {
for (auto _ : state) {
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector));
}
state.SetComplexityN(state.range(0));
@ -129,7 +129,7 @@ ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n);
static void BM_Complexity_O_N_log_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range(0));
while (state.KeepRunning()) {
for (auto _ : state) {
std::sort(v.begin(), v.end());
}
state.SetComplexityN(state.range(0));

View File

@ -47,7 +47,7 @@ void BM_diagnostic_test(benchmark::State& state) {
if (called_once == false) try_invalid_pause_resume(state);
while (state.KeepRunning()) {
for (auto _ : state) {
benchmark::DoNotOptimize(state.iterations());
}
@ -57,6 +57,22 @@ void BM_diagnostic_test(benchmark::State& state) {
}
BENCHMARK(BM_diagnostic_test);
void BM_diagnostic_test_keep_running(benchmark::State& state) {
static bool called_once = false;
if (called_once == false) try_invalid_pause_resume(state);
while(state.KeepRunning()) {
benchmark::DoNotOptimize(state.iterations());
}
if (called_once == false) try_invalid_pause_resume(state);
called_once = true;
}
BENCHMARK(BM_diagnostic_test_keep_running);
int main(int argc, char* argv[]) {
benchmark::internal::GetAbortHandler() = &TestHandler;
benchmark::Initialize(&argc, argv);

View File

@ -36,31 +36,31 @@ class TestReporter : public benchmark::ConsoleReporter {
} // end namespace
static void NoPrefix(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(NoPrefix);
static void BM_Foo(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_Foo);
static void BM_Bar(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_Bar);
static void BM_FooBar(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_FooBar);
static void BM_FooBa(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_FooBa);

View File

@ -28,7 +28,7 @@ class MyFixture : public ::benchmark::Fixture {
BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) {
assert(data.get() != nullptr);
assert(*data == 42);
while (st.KeepRunning()) {
for (auto _ : st) {
}
}
@ -37,7 +37,7 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) {
assert(data.get() != nullptr);
assert(*data == 42);
}
while (st.KeepRunning()) {
for (auto _ : st) {
assert(data.get() != nullptr);
assert(*data == 42);
}

View File

@ -18,7 +18,7 @@ std::map<int, int> ConstructRandomMap(int size) {
// Basic version.
static void BM_MapLookup(benchmark::State& state) {
const int size = state.range(0);
while (state.KeepRunning()) {
for (auto _ : state) {
state.PauseTiming();
std::map<int, int> m = ConstructRandomMap(size);
state.ResumeTiming();
@ -44,7 +44,7 @@ class MapFixture : public ::benchmark::Fixture {
BENCHMARK_DEFINE_F(MapFixture, Lookup)(benchmark::State& state) {
const int size = state.range(0);
while (state.KeepRunning()) {
for (auto _ : state) {
for (int i = 0; i < size; ++i) {
benchmark::DoNotOptimize(m.find(rand() % size));
}

View File

@ -43,7 +43,7 @@ class MultipleRangesFixture : public ::benchmark::Fixture {
};
BENCHMARK_DEFINE_F(MultipleRangesFixture, Empty)(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
int product = state.range(0) * state.range(1) * state.range(2);
for (int x = 0; x < product; x++) {
benchmark::DoNotOptimize(x);
@ -60,13 +60,13 @@ void BM_CheckDefaultArgument(benchmark::State& state) {
// Test that the 'range()' without an argument is the same as 'range(0)'.
assert(state.range() == state.range(0));
assert(state.range() != state.range(1));
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_CheckDefaultArgument)->Ranges({{1, 5}, {6, 10}});
static void BM_MultipleRanges(benchmark::State& st) {
while (st.KeepRunning()) {
for (auto _ : st) {
}
}
BENCHMARK(BM_MultipleRanges)->Ranges({{5, 5}, {6, 6}});

View File

@ -8,13 +8,13 @@
#include <cassert>
void BM_basic(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
void BM_basic_slow(benchmark::State& state) {
std::chrono::milliseconds sleep_duration(state.range(0));
while (state.KeepRunning()) {
for (auto _ : state) {
std::this_thread::sleep_for(
std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
}
@ -44,7 +44,7 @@ void CustomArgs(benchmark::internal::Benchmark* b) {
BENCHMARK(BM_basic)->Apply(CustomArgs);
void BM_explicit_iteration_count(benchmark::State& st) {
void BM_explicit_iteration_count(benchmark::State& state) {
// Test that benchmarks specified with an explicit iteration count are
// only run once.
static bool invoked_before = false;
@ -52,12 +52,12 @@ void BM_explicit_iteration_count(benchmark::State& st) {
invoked_before = true;
// Test that the requested iteration count is respected.
assert(st.max_iterations == 42);
assert(state.max_iterations == 42);
size_t actual_iterations = 0;
while (st.KeepRunning())
for (auto _ : state)
++actual_iterations;
assert(st.iterations() == st.max_iterations);
assert(st.iterations() == 42);
assert(state.iterations() == state.max_iterations);
assert(state.iterations() == 42);
}
BENCHMARK(BM_explicit_iteration_count)->Iterations(42);

View File

@ -61,7 +61,7 @@ typedef benchmark::internal::Benchmark* ReturnVal;
// Test RegisterBenchmark with no additional arguments
//----------------------------------------------------------------------------//
void BM_function(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_function);
@ -77,7 +77,7 @@ ADD_CASES({"BM_function"}, {"BM_function_manual_registration"});
#ifndef BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK
void BM_extra_args(benchmark::State& st, const char* label) {
while (st.KeepRunning()) {
for (auto _ : st) {
}
st.SetLabel(label);
}
@ -99,7 +99,7 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"});
struct CustomFixture {
void operator()(benchmark::State& st) {
while (st.KeepRunning()) {
for (auto _ : st) {
}
}
};
@ -116,7 +116,7 @@ void TestRegistrationAtRuntime() {
{
const char* x = "42";
auto capturing_lam = [=](benchmark::State& st) {
while (st.KeepRunning()) {
for (auto _ : st) {
}
st.SetLabel(x);
};

View File

@ -20,7 +20,7 @@ ADD_CASES(TC_CSVOut, {{"%csv_header"}});
// ========================================================================= //
void BM_basic(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_basic);
@ -39,14 +39,14 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}});
// ========================================================================= //
void BM_bytes_per_second(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
state.SetBytesProcessed(1);
}
BENCHMARK(BM_bytes_per_second);
ADD_CASES(TC_ConsoleOut,
{{"^BM_bytes_per_second %console_report +%floatB/s$"}});
{{"^BM_bytes_per_second %console_report +%float[kM]{0,1}B/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
@ -61,14 +61,14 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}});
// ========================================================================= //
void BM_items_per_second(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
state.SetItemsProcessed(1);
}
BENCHMARK(BM_items_per_second);
ADD_CASES(TC_ConsoleOut,
{{"^BM_items_per_second %console_report +%float items/s$"}});
{{"^BM_items_per_second %console_report +%float[kM]{0,1} items/s$"}});
ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"},
{"\"iterations\": %int,$", MR_Next},
{"\"real_time\": %float,$", MR_Next},
@ -83,7 +83,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_items_per_second\",%csv_items_report$"}});
// ========================================================================= //
void BM_label(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
state.SetLabel("some label");
}
@ -106,7 +106,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some "
void BM_error(benchmark::State& state) {
state.SkipWithError("message");
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_error);
@ -123,7 +123,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_error\",,,,,,,,true,\"message\"$"}});
// ========================================================================= //
void BM_no_arg_name(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_no_arg_name)->Arg(3);
@ -136,7 +136,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}});
// ========================================================================= //
void BM_arg_name(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3);
@ -149,7 +149,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}});
// ========================================================================= //
void BM_arg_names(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"});
@ -163,7 +163,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}});
// ========================================================================= //
void BM_Complexity_O1(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
state.SetComplexityN(state.range(0));
}
@ -179,7 +179,7 @@ ADD_CASES(TC_ConsoleOut, {{"^BM_Complexity_O1_BigO %bigOStr %bigOStr[ ]*$"},
// Test that non-aggregate data is printed by default
void BM_Repeat(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
// need two repetitions min to be able to output any aggregate output
@ -246,7 +246,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"},
// Test that a non-repeated test still prints non-aggregate results even when
// only-aggregate reports have been requested
void BM_RepeatOnce(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly();
@ -256,7 +256,7 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}});
// Test that non-aggregate data is not reported
void BM_SummaryRepeat(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly();
@ -275,7 +275,7 @@ ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not},
{"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}});
void BM_RepeatTimeUnit(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_RepeatTimeUnit)
@ -308,7 +308,7 @@ const auto UserStatistics = [](const std::vector<double>& v) {
return v.back();
};
void BM_UserStats(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
}
BENCHMARK(BM_UserStats)

View File

@ -13,13 +13,13 @@ public:
};
BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st) {
while (st.KeepRunning()) {
for (auto _ : st) {
data += 1;
}
}
BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, Bar, double)(benchmark::State& st) {
while (st.KeepRunning()) {
for (auto _ : st) {
data += 1.0;
}
}

View File

@ -54,7 +54,7 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,"
// ========================================================================= //
void BM_Counters_Tabular(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
@ -98,7 +98,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular);
// ========================================================================= //
void BM_CounterRates_Tabular(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
@ -145,7 +145,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int",
// set only some of the counters
void BM_CounterSet0_Tabular(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
@ -177,7 +177,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet0_Tabular", &CheckSet0);
// again.
void BM_CounterSet1_Tabular(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({
@ -213,7 +213,7 @@ CHECK_BENCHMARK_RESULTS("BM_CounterSet1_Tabular/threads:%int", &CheckSet1);
// set only some of the counters, different set now.
void BM_CounterSet2_Tabular(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters.insert({

View File

@ -19,7 +19,7 @@ ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}});
// ========================================================================= //
void BM_Counters_Simple(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
state.counters["foo"] = 1;
state.counters["bar"] = 2 * (double)state.iterations();
@ -51,7 +51,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple);
namespace { int num_calls1 = 0; }
void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
state.counters["foo"] = 1;
state.counters["bar"] = ++num_calls1;
@ -92,7 +92,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec",
// ========================================================================= //
void BM_Counters_Rate(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate};
@ -124,7 +124,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate);
// ========================================================================= //
void BM_Counters_Threads(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
state.counters["foo"] = 1;
state.counters["bar"] = 2;
@ -153,7 +153,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Threads/threads:%int", &CheckThreads);
// ========================================================================= //
void BM_Counters_AvgThreads(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreads};
@ -184,7 +184,7 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int",
// ========================================================================= //
void BM_Counters_AvgThreadsRate(benchmark::State& state) {
while (state.KeepRunning()) {
for (auto _ : state) {
}
namespace bm = benchmark;
state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate};