Merge branch 'master' into skip_with_error

Eric Fiselier 2016-05-24 21:52:55 -06:00
commit 90a069f67f
4 changed files with 44 additions and 3 deletions

View file

@@ -279,6 +279,17 @@ the minimum time, or the wallclock time is 5x minimum time. The minimum time is
set as a flag `--benchmark_min_time` or per benchmark by calling `MinTime` on
the registered benchmark object.
## Reporting the mean and standard deviation of repeated benchmarks

By default each benchmark is run once and that single result is reported.
However, benchmarks are often noisy and a single result may not be representative
of the overall behavior, so it is possible to rerun the benchmark repeatedly.

The number of runs of each benchmark is specified globally by the
`--benchmark_repetitions` flag, or on a per-benchmark basis by calling
`Repetitions` on the registered benchmark object. When a benchmark is run
more than once, the mean and standard deviation of the runs will be reported.

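For example, a benchmark registered with a fixed repetition count might look like
the following sketch (the benchmark body itself is only an illustration):

```c++
static void BM_StringCreation(benchmark::State& state) {
  while (state.KeepRunning())
    std::string empty_string;
}
// Run this benchmark 10 times; the per-run results are reported first,
// followed by the mean and standard deviation rows.
BENCHMARK(BM_StringCreation)->Repetitions(10);
```

A benchmark registered without the `Repetitions` call can still be repeated from
the command line, e.g. with `--benchmark_repetitions=10`.
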
## Fixtures
Fixture tests are created by
first defining a type that derives from ::benchmark::Fixture and then

View file

@@ -501,8 +501,14 @@ public:
// Set the minimum amount of time to use when running this benchmark. This
// option overrides the `benchmark_min_time` flag.
// REQUIRES: `t > 0`
Benchmark* MinTime(double t);
// Specify the number of times to repeat this benchmark. This option overrides
// the `benchmark_repetitions` flag.
// REQUIRES: `n > 0`
Benchmark* Repetitions(int n);
// If a particular benchmark is I/O bound, runs multiple threads internally or
// if for some reason CPU timings are not representative, call this method. If
// called, the elapsed time will be used to control how many iterations are
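
A rough usage sketch for the two builder methods declared above (the benchmark
function here is hypothetical): both return `Benchmark*`, so they chain on the
object produced by the `BENCHMARK` macro.

```c++
// Hypothetical benchmark used only to illustrate chaining the options above.
static void BM_Compute(benchmark::State& state) {
  while (state.KeepRunning()) {
    benchmark::DoNotOptimize(42 * 42);
  }
}
// Run each repetition for at least 0.5 seconds, and repeat the benchmark 4 times.
BENCHMARK(BM_Compute)->MinTime(0.5)->Repetitions(4);
```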

View file

@@ -311,6 +311,7 @@ struct Benchmark::Instance {
bool use_manual_time;
BigO complexity;
bool last_benchmark_instance;
int repetitions;
double min_time;
int threads; // Number of concurrent threads to use
bool multithreaded; // Is benchmark multi-threaded?
@@ -350,6 +351,7 @@ public:
void RangePair(int lo1, int hi1, int lo2, int hi2);
void RangeMultiplier(int multiplier);
void MinTime(double n);
void Repetitions(int n);
void UseRealTime();
void UseManualTime();
void Complexity(BigO complexity);
@@ -369,6 +371,7 @@ private:
TimeUnit time_unit_;
int range_multiplier_;
double min_time_;
int repetitions_;
bool use_real_time_;
bool use_manual_time_;
BigO complexity_;
@@ -432,6 +435,7 @@ bool BenchmarkFamilies::FindBenchmarks(
instance.time_unit = family->time_unit_;
instance.range_multiplier = family->range_multiplier_;
instance.min_time = family->min_time_;
instance.repetitions = family->repetitions_;
instance.use_real_time = family->use_real_time_;
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
@@ -448,6 +452,9 @@ bool BenchmarkFamilies::FindBenchmarks(
if (!IsZero(family->min_time_)) {
instance.name += StringPrintF("/min_time:%0.3f", family->min_time_);
}
if (family->repetitions_ != 0) {
instance.name += StringPrintF("/repeats:%d", family->repetitions_);
}
if (family->use_manual_time_) {
instance.name += "/manual_time";
} else if (family->use_real_time_) {
@@ -471,7 +478,7 @@ bool BenchmarkFamilies::FindBenchmarks(
BenchmarkImp::BenchmarkImp(const char* name)
: name_(name), arg_count_(-1), time_unit_(kNanosecond),
range_multiplier_(kRangeMultiplier), min_time_(0.0),
range_multiplier_(kRangeMultiplier), min_time_(0.0), repetitions_(0),
use_real_time_(false), use_manual_time_(false),
complexity_(oNone) {
}
@@ -539,6 +546,12 @@ void BenchmarkImp::MinTime(double t) {
min_time_ = t;
}
void BenchmarkImp::Repetitions(int n) {
CHECK(n > 0);
repetitions_ = n;
}
void BenchmarkImp::UseRealTime() {
CHECK(!use_manual_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
use_real_time_ = true;
@@ -651,6 +664,12 @@ Benchmark* Benchmark::RangeMultiplier(int multiplier) {
return this;
}
Benchmark* Benchmark::Repetitions(int t) {
imp_->Repetitions(t);
return this;
}
Benchmark* Benchmark::MinTime(double t) {
imp_->MinTime(t);
return this;
@@ -730,7 +749,9 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b,
if (b.multithreaded)
pool.resize(b.threads);
for (int i = 0; i < FLAGS_benchmark_repetitions; i++) {
const int repeats = b.repetitions != 0 ? b.repetitions
: FLAGS_benchmark_repetitions;
for (int i = 0; i < repeats; i++) {
std::string mem;
for (;;) {
// Try benchmark
@@ -929,12 +950,14 @@ void RunMatchingBenchmarks(const std::vector<Benchmark::Instance>& benchmarks,
CHECK(reporter != nullptr);
// Determine the width of the name field using a minimum width of 10.
bool has_repetitions = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
for (const Benchmark::Instance& benchmark : benchmarks) {
name_field_width =
std::max<size_t>(name_field_width, benchmark.name.size());
has_repetitions |= benchmark.repetitions > 1;
}
if (FLAGS_benchmark_repetitions > 1)
if (has_repetitions)
name_field_width += std::strlen("_stddev");
// Print header here

View file

@@ -31,6 +31,7 @@ BENCHMARK(BM_basic)->MinTime(0.7);
BENCHMARK(BM_basic)->UseRealTime();
BENCHMARK(BM_basic)->ThreadRange(2, 4);
BENCHMARK(BM_basic)->ThreadPerCpu();
BENCHMARK(BM_basic)->Repetitions(3);
void CustomArgs(benchmark::internal::Benchmark* b) {
for (int i = 0; i < 10; ++i) {