implemented complexity reporting

This commit is contained in:
Ismael 2016-05-21 08:55:43 +02:00
parent 872ff01a49
commit 2e5c397b48
7 changed files with 95 additions and 29 deletions

View File

@ -49,9 +49,11 @@ class BenchmarkReporter {
bytes_per_second(0),
items_per_second(0),
max_heapbytes_used(0),
complexity(O_1),
complexity(O_None),
arg1(0),
arg2(0) {}
arg2(0),
report_bigO(false),
report_rms(false) {}
std::string benchmark_name;
std::string report_label; // Empty if not set by benchmark.
@ -71,6 +73,10 @@ class BenchmarkReporter {
BigO complexity;
int arg1;
int arg2;
// Inform the print function whether the current run is a complexity report
bool report_bigO;
bool report_rms;
};
// Called once for every suite of benchmarks run.
@ -102,6 +108,7 @@ protected:
static void ComputeStats(const std::vector<Run> & reports, Run& mean, Run& stddev);
static void ComputeBigO(const std::vector<Run> & reports, Run& bigO, Run& rms);
static TimeUnitMultiplier GetTimeUnitAndMultiplier(TimeUnit unit);
static std::string GetBigO(BigO complexity);
};
// Simple reporter that outputs benchmark data to the console. This is the

View File

@ -88,7 +88,7 @@ void ConsoleReporter::ReportComplexity(const std::vector<Run> & complexity_repor
Run bigO_data;
Run rms_data;
BenchmarkReporter::ComputeBigO(complexity_reports, bigO_data, rms_data);
// Output using PrintRun.
PrintRunData(bigO_data);
PrintRunData(rms_data);
@ -115,7 +115,22 @@ void ConsoleReporter::PrintRunData(const Run& result) {
ColorPrintf(COLOR_GREEN, "%-*s ",
name_field_width_, result.benchmark_name.c_str());
if (result.iterations == 0) {
if(result.report_bigO) {
std::string big_o = result.report_bigO ? GetBigO(result.complexity) : "";
ColorPrintf(COLOR_YELLOW, "%10.4f %s %10.4f %s ",
result.real_accumulated_time * multiplier,
big_o.c_str(),
result.cpu_accumulated_time * multiplier,
big_o.c_str());
}
else if(result.report_rms) {
ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ",
result.real_accumulated_time * multiplier * 100,
"%",
result.cpu_accumulated_time * multiplier * 100,
"%");
}
else if (result.iterations == 0) {
ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ",
result.real_accumulated_time * multiplier,
timeLabel,

View File

@ -121,13 +121,23 @@ void JSONReporter::ReportComplexity(const std::vector<Run> & complexity_reports)
return;
}
std::string indent(4, ' ');
std::ostream& out = std::cout;
if (!first_report_) {
out << ",\n";
}
Run bigO_data;
Run rms_data;
BenchmarkReporter::ComputeBigO(complexity_reports, bigO_data, rms_data);
// Output using PrintRun.
out << indent << "{\n";
PrintRunData(bigO_data);
out << indent << "},\n";
out << indent << "{\n";
PrintRunData(rms_data);
out << indent << '}';
}
void JSONReporter::Finalize() {

View File

@ -41,7 +41,7 @@ double fittingCurve(double N, benchmark::BigO Complexity) {
// - Complexity : Fitting curve.
// For a deeper explanation of the algorithm logic, see the README file at http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
LeastSq leastSq(const std::vector<int>& N, const std::vector<int>& Time, const benchmark::BigO Complexity) {
LeastSq leastSq(const std::vector<int>& N, const std::vector<double>& Time, const benchmark::BigO Complexity) {
assert(N.size() == Time.size() && N.size() >= 2);
assert(Complexity != benchmark::O_None &&
Complexity != benchmark::O_Auto);
@ -79,7 +79,7 @@ LeastSq leastSq(const std::vector<int>& N, const std::vector<int>& Time, const b
double mean = sigmaTime / N.size();
result.rms = sqrt(rms) / mean; // Normalized RMS by the mean of the observed values
result.rms = sqrt(rms / N.size()) / mean; // Normalized RMS by the mean of the observed values
return result;
}
@ -90,7 +90,7 @@ LeastSq leastSq(const std::vector<int>& N, const std::vector<int>& Time, const b
// - Complexity : If different than O_Auto, the fitting curve will stick to this one. If it is O_Auto, it will be calculated
// the best fitting curve.
LeastSq minimalLeastSq(const std::vector<int>& N, const std::vector<int>& Time, const benchmark::BigO Complexity) {
LeastSq minimalLeastSq(const std::vector<int>& N, const std::vector<double>& Time, const benchmark::BigO Complexity) {
assert(N.size() == Time.size() && N.size() >= 2); // Do not compute the fitting curve if fewer than two benchmark runs are given
assert(Complexity != benchmark::O_None); // Check that complexity is a valid parameter.

View File

@ -41,6 +41,6 @@ struct LeastSq {
};
// Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error.
LeastSq minimalLeastSq(const std::vector<int>& N, const std::vector<int>& Time, const benchmark::BigO Complexity = benchmark::O_Auto);
LeastSq minimalLeastSq(const std::vector<int>& N, const std::vector<double>& Time, const benchmark::BigO Complexity = benchmark::O_Auto);
#endif

View File

@ -17,6 +17,7 @@
#include <cstdlib>
#include <vector>
#include <tuple>
#include "check.h"
#include "stat.h"
@ -83,35 +84,67 @@ void BenchmarkReporter::ComputeBigO(
Run& bigO, Run& rms) {
CHECK(reports.size() >= 2) << "Cannot compute asymptotic complexity for less than 2 reports";
// Accumulators.
Stat1_d real_accumulated_time_stat;
Stat1_d cpu_accumulated_time_stat;
std::vector<int> N;
std::vector<double> RealTime;
std::vector<double> CpuTime;
// Populate the accumulators.
for (Run const& run : reports) {
real_accumulated_time_stat +=
Stat1_d(run.real_accumulated_time/run.iterations, run.iterations);
cpu_accumulated_time_stat +=
Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations);
N.push_back(run.arg1);
RealTime.push_back(run.real_accumulated_time/run.iterations);
CpuTime.push_back(run.cpu_accumulated_time/run.iterations);
}
LeastSq resultCpu = minimalLeastSq(N, CpuTime, reports[0].complexity);
// resultCpu.complexity is passed as parameter to resultReal because in case
// reports[0].complexity is O_Auto, the noise on the measured data could make
// the best fit function of Cpu and Real differ. In order to solve this, we take
// the best fitting function for the Cpu, and apply it to Real data.
LeastSq resultReal = minimalLeastSq(N, RealTime, resultCpu.complexity);
std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
// Get the data from the accumulator to BenchmarkReporter::Run's.
bigO.benchmark_name = benchmark_name + "_BigO";
bigO.iterations = 0;
bigO.real_accumulated_time = real_accumulated_time_stat.Mean();
bigO.cpu_accumulated_time = cpu_accumulated_time_stat.Mean();
bigO.real_accumulated_time = resultReal.coef;
bigO.cpu_accumulated_time = resultCpu.coef;
bigO.report_bigO = true;
bigO.complexity = resultCpu.complexity;
double multiplier;
const char* timeLabel;
std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(reports[0].time_unit);
// Only propagate the report label if it is the same for all runs
bigO.report_label = reports[0].report_label;
rms.benchmark_name = benchmark_name + "_RMS";
rms.report_label = bigO.report_label;
rms.iterations = 0;
rms.real_accumulated_time =
real_accumulated_time_stat.StdDev();
rms.cpu_accumulated_time =
cpu_accumulated_time_stat.StdDev();
rms.real_accumulated_time = resultReal.rms / multiplier;
rms.cpu_accumulated_time = resultCpu.rms / multiplier;
rms.report_rms = true;
rms.complexity = resultCpu.complexity;
}
// Map a fitted complexity class to the suffix printed next to the
// high-order-term coefficient in reports (e.g. "* N**2" for O(N^2)).
// O_None, O_Auto, and any unrecognized value yield an empty string,
// so nothing extra is printed for runs without a complexity fit.
std::string BenchmarkReporter::GetBigO(BigO complexity) {
switch (complexity) {
case O_N:
return "* N";
case O_N_Squared:
return "* N**2";
case O_N_Cubed:
return "* N**3";
case O_log_N:
return "* lgN";
case O_N_log_N:
return "* NlgN";
case O_1:
return "* 1";
default:
// Covers O_None / O_Auto: no suffix to display.
return "";
}
}
TimeUnitMultiplier BenchmarkReporter::GetTimeUnitAndMultiplier(TimeUnit unit) {

View File

@ -27,7 +27,7 @@ void BM_Complexity_O1(benchmark::State& state) {
while (state.KeepRunning()) {
}
}
BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<17) -> Complexity(benchmark::O_1);
BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::O_1);
static void BM_Complexity_O_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range_x());
@ -36,9 +36,9 @@ static void BM_Complexity_O_N(benchmark::State& state) {
benchmark::DoNotOptimize(std::find(v.begin(), v.end(), itemNotInVector));
}
}
BENCHMARK(BM_Complexity_O_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_N);
BENCHMARK(BM_Complexity_O_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_Auto);
BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_N);
BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_Auto);
static void BM_Complexity_O_N_Squared(benchmark::State& state) {
std::string s1(state.range_x(), '-');
std::string s2(state.range_x(), '-');
@ -76,7 +76,8 @@ static void BM_Complexity_O_log_N(benchmark::State& state) {
benchmark::DoNotOptimize(m.find(itemNotInVector));
}
}
BENCHMARK(BM_Complexity_O_log_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_log_N);
BENCHMARK(BM_Complexity_O_log_N)
->RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_log_N);
static void BM_Complexity_O_N_log_N(benchmark::State& state) {
auto v = ConstructRandomVector(state.range_x());
@ -84,10 +85,10 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) {
std::sort(v.begin(), v.end());
}
}
BENCHMARK(BM_Complexity_O_N_log_N) -> Range(1, 1<<16) -> Complexity(benchmark::O_N_log_N);
BENCHMARK(BM_Complexity_O_N_log_N) -> Range(1, 1<<16) -> Complexity(benchmark::O_Auto);
BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_N_log_N);
BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_Auto);
// Test benchmark with no range. Complexity is always calculated as O(1).
// Test benchmark with no range and check no complexity is calculated.
void BM_Extreme_Cases(benchmark::State& state) {
while (state.KeepRunning()) {
}