From 27f0baa190b4ebd31436b3e8a89bee7fbdc50eb9 Mon Sep 17 00:00:00 2001 From: Ismael Date: Wed, 18 May 2016 19:59:34 +0200 Subject: [PATCH 01/26] Added test file and Complexity() interface --- include/benchmark/benchmark_api.h | 18 +++++ src/benchmark.cc | 16 ++++- test/CMakeLists.txt | 5 +- test/complexity_test.cc | 107 ++++++++++++++++++++++++++++++ 4 files changed, 144 insertions(+), 2 deletions(-) create mode 100644 test/complexity_test.cc diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h index 2ded4814..8878b58d 100644 --- a/include/benchmark/benchmark_api.h +++ b/include/benchmark/benchmark_api.h @@ -231,6 +231,20 @@ enum TimeUnit { kMillisecond }; +// BigO is passed to a benchmark in order to specify the asymptotic computational +// complexity for the benchmark. +enum BigO { + O_None, + O_1, + O_N, + O_M_plus_N, + O_N_Squared, + O_N_Cubed, + O_log_N, + O_N_log_N, + O_Auto +}; + // State is passed to a running Benchmark and contains state for the // benchmark to use. class State { @@ -465,6 +479,10 @@ public: // to control how many iterations are run, and in the printing of items/second // or MB/second values. Benchmark* UseManualTime(); + + // Set the asymptotic computational complexity for the benchmark. This option + // called the asymptotic computational complexity will be shown on the output. + Benchmark* Complexity(BigO complexity); // Support for running multiple copies of the same benchmark concurrently // in multiple threads. This may be useful when measuring the scaling diff --git a/src/benchmark.cc b/src/benchmark.cc index 3d12d283..afe1e441 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -290,6 +290,7 @@ struct Benchmark::Instance { int range_multiplier; bool use_real_time; bool use_manual_time; + BigO complexity; double min_time; int threads; // Number of concurrent threads to use bool multithreaded; // Is benchmark multi-threaded? 
@@ -331,6 +332,7 @@ public: void MinTime(double n); void UseRealTime(); void UseManualTime(); + void Complexity(BigO complexity); void Threads(int t); void ThreadRange(int min_threads, int max_threads); void ThreadPerCpu(); @@ -349,6 +351,7 @@ private: double min_time_; bool use_real_time_; bool use_manual_time_; + BigO complexity_; std::vector thread_counts_; BenchmarkImp& operator=(BenchmarkImp const&); @@ -411,6 +414,7 @@ bool BenchmarkFamilies::FindBenchmarks( instance.min_time = family->min_time_; instance.use_real_time = family->use_real_time_; instance.use_manual_time = family->use_manual_time_; + instance.complexity = family->complexity_; instance.threads = num_threads; instance.multithreaded = !(family->thread_counts_.empty()); @@ -447,7 +451,8 @@ bool BenchmarkFamilies::FindBenchmarks( BenchmarkImp::BenchmarkImp(const char* name) : name_(name), arg_count_(-1), time_unit_(kNanosecond), range_multiplier_(kRangeMultiplier), min_time_(0.0), - use_real_time_(false), use_manual_time_(false) { + use_real_time_(false), use_manual_time_(false), + complexity_(O_None) { } BenchmarkImp::~BenchmarkImp() { @@ -523,6 +528,10 @@ void BenchmarkImp::UseManualTime() { use_manual_time_ = true; } +void BenchmarkImp::Complexity(BigO complexity){ + complexity_ = complexity; +} + void BenchmarkImp::Threads(int t) { CHECK_GT(t, 0); thread_counts_.push_back(t); @@ -636,6 +645,11 @@ Benchmark* Benchmark::UseManualTime() { return this; } +Benchmark* Benchmark::Complexity(BigO complexity) { + imp_->Complexity(complexity); + return this; +} + Benchmark* Benchmark::Threads(int t) { imp_->Threads(t); return this; diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index a10a53a9..1bc9dfeb 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -47,6 +47,9 @@ set_target_properties(cxx03_test PROPERTIES COMPILE_FLAGS "${CXX03_FLAGS}") add_test(cxx03 cxx03_test --benchmark_min_time=0.01) +compile_benchmark_test(complexity_test) +add_test(complexity_benchmark complexity_test --benchmark_min_time=0.01) + # Add the coverage command(s) if(CMAKE_BUILD_TYPE) string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER) @@ -66,7 +69,7 @@ if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage") COMMAND ${LCOV} -q -a before.lcov -a after.lcov --output-file final.lcov COMMAND ${LCOV} -q -r final.lcov "'${CMAKE_SOURCE_DIR}/test/*'" -o final.lcov COMMAND ${GENHTML} final.lcov -o lcov --demangle-cpp --sort -p "${CMAKE_BINARY_DIR}" -t benchmark - DEPENDS filter_test benchmark_test options_test basic_test fixture_test cxx03_test + DEPENDS filter_test benchmark_test options_test basic_test fixture_test cxx03_test complexity_test WORKING_DIRECTORY ${CMAKE_BINARY_DIR} COMMENT "Running LCOV" ) diff --git a/test/complexity_test.cc b/test/complexity_test.cc new file mode 100644 index 00000000..777c0f39 --- /dev/null +++ b/test/complexity_test.cc @@ -0,0 +1,107 @@ + +#include "benchmark/benchmark_api.h" + +#include +#include +#include +#include + +std::vector ConstructRandomVector(int size) { + std::vector v; + v.reserve(size); + for (int i = 0; i < size; ++i) { + v.push_back(rand() % size); + } + return v; +} + +std::map ConstructRandomMap(int size) { + std::map m; + for (int i = 0; i < size; ++i) { + m.insert(std::make_pair(rand() % size, rand() % size)); + } + return m; +} + +void BM_Complexity_O1(benchmark::State& state) { + while (state.KeepRunning()) { + } +} +BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<17) -> Complexity(benchmark::O_1); + +static void BM_Complexity_O_N(benchmark::State& state) { + auto v = 
ConstructRandomVector(state.range_x()); + const int itemNotInVector = state.range_x()*2; // Test worst case scenario (item not in vector) + while (state.KeepRunning()) { + benchmark::DoNotOptimize(std::find(v.begin(), v.end(), itemNotInVector)); + } +} +BENCHMARK(BM_Complexity_O_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_N); +BENCHMARK(BM_Complexity_O_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_Auto); + +static void BM_Complexity_O_M_plus_N(benchmark::State& state) { + std::string s1(state.range_x(), '-'); + std::string s2(state.range_x(), '-'); + while (state.KeepRunning()) + benchmark::DoNotOptimize(s1.compare(s2)); +} +BENCHMARK(BM_Complexity_O_M_plus_N) + ->RangeMultiplier(2)->Range(1<<10, 1<<18) -> Complexity(benchmark::O_M_plus_N); + +static void BM_Complexity_O_N_Squared(benchmark::State& state) { + std::string s1(state.range_x(), '-'); + std::string s2(state.range_x(), '-'); + while (state.KeepRunning()) + for(char& c1 : s1) { + for(char& c2 : s2) { + benchmark::DoNotOptimize(c1 = 'a'); + benchmark::DoNotOptimize(c2 = 'b'); + } + } +} +BENCHMARK(BM_Complexity_O_N_Squared) -> Range(1, 1<<8) -> Complexity(benchmark::O_N_Squared); + +static void BM_Complexity_O_N_Cubed(benchmark::State& state) { + std::string s1(state.range_x(), '-'); + std::string s2(state.range_x(), '-'); + std::string s3(state.range_x(), '-'); + while (state.KeepRunning()) + for(char& c1 : s1) { + for(char& c2 : s2) { + for(char& c3 : s3) { + benchmark::DoNotOptimize(c1 = 'a'); + benchmark::DoNotOptimize(c2 = 'b'); + benchmark::DoNotOptimize(c3 = 'c'); + } + } + } +} +BENCHMARK(BM_Complexity_O_N_Cubed) -> DenseRange(1, 8) -> Complexity(benchmark::O_N_Cubed); + +static void BM_Complexity_O_log_N(benchmark::State& state) { + auto m = ConstructRandomMap(state.range_x()); + const int itemNotInVector = state.range_x()*2; // Test worst case scenario (item not in vector) + while (state.KeepRunning()) { + benchmark::DoNotOptimize(m.find(itemNotInVector)); + } +} +BENCHMARK(BM_Complexity_O_log_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_log_N); + +static void BM_Complexity_O_N_log_N(benchmark::State& state) { + auto v = ConstructRandomVector(state.range_x()); + while (state.KeepRunning()) { + std::sort(v.begin(), v.end()); + } +} +BENCHMARK(BM_Complexity_O_N_log_N) -> Range(1, 1<<16) -> Complexity(benchmark::O_N_log_N); +BENCHMARK(BM_Complexity_O_N_log_N) -> Range(1, 1<<16) -> Complexity(benchmark::O_Auto); + +// Test benchmark with no range. Complexity is always calculated as O(1). 
+void BM_Extreme_Cases(benchmark::State& state) { + while (state.KeepRunning()) { + } +} +BENCHMARK(BM_Extreme_Cases); +BENCHMARK(BM_Extreme_Cases)->Arg(42); + +BENCHMARK_MAIN() \ No newline at end of file From b73dc22944cb933289bbdbf5bb6616dbfc50168f Mon Sep 17 00:00:00 2001 From: Ismael Date: Wed, 18 May 2016 21:25:00 +0200 Subject: [PATCH 02/26] implemented Complexity for O(1) --- include/benchmark/reporter.h | 22 ++++++++++-- src/benchmark.cc | 23 ++++++++++-- src/console_reporter.cc | 17 ++++++++- src/csv_reporter.cc | 21 +++++++++-- src/json_reporter.cc | 17 ++++++++- src/reporter.cc | 68 +++++++++++++++++++++++++++--------- test/complexity_test.cc | 4 +-- 7 files changed, 145 insertions(+), 27 deletions(-) diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index aaf5fbff..b3988002 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -48,7 +48,10 @@ class BenchmarkReporter { cpu_accumulated_time(0), bytes_per_second(0), items_per_second(0), - max_heapbytes_used(0) {} + max_heapbytes_used(0), + complexity(O_1), + arg1(0), + arg2(0) {} std::string benchmark_name; std::string report_label; // Empty if not set by benchmark. @@ -63,6 +66,11 @@ class BenchmarkReporter { // This is set to 0.0 if memory tracing is not enabled. double max_heapbytes_used; + + // Keep track of arguments to compute asymptotic complexity + BigO complexity; + int arg1; + int arg2; }; // Called once for every suite of benchmarks run. @@ -78,6 +86,12 @@ class BenchmarkReporter { // Note that all the grouped benchmark runs should refer to the same // benchmark, thus have the same name. virtual void ReportRuns(const std::vector& report) = 0; + + // Called once at the last instance of a benchmark range, gives information about + // asymptotic complexity and RMS. + // Note that all the benchmark runs in a range should refer to the same benchmark, + // thus have the same name. + virtual void ReportComplexity(const std::vector& complexity_reports) = 0; // Called once and only once after ever group of benchmarks is run and // reported. 
@@ -85,7 +99,8 @@ class BenchmarkReporter { virtual ~BenchmarkReporter(); protected: - static void ComputeStats(std::vector const& reports, Run* mean, Run* stddev); + static void ComputeStats(const std::vector & reports, Run& mean, Run& stddev); + static void ComputeBigO(const std::vector & reports, Run& bigO, Run& rms); static TimeUnitMultiplier GetTimeUnitAndMultiplier(TimeUnit unit); }; @@ -95,6 +110,7 @@ class ConsoleReporter : public BenchmarkReporter { public: virtual bool ReportContext(const Context& context); virtual void ReportRuns(const std::vector& reports); + virtual void ReportComplexity(const std::vector& complexity_reports); protected: virtual void PrintRunData(const Run& report); @@ -107,6 +123,7 @@ public: JSONReporter() : first_report_(true) {} virtual bool ReportContext(const Context& context); virtual void ReportRuns(const std::vector& reports); + virtual void ReportComplexity(const std::vector& complexity_reports); virtual void Finalize(); private: @@ -119,6 +136,7 @@ class CSVReporter : public BenchmarkReporter { public: virtual bool ReportContext(const Context& context); virtual void ReportRuns(const std::vector& reports); + virtual void ReportComplexity(const std::vector& complexity_reports); private: void PrintRunData(const Run& report); diff --git a/src/benchmark.cc b/src/benchmark.cc index afe1e441..874dc0c7 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -291,6 +291,7 @@ struct Benchmark::Instance { bool use_real_time; bool use_manual_time; BigO complexity; + bool last_benchmark_instance; double min_time; int threads; // Number of concurrent threads to use bool multithreaded; // Is benchmark multi-threaded? @@ -414,6 +415,7 @@ bool BenchmarkFamilies::FindBenchmarks( instance.min_time = family->min_time_; instance.use_real_time = family->use_real_time_; instance.use_manual_time = family->use_manual_time_; + instance.last_benchmark_instance = (args == family->args_.back()); instance.complexity = family->complexity_; instance.threads = num_threads; instance.multithreaded = !(family->thread_counts_.empty()); @@ -697,7 +699,8 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b, } void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, - BenchmarkReporter* br) EXCLUDES(GetBenchmarkLock()) { + BenchmarkReporter* br, + std::vector& complexity_reports) EXCLUDES(GetBenchmarkLock()) { size_t iters = 1; std::vector reports; @@ -795,7 +798,14 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, report.cpu_accumulated_time = cpu_accumulated_time; report.bytes_per_second = bytes_per_second; report.items_per_second = items_per_second; + report.arg1 = b.arg1; + report.arg2 = b.arg2; + report.complexity = b.complexity; reports.push_back(report); + + if(report.complexity != O_None) + complexity_reports.push_back(report); + break; } @@ -819,6 +829,12 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, } } br->ReportRuns(reports); + + if((b.complexity != O_None) && b.last_benchmark_instance) { + br->ReportComplexity(complexity_reports); + complexity_reports.clear(); + } + if (b.multithreaded) { for (std::thread& thread : pool) thread.join(); @@ -903,9 +919,12 @@ void RunMatchingBenchmarks(const std::string& spec, context.cpu_scaling_enabled = CpuScalingEnabled(); context.name_field_width = name_field_width; + // Keep track of runing times of all instances of current benchmark + std::vector complexity_reports; + if (reporter->ReportContext(context)) { for (const auto& benchmark : benchmarks) { - 
RunBenchmark(benchmark, reporter); + RunBenchmark(benchmark, reporter, complexity_reports); } } } diff --git a/src/console_reporter.cc b/src/console_reporter.cc index 56bd3ced..0d8ab1dc 100644 --- a/src/console_reporter.cc +++ b/src/console_reporter.cc @@ -72,13 +72,28 @@ void ConsoleReporter::ReportRuns(const std::vector& reports) { Run mean_data; Run stddev_data; - BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data); + BenchmarkReporter::ComputeStats(reports, mean_data, stddev_data); // Output using PrintRun. PrintRunData(mean_data); PrintRunData(stddev_data); } +void ConsoleReporter::ReportComplexity(const std::vector & complexity_reports) { + if (complexity_reports.size() < 2) { + // We don't report asymptotic complexity data if there was a single run. + return; + } + + Run bigO_data; + Run rms_data; + BenchmarkReporter::ComputeBigO(complexity_reports, bigO_data, rms_data); + + // Output using PrintRun. + PrintRunData(bigO_data); + PrintRunData(rms_data); +} + void ConsoleReporter::PrintRunData(const Run& result) { // Format bytes per second std::string rate; diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc index 3f67d1de..f13a5f8b 100644 --- a/src/csv_reporter.cc +++ b/src/csv_reporter.cc @@ -48,7 +48,7 @@ bool CSVReporter::ReportContext(const Context& context) { return true; } -void CSVReporter::ReportRuns(std::vector const& reports) { +void CSVReporter::ReportRuns(const std::vector & reports) { if (reports.empty()) { return; } @@ -57,7 +57,7 @@ void CSVReporter::ReportRuns(std::vector const& reports) { if (reports.size() >= 2) { Run mean_data; Run stddev_data; - BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data); + BenchmarkReporter::ComputeStats(reports, mean_data, stddev_data); reports_cp.push_back(mean_data); reports_cp.push_back(stddev_data); } @@ -66,7 +66,22 @@ void CSVReporter::ReportRuns(std::vector const& reports) { } } -void CSVReporter::PrintRunData(Run const& run) { +void CSVReporter::ReportComplexity(const std::vector & complexity_reports) { + if (complexity_reports.size() < 2) { + // We don't report asymptotic complexity data if there was a single run. + return; + } + + Run bigO_data; + Run rms_data; + BenchmarkReporter::ComputeBigO(complexity_reports, bigO_data, rms_data); + + // Output using PrintRun. + PrintRunData(bigO_data); + PrintRunData(rms_data); +} + +void CSVReporter::PrintRunData(const Run & run) { double multiplier; const char* timeLabel; std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(run.time_unit); diff --git a/src/json_reporter.cc b/src/json_reporter.cc index 7ed141fc..07fc3662 100644 --- a/src/json_reporter.cc +++ b/src/json_reporter.cc @@ -100,7 +100,7 @@ void JSONReporter::ReportRuns(std::vector const& reports) { if (reports.size() >= 2) { Run mean_data; Run stddev_data; - BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data); + BenchmarkReporter::ComputeStats(reports, mean_data, stddev_data); reports_cp.push_back(mean_data); reports_cp.push_back(stddev_data); } @@ -115,6 +115,21 @@ void JSONReporter::ReportRuns(std::vector const& reports) { } } +void JSONReporter::ReportComplexity(const std::vector & complexity_reports) { + if (complexity_reports.size() < 2) { + // We don't report asymptotic complexity data if there was a single run. + return; + } + + Run bigO_data; + Run rms_data; + BenchmarkReporter::ComputeBigO(complexity_reports, bigO_data, rms_data); + + // Output using PrintRun. 
+ PrintRunData(bigO_data); + PrintRunData(rms_data); +} + void JSONReporter::Finalize() { // Close the list of benchmarks and the top level object. std::cout << "\n ]\n}\n"; diff --git a/src/reporter.cc b/src/reporter.cc index 036546e7..fd97aba4 100644 --- a/src/reporter.cc +++ b/src/reporter.cc @@ -24,7 +24,7 @@ namespace benchmark { void BenchmarkReporter::ComputeStats( const std::vector& reports, - Run* mean_data, Run* stddev_data) { + Run& mean_data, Run& stddev_data) { CHECK(reports.size() >= 2) << "Cannot compute stats for less than 2 reports"; // Accumulators. Stat1_d real_accumulated_time_stat; @@ -48,33 +48,69 @@ void BenchmarkReporter::ComputeStats( } // Get the data from the accumulator to BenchmarkReporter::Run's. - mean_data->benchmark_name = reports[0].benchmark_name + "_mean"; - mean_data->iterations = run_iterations; - mean_data->real_accumulated_time = real_accumulated_time_stat.Mean() * + mean_data.benchmark_name = reports[0].benchmark_name + "_mean"; + mean_data.iterations = run_iterations; + mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() * run_iterations; - mean_data->cpu_accumulated_time = cpu_accumulated_time_stat.Mean() * + mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() * run_iterations; - mean_data->bytes_per_second = bytes_per_second_stat.Mean(); - mean_data->items_per_second = items_per_second_stat.Mean(); + mean_data.bytes_per_second = bytes_per_second_stat.Mean(); + mean_data.items_per_second = items_per_second_stat.Mean(); // Only add label to mean/stddev if it is same for all runs - mean_data->report_label = reports[0].report_label; + mean_data.report_label = reports[0].report_label; for (std::size_t i = 1; i < reports.size(); i++) { if (reports[i].report_label != reports[0].report_label) { - mean_data->report_label = ""; + mean_data.report_label = ""; break; } } - stddev_data->benchmark_name = reports[0].benchmark_name + "_stddev"; - stddev_data->report_label = mean_data->report_label; - stddev_data->iterations = 0; - stddev_data->real_accumulated_time = + stddev_data.benchmark_name = reports[0].benchmark_name + "_stddev"; + stddev_data.report_label = mean_data.report_label; + stddev_data.iterations = 0; + stddev_data.real_accumulated_time = real_accumulated_time_stat.StdDev(); - stddev_data->cpu_accumulated_time = + stddev_data.cpu_accumulated_time = + cpu_accumulated_time_stat.StdDev(); + stddev_data.bytes_per_second = bytes_per_second_stat.StdDev(); + stddev_data.items_per_second = items_per_second_stat.StdDev(); +} + +void BenchmarkReporter::ComputeBigO( + const std::vector& reports, + Run& bigO, Run& rms) { + CHECK(reports.size() >= 2) << "Cannot compute asymptotic complexity for less than 2 reports"; + // Accumulators. + Stat1_d real_accumulated_time_stat; + Stat1_d cpu_accumulated_time_stat; + + // Populate the accumulators. + for (Run const& run : reports) { + real_accumulated_time_stat += + Stat1_d(run.real_accumulated_time/run.iterations, run.iterations); + cpu_accumulated_time_stat += + Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations); + } + + std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/')); + + // Get the data from the accumulator to BenchmarkReporter::Run's. 
+ bigO.benchmark_name = benchmark_name + "_BigO"; + bigO.iterations = 0; + bigO.real_accumulated_time = real_accumulated_time_stat.Mean(); + bigO.cpu_accumulated_time = cpu_accumulated_time_stat.Mean(); + + // Only add label to mean/stddev if it is same for all runs + bigO.report_label = reports[0].report_label; + + rms.benchmark_name = benchmark_name + "_RMS"; + rms.report_label = bigO.report_label; + rms.iterations = 0; + rms.real_accumulated_time = + real_accumulated_time_stat.StdDev(); + rms.cpu_accumulated_time = cpu_accumulated_time_stat.StdDev(); - stddev_data->bytes_per_second = bytes_per_second_stat.StdDev(); - stddev_data->items_per_second = items_per_second_stat.StdDev(); } TimeUnitMultiplier BenchmarkReporter::GetTimeUnitAndMultiplier(TimeUnit unit) { diff --git a/test/complexity_test.cc b/test/complexity_test.cc index 777c0f39..afa82edb 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -101,7 +101,7 @@ void BM_Extreme_Cases(benchmark::State& state) { while (state.KeepRunning()) { } } -BENCHMARK(BM_Extreme_Cases); -BENCHMARK(BM_Extreme_Cases)->Arg(42); +BENCHMARK(BM_Extreme_Cases) -> Complexity(benchmark::O_N_log_N); +BENCHMARK(BM_Extreme_Cases)->Arg(42) -> Complexity(benchmark::O_Auto); BENCHMARK_MAIN() \ No newline at end of file From 872ff01a49390ccaf8ee5f13c18ae7be9cce8275 Mon Sep 17 00:00:00 2001 From: Ismael Date: Fri, 20 May 2016 16:49:39 +0200 Subject: [PATCH 03/26] addaptation of minimal_leastsq library --- include/benchmark/benchmark_api.h | 17 +++-- src/CMakeLists.txt | 2 +- src/minimal_leastsq.cc | 113 ++++++++++++++++++++++++++++++ src/minimal_leastsq.h | 46 ++++++++++++ src/reporter.cc | 1 + test/complexity_test.cc | 9 --- 6 files changed, 169 insertions(+), 19 deletions(-) create mode 100644 src/minimal_leastsq.cc create mode 100644 src/minimal_leastsq.h diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h index 8878b58d..146a8cc8 100644 --- a/include/benchmark/benchmark_api.h +++ b/include/benchmark/benchmark_api.h @@ -234,15 +234,14 @@ enum TimeUnit { // BigO is passed to a benchmark in order to specify the asymptotic computational // complexity for the benchmark. enum BigO { - O_None, - O_1, - O_N, - O_M_plus_N, - O_N_Squared, - O_N_Cubed, - O_log_N, - O_N_log_N, - O_Auto + O_None, + O_1, + O_N, + O_N_Squared, + O_N_Cubed, + O_log_N, + O_N_log_N, + O_Auto }; // State is passed to a running Benchmark and contains state for the diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 811d0755..a681b35d 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -5,7 +5,7 @@ include_directories(${PROJECT_SOURCE_DIR}/src) set(SOURCE_FILES "benchmark.cc" "colorprint.cc" "commandlineflags.cc" "console_reporter.cc" "csv_reporter.cc" "json_reporter.cc" "log.cc" "reporter.cc" "sleep.cc" "string_util.cc" - "sysinfo.cc" "walltime.cc") + "sysinfo.cc" "walltime.cc" "minimal_leastsq.cc") # Determine the correct regular expression engine to use if(HAVE_STD_REGEX) set(RE_FILES "re_std.cc") diff --git a/src/minimal_leastsq.cc b/src/minimal_leastsq.cc new file mode 100644 index 00000000..c4627d38 --- /dev/null +++ b/src/minimal_leastsq.cc @@ -0,0 +1,113 @@ +// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Source project : https://github.com/ismaelJimenez/cpp.leastsq +// Addapted to be used with google benchmark + +#include "minimal_leastsq.h" + +#include + +// Internal function to calculate the different scalability forms +double fittingCurve(double N, benchmark::BigO Complexity) { + if (Complexity == benchmark::O_N) + return N; + else if (Complexity == benchmark::O_N_Squared) + return pow(N, 2); + else if (Complexity == benchmark::O_N_Cubed) + return pow(N, 3); + else if (Complexity == benchmark::O_log_N) + return log2(N); + else if (Complexity == benchmark::O_N_log_N) + return N * log2(N); + + return 1; // Default value for O_1 +} + +// Internal function to find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. +// - N : Vector containing the size of the benchmark tests. +// - Time : Vector containing the times for the benchmark tests. +// - Complexity : Fitting curve. +// For a deeper explanation on the algorithm logic, look the README file at http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit + +LeastSq leastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity) { + assert(N.size() == Time.size() && N.size() >= 2); + assert(Complexity != benchmark::O_None && + Complexity != benchmark::O_Auto); + + double sigmaGN = 0; + double sigmaGNSquared = 0; + double sigmaTime = 0; + double sigmaTimeGN = 0; + + // Calculate least square fitting parameter + for (size_t i = 0; i < N.size(); ++i) { + double GNi = fittingCurve(N[i], Complexity); + sigmaGN += GNi; + sigmaGNSquared += GNi * GNi; + sigmaTime += Time[i]; + sigmaTimeGN += Time[i] * GNi; + } + + LeastSq result; + result.complexity = Complexity; + + // Calculate complexity. + // O_1 is treated as an special case + if (Complexity != benchmark::O_1) + result.coef = sigmaTimeGN / sigmaGNSquared; + else + result.coef = sigmaTime / N.size(); + + // Calculate RMS + double rms = 0; + for (size_t i = 0; i < N.size(); ++i) { + double fit = result.coef * fittingCurve(N[i], Complexity); + rms += pow((Time[i] - fit), 2); + } + + double mean = sigmaTime / N.size(); + + result.rms = sqrt(rms) / mean; // Normalized RMS by the mean of the observed values + + return result; +} + +// Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. +// - N : Vector containing the size of the benchmark tests. +// - Time : Vector containing the times for the benchmark tests. +// - Complexity : If different than O_Auto, the fitting curve will stick to this one. If it is O_Auto, it will be calculated +// the best fitting curve. + +LeastSq minimalLeastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity) { + assert(N.size() == Time.size() && N.size() >= 2); // Do not compute fitting curve is less than two benchmark runs are given + assert(Complexity != benchmark::O_None); // Check that complexity is a valid parameter. 
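+  // When O_Auto is requested, each candidate curve below is fitted in turn and
+  // the one with the lowest normalized RMS error is kept; otherwise the
+  // requested complexity is fitted directly.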
+ + if(Complexity == benchmark::O_Auto) { + std::vector fitCurves = { benchmark::O_log_N, benchmark::O_N, benchmark::O_N_log_N, benchmark::O_N_Squared, benchmark::O_N_Cubed }; + + LeastSq best_fit = leastSq(N, Time, benchmark::O_1); // Take O_1 as default best fitting curve + + // Compute all possible fitting curves and stick to the best one + for (const auto& fit : fitCurves) { + LeastSq current_fit = leastSq(N, Time, fit); + if (current_fit.rms < best_fit.rms) + best_fit = current_fit; + } + + return best_fit; + } + else + return leastSq(N, Time, Complexity); +} \ No newline at end of file diff --git a/src/minimal_leastsq.h b/src/minimal_leastsq.h new file mode 100644 index 00000000..ae725d1b --- /dev/null +++ b/src/minimal_leastsq.h @@ -0,0 +1,46 @@ +// Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Source project : https://github.com/ismaelJimenez/cpp.leastsq +// Addapted to be used with google benchmark + +#if !defined(MINIMAL_LEASTSQ_H_) +#define MINIMAL_LEASTSQ_H_ + +#include "benchmark/benchmark_api.h" + +#include + +// This data structure will contain the result returned vy minimalLeastSq +// - coef : Estimated coeficient for the high-order term as interpolated from data. +// - rms : Normalized Root Mean Squared Error. +// - complexity : Scalability form (e.g. O_N, O_N_log_N). In case a scalability form has been provided to minimalLeastSq +// this will return the same value. In case BigO::O_Auto has been selected, this parameter will return the +// best fitting curve detected. + +struct LeastSq { + LeastSq() : + coef(0), + rms(0), + complexity(benchmark::O_None) {} + + double coef; + double rms; + benchmark::BigO complexity; +}; + +// Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. +LeastSq minimalLeastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity = benchmark::O_Auto); + +#endif diff --git a/src/reporter.cc b/src/reporter.cc index fd97aba4..dc7b76b8 100644 --- a/src/reporter.cc +++ b/src/reporter.cc @@ -13,6 +13,7 @@ // limitations under the License. 
#include "benchmark/reporter.h" +#include "minimal_leastsq.h" #include #include diff --git a/test/complexity_test.cc b/test/complexity_test.cc index afa82edb..54a6cff8 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -38,15 +38,6 @@ static void BM_Complexity_O_N(benchmark::State& state) { } BENCHMARK(BM_Complexity_O_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_N); BENCHMARK(BM_Complexity_O_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_Auto); - -static void BM_Complexity_O_M_plus_N(benchmark::State& state) { - std::string s1(state.range_x(), '-'); - std::string s2(state.range_x(), '-'); - while (state.KeepRunning()) - benchmark::DoNotOptimize(s1.compare(s2)); -} -BENCHMARK(BM_Complexity_O_M_plus_N) - ->RangeMultiplier(2)->Range(1<<10, 1<<18) -> Complexity(benchmark::O_M_plus_N); static void BM_Complexity_O_N_Squared(benchmark::State& state) { std::string s1(state.range_x(), '-'); From 2e5c397b4829503a5cb023ac67d2a1f13ebda3aa Mon Sep 17 00:00:00 2001 From: Ismael Date: Sat, 21 May 2016 08:55:43 +0200 Subject: [PATCH 04/26] implemented complexity reporting --- include/benchmark/reporter.h | 11 +++++-- src/console_reporter.cc | 19 ++++++++++-- src/json_reporter.cc | 10 ++++++ src/minimal_leastsq.cc | 6 ++-- src/minimal_leastsq.h | 2 +- src/reporter.cc | 59 ++++++++++++++++++++++++++++-------- test/complexity_test.cc | 17 ++++++----- 7 files changed, 95 insertions(+), 29 deletions(-) diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index b3988002..24c26919 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -49,9 +49,11 @@ class BenchmarkReporter { bytes_per_second(0), items_per_second(0), max_heapbytes_used(0), - complexity(O_1), + complexity(O_None), arg1(0), - arg2(0) {} + arg2(0), + report_bigO(false), + report_rms(false) {} std::string benchmark_name; std::string report_label; // Empty if not set by benchmark. @@ -71,6 +73,10 @@ class BenchmarkReporter { BigO complexity; int arg1; int arg2; + + // Inform print function if the current run is a complexity report + bool report_bigO; + bool report_rms; }; // Called once for every suite of benchmarks run. @@ -102,6 +108,7 @@ protected: static void ComputeStats(const std::vector & reports, Run& mean, Run& stddev); static void ComputeBigO(const std::vector & reports, Run& bigO, Run& rms); static TimeUnitMultiplier GetTimeUnitAndMultiplier(TimeUnit unit); + static std::string GetBigO(BigO complexity); }; // Simple reporter that outputs benchmark data to the console. This is the diff --git a/src/console_reporter.cc b/src/console_reporter.cc index 0d8ab1dc..b0b41309 100644 --- a/src/console_reporter.cc +++ b/src/console_reporter.cc @@ -88,7 +88,7 @@ void ConsoleReporter::ReportComplexity(const std::vector & complexity_repor Run bigO_data; Run rms_data; BenchmarkReporter::ComputeBigO(complexity_reports, bigO_data, rms_data); - + // Output using PrintRun. PrintRunData(bigO_data); PrintRunData(rms_data); @@ -115,7 +115,22 @@ void ConsoleReporter::PrintRunData(const Run& result) { ColorPrintf(COLOR_GREEN, "%-*s ", name_field_width_, result.benchmark_name.c_str()); - if (result.iterations == 0) { + if(result.report_bigO) { + std::string big_o = result.report_bigO ? 
GetBigO(result.complexity) : ""; + ColorPrintf(COLOR_YELLOW, "%10.4f %s %10.4f %s ", + result.real_accumulated_time * multiplier, + big_o.c_str(), + result.cpu_accumulated_time * multiplier, + big_o.c_str()); + } + else if(result.report_rms) { + ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ", + result.real_accumulated_time * multiplier * 100, + "%", + result.cpu_accumulated_time * multiplier * 100, + "%"); + } + else if (result.iterations == 0) { ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ", result.real_accumulated_time * multiplier, timeLabel, diff --git a/src/json_reporter.cc b/src/json_reporter.cc index 07fc3662..4874fe74 100644 --- a/src/json_reporter.cc +++ b/src/json_reporter.cc @@ -121,13 +121,23 @@ void JSONReporter::ReportComplexity(const std::vector & complexity_reports) return; } + std::string indent(4, ' '); + std::ostream& out = std::cout; + if (!first_report_) { + out << ",\n"; + } + Run bigO_data; Run rms_data; BenchmarkReporter::ComputeBigO(complexity_reports, bigO_data, rms_data); // Output using PrintRun. + out << indent << "{\n"; PrintRunData(bigO_data); + out << indent << "},\n"; + out << indent << "{\n"; PrintRunData(rms_data); + out << indent << '}'; } void JSONReporter::Finalize() { diff --git a/src/minimal_leastsq.cc b/src/minimal_leastsq.cc index c4627d38..32d27455 100644 --- a/src/minimal_leastsq.cc +++ b/src/minimal_leastsq.cc @@ -41,7 +41,7 @@ double fittingCurve(double N, benchmark::BigO Complexity) { // - Complexity : Fitting curve. // For a deeper explanation on the algorithm logic, look the README file at http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit -LeastSq leastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity) { +LeastSq leastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity) { assert(N.size() == Time.size() && N.size() >= 2); assert(Complexity != benchmark::O_None && Complexity != benchmark::O_Auto); @@ -79,7 +79,7 @@ LeastSq leastSq(const std::vector& N, const std::vector& Time, const b double mean = sigmaTime / N.size(); - result.rms = sqrt(rms) / mean; // Normalized RMS by the mean of the observed values + result.rms = sqrt(rms / N.size()) / mean; // Normalized RMS by the mean of the observed values return result; } @@ -90,7 +90,7 @@ LeastSq leastSq(const std::vector& N, const std::vector& Time, const b // - Complexity : If different than O_Auto, the fitting curve will stick to this one. If it is O_Auto, it will be calculated // the best fitting curve. -LeastSq minimalLeastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity) { +LeastSq minimalLeastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity) { assert(N.size() == Time.size() && N.size() >= 2); // Do not compute fitting curve is less than two benchmark runs are given assert(Complexity != benchmark::O_None); // Check that complexity is a valid parameter. diff --git a/src/minimal_leastsq.h b/src/minimal_leastsq.h index ae725d1b..d0d58223 100644 --- a/src/minimal_leastsq.h +++ b/src/minimal_leastsq.h @@ -41,6 +41,6 @@ struct LeastSq { }; // Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. 
-LeastSq minimalLeastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity = benchmark::O_Auto); +LeastSq minimalLeastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity = benchmark::O_Auto); #endif diff --git a/src/reporter.cc b/src/reporter.cc index dc7b76b8..da40db61 100644 --- a/src/reporter.cc +++ b/src/reporter.cc @@ -17,6 +17,7 @@ #include #include +#include #include "check.h" #include "stat.h" @@ -83,35 +84,67 @@ void BenchmarkReporter::ComputeBigO( Run& bigO, Run& rms) { CHECK(reports.size() >= 2) << "Cannot compute asymptotic complexity for less than 2 reports"; // Accumulators. - Stat1_d real_accumulated_time_stat; - Stat1_d cpu_accumulated_time_stat; + std::vector N; + std::vector RealTime; + std::vector CpuTime; // Populate the accumulators. for (Run const& run : reports) { - real_accumulated_time_stat += - Stat1_d(run.real_accumulated_time/run.iterations, run.iterations); - cpu_accumulated_time_stat += - Stat1_d(run.cpu_accumulated_time/run.iterations, run.iterations); + N.push_back(run.arg1); + RealTime.push_back(run.real_accumulated_time/run.iterations); + CpuTime.push_back(run.cpu_accumulated_time/run.iterations); } + + LeastSq resultCpu = minimalLeastSq(N, CpuTime, reports[0].complexity); + + // resultCpu.complexity is passed as parameter to resultReal because in case + // reports[0].complexity is O_Auto, the noise on the measured data could make + // the best fit function of Cpu and Real differ. In order to solve this, we take + // the best fitting function for the Cpu, and apply it to Real data. + LeastSq resultReal = minimalLeastSq(N, RealTime, resultCpu.complexity); std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/')); // Get the data from the accumulator to BenchmarkReporter::Run's. 
bigO.benchmark_name = benchmark_name + "_BigO"; bigO.iterations = 0; - bigO.real_accumulated_time = real_accumulated_time_stat.Mean(); - bigO.cpu_accumulated_time = cpu_accumulated_time_stat.Mean(); + bigO.real_accumulated_time = resultReal.coef; + bigO.cpu_accumulated_time = resultCpu.coef; + bigO.report_bigO = true; + bigO.complexity = resultCpu.complexity; + + double multiplier; + const char* timeLabel; + std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(reports[0].time_unit); // Only add label to mean/stddev if it is same for all runs bigO.report_label = reports[0].report_label; - rms.benchmark_name = benchmark_name + "_RMS"; rms.report_label = bigO.report_label; rms.iterations = 0; - rms.real_accumulated_time = - real_accumulated_time_stat.StdDev(); - rms.cpu_accumulated_time = - cpu_accumulated_time_stat.StdDev(); + rms.real_accumulated_time = resultReal.rms / multiplier; + rms.cpu_accumulated_time = resultCpu.rms / multiplier; + rms.report_rms = true; + rms.complexity = resultCpu.complexity; +} + +std::string BenchmarkReporter::GetBigO(BigO complexity) { + switch (complexity) { + case O_N: + return "* N"; + case O_N_Squared: + return "* N**2"; + case O_N_Cubed: + return "* N**3"; + case O_log_N: + return "* lgN"; + case O_N_log_N: + return "* NlgN"; + case O_1: + return "* 1"; + default: + return ""; + } } TimeUnitMultiplier BenchmarkReporter::GetTimeUnitAndMultiplier(TimeUnit unit) { diff --git a/test/complexity_test.cc b/test/complexity_test.cc index 54a6cff8..321fdadb 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -27,7 +27,7 @@ void BM_Complexity_O1(benchmark::State& state) { while (state.KeepRunning()) { } } -BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<17) -> Complexity(benchmark::O_1); +BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::O_1); static void BM_Complexity_O_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range_x()); @@ -36,9 +36,9 @@ static void BM_Complexity_O_N(benchmark::State& state) { benchmark::DoNotOptimize(std::find(v.begin(), v.end(), itemNotInVector)); } } -BENCHMARK(BM_Complexity_O_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_N); -BENCHMARK(BM_Complexity_O_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_Auto); - +BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_N); +BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_Auto); + static void BM_Complexity_O_N_Squared(benchmark::State& state) { std::string s1(state.range_x(), '-'); std::string s2(state.range_x(), '-'); @@ -76,7 +76,8 @@ static void BM_Complexity_O_log_N(benchmark::State& state) { benchmark::DoNotOptimize(m.find(itemNotInVector)); } } -BENCHMARK(BM_Complexity_O_log_N) -> Range(1, 1<<10) -> Complexity(benchmark::O_log_N); +BENCHMARK(BM_Complexity_O_log_N) + ->RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_log_N); static void BM_Complexity_O_N_log_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range_x()); @@ -84,10 +85,10 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) { std::sort(v.begin(), v.end()); } } -BENCHMARK(BM_Complexity_O_N_log_N) -> Range(1, 1<<16) -> Complexity(benchmark::O_N_log_N); -BENCHMARK(BM_Complexity_O_N_log_N) -> Range(1, 1<<16) -> Complexity(benchmark::O_Auto); +BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_N_log_N); +BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2)->Range(1<<10, 1<<16) 
-> Complexity(benchmark::O_Auto); -// Test benchmark with no range. Complexity is always calculated as O(1). +// Test benchmark with no range and check no complexity is calculated. void BM_Extreme_Cases(benchmark::State& state) { while (state.KeepRunning()) { } From 290bd60289ef571875415cf82be805f9a446c6a9 Mon Sep 17 00:00:00 2001 From: Ismael Date: Sat, 21 May 2016 11:51:42 +0200 Subject: [PATCH 05/26] Refactor for pull request --- AUTHORS | 1 + CONTRIBUTORS | 1 + include/benchmark/benchmark_api.h | 7 ++++--- include/benchmark/reporter.h | 2 +- src/minimal_leastsq.cc | 2 +- src/minimal_leastsq.h | 4 ++-- src/reporter.cc | 4 ++-- test/complexity_test.cc | 12 ++++++------ 8 files changed, 18 insertions(+), 15 deletions(-) diff --git a/AUTHORS b/AUTHORS index 9da43c73..7ddffd8c 100644 --- a/AUTHORS +++ b/AUTHORS @@ -16,6 +16,7 @@ Eugene Zhuk Evgeny Safronov Felix Homann Google Inc. +Ismael Jimenez Martinez JianXiong Zhou Jussi Knuuttila Kaito Udagawa diff --git a/CONTRIBUTORS b/CONTRIBUTORS index 67ecb280..a575ef1b 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -31,6 +31,7 @@ Dominic Hamon Eugene Zhuk Evgeny Safronov Felix Homann +Ismael Jimenez Martinez JianXiong Zhou Jussi Knuuttila Kaito Udagawa diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h index 146a8cc8..d7cf83f6 100644 --- a/include/benchmark/benchmark_api.h +++ b/include/benchmark/benchmark_api.h @@ -232,7 +232,8 @@ enum TimeUnit { }; // BigO is passed to a benchmark in order to specify the asymptotic computational -// complexity for the benchmark. +// complexity for the benchmark. In case O_Auto is selected, complexity will be +// calculated automatically to the best fit. enum BigO { O_None, O_1, @@ -479,8 +480,8 @@ public: // or MB/second values. Benchmark* UseManualTime(); - // Set the asymptotic computational complexity for the benchmark. This option - // called the asymptotic computational complexity will be shown on the output. + // Set the asymptotic computational complexity for the benchmark. If called + // the asymptotic computational complexity will be shown on the output. Benchmark* Complexity(BigO complexity); // Support for running multiple copies of the same benchmark concurrently diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index 24c26919..d6b713a4 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -74,7 +74,7 @@ class BenchmarkReporter { int arg1; int arg2; - // Inform print function if the current run is a complexity report + // Inform print function whether the current run is a complexity report bool report_bigO; bool report_rms; }; diff --git a/src/minimal_leastsq.cc b/src/minimal_leastsq.cc index 32d27455..07a47b99 100644 --- a/src/minimal_leastsq.cc +++ b/src/minimal_leastsq.cc @@ -13,7 +13,7 @@ // limitations under the License. // Source project : https://github.com/ismaelJimenez/cpp.leastsq -// Addapted to be used with google benchmark +// Adapted to be used with google benchmark #include "minimal_leastsq.h" diff --git a/src/minimal_leastsq.h b/src/minimal_leastsq.h index d0d58223..0b137fb7 100644 --- a/src/minimal_leastsq.h +++ b/src/minimal_leastsq.h @@ -13,7 +13,7 @@ // limitations under the License. 
// Source project : https://github.com/ismaelJimenez/cpp.leastsq -// Addapted to be used with google benchmark +// Adapted to be used with google benchmark #if !defined(MINIMAL_LEASTSQ_H_) #define MINIMAL_LEASTSQ_H_ @@ -22,7 +22,7 @@ #include -// This data structure will contain the result returned vy minimalLeastSq +// This data structure will contain the result returned by minimalLeastSq // - coef : Estimated coeficient for the high-order term as interpolated from data. // - rms : Normalized Root Mean Squared Error. // - complexity : Scalability form (e.g. O_N, O_N_log_N). In case a scalability form has been provided to minimalLeastSq diff --git a/src/reporter.cc b/src/reporter.cc index da40db61..61a6d5c3 100644 --- a/src/reporter.cc +++ b/src/reporter.cc @@ -85,11 +85,11 @@ void BenchmarkReporter::ComputeBigO( CHECK(reports.size() >= 2) << "Cannot compute asymptotic complexity for less than 2 reports"; // Accumulators. std::vector N; - std::vector RealTime; + std::vector RealTime; std::vector CpuTime; // Populate the accumulators. - for (Run const& run : reports) { + for (const Run& run : reports) { N.push_back(run.arg1); RealTime.push_back(run.real_accumulated_time/run.iterations); CpuTime.push_back(run.cpu_accumulated_time/run.iterations); diff --git a/test/complexity_test.cc b/test/complexity_test.cc index 321fdadb..e7e16d31 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -36,8 +36,8 @@ static void BM_Complexity_O_N(benchmark::State& state) { benchmark::DoNotOptimize(std::find(v.begin(), v.end(), itemNotInVector)); } } -BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_N); -BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_Auto); +BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::O_N); +BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::O_Auto); static void BM_Complexity_O_N_Squared(benchmark::State& state) { std::string s1(state.range_x(), '-'); @@ -77,7 +77,7 @@ static void BM_Complexity_O_log_N(benchmark::State& state) { } } BENCHMARK(BM_Complexity_O_log_N) - ->RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_log_N); + -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::O_log_N); static void BM_Complexity_O_N_log_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range_x()); @@ -85,8 +85,8 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) { std::sort(v.begin(), v.end()); } } -BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_N_log_N); -BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2)->Range(1<<10, 1<<16) -> Complexity(benchmark::O_Auto); +BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::O_N_log_N); +BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::O_Auto); // Test benchmark with no range and check no complexity is calculated. 
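+// A family with a single argument produces fewer than two reports, so
+// ReportComplexity() skips the fit entirely.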
void BM_Extreme_Cases(benchmark::State& state) { @@ -94,6 +94,6 @@ void BM_Extreme_Cases(benchmark::State& state) { } } BENCHMARK(BM_Extreme_Cases) -> Complexity(benchmark::O_N_log_N); -BENCHMARK(BM_Extreme_Cases)->Arg(42) -> Complexity(benchmark::O_Auto); +BENCHMARK(BM_Extreme_Cases) -> Arg(42) -> Complexity(benchmark::O_Auto); BENCHMARK_MAIN() \ No newline at end of file From 5812d545efcd3bcbfd4e2e8d203bd42b8e5a7148 Mon Sep 17 00:00:00 2001 From: Ismael Date: Sat, 21 May 2016 12:16:40 +0200 Subject: [PATCH 06/26] Added range multiplier to Readme --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 051b3011..325675af 100644 --- a/README.md +++ b/README.md @@ -61,6 +61,13 @@ the specified range and will generate a benchmark for each such argument. BENCHMARK(BM_memcpy)->Range(8, 8<<10); ``` +By default the arguments in a range are generated in multiples of eight and the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the range multiplier is changed to multiples of two. + +```c++ +BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10); +``` +Now the arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ]. + You might have a benchmark that depends on two inputs. For example, the following code defines a family of benchmarks for measuring the speed of set insertion. From dc667d048678f3cb6b4355d4da8b5d121db8bbf2 Mon Sep 17 00:00:00 2001 From: Ismael Date: Sat, 21 May 2016 12:40:27 +0200 Subject: [PATCH 07/26] Added asymptotic complexity to Readme --- README.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/README.md b/README.md index 325675af..f052cb8e 100644 --- a/README.md +++ b/README.md @@ -116,6 +116,27 @@ static void CustomArguments(benchmark::internal::Benchmark* b) { BENCHMARK(BM_SetInsert)->Apply(CustomArguments); ``` +### Calculate asymptotic complexity (Big O) +Asymptotic complexity might be calculated for a family of benchmarks. The following code will calculate the coefficient for the high-order term in the running time and the normalized root-mean square error of string comparison. + +```c++ +static void BM_StringCompare(benchmark::State& state) { + std::string s1(state.range_x(), '-'); + std::string s2(state.range_x(), '-'); + while (state.KeepRunning()) + benchmark::DoNotOptimize(s1.compare(s2)); +} +BENCHMARK(BM_StringCompare) + ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::O_N); +``` + +As shown on the following invocation, asymptotic complexity might also be calculated automatically. + +```c++ +BENCHMARK(BM_StringCompare) + ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::O_Auto); +``` + ### Templated benchmarks Templated benchmarks work the same way: This example produces and consumes messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the From 07efafbf5c0e95fabbae284eedcc1dfe3d57a396 Mon Sep 17 00:00:00 2001 From: Ismael Date: Sat, 21 May 2016 16:34:12 +0200 Subject: [PATCH 08/26] Update Readme --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index f052cb8e..cd78c96a 100644 --- a/README.md +++ b/README.md @@ -61,12 +61,12 @@ the specified range and will generate a benchmark for each such argument. BENCHMARK(BM_memcpy)->Range(8, 8<<10); ``` -By default the arguments in a range are generated in multiples of eight and the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the range multiplier is changed to multiples of two. 
+By default the arguments in the range are generated in multiples of eight and the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the range multiplier is changed to multiples of two. ```c++ BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10); ``` -Now the arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ]. +Now arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ]. You might have a benchmark that depends on two inputs. For example, the following code defines a family of benchmarks for measuring the speed of set @@ -130,7 +130,7 @@ BENCHMARK(BM_StringCompare) ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::O_N); ``` -As shown on the following invocation, asymptotic complexity might also be calculated automatically. +As shown in the following invocation, asymptotic complexity might also be calculated automatically. ```c++ BENCHMARK(BM_StringCompare) From 8afbf0ed3801ad12c4066d10e9d25764181321f4 Mon Sep 17 00:00:00 2001 From: Ismael Date: Sat, 21 May 2016 16:45:45 +0200 Subject: [PATCH 09/26] reworked comment for complexity report --- include/benchmark/reporter.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index d6b713a4..3bf45d87 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -93,8 +93,8 @@ class BenchmarkReporter { // benchmark, thus have the same name. virtual void ReportRuns(const std::vector& report) = 0; - // Called once at the last instance of a benchmark range, gives information about - // asymptotic complexity and RMS. + // Called once at the last benchmark in a family of benchmarks, gives information + // about asymptotic complexity and RMS. // Note that all the benchmark runs in a range should refer to the same benchmark, // thus have the same name. 
virtual void ReportComplexity(const std::vector& complexity_reports) = 0; From 5f9823bd92b2a24da06fac7b43f6658ec20cc901 Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 18:51:29 +0200 Subject: [PATCH 10/26] fixed non-const reference arguments --- include/benchmark/reporter.h | 8 ++--- src/console_reporter.cc | 12 +++---- src/csv_reporter.cc | 4 +-- src/json_reporter.cc | 4 +-- src/reporter.cc | 62 ++++++++++++++++++------------------ 5 files changed, 45 insertions(+), 45 deletions(-) diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index 3bf45d87..564219a1 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -52,7 +52,7 @@ class BenchmarkReporter { complexity(O_None), arg1(0), arg2(0), - report_bigO(false), + report_big_o(false), report_rms(false) {} std::string benchmark_name; @@ -75,7 +75,7 @@ class BenchmarkReporter { int arg2; // Inform print function whether the current run is a complexity report - bool report_bigO; + bool report_big_o; bool report_rms; }; @@ -105,8 +105,8 @@ class BenchmarkReporter { virtual ~BenchmarkReporter(); protected: - static void ComputeStats(const std::vector & reports, Run& mean, Run& stddev); - static void ComputeBigO(const std::vector & reports, Run& bigO, Run& rms); + static void ComputeStats(const std::vector & reports, Run* mean, Run* stddev); + static void ComputeBigO(const std::vector & reports, Run* bigO, Run* rms); static TimeUnitMultiplier GetTimeUnitAndMultiplier(TimeUnit unit); static std::string GetBigO(BigO complexity); }; diff --git a/src/console_reporter.cc b/src/console_reporter.cc index b0b41309..09b91c2b 100644 --- a/src/console_reporter.cc +++ b/src/console_reporter.cc @@ -72,7 +72,7 @@ void ConsoleReporter::ReportRuns(const std::vector& reports) { Run mean_data; Run stddev_data; - BenchmarkReporter::ComputeStats(reports, mean_data, stddev_data); + BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data); // Output using PrintRun. PrintRunData(mean_data); @@ -85,12 +85,12 @@ void ConsoleReporter::ReportComplexity(const std::vector & complexity_repor return; } - Run bigO_data; + Run big_o_data; Run rms_data; - BenchmarkReporter::ComputeBigO(complexity_reports, bigO_data, rms_data); + BenchmarkReporter::ComputeBigO(complexity_reports, &big_o_data, &rms_data); // Output using PrintRun. - PrintRunData(bigO_data); + PrintRunData(big_o_data); PrintRunData(rms_data); } @@ -115,8 +115,8 @@ void ConsoleReporter::PrintRunData(const Run& result) { ColorPrintf(COLOR_GREEN, "%-*s ", name_field_width_, result.benchmark_name.c_str()); - if(result.report_bigO) { - std::string big_o = result.report_bigO ? GetBigO(result.complexity) : ""; + if(result.report_big_o) { + std::string big_o = result.report_big_o ? 
GetBigO(result.complexity) : ""; ColorPrintf(COLOR_YELLOW, "%10.4f %s %10.4f %s ", result.real_accumulated_time * multiplier, big_o.c_str(), diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc index f13a5f8b..031736e5 100644 --- a/src/csv_reporter.cc +++ b/src/csv_reporter.cc @@ -57,7 +57,7 @@ void CSVReporter::ReportRuns(const std::vector & reports) { if (reports.size() >= 2) { Run mean_data; Run stddev_data; - BenchmarkReporter::ComputeStats(reports, mean_data, stddev_data); + BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data); reports_cp.push_back(mean_data); reports_cp.push_back(stddev_data); } @@ -74,7 +74,7 @@ void CSVReporter::ReportComplexity(const std::vector & complexity_reports) Run bigO_data; Run rms_data; - BenchmarkReporter::ComputeBigO(complexity_reports, bigO_data, rms_data); + BenchmarkReporter::ComputeBigO(complexity_reports, &bigO_data, &rms_data); // Output using PrintRun. PrintRunData(bigO_data); diff --git a/src/json_reporter.cc b/src/json_reporter.cc index 4874fe74..c15fb105 100644 --- a/src/json_reporter.cc +++ b/src/json_reporter.cc @@ -100,7 +100,7 @@ void JSONReporter::ReportRuns(std::vector const& reports) { if (reports.size() >= 2) { Run mean_data; Run stddev_data; - BenchmarkReporter::ComputeStats(reports, mean_data, stddev_data); + BenchmarkReporter::ComputeStats(reports, &mean_data, &stddev_data); reports_cp.push_back(mean_data); reports_cp.push_back(stddev_data); } @@ -129,7 +129,7 @@ void JSONReporter::ReportComplexity(const std::vector & complexity_reports) Run bigO_data; Run rms_data; - BenchmarkReporter::ComputeBigO(complexity_reports, bigO_data, rms_data); + BenchmarkReporter::ComputeBigO(complexity_reports, &bigO_data, &rms_data); // Output using PrintRun. out << indent << "{\n"; diff --git a/src/reporter.cc b/src/reporter.cc index 61a6d5c3..27dca856 100644 --- a/src/reporter.cc +++ b/src/reporter.cc @@ -26,7 +26,7 @@ namespace benchmark { void BenchmarkReporter::ComputeStats( const std::vector& reports, - Run& mean_data, Run& stddev_data) { + Run* mean_data, Run* stddev_data) { CHECK(reports.size() >= 2) << "Cannot compute stats for less than 2 reports"; // Accumulators. Stat1_d real_accumulated_time_stat; @@ -50,38 +50,38 @@ void BenchmarkReporter::ComputeStats( } // Get the data from the accumulator to BenchmarkReporter::Run's. 
- mean_data.benchmark_name = reports[0].benchmark_name + "_mean"; - mean_data.iterations = run_iterations; - mean_data.real_accumulated_time = real_accumulated_time_stat.Mean() * + mean_data->benchmark_name = reports[0].benchmark_name + "_mean"; + mean_data->iterations = run_iterations; + mean_data->real_accumulated_time = real_accumulated_time_stat.Mean() * run_iterations; - mean_data.cpu_accumulated_time = cpu_accumulated_time_stat.Mean() * + mean_data->cpu_accumulated_time = cpu_accumulated_time_stat.Mean() * run_iterations; - mean_data.bytes_per_second = bytes_per_second_stat.Mean(); - mean_data.items_per_second = items_per_second_stat.Mean(); + mean_data->bytes_per_second = bytes_per_second_stat.Mean(); + mean_data->items_per_second = items_per_second_stat.Mean(); // Only add label to mean/stddev if it is same for all runs - mean_data.report_label = reports[0].report_label; + mean_data->report_label = reports[0].report_label; for (std::size_t i = 1; i < reports.size(); i++) { if (reports[i].report_label != reports[0].report_label) { - mean_data.report_label = ""; + mean_data->report_label = ""; break; } } - stddev_data.benchmark_name = reports[0].benchmark_name + "_stddev"; - stddev_data.report_label = mean_data.report_label; - stddev_data.iterations = 0; - stddev_data.real_accumulated_time = + stddev_data->benchmark_name = reports[0].benchmark_name + "_stddev"; + stddev_data->report_label = mean_data->report_label; + stddev_data->iterations = 0; + stddev_data->real_accumulated_time = real_accumulated_time_stat.StdDev(); - stddev_data.cpu_accumulated_time = + stddev_data->cpu_accumulated_time = cpu_accumulated_time_stat.StdDev(); - stddev_data.bytes_per_second = bytes_per_second_stat.StdDev(); - stddev_data.items_per_second = items_per_second_stat.StdDev(); + stddev_data->bytes_per_second = bytes_per_second_stat.StdDev(); + stddev_data->items_per_second = items_per_second_stat.StdDev(); } void BenchmarkReporter::ComputeBigO( const std::vector& reports, - Run& bigO, Run& rms) { + Run* big_o, Run* rms) { CHECK(reports.size() >= 2) << "Cannot compute asymptotic complexity for less than 2 reports"; // Accumulators. std::vector N; @@ -106,26 +106,26 @@ void BenchmarkReporter::ComputeBigO( std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/')); // Get the data from the accumulator to BenchmarkReporter::Run's. 
- bigO.benchmark_name = benchmark_name + "_BigO"; - bigO.iterations = 0; - bigO.real_accumulated_time = resultReal.coef; - bigO.cpu_accumulated_time = resultCpu.coef; - bigO.report_bigO = true; - bigO.complexity = resultCpu.complexity; + big_o->benchmark_name = benchmark_name + "_BigO"; + big_o->iterations = 0; + big_o->real_accumulated_time = resultReal.coef; + big_o->cpu_accumulated_time = resultCpu.coef; + big_o->report_big_o = true; + big_o->complexity = resultCpu.complexity; double multiplier; const char* timeLabel; std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(reports[0].time_unit); // Only add label to mean/stddev if it is same for all runs - bigO.report_label = reports[0].report_label; - rms.benchmark_name = benchmark_name + "_RMS"; - rms.report_label = bigO.report_label; - rms.iterations = 0; - rms.real_accumulated_time = resultReal.rms / multiplier; - rms.cpu_accumulated_time = resultCpu.rms / multiplier; - rms.report_rms = true; - rms.complexity = resultCpu.complexity; + big_o->report_label = reports[0].report_label; + rms->benchmark_name = benchmark_name + "_RMS"; + rms->report_label = big_o->report_label; + rms->iterations = 0; + rms->real_accumulated_time = resultReal.rms / multiplier; + rms->cpu_accumulated_time = resultCpu.rms / multiplier; + rms->report_rms = true; + rms->complexity = resultCpu.complexity; } std::string BenchmarkReporter::GetBigO(BigO complexity) { From 5e52d2d6c048205626c4103d397bedf0527f67d8 Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 19:19:29 +0200 Subject: [PATCH 11/26] refactor fitting curve --- src/minimal_leastsq.cc | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/src/minimal_leastsq.cc b/src/minimal_leastsq.cc index 07a47b99..f3c841d8 100644 --- a/src/minimal_leastsq.cc +++ b/src/minimal_leastsq.cc @@ -20,19 +20,22 @@ #include // Internal function to calculate the different scalability forms -double fittingCurve(double N, benchmark::BigO Complexity) { - if (Complexity == benchmark::O_N) - return N; - else if (Complexity == benchmark::O_N_Squared) - return pow(N, 2); - else if (Complexity == benchmark::O_N_Cubed) - return pow(N, 3); - else if (Complexity == benchmark::O_log_N) - return log2(N); - else if (Complexity == benchmark::O_N_log_N) - return N * log2(N); - - return 1; // Default value for O_1 +double fittingCurve(double n, benchmark::BigO complexity) { + switch (complexity) { + case benchmark::O_N: + return n; + case benchmark::O_N_Squared: + return pow(n, 2); + case benchmark::O_N_Cubed: + return pow(n, 3); + case benchmark::O_log_N: + return log2(n); + case benchmark::O_N_log_N: + return n * log2(n); + case benchmark::O_1: + default: + return 1; + } } // Internal function to find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. 
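The least-squares fit referenced in the comment above has a closed form: for a candidate curve g(n), the coefficient c that minimizes the sum over i of (time_i - c * g(n_i))^2 is c = sum(time_i * g(n_i)) / sum(g(n_i)^2), and the reported RMS is the root mean squared residual normalized by the mean observed time. Below is a minimal standalone C++ sketch of that arithmetic for g(n) = n * log2(n); the helper name and the sample timings are hypothetical and are not taken from the patch.

#include <cmath>
#include <cstdio>
#include <vector>

// Illustrative sketch only: closed-form least-squares coefficient and
// normalized RMS for a single candidate curve, here g(n) = n * log2(n).
static double NLogN(double n) { return n * std::log2(n); }

int main() {
  // Hypothetical per-iteration times in nanoseconds for growing N.
  const std::vector<double> n = {1024, 2048, 4096, 8192};
  const std::vector<double> t = {10500, 22800, 49200, 105000};

  double sigma_time_gn = 0, sigma_gn_squared = 0, sigma_time = 0;
  for (std::size_t i = 0; i < n.size(); ++i) {
    const double gn = NLogN(n[i]);
    sigma_time_gn += t[i] * gn;
    sigma_gn_squared += gn * gn;
    sigma_time += t[i];
  }
  // Coefficient that minimizes sum_i (t_i - coef * g(n_i))^2.
  const double coef = sigma_time_gn / sigma_gn_squared;

  // Root mean squared residual, normalized by the mean of the observed times.
  double rms = 0;
  for (std::size_t i = 0; i < n.size(); ++i) {
    const double residual = t[i] - coef * NLogN(n[i]);
    rms += residual * residual;
  }
  const double mean = sigma_time / n.size();
  std::printf("coef = %.3f  normalized rms = %.4f\n",
              coef, std::sqrt(rms / n.size()) / mean);
  return 0;
}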
From ac05c045335d3e32ec75e3aae930ecc1c6533212 Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 20:12:54 +0200 Subject: [PATCH 12/26] refactor MinimalLEastSq --- README.md | 4 +- include/benchmark/benchmark_api.h | 18 +++--- include/benchmark/reporter.h | 2 +- src/benchmark.cc | 6 +- src/csv_reporter.cc | 6 +- src/json_reporter.cc | 6 +- src/minimal_leastsq.cc | 93 +++++++++++++++---------------- src/minimal_leastsq.h | 10 ++-- src/reporter.cc | 18 +++--- test/complexity_test.cc | 20 +++---- 10 files changed, 91 insertions(+), 92 deletions(-) diff --git a/README.md b/README.md index cd78c96a..c989e571 100644 --- a/README.md +++ b/README.md @@ -127,14 +127,14 @@ static void BM_StringCompare(benchmark::State& state) { benchmark::DoNotOptimize(s1.compare(s2)); } BENCHMARK(BM_StringCompare) - ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::O_N); + ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN); ``` As shown in the following invocation, asymptotic complexity might also be calculated automatically. ```c++ BENCHMARK(BM_StringCompare) - ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::O_Auto); + ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oAuto); ``` ### Templated benchmarks diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h index d7cf83f6..385cbbc6 100644 --- a/include/benchmark/benchmark_api.h +++ b/include/benchmark/benchmark_api.h @@ -232,17 +232,17 @@ enum TimeUnit { }; // BigO is passed to a benchmark in order to specify the asymptotic computational -// complexity for the benchmark. In case O_Auto is selected, complexity will be +// complexity for the benchmark. In case oAuto is selected, complexity will be // calculated automatically to the best fit. 
enum BigO { - O_None, - O_1, - O_N, - O_N_Squared, - O_N_Cubed, - O_log_N, - O_N_log_N, - O_Auto + oNone, + o1, + oN, + oNSquared, + oNCubed, + oLogN, + oNLogN, + oAuto }; // State is passed to a running Benchmark and contains state for the diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index 564219a1..5e0a5522 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -49,7 +49,7 @@ class BenchmarkReporter { bytes_per_second(0), items_per_second(0), max_heapbytes_used(0), - complexity(O_None), + complexity(oNone), arg1(0), arg2(0), report_big_o(false), diff --git a/src/benchmark.cc b/src/benchmark.cc index 874dc0c7..dbab503e 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -454,7 +454,7 @@ BenchmarkImp::BenchmarkImp(const char* name) : name_(name), arg_count_(-1), time_unit_(kNanosecond), range_multiplier_(kRangeMultiplier), min_time_(0.0), use_real_time_(false), use_manual_time_(false), - complexity_(O_None) { + complexity_(oNone) { } BenchmarkImp::~BenchmarkImp() { @@ -803,7 +803,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, report.complexity = b.complexity; reports.push_back(report); - if(report.complexity != O_None) + if(report.complexity != oNone) complexity_reports.push_back(report); break; @@ -830,7 +830,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, } br->ReportRuns(reports); - if((b.complexity != O_None) && b.last_benchmark_instance) { + if((b.complexity != oNone) && b.last_benchmark_instance) { br->ReportComplexity(complexity_reports); complexity_reports.clear(); } diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc index 031736e5..df662e4a 100644 --- a/src/csv_reporter.cc +++ b/src/csv_reporter.cc @@ -72,12 +72,12 @@ void CSVReporter::ReportComplexity(const std::vector & complexity_reports) return; } - Run bigO_data; + Run big_o_data; Run rms_data; - BenchmarkReporter::ComputeBigO(complexity_reports, &bigO_data, &rms_data); + BenchmarkReporter::ComputeBigO(complexity_reports, &big_o_data, &rms_data); // Output using PrintRun. - PrintRunData(bigO_data); + PrintRunData(big_o_data); PrintRunData(rms_data); } diff --git a/src/json_reporter.cc b/src/json_reporter.cc index c15fb105..bfa85e47 100644 --- a/src/json_reporter.cc +++ b/src/json_reporter.cc @@ -127,13 +127,13 @@ void JSONReporter::ReportComplexity(const std::vector & complexity_reports) out << ",\n"; } - Run bigO_data; + Run big_o_data; Run rms_data; - BenchmarkReporter::ComputeBigO(complexity_reports, &bigO_data, &rms_data); + BenchmarkReporter::ComputeBigO(complexity_reports, &big_o_data, &rms_data); // Output using PrintRun. 
out << indent << "{\n"; - PrintRunData(bigO_data); + PrintRunData(big_o_data); out << indent << "},\n"; out << indent << "{\n"; PrintRunData(rms_data); diff --git a/src/minimal_leastsq.cc b/src/minimal_leastsq.cc index f3c841d8..0b6e6653 100644 --- a/src/minimal_leastsq.cc +++ b/src/minimal_leastsq.cc @@ -16,95 +16,94 @@ // Adapted to be used with google benchmark #include "minimal_leastsq.h" - +#include "check.h" #include // Internal function to calculate the different scalability forms -double fittingCurve(double n, benchmark::BigO complexity) { +double FittingCurve(double n, benchmark::BigO complexity) { switch (complexity) { - case benchmark::O_N: + case benchmark::oN: return n; - case benchmark::O_N_Squared: + case benchmark::oNSquared: return pow(n, 2); - case benchmark::O_N_Cubed: + case benchmark::oNCubed: return pow(n, 3); - case benchmark::O_log_N: + case benchmark::oLogN: return log2(n); - case benchmark::O_N_log_N: + case benchmark::oNLogN: return n * log2(n); - case benchmark::O_1: + case benchmark::o1: default: return 1; } } // Internal function to find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. -// - N : Vector containing the size of the benchmark tests. -// - Time : Vector containing the times for the benchmark tests. -// - Complexity : Fitting curve. +// - n : Vector containing the size of the benchmark tests. +// - time : Vector containing the times for the benchmark tests. +// - complexity : Fitting curve. // For a deeper explanation on the algorithm logic, look the README file at http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit -LeastSq leastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity) { - assert(N.size() == Time.size() && N.size() >= 2); - assert(Complexity != benchmark::O_None && - Complexity != benchmark::O_Auto); +LeastSq CalculateLeastSq(const std::vector& n, const std::vector& time, const benchmark::BigO complexity) { + CHECK_NE(complexity, benchmark::oAuto); - double sigmaGN = 0; - double sigmaGNSquared = 0; - double sigmaTime = 0; - double sigmaTimeGN = 0; + double sigma_gn = 0; + double sigma_gn_squared = 0; + double sigma_time = 0; + double sigma_time_gn = 0; // Calculate least square fitting parameter - for (size_t i = 0; i < N.size(); ++i) { - double GNi = fittingCurve(N[i], Complexity); - sigmaGN += GNi; - sigmaGNSquared += GNi * GNi; - sigmaTime += Time[i]; - sigmaTimeGN += Time[i] * GNi; + for (size_t i = 0; i < n.size(); ++i) { + double gn_i = FittingCurve(n[i], complexity); + sigma_gn += gn_i; + sigma_gn_squared += gn_i * gn_i; + sigma_time += time[i]; + sigma_time_gn += time[i] * gn_i; } LeastSq result; - result.complexity = Complexity; + result.complexity = complexity; // Calculate complexity. 
- // O_1 is treated as an special case - if (Complexity != benchmark::O_1) - result.coef = sigmaTimeGN / sigmaGNSquared; + // o1 is treated as an special case + if (complexity != benchmark::o1) + result.coef = sigma_time_gn / sigma_gn_squared; else - result.coef = sigmaTime / N.size(); + result.coef = sigma_time / n.size(); // Calculate RMS double rms = 0; - for (size_t i = 0; i < N.size(); ++i) { - double fit = result.coef * fittingCurve(N[i], Complexity); - rms += pow((Time[i] - fit), 2); + for (size_t i = 0; i < n.size(); ++i) { + double fit = result.coef * FittingCurve(n[i], complexity); + rms += pow((time[i] - fit), 2); } - double mean = sigmaTime / N.size(); + double mean = sigma_time / n.size(); - result.rms = sqrt(rms / N.size()) / mean; // Normalized RMS by the mean of the observed values + result.rms = sqrt(rms / n.size()) / mean; // Normalized RMS by the mean of the observed values return result; } // Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. -// - N : Vector containing the size of the benchmark tests. -// - Time : Vector containing the times for the benchmark tests. -// - Complexity : If different than O_Auto, the fitting curve will stick to this one. If it is O_Auto, it will be calculated +// - n : Vector containing the size of the benchmark tests. +// - time : Vector containing the times for the benchmark tests. +// - complexity : If different than oAuto, the fitting curve will stick to this one. If it is oAuto, it will be calculated // the best fitting curve. -LeastSq minimalLeastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity) { - assert(N.size() == Time.size() && N.size() >= 2); // Do not compute fitting curve is less than two benchmark runs are given - assert(Complexity != benchmark::O_None); // Check that complexity is a valid parameter. 
+LeastSq MinimalLeastSq(const std::vector& n, const std::vector& time, const benchmark::BigO complexity) { + CHECK_EQ(n.size(), time.size()); + CHECK_GE(n.size(), 2); // Do not compute fitting curve is less than two benchmark runs are given + CHECK_NE(complexity, benchmark::oNone); - if(Complexity == benchmark::O_Auto) { - std::vector fitCurves = { benchmark::O_log_N, benchmark::O_N, benchmark::O_N_log_N, benchmark::O_N_Squared, benchmark::O_N_Cubed }; + if(complexity == benchmark::oAuto) { + std::vector fit_curves = { benchmark::oLogN, benchmark::oN, benchmark::oNLogN, benchmark::oNSquared, benchmark::oNCubed }; - LeastSq best_fit = leastSq(N, Time, benchmark::O_1); // Take O_1 as default best fitting curve + LeastSq best_fit = CalculateLeastSq(n, time, benchmark::o1); // Take o1 as default best fitting curve // Compute all possible fitting curves and stick to the best one - for (const auto& fit : fitCurves) { - LeastSq current_fit = leastSq(N, Time, fit); + for (const auto& fit : fit_curves) { + LeastSq current_fit = CalculateLeastSq(n, time, fit); if (current_fit.rms < best_fit.rms) best_fit = current_fit; } @@ -112,5 +111,5 @@ LeastSq minimalLeastSq(const std::vector& N, const std::vector& Tim return best_fit; } else - return leastSq(N, Time, Complexity); + return CalculateLeastSq(n, time, complexity); } \ No newline at end of file diff --git a/src/minimal_leastsq.h b/src/minimal_leastsq.h index 0b137fb7..3f7be346 100644 --- a/src/minimal_leastsq.h +++ b/src/minimal_leastsq.h @@ -22,18 +22,18 @@ #include -// This data structure will contain the result returned by minimalLeastSq +// This data structure will contain the result returned by MinimalLeastSq // - coef : Estimated coeficient for the high-order term as interpolated from data. // - rms : Normalized Root Mean Squared Error. -// - complexity : Scalability form (e.g. O_N, O_N_log_N). In case a scalability form has been provided to minimalLeastSq -// this will return the same value. In case BigO::O_Auto has been selected, this parameter will return the +// - complexity : Scalability form (e.g. oN, oNLogN). In case a scalability form has been provided to MinimalLeastSq +// this will return the same value. In case BigO::oAuto has been selected, this parameter will return the // best fitting curve detected. struct LeastSq { LeastSq() : coef(0), rms(0), - complexity(benchmark::O_None) {} + complexity(benchmark::oNone) {} double coef; double rms; @@ -41,6 +41,6 @@ struct LeastSq { }; // Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. -LeastSq minimalLeastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity = benchmark::O_Auto); +LeastSq MinimalLeastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity = benchmark::oAuto); #endif diff --git a/src/reporter.cc b/src/reporter.cc index 27dca856..0e1c581e 100644 --- a/src/reporter.cc +++ b/src/reporter.cc @@ -95,13 +95,13 @@ void BenchmarkReporter::ComputeBigO( CpuTime.push_back(run.cpu_accumulated_time/run.iterations); } - LeastSq resultCpu = minimalLeastSq(N, CpuTime, reports[0].complexity); + LeastSq resultCpu = MinimalLeastSq(N, CpuTime, reports[0].complexity); // resultCpu.complexity is passed as parameter to resultReal because in case - // reports[0].complexity is O_Auto, the noise on the measured data could make + // reports[0].complexity is oAuto, the noise on the measured data could make // the best fit function of Cpu and Real differ. 
In order to solve this, we take // the best fitting function for the Cpu, and apply it to Real data. - LeastSq resultReal = minimalLeastSq(N, RealTime, resultCpu.complexity); + LeastSq resultReal = MinimalLeastSq(N, RealTime, resultCpu.complexity); std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/')); @@ -130,17 +130,17 @@ void BenchmarkReporter::ComputeBigO( std::string BenchmarkReporter::GetBigO(BigO complexity) { switch (complexity) { - case O_N: + case oN: return "* N"; - case O_N_Squared: + case oNSquared: return "* N**2"; - case O_N_Cubed: + case oNCubed: return "* N**3"; - case O_log_N: + case oLogN: return "* lgN"; - case O_N_log_N: + case oNLogN: return "* NlgN"; - case O_1: + case o1: return "* 1"; default: return ""; diff --git a/test/complexity_test.cc b/test/complexity_test.cc index e7e16d31..e81742ee 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -27,7 +27,7 @@ void BM_Complexity_O1(benchmark::State& state) { while (state.KeepRunning()) { } } -BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::O_1); +BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1); static void BM_Complexity_O_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range_x()); @@ -36,8 +36,8 @@ static void BM_Complexity_O_N(benchmark::State& state) { benchmark::DoNotOptimize(std::find(v.begin(), v.end(), itemNotInVector)); } } -BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::O_N); -BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::O_Auto); +BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN); +BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oAuto); static void BM_Complexity_O_N_Squared(benchmark::State& state) { std::string s1(state.range_x(), '-'); @@ -50,7 +50,7 @@ static void BM_Complexity_O_N_Squared(benchmark::State& state) { } } } -BENCHMARK(BM_Complexity_O_N_Squared) -> Range(1, 1<<8) -> Complexity(benchmark::O_N_Squared); +BENCHMARK(BM_Complexity_O_N_Squared) -> Range(1, 1<<8) -> Complexity(benchmark::oNSquared); static void BM_Complexity_O_N_Cubed(benchmark::State& state) { std::string s1(state.range_x(), '-'); @@ -67,7 +67,7 @@ static void BM_Complexity_O_N_Cubed(benchmark::State& state) { } } } -BENCHMARK(BM_Complexity_O_N_Cubed) -> DenseRange(1, 8) -> Complexity(benchmark::O_N_Cubed); +BENCHMARK(BM_Complexity_O_N_Cubed) -> DenseRange(1, 8) -> Complexity(benchmark::oNCubed); static void BM_Complexity_O_log_N(benchmark::State& state) { auto m = ConstructRandomMap(state.range_x()); @@ -77,7 +77,7 @@ static void BM_Complexity_O_log_N(benchmark::State& state) { } } BENCHMARK(BM_Complexity_O_log_N) - -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::O_log_N); + -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oLogN); static void BM_Complexity_O_N_log_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range_x()); @@ -85,15 +85,15 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) { std::sort(v.begin(), v.end()); } } -BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::O_N_log_N); -BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::O_Auto); +BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> 
Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN); +BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oAuto); // Test benchmark with no range and check no complexity is calculated. void BM_Extreme_Cases(benchmark::State& state) { while (state.KeepRunning()) { } } -BENCHMARK(BM_Extreme_Cases) -> Complexity(benchmark::O_N_log_N); -BENCHMARK(BM_Extreme_Cases) -> Arg(42) -> Complexity(benchmark::O_Auto); +BENCHMARK(BM_Extreme_Cases) -> Complexity(benchmark::oNLogN); +BENCHMARK(BM_Extreme_Cases) -> Arg(42) -> Complexity(benchmark::oAuto); BENCHMARK_MAIN() \ No newline at end of file From 266addc3f51f07ec182ed34af06e6d75b0f6d09f Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 20:21:34 +0200 Subject: [PATCH 13/26] fixed last_benchmark_instance --- src/benchmark.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/benchmark.cc b/src/benchmark.cc index dbab503e..22453897 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -415,7 +415,6 @@ bool BenchmarkFamilies::FindBenchmarks( instance.min_time = family->min_time_; instance.use_real_time = family->use_real_time_; instance.use_manual_time = family->use_manual_time_; - instance.last_benchmark_instance = (args == family->args_.back()); instance.complexity = family->complexity_; instance.threads = num_threads; instance.multithreaded = !(family->thread_counts_.empty()); @@ -442,6 +441,7 @@ bool BenchmarkFamilies::FindBenchmarks( } if (re.Match(instance.name)) { + instance.last_benchmark_instance = (args == family->args_.back()); benchmarks->push_back(instance); } } From fed9b6f211a9ca67860bcaf183cb450e3695bb07 Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 20:34:01 +0200 Subject: [PATCH 14/26] refactor least square .h --- src/minimal_leastsq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/minimal_leastsq.h b/src/minimal_leastsq.h index 3f7be346..8feb43dc 100644 --- a/src/minimal_leastsq.h +++ b/src/minimal_leastsq.h @@ -41,6 +41,6 @@ struct LeastSq { }; // Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. -LeastSq MinimalLeastSq(const std::vector& N, const std::vector& Time, const benchmark::BigO Complexity = benchmark::oAuto); +LeastSq MinimalLeastSq(const std::vector& n, const std::vector& time, const benchmark::BigO vomplexity = benchmark::oAuto); #endif From ea69a8479046413d96b0eb826f1d982985281a67 Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 20:34:54 +0200 Subject: [PATCH 15/26] fix --- src/minimal_leastsq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/minimal_leastsq.h b/src/minimal_leastsq.h index 8feb43dc..80d26333 100644 --- a/src/minimal_leastsq.h +++ b/src/minimal_leastsq.h @@ -41,6 +41,6 @@ struct LeastSq { }; // Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. 
-LeastSq MinimalLeastSq(const std::vector& n, const std::vector& time, const benchmark::BigO vomplexity = benchmark::oAuto); +LeastSq MinimalLeastSq(const std::vector& n, const std::vector& time, const benchmark::BigO complexity = benchmark::oAuto); #endif From d577987fd76595cb52602bd75b2866886e95b0f2 Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 20:40:41 +0200 Subject: [PATCH 16/26] changed indentation --- src/minimal_leastsq.cc | 122 ++++++++++++++++++++--------------------- src/minimal_leastsq.h | 14 ++--- 2 files changed, 68 insertions(+), 68 deletions(-) diff --git a/src/minimal_leastsq.cc b/src/minimal_leastsq.cc index 0b6e6653..ea6bd463 100644 --- a/src/minimal_leastsq.cc +++ b/src/minimal_leastsq.cc @@ -21,21 +21,21 @@ // Internal function to calculate the different scalability forms double FittingCurve(double n, benchmark::BigO complexity) { - switch (complexity) { - case benchmark::oN: - return n; - case benchmark::oNSquared: - return pow(n, 2); - case benchmark::oNCubed: - return pow(n, 3); - case benchmark::oLogN: - return log2(n); - case benchmark::oNLogN: - return n * log2(n); - case benchmark::o1: - default: - return 1; - } + switch (complexity) { + case benchmark::oN: + return n; + case benchmark::oNSquared: + return pow(n, 2); + case benchmark::oNCubed: + return pow(n, 3); + case benchmark::oLogN: + return log2(n); + case benchmark::oNLogN: + return n * log2(n); + case benchmark::o1: + default: + return 1; + } } // Internal function to find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. @@ -45,44 +45,44 @@ double FittingCurve(double n, benchmark::BigO complexity) { // For a deeper explanation on the algorithm logic, look the README file at http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit LeastSq CalculateLeastSq(const std::vector& n, const std::vector& time, const benchmark::BigO complexity) { - CHECK_NE(complexity, benchmark::oAuto); + CHECK_NE(complexity, benchmark::oAuto); - double sigma_gn = 0; - double sigma_gn_squared = 0; - double sigma_time = 0; - double sigma_time_gn = 0; + double sigma_gn = 0; + double sigma_gn_squared = 0; + double sigma_time = 0; + double sigma_time_gn = 0; - // Calculate least square fitting parameter - for (size_t i = 0; i < n.size(); ++i) { - double gn_i = FittingCurve(n[i], complexity); - sigma_gn += gn_i; - sigma_gn_squared += gn_i * gn_i; - sigma_time += time[i]; - sigma_time_gn += time[i] * gn_i; - } + // Calculate least square fitting parameter + for (size_t i = 0; i < n.size(); ++i) { + double gn_i = FittingCurve(n[i], complexity); + sigma_gn += gn_i; + sigma_gn_squared += gn_i * gn_i; + sigma_time += time[i]; + sigma_time_gn += time[i] * gn_i; + } - LeastSq result; - result.complexity = complexity; + LeastSq result; + result.complexity = complexity; - // Calculate complexity. - // o1 is treated as an special case - if (complexity != benchmark::o1) - result.coef = sigma_time_gn / sigma_gn_squared; - else - result.coef = sigma_time / n.size(); + // Calculate complexity. 
+ // o1 is treated as an special case + if (complexity != benchmark::o1) + result.coef = sigma_time_gn / sigma_gn_squared; + else + result.coef = sigma_time / n.size(); - // Calculate RMS - double rms = 0; - for (size_t i = 0; i < n.size(); ++i) { - double fit = result.coef * FittingCurve(n[i], complexity); - rms += pow((time[i] - fit), 2); - } + // Calculate RMS + double rms = 0; + for (size_t i = 0; i < n.size(); ++i) { + double fit = result.coef * FittingCurve(n[i], complexity); + rms += pow((time[i] - fit), 2); + } - double mean = sigma_time / n.size(); + double mean = sigma_time / n.size(); - result.rms = sqrt(rms / n.size()) / mean; // Normalized RMS by the mean of the observed values + result.rms = sqrt(rms / n.size()) / mean; // Normalized RMS by the mean of the observed values - return result; + return result; } // Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. @@ -92,24 +92,24 @@ LeastSq CalculateLeastSq(const std::vector& n, const std::vector& t // the best fitting curve. LeastSq MinimalLeastSq(const std::vector& n, const std::vector& time, const benchmark::BigO complexity) { - CHECK_EQ(n.size(), time.size()); - CHECK_GE(n.size(), 2); // Do not compute fitting curve is less than two benchmark runs are given - CHECK_NE(complexity, benchmark::oNone); + CHECK_EQ(n.size(), time.size()); + CHECK_GE(n.size(), 2); // Do not compute fitting curve is less than two benchmark runs are given + CHECK_NE(complexity, benchmark::oNone); - if(complexity == benchmark::oAuto) { - std::vector fit_curves = { benchmark::oLogN, benchmark::oN, benchmark::oNLogN, benchmark::oNSquared, benchmark::oNCubed }; + if(complexity == benchmark::oAuto) { + std::vector fit_curves = { benchmark::oLogN, benchmark::oN, benchmark::oNLogN, benchmark::oNSquared, benchmark::oNCubed }; - LeastSq best_fit = CalculateLeastSq(n, time, benchmark::o1); // Take o1 as default best fitting curve + LeastSq best_fit = CalculateLeastSq(n, time, benchmark::o1); // Take o1 as default best fitting curve - // Compute all possible fitting curves and stick to the best one - for (const auto& fit : fit_curves) { - LeastSq current_fit = CalculateLeastSq(n, time, fit); - if (current_fit.rms < best_fit.rms) - best_fit = current_fit; - } + // Compute all possible fitting curves and stick to the best one + for (const auto& fit : fit_curves) { + LeastSq current_fit = CalculateLeastSq(n, time, fit); + if (current_fit.rms < best_fit.rms) + best_fit = current_fit; + } - return best_fit; - } - else - return CalculateLeastSq(n, time, complexity); + return best_fit; + } + else + return CalculateLeastSq(n, time, complexity); } \ No newline at end of file diff --git a/src/minimal_leastsq.h b/src/minimal_leastsq.h index 80d26333..6dcb8940 100644 --- a/src/minimal_leastsq.h +++ b/src/minimal_leastsq.h @@ -30,14 +30,14 @@ // best fitting curve detected. struct LeastSq { - LeastSq() : - coef(0), - rms(0), - complexity(benchmark::oNone) {} + LeastSq() : + coef(0), + rms(0), + complexity(benchmark::oNone) {} - double coef; - double rms; - benchmark::BigO complexity; + double coef; + double rms; + benchmark::BigO complexity; }; // Find the coefficient for the high-order term in the running time, by minimizing the sum of squares of relative error. 
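The oAuto path in MinimalLeastSq above reduces to running the same closed-form fit once per candidate curve and keeping whichever yields the lowest normalized RMS. A self-contained sketch of that selection loop follows; the Curve enum, the Scale and FitRms helpers, and the sample timings are illustrative stand-ins, not the library's API.

#include <cmath>
#include <cstdio>
#include <vector>

// Illustrative sketch only: try each candidate curve with the closed-form
// fit and keep the one with the lowest normalized RMS.
enum Curve { kOne, kLogN, kN, kNLogN, kNSquared, kNCubed };

static double Scale(double n, Curve c) {
  switch (c) {
    case kLogN:     return std::log2(n);
    case kN:        return n;
    case kNLogN:    return n * std::log2(n);
    case kNSquared: return n * n;
    case kNCubed:   return n * n * n;
    case kOne:
    default:        return 1.0;
  }
}

// Normalized RMS of the best-coefficient fit for one candidate curve.
static double FitRms(const std::vector<double>& n,
                     const std::vector<double>& t, Curve c) {
  double stg = 0, sgg = 0, st = 0;
  for (std::size_t i = 0; i < n.size(); ++i) {
    const double g = Scale(n[i], c);
    stg += t[i] * g;
    sgg += g * g;
    st  += t[i];
  }
  const double coef = (c == kOne) ? st / n.size() : stg / sgg;
  double rms = 0;
  for (std::size_t i = 0; i < n.size(); ++i) {
    const double r = t[i] - coef * Scale(n[i], c);
    rms += r * r;
  }
  return std::sqrt(rms / n.size()) / (st / n.size());
}

int main() {
  // Hypothetical measurements that grow roughly linearly with N.
  const std::vector<double> n = {1024, 2048, 4096, 8192};
  const std::vector<double> t = {1100, 2050, 4150, 8200};

  const Curve candidates[] = {kLogN, kN, kNLogN, kNSquared, kNCubed};
  Curve best = kOne;
  double best_rms = FitRms(n, t, kOne);
  for (Curve c : candidates) {
    const double rms = FitRms(n, t, c);
    if (rms < best_rms) { best = c; best_rms = rms; }
  }
  std::printf("best curve enum value = %d, rms = %.4f\n",
              static_cast<int>(best), best_rms);
  return 0;
}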
From 43ef17441cc8767f5523031878a2f43ab1d7790b Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 20:50:35 +0200 Subject: [PATCH 17/26] refactor names --- src/reporter.cc | 34 +++++++++++++++++----------------- test/complexity_test.cc | 8 ++++---- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/reporter.cc b/src/reporter.cc index 0e1c581e..0c05ab0b 100644 --- a/src/reporter.cc +++ b/src/reporter.cc @@ -84,48 +84,48 @@ void BenchmarkReporter::ComputeBigO( Run* big_o, Run* rms) { CHECK(reports.size() >= 2) << "Cannot compute asymptotic complexity for less than 2 reports"; // Accumulators. - std::vector N; - std::vector RealTime; - std::vector CpuTime; + std::vector n; + std::vector real_time; + std::vector cpu_time; // Populate the accumulators. for (const Run& run : reports) { - N.push_back(run.arg1); - RealTime.push_back(run.real_accumulated_time/run.iterations); - CpuTime.push_back(run.cpu_accumulated_time/run.iterations); + n.push_back(run.arg1); + real_time.push_back(run.real_accumulated_time/run.iterations); + cpu_time.push_back(run.cpu_accumulated_time/run.iterations); } - LeastSq resultCpu = MinimalLeastSq(N, CpuTime, reports[0].complexity); + LeastSq result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity); - // resultCpu.complexity is passed as parameter to resultReal because in case + // result_cpu.complexity is passed as parameter to result_real because in case // reports[0].complexity is oAuto, the noise on the measured data could make // the best fit function of Cpu and Real differ. In order to solve this, we take // the best fitting function for the Cpu, and apply it to Real data. - LeastSq resultReal = MinimalLeastSq(N, RealTime, resultCpu.complexity); + LeastSq result_real = MinimalLeastSq(n, real_time, result_cpu.complexity); std::string benchmark_name = reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/')); // Get the data from the accumulator to BenchmarkReporter::Run's. 
big_o->benchmark_name = benchmark_name + "_BigO"; big_o->iterations = 0; - big_o->real_accumulated_time = resultReal.coef; - big_o->cpu_accumulated_time = resultCpu.coef; + big_o->real_accumulated_time = result_real.coef; + big_o->cpu_accumulated_time = result_cpu.coef; big_o->report_big_o = true; - big_o->complexity = resultCpu.complexity; + big_o->complexity = result_cpu.complexity; double multiplier; - const char* timeLabel; - std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(reports[0].time_unit); + const char* time_label; + std::tie(time_label, multiplier) = GetTimeUnitAndMultiplier(reports[0].time_unit); // Only add label to mean/stddev if it is same for all runs big_o->report_label = reports[0].report_label; rms->benchmark_name = benchmark_name + "_RMS"; rms->report_label = big_o->report_label; rms->iterations = 0; - rms->real_accumulated_time = resultReal.rms / multiplier; - rms->cpu_accumulated_time = resultCpu.rms / multiplier; + rms->real_accumulated_time = result_real.rms / multiplier; + rms->cpu_accumulated_time = result_cpu.rms / multiplier; rms->report_rms = true; - rms->complexity = resultCpu.complexity; + rms->complexity = result_cpu.complexity; } std::string BenchmarkReporter::GetBigO(BigO complexity) { diff --git a/test/complexity_test.cc b/test/complexity_test.cc index e81742ee..e169ad96 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -31,9 +31,9 @@ BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1); static void BM_Complexity_O_N(benchmark::State& state) { auto v = ConstructRandomVector(state.range_x()); - const int itemNotInVector = state.range_x()*2; // Test worst case scenario (item not in vector) + const int item_not_in_vector = state.range_x()*2; // Test worst case scenario (item not in vector) while (state.KeepRunning()) { - benchmark::DoNotOptimize(std::find(v.begin(), v.end(), itemNotInVector)); + benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); } } BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN); @@ -71,9 +71,9 @@ BENCHMARK(BM_Complexity_O_N_Cubed) -> DenseRange(1, 8) -> Complexity(benchmark:: static void BM_Complexity_O_log_N(benchmark::State& state) { auto m = ConstructRandomMap(state.range_x()); - const int itemNotInVector = state.range_x()*2; // Test worst case scenario (item not in vector) + const int item_not_in_vector = state.range_x()*2; // Test worst case scenario (item not in vector) while (state.KeepRunning()) { - benchmark::DoNotOptimize(m.find(itemNotInVector)); + benchmark::DoNotOptimize(m.find(item_not_in_vector)); } } BENCHMARK(BM_Complexity_O_log_N) From 266b3bd635a37b28d6e92125c615d3e17f5022ea Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 22:09:55 +0200 Subject: [PATCH 18/26] changed color and remove iterations --- src/console_reporter.cc | 5 +++-- src/csv_reporter.cc | 14 ++++++++++++-- src/json_reporter.cc | 17 ++++++++++------- 3 files changed, 25 insertions(+), 11 deletions(-) diff --git a/src/console_reporter.cc b/src/console_reporter.cc index 09b91c2b..242a94f9 100644 --- a/src/console_reporter.cc +++ b/src/console_reporter.cc @@ -112,7 +112,7 @@ void ConsoleReporter::PrintRunData(const Run& result) { const char* timeLabel; std::tie(timeLabel, multiplier) = GetTimeUnitAndMultiplier(result.time_unit); - ColorPrintf(COLOR_GREEN, "%-*s ", + ColorPrintf((result.report_big_o ||result.report_rms) ? 
COLOR_BLUE : COLOR_GREEN, "%-*s ", name_field_width_, result.benchmark_name.c_str()); if(result.report_big_o) { @@ -146,7 +146,8 @@ void ConsoleReporter::PrintRunData(const Run& result) { timeLabel); } - ColorPrintf(COLOR_CYAN, "%10lld", result.iterations); + if(!result.report_big_o && !result.report_rms) + ColorPrintf(COLOR_CYAN, "%10lld", result.iterations); if (!rate.empty()) { ColorPrintf(COLOR_DEFAULT, " %*s", 13, rate.c_str()); diff --git a/src/csv_reporter.cc b/src/csv_reporter.cc index df662e4a..9bfd66bf 100644 --- a/src/csv_reporter.cc +++ b/src/csv_reporter.cc @@ -99,10 +99,20 @@ void CSVReporter::PrintRunData(const Run & run) { ReplaceAll(&name, "\"", "\"\""); std::cout << "\"" << name << "\","; - std::cout << run.iterations << ","; + // Do not print iteration on bigO and RMS report + if(!run.report_big_o && !run.report_rms) + std::cout << run.iterations << ","; + else + std::cout << ","; + std::cout << real_time << ","; std::cout << cpu_time << ","; - std::cout << timeLabel << ","; + + // Do not print timeLabel on RMS report + if(!run.report_rms) + std::cout << timeLabel << ","; + else + std::cout << ","; if (run.bytes_per_second > 0.0) { std::cout << run.bytes_per_second; diff --git a/src/json_reporter.cc b/src/json_reporter.cc index bfa85e47..c9d9cf1d 100644 --- a/src/json_reporter.cc +++ b/src/json_reporter.cc @@ -162,17 +162,20 @@ void JSONReporter::PrintRunData(Run const& run) { out << indent << FormatKV("name", run.benchmark_name) << ",\n"; - out << indent - << FormatKV("iterations", run.iterations) - << ",\n"; + if(!run.report_big_o && !run.report_rms) { + out << indent + << FormatKV("iterations", run.iterations) + << ",\n"; + } out << indent << FormatKV("real_time", RoundDouble(real_time)) << ",\n"; out << indent - << FormatKV("cpu_time", RoundDouble(cpu_time)) - << ",\n"; - out << indent - << FormatKV("time_unit", timeLabel); + << FormatKV("cpu_time", RoundDouble(cpu_time)); + if(!run.report_rms) { + out << ",\n" << indent + << FormatKV("time_unit", timeLabel); + } if (run.bytes_per_second > 0.0) { out << ",\n" << indent << FormatKV("bytes_per_second", RoundDouble(run.bytes_per_second)); From 0c23d2852f58dd8e264f67045fd6e454cf481c5c Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 22:31:40 +0200 Subject: [PATCH 19/26] extracted BigO and GetBigO in own file --- include/benchmark/benchmark_api.h | 15 +---------- include/benchmark/complexity.h | 42 +++++++++++++++++++++++++++++++ include/benchmark/reporter.h | 1 - src/reporter.cc | 19 -------------- 4 files changed, 43 insertions(+), 34 deletions(-) create mode 100644 include/benchmark/complexity.h diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h index 385cbbc6..b3ebb27a 100644 --- a/include/benchmark/benchmark_api.h +++ b/include/benchmark/benchmark_api.h @@ -154,6 +154,7 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #include #include "macros.h" +#include "complexity.h" namespace benchmark { class BenchmarkReporter; @@ -231,20 +232,6 @@ enum TimeUnit { kMillisecond }; -// BigO is passed to a benchmark in order to specify the asymptotic computational -// complexity for the benchmark. In case oAuto is selected, complexity will be -// calculated automatically to the best fit. -enum BigO { - oNone, - o1, - oN, - oNSquared, - oNCubed, - oLogN, - oNLogN, - oAuto -}; - // State is passed to a running Benchmark and contains state for the // benchmark to use. 
class State { diff --git a/include/benchmark/complexity.h b/include/benchmark/complexity.h new file mode 100644 index 00000000..69392187 --- /dev/null +++ b/include/benchmark/complexity.h @@ -0,0 +1,42 @@ +#ifndef COMPLEXITY_H_ +#define COMPLEXITY_H_ + +#include + +namespace benchmark { + +// BigO is passed to a benchmark in order to specify the asymptotic computational +// complexity for the benchmark. In case oAuto is selected, complexity will be +// calculated automatically to the best fit. +enum BigO { + oNone, + o1, + oN, + oNSquared, + oNCubed, + oLogN, + oNLogN, + oAuto +}; + +inline std::string GetBigO(BigO complexity) { + switch (complexity) { + case oN: + return "* N"; + case oNSquared: + return "* N**2"; + case oNCubed: + return "* N**3"; + case oLogN: + return "* lgN"; + case oNLogN: + return "* NlgN"; + case o1: + return "* 1"; + default: + return ""; + } +} + +} // end namespace benchmark +#endif // COMPLEXITY_H_ \ No newline at end of file diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index 5e0a5522..2e164db8 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -108,7 +108,6 @@ protected: static void ComputeStats(const std::vector & reports, Run* mean, Run* stddev); static void ComputeBigO(const std::vector & reports, Run* bigO, Run* rms); static TimeUnitMultiplier GetTimeUnitAndMultiplier(TimeUnit unit); - static std::string GetBigO(BigO complexity); }; // Simple reporter that outputs benchmark data to the console. This is the diff --git a/src/reporter.cc b/src/reporter.cc index 0c05ab0b..f928d691 100644 --- a/src/reporter.cc +++ b/src/reporter.cc @@ -128,25 +128,6 @@ void BenchmarkReporter::ComputeBigO( rms->complexity = result_cpu.complexity; } -std::string BenchmarkReporter::GetBigO(BigO complexity) { - switch (complexity) { - case oN: - return "* N"; - case oNSquared: - return "* N**2"; - case oNCubed: - return "* N**3"; - case oLogN: - return "* lgN"; - case oNLogN: - return "* NlgN"; - case o1: - return "* 1"; - default: - return ""; - } -} - TimeUnitMultiplier BenchmarkReporter::GetTimeUnitAndMultiplier(TimeUnit unit) { switch (unit) { case kMillisecond: From 855786acf518db0162779f8196d930820e4f6b8c Mon Sep 17 00:00:00 2001 From: Ismael Date: Mon, 23 May 2016 22:49:16 +0200 Subject: [PATCH 20/26] added end of file carriage return --- include/benchmark/complexity.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/benchmark/complexity.h b/include/benchmark/complexity.h index 69392187..82dba829 100644 --- a/include/benchmark/complexity.h +++ b/include/benchmark/complexity.h @@ -39,4 +39,4 @@ inline std::string GetBigO(BigO complexity) { } } // end namespace benchmark -#endif // COMPLEXITY_H_ \ No newline at end of file +#endif // COMPLEXITY_H_ From 36a9ae197f220df65ef7ed1a21015a8fb4ef1ece Mon Sep 17 00:00:00 2001 From: Ismael Date: Tue, 24 May 2016 19:56:49 +0200 Subject: [PATCH 21/26] added SetComplexityN --- include/benchmark/benchmark_api.h | 17 ++++++++++++++++- include/benchmark/reporter.h | 6 ++---- src/benchmark.cc | 10 ++++++---- src/reporter.cc | 2 +- test/complexity_test.cc | 6 ++++++ 5 files changed, 31 insertions(+), 10 deletions(-) diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h index b3ebb27a..5f1ee204 100644 --- a/include/benchmark/benchmark_api.h +++ b/include/benchmark/benchmark_api.h @@ -312,6 +312,19 @@ public: return bytes_processed_; } + // If this routine is called with complexity_n > 0 and complexity report is requested for the + // 
family benchmark, then current benchmark will be part of the computation and complexity_n will + // represent the length of N. + BENCHMARK_ALWAYS_INLINE + void SetComplexityN(size_t complexity_n) { + complexity_n_ = complexity_n; + } + + BENCHMARK_ALWAYS_INLINE + size_t complexity_n() { + return complexity_n_; + } + // If this routine is called with items > 0, then an items/s // label is printed on the benchmark report line for the currently // executing benchmark. It is typically called at the end of a processing @@ -383,6 +396,8 @@ private: size_t bytes_processed_; size_t items_processed_; + size_t complexity_n_; + public: // Index of the executing thread. Values from [0, threads). const int thread_index; @@ -466,7 +481,7 @@ public: // to control how many iterations are run, and in the printing of items/second // or MB/second values. Benchmark* UseManualTime(); - + // Set the asymptotic computational complexity for the benchmark. If called // the asymptotic computational complexity will be shown on the output. Benchmark* Complexity(BigO complexity); diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index 2e164db8..86564026 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -50,8 +50,7 @@ class BenchmarkReporter { items_per_second(0), max_heapbytes_used(0), complexity(oNone), - arg1(0), - arg2(0), + complexity_n(0), report_big_o(false), report_rms(false) {} @@ -71,8 +70,7 @@ class BenchmarkReporter { // Keep track of arguments to compute asymptotic complexity BigO complexity; - int arg1; - int arg2; + int complexity_n; // Inform print function whether the current run is a complexity report bool report_big_o; diff --git a/src/benchmark.cc b/src/benchmark.cc index 22453897..03f524b8 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -116,9 +116,10 @@ std::string* GetReportLabel() { //static benchmark::MallocCounter *benchmark_mc; struct ThreadStats { - ThreadStats() : bytes_processed(0), items_processed(0) {} + ThreadStats() : bytes_processed(0), items_processed(0), complexity_n(0) {} int64_t bytes_processed; int64_t items_processed; + size_t complexity_n; }; // Timer management class @@ -693,6 +694,7 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b, MutexLock l(GetBenchmarkLock()); total->bytes_processed += st.bytes_processed(); total->items_processed += st.items_processed(); + total->complexity_n += st.complexity_n(); } timer_manager->Finalize(); @@ -798,8 +800,7 @@ void RunBenchmark(const benchmark::internal::Benchmark::Instance& b, report.cpu_accumulated_time = cpu_accumulated_time; report.bytes_per_second = bytes_per_second; report.items_per_second = items_per_second; - report.arg1 = b.arg1; - report.arg2 = b.arg2; + report.complexity_n = total.complexity_n; report.complexity = b.complexity; reports.push_back(report); @@ -851,7 +852,8 @@ State::State(size_t max_iters, bool has_x, int x, bool has_y, int y, bytes_processed_(0), items_processed_(0), thread_index(thread_i), threads(n_threads), - max_iterations(max_iters) + max_iterations(max_iters), + complexity_n_(0) { CHECK(max_iterations != 0) << "At least one iteration must be run"; CHECK_LT(thread_index, threads) << "thread_index must be less than threads"; diff --git a/src/reporter.cc b/src/reporter.cc index f928d691..544df870 100644 --- a/src/reporter.cc +++ b/src/reporter.cc @@ -90,7 +90,7 @@ void BenchmarkReporter::ComputeBigO( // Populate the accumulators. 
for (const Run& run : reports) { - n.push_back(run.arg1); + n.push_back(run.complexity_n); real_time.push_back(run.real_accumulated_time/run.iterations); cpu_time.push_back(run.cpu_accumulated_time/run.iterations); } diff --git a/test/complexity_test.cc b/test/complexity_test.cc index e169ad96..e454ee46 100644 --- a/test/complexity_test.cc +++ b/test/complexity_test.cc @@ -26,6 +26,7 @@ std::map ConstructRandomMap(int size) { void BM_Complexity_O1(benchmark::State& state) { while (state.KeepRunning()) { } + state.SetComplexityN(state.range_x()); } BENCHMARK(BM_Complexity_O1) -> Range(1, 1<<18) -> Complexity(benchmark::o1); @@ -35,6 +36,7 @@ static void BM_Complexity_O_N(benchmark::State& state) { while (state.KeepRunning()) { benchmark::DoNotOptimize(std::find(v.begin(), v.end(), item_not_in_vector)); } + state.SetComplexityN(state.range_x()); } BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oN); BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oAuto); @@ -42,6 +44,7 @@ BENCHMARK(BM_Complexity_O_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Com static void BM_Complexity_O_N_Squared(benchmark::State& state) { std::string s1(state.range_x(), '-'); std::string s2(state.range_x(), '-'); + state.SetComplexityN(state.range_x()); while (state.KeepRunning()) for(char& c1 : s1) { for(char& c2 : s2) { @@ -56,6 +59,7 @@ static void BM_Complexity_O_N_Cubed(benchmark::State& state) { std::string s1(state.range_x(), '-'); std::string s2(state.range_x(), '-'); std::string s3(state.range_x(), '-'); + state.SetComplexityN(state.range_x()); while (state.KeepRunning()) for(char& c1 : s1) { for(char& c2 : s2) { @@ -75,6 +79,7 @@ static void BM_Complexity_O_log_N(benchmark::State& state) { while (state.KeepRunning()) { benchmark::DoNotOptimize(m.find(item_not_in_vector)); } + state.SetComplexityN(state.range_x()); } BENCHMARK(BM_Complexity_O_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oLogN); @@ -84,6 +89,7 @@ static void BM_Complexity_O_N_log_N(benchmark::State& state) { while (state.KeepRunning()) { std::sort(v.begin(), v.end()); } + state.SetComplexityN(state.range_x()); } BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oNLogN); BENCHMARK(BM_Complexity_O_N_log_N) -> RangeMultiplier(2) -> Range(1<<10, 1<<16) -> Complexity(benchmark::oAuto); From e5cf020d9730d24391e1cd1dfb37aef2c163d82c Mon Sep 17 00:00:00 2001 From: Ismael Date: Tue, 24 May 2016 20:06:54 +0200 Subject: [PATCH 22/26] fixed warning --- src/benchmark.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/benchmark.cc b/src/benchmark.cc index 03f524b8..c515e8dc 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -850,10 +850,10 @@ State::State(size_t max_iters, bool has_x, int x, bool has_y, int y, has_range_x_(has_x), range_x_(x), has_range_y_(has_y), range_y_(y), bytes_processed_(0), items_processed_(0), + complexity_n_(0), thread_index(thread_i), threads(n_threads), - max_iterations(max_iters), - complexity_n_(0) + max_iterations(max_iters) { CHECK(max_iterations != 0) << "At least one iteration must be run"; CHECK_LT(thread_index, threads) << "thread_index must be less than threads"; From 5e10e120db2ddf36d75e910ec2c77adebbf7543f Mon Sep 17 00:00:00 2001 From: Ismael Date: Tue, 24 May 2016 20:26:21 +0200 Subject: [PATCH 23/26] fixed overshadow --- include/benchmark/benchmark_api.h | 2 +- src/benchmark.cc | 2 +- 2 files 
changed, 2 insertions(+), 2 deletions(-) diff --git a/include/benchmark/benchmark_api.h b/include/benchmark/benchmark_api.h index 5f1ee204..04ec6243 100644 --- a/include/benchmark/benchmark_api.h +++ b/include/benchmark/benchmark_api.h @@ -321,7 +321,7 @@ public: } BENCHMARK_ALWAYS_INLINE - size_t complexity_n() { + size_t complexity_length_n() { return complexity_n_; } diff --git a/src/benchmark.cc b/src/benchmark.cc index c515e8dc..e13fa522 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -694,7 +694,7 @@ void RunInThread(const benchmark::internal::Benchmark::Instance* b, MutexLock l(GetBenchmarkLock()); total->bytes_processed += st.bytes_processed(); total->items_processed += st.items_processed(); - total->complexity_n += st.complexity_n(); + total->complexity_n += st.complexity_length_n(); } timer_manager->Finalize(); From 64d72ee7b27c719be3233b1f1dd691084e07ffde Mon Sep 17 00:00:00 2001 From: Ismael Date: Tue, 24 May 2016 20:35:39 +0200 Subject: [PATCH 24/26] changed complexity_n from int to size_t --- include/benchmark/reporter.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index 86564026..f37ef20d 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -70,7 +70,7 @@ class BenchmarkReporter { // Keep track of arguments to compute asymptotic complexity BigO complexity; - int complexity_n; + size_t complexity_n; // Inform print function whether the current run is a complexity report bool report_big_o; From a24ef95e1199c463bb088d321caa1b0591817813 Mon Sep 17 00:00:00 2001 From: Ismael Date: Tue, 24 May 2016 20:48:34 +0200 Subject: [PATCH 25/26] adapt complexity_n to leastsq inteface --- include/benchmark/reporter.h | 2 +- src/benchmark.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/benchmark/reporter.h b/include/benchmark/reporter.h index f37ef20d..a9124883 100644 --- a/include/benchmark/reporter.h +++ b/include/benchmark/reporter.h @@ -70,7 +70,7 @@ class BenchmarkReporter { // Keep track of arguments to compute asymptotic complexity BigO complexity; - size_t complexity_n; + int complexity_n; // Inform print function whether the current run is a complexity report bool report_big_o; diff --git a/src/benchmark.cc b/src/benchmark.cc index e13fa522..84f88edf 100644 --- a/src/benchmark.cc +++ b/src/benchmark.cc @@ -119,7 +119,7 @@ struct ThreadStats { ThreadStats() : bytes_processed(0), items_processed(0), complexity_n(0) {} int64_t bytes_processed; int64_t items_processed; - size_t complexity_n; + int complexity_n; }; // Timer management class From f126852c8fe0cbd4f702d696b88d3ca8c4911499 Mon Sep 17 00:00:00 2001 From: Dominic Hamon Date: Tue, 24 May 2016 13:15:16 -0700 Subject: [PATCH 26/26] simplify format string for complexity output --- src/console_reporter.cc | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/console_reporter.cc b/src/console_reporter.cc index 242a94f9..cf78a7f8 100644 --- a/src/console_reporter.cc +++ b/src/console_reporter.cc @@ -124,11 +124,9 @@ void ConsoleReporter::PrintRunData(const Run& result) { big_o.c_str()); } else if(result.report_rms) { - ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ", + ColorPrintf(COLOR_YELLOW, "%10.0f %% %10.0f %% ", result.real_accumulated_time * multiplier * 100, - "%", - result.cpu_accumulated_time * multiplier * 100, - "%"); + result.cpu_accumulated_time * multiplier * 100); } else if (result.iterations == 0) { ColorPrintf(COLOR_YELLOW, "%10.0f %s %10.0f %s ",