Remove custom testing and benchmarking code.

Snappy includes a testing framework, which implements a subset of the
Google Test API, and can be used when Google Test is not available.
Snappy also includes a micro-benchmark framework, which implements an
old version of the Google Benchmark API.

This CL replaces the custom test and micro-benchmark frameworks with
google/googletest and google/benchmark. The code is vendored in
third_party/ via git submodules. The setup is similar to google/crc32c
and google/leveldb.

This CL also updates the benchmarking code to the modern Google
Benchmark API.

Benchmark results are expected to be more precise, as the old framework
ran each benchmark with a fixed number of iterations, whereas Google
Benchmark keeps iterating until the noise is low.

PiperOrigin-RevId: 347456142
This commit is contained in:
Victor Costan 2020-12-14 21:26:01 +00:00
parent 11f9a77a2f
commit 549685a598
7 changed files with 154 additions and 433 deletions

6
.gitmodules vendored Normal file
View File

@ -0,0 +1,6 @@
[submodule "third_party/benchmark"]
path = third_party/benchmark
url = https://github.com/google/benchmark.git
[submodule "third_party/googletest"]
path = third_party/googletest
url = https://github.com/google/googletest.git

View File

@ -134,6 +134,10 @@ elseif (SNAPPY_REQUIRE_AVX)
endif(HAVE_CLANG_MAVX)
endif(SNAPPY_REQUIRE_AVX2)
# Used by googletest.
check_cxx_compiler_flag(-Wno-missing-field-initializers
SNAPPY_HAVE_NO_MISSING_FIELD_INITIALIZERS)
include(CheckCXXSourceCompiles)
check_cxx_source_compiles("
int main() {
@ -174,11 +178,6 @@ include(CheckSymbolExists)
check_symbol_exists("mmap" "sys/mman.h" HAVE_FUNC_MMAP)
check_symbol_exists("sysconf" "unistd.h" HAVE_FUNC_SYSCONF)
find_package(GTest QUIET)
if(GTEST_FOUND)
set(HAVE_GTEST 1)
endif(GTEST_FOUND)
find_package(Gflags QUIET)
if(GFLAGS_FOUND OR GFLAGS_TARGET)
set(HAVE_GFLAGS 1)
@ -254,6 +253,28 @@ endif(BUILD_SHARED_LIBS)
if(SNAPPY_BUILD_TESTS)
enable_testing()
# Prevent overriding the parent project's compiler/linker settings on Windows.
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
set(install_gtest OFF)
set(install_gmock OFF)
set(build_gmock ON)
# This project is tested using GoogleTest.
add_subdirectory("third_party/googletest")
# This project uses Google benchmark for benchmarking.
set(BENCHMARK_ENABLE_TESTING OFF CACHE BOOL "" FORCE)
set(BENCHMARK_ENABLE_EXCEPTIONS OFF CACHE BOOL "" FORCE)
add_subdirectory("third_party/benchmark")
# GoogleTest triggers a missing field initializers warning.
if(SNAPPY_HAVE_NO_MISSING_FIELD_INITIALIZERS)
set_property(TARGET gtest
APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
set_property(TARGET gmock
APPEND PROPERTY COMPILE_OPTIONS -Wno-missing-field-initializers)
endif(SNAPPY_HAVE_NO_MISSING_FIELD_INITIALIZERS)
add_executable(snappy_unittest "")
target_sources(snappy_unittest
PRIVATE
@ -261,7 +282,8 @@ if(SNAPPY_BUILD_TESTS)
"snappy-test.cc"
)
target_compile_definitions(snappy_unittest PRIVATE -DHAVE_CONFIG_H)
target_link_libraries(snappy_unittest snappy ${GFLAGS_LIBRARIES} ${GTEST_LIBRARY})
target_link_libraries(snappy_unittest
snappy ${GFLAGS_LIBRARIES} gmock gtest benchmark)
if(HAVE_LIBZ)
target_link_libraries(snappy_unittest z)
@ -276,7 +298,6 @@ if(SNAPPY_BUILD_TESTS)
target_include_directories(snappy_unittest
BEFORE PRIVATE
"${PROJECT_SOURCE_DIR}"
"${GTEST_INCLUDE_DIRS}"
"${GFLAGS_INCLUDE_DIRS}"
)

6
NEWS
View File

@ -1,3 +1,9 @@
master, TBD:
* Performance improvements.
* Google Test and Google Benchmark are now bundled in third_party/.
Snappy v1.1.8, January 15th 2020:
* Small performance improvements.

View File

@ -70,6 +70,7 @@ You need the CMake version specified in [CMakeLists.txt](./CMakeLists.txt)
or later to build:
```bash
git submodule update --init
mkdir build
cd build && cmake ../ && make
```
@ -114,14 +115,10 @@ but it contains several useful components for Snappy development.
First of all, it contains unit tests, verifying correctness on your machine in
various scenarios. If you want to change or optimize Snappy, please run the
tests to verify you have not broken anything. Note that if you have the
Google Test library installed, unit test behavior (especially failures) will be
significantly more user-friendly. You can find Google Test at
tests to verify you have not broken anything.
https://github.com/google/googletest
You probably also want the gflags library for handling of command-line flags;
you can find it at
The gflags library for handling of command-line flags is used if it's installed.
You can find it at
https://gflags.github.io/gflags/
@ -145,5 +142,5 @@ test.)
Contact
=======
Snappy is distributed through GitHub. For the latest version, a bug tracker,
and other information, see https://github.com/google/snappy.
Snappy is distributed through GitHub. For the latest version and other
information, see https://github.com/google/snappy.

View File

@ -76,191 +76,6 @@ std::string StrFormat(const char* format, ...) {
return buf;
}
// Global state for the (since-removed) custom micro-benchmark framework.
// Timing is accumulated between StartBenchmarkTiming()/StopBenchmarkTiming()
// calls; label and bytes-processed are set by the benchmark body.
bool benchmark_running = false;
int64_t benchmark_real_time_us = 0;
int64_t benchmark_cpu_time_us = 0;
// Heap-allocated so it can be replaced; starts null until SetBenchmarkLabel().
std::string* benchmark_label = nullptr;
int64_t benchmark_bytes_processed = 0;
// Zeroes the accumulated wall-clock and CPU time counters. Called before
// each measured run; does not touch the label or bytes-processed counters.
void ResetBenchmarkTiming() {
benchmark_real_time_us = 0;
benchmark_cpu_time_us = 0;
}
// Snapshot of the clocks at StartBenchmarkTiming(), in the platform's native
// representation: QueryPerformanceCounter/GetProcessTimes on Windows,
// gettimeofday/getrusage elsewhere.
#ifdef WIN32
LARGE_INTEGER benchmark_start_real;
FILETIME benchmark_start_cpu;
#else // WIN32
struct timeval benchmark_start_real;
struct rusage benchmark_start_cpu;
#endif // WIN32
// Records the current wall-clock and process-CPU time into the
// benchmark_start_* globals and marks the benchmark as running.
// Exits the process if the CPU-time query fails.
void StartBenchmarkTiming() {
#ifdef WIN32
QueryPerformanceCounter(&benchmark_start_real);
FILETIME dummy;
// Only the user-time field is consumed later; kernel/creation/exit times
// are discarded via `dummy`.
CHECK(GetProcessTimes(
GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
#else
gettimeofday(&benchmark_start_real, NULL);
if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
std::perror("getrusage(RUSAGE_SELF)");
std::exit(1);
}
#endif
benchmark_running = true;
}
// Stops timing and adds the elapsed wall-clock and CPU time since the last
// StartBenchmarkTiming() to the benchmark_*_time_us accumulators.
// A no-op when timing was never started (or already stopped), so the
// Stop/Start bracketing in benchmark bodies is safe to call redundantly.
void StopBenchmarkTiming() {
if (!benchmark_running) {
return;
}
#ifdef WIN32
LARGE_INTEGER benchmark_stop_real;
LARGE_INTEGER benchmark_frequency;
QueryPerformanceCounter(&benchmark_stop_real);
QueryPerformanceFrequency(&benchmark_frequency);
// Convert performance-counter ticks to seconds, then to microseconds,
// rounding to nearest via the +0.5.
double elapsed_real = static_cast<double>(
benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
benchmark_frequency.QuadPart;
benchmark_real_time_us += elapsed_real * 1e6 + 0.5;
FILETIME benchmark_stop_cpu, dummy;
CHECK(GetProcessTimes(
GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));
// FILETIME is split into two 32-bit halves; reassemble via ULARGE_INTEGER.
ULARGE_INTEGER start_ulargeint;
start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;
ULARGE_INTEGER stop_ulargeint;
stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;
// FILETIME units are 100 ns; +5 rounds to the nearest microsecond.
benchmark_cpu_time_us +=
(stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
#else // WIN32
struct timeval benchmark_stop_real;
gettimeofday(&benchmark_stop_real, NULL);
benchmark_real_time_us +=
1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
benchmark_real_time_us +=
(benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);
struct rusage benchmark_stop_cpu;
if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
std::perror("getrusage(RUSAGE_SELF)");
std::exit(1);
}
// Only user CPU time (ru_utime) is counted; system time is ignored.
benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
benchmark_start_cpu.ru_utime.tv_sec);
benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
benchmark_start_cpu.ru_utime.tv_usec);
#endif // WIN32
benchmark_running = false;
}
// Replaces the human-readable label printed next to the benchmark results.
// Frees any previously set label; the final label is intentionally leaked at
// process exit (it lives until the next call or program termination).
void SetBenchmarkLabel(const std::string& str) {
if (benchmark_label) {
delete benchmark_label;
}
benchmark_label = new std::string(str);
}
// Records the total number of bytes the benchmark processed across all
// iterations; used by Benchmark::Run() to derive a bytes/second figure.
void SetBenchmarkBytesProcessed(int64_t bytes) {
benchmark_bytes_processed = bytes;
}
// Measured timings for one repetition of a benchmark case.
struct BenchmarkRun {
int64_t real_time_us;
int64_t cpu_time_us;
};
// Strict-weak ordering on CPU time; used with std::nth_element to pick the
// median run in Benchmark::Run().
struct BenchmarkCompareCPUTime {
bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
return a.cpu_time_us < b.cpu_time_us;
}
};
// Runs every test case in [start_, stop_] (set via DenseRange): calibrates
// an iteration count targeting ~200ms per run, measures kNumRuns runs, and
// reports the median (by CPU time) as per-iteration ns, iteration count,
// throughput, and label, on stderr.
void Benchmark::Run() {
for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
// Run a few iterations first to find out approximately how fast
// the benchmark is.
const int kCalibrateIterations = 100;
ResetBenchmarkTiming();
StartBenchmarkTiming();
(*function_)(kCalibrateIterations, test_case_num);
StopBenchmarkTiming();
// Let each test case run for about 200ms, but at least as many
// as we used to calibrate.
// Run five times and pick the median.
const int kNumRuns = 5;
const int kMedianPos = kNumRuns / 2;
int num_iterations = 0;
if (benchmark_real_time_us > 0) {
// 200000 us target / (calibration us per iteration).
num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
}
num_iterations = std::max(num_iterations, kCalibrateIterations);
BenchmarkRun benchmark_runs[kNumRuns];
for (int run = 0; run < kNumRuns; ++run) {
ResetBenchmarkTiming();
StartBenchmarkTiming();
(*function_)(num_iterations, test_case_num);
StopBenchmarkTiming();
benchmark_runs[run].real_time_us = benchmark_real_time_us;
benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
}
std::string heading = StrFormat("%s/%d", name_.c_str(), test_case_num);
std::string human_readable_speed;
// Partial sort: only the median element needs to be in place.
std::nth_element(benchmark_runs,
benchmark_runs + kMedianPos,
benchmark_runs + kNumRuns,
BenchmarkCompareCPUTime());
int64_t real_time_us = benchmark_runs[kMedianPos].real_time_us;
int64_t cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
if (cpu_time_us <= 0) {
// No measurable CPU time: throughput is unknown.
human_readable_speed = "?";
} else {
// bytes_processed covers all iterations of one run; scale by us -> s.
int64_t bytes_per_second =
benchmark_bytes_processed * 1000000 / cpu_time_us;
if (bytes_per_second < 1024) {
human_readable_speed =
StrFormat("%dB/s", static_cast<int>(bytes_per_second));
} else if (bytes_per_second < 1024 * 1024) {
human_readable_speed = StrFormat(
"%.1fkB/s", bytes_per_second / 1024.0f);
} else if (bytes_per_second < 1024 * 1024 * 1024) {
human_readable_speed = StrFormat(
"%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
} else {
human_readable_speed = StrFormat(
"%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
}
}
// %I64d is the MSVC runtime's spelling of %lld.
std::fprintf(stderr,
#ifdef WIN32
"%-18s %10I64d %10I64d %10d %s %s\n",
#else
"%-18s %10lld %10lld %10d %s %s\n",
#endif
heading.c_str(),
static_cast<long long>(real_time_us * 1000 / num_iterations),
static_cast<long long>(cpu_time_us * 1000 / num_iterations),
num_iterations,
human_readable_speed.c_str(),
// NOTE(review): benchmark_label starts out nullptr, so this dereference
// relies on every benchmark calling SetBenchmarkLabel() first — confirm.
benchmark_label->c_str());
}
}
#ifdef HAVE_LIBZ
ZLib::ZLib()

View File

@ -51,31 +51,12 @@
#endif
#ifdef HAVE_WINDOWS_H
// Needed to be able to use std::max without workarounds in the source code.
// https://support.microsoft.com/en-us/help/143208/prb-using-stl-in-windows-program-can-cause-min-max-conflicts
#define NOMINMAX
#include <windows.h>
#endif
#ifdef HAVE_GTEST
#include <gtest/gtest.h>
#undef TYPED_TEST
#define TYPED_TEST TEST
#define INIT_GTEST(argc, argv) ::testing::InitGoogleTest(argc, *argv)
#else
// Stubs for if the user doesn't have Google Test installed.
#define TEST(test_case, test_subcase) \
void Test_ ## test_case ## _ ## test_subcase()
#define INIT_GTEST(argc, argv)
#define TYPED_TEST TEST
#define EXPECT_EQ CHECK_EQ
#define EXPECT_NE CHECK_NE
#define EXPECT_FALSE(cond) CHECK(!(cond))
#endif
#ifdef HAVE_GFLAGS
#include <gflags/gflags.h>
@ -84,7 +65,6 @@
// arguments. Google Test seems to be the most happy with unknown arguments,
// though, so we call it first and hope for the best.
#define InitGoogle(argv0, argc, argv, remove_flags) \
INIT_GTEST(argc, argv); \
google::ParseCommandLineFlags(argc, argv, remove_flags);
#else
@ -94,8 +74,7 @@
#define DEFINE_int32(flag_name, default_value, description) \
static int FLAGS_ ## flag_name = default_value;
#define InitGoogle(argv0, argc, argv, remove_flags) \
INIT_GTEST(argc, argv)
#define InitGoogle(argv0, argc, argv, remove_flags) ((void)(0))
#endif
@ -170,19 +149,6 @@ namespace file {
namespace snappy {
#define FLAGS_test_random_seed 301
using TypeParam = std::string;
void Test_CorruptedTest_VerifyCorrupted();
void Test_Snappy_SimpleTests();
void Test_Snappy_MaxBlowup();
void Test_Snappy_RandomData();
void Test_Snappy_FourByteOffset();
void Test_SnappyCorruption_TruncatedVarint();
void Test_SnappyCorruption_UnterminatedVarint();
void Test_SnappyCorruption_OverflowingVarint();
void Test_Snappy_ReadPastEndOfBuffer();
void Test_Snappy_FindMatchLength();
void Test_Snappy_FindMatchLengthRandom();
std::string ReadTestDataFile(const std::string& base, size_t size_limit);
@ -238,45 +204,6 @@ class CycleTimer {
#endif
};
// Minimalistic microbenchmark framework.
// A benchmark body receives (iteration_count, test_case_number) and is
// expected to bracket its timed region with Start/StopBenchmarkTiming().
typedef void (*BenchmarkFunction)(int, int);
// One registered benchmark. DenseRange(start, stop) selects the inclusive
// range of test-case numbers Run() will iterate over; Run() (defined in
// snappy-test.cc) performs calibration, repeated runs, and reporting.
class Benchmark {
public:
Benchmark(const std::string& name, BenchmarkFunction function)
: name_(name), function_(function) {}
// Returns `this` so registration can chain: BENCHMARK(f)->DenseRange(a, b).
Benchmark* DenseRange(int start, int stop) {
start_ = start;
stop_ = stop;
return this;
}
void Run();
private:
const std::string name_;
const BenchmarkFunction function_;
// NOTE(review): left uninitialized by the constructor; benchmarks that are
// never given a DenseRange() appear to rely on BENCHMARK() call sites
// setting it — confirm against the registration macros.
int start_, stop_;
};
#define BENCHMARK(benchmark_name) \
Benchmark* Benchmark_ ## benchmark_name = \
(new Benchmark(#benchmark_name, benchmark_name))
extern Benchmark* Benchmark_BM_UFlat;
extern Benchmark* Benchmark_BM_UIOVec;
extern Benchmark* Benchmark_BM_UValidate;
extern Benchmark* Benchmark_BM_ZFlat;
extern Benchmark* Benchmark_BM_ZFlatAll;
extern Benchmark* Benchmark_BM_ZFlatIncreasingTableSize;
void ResetBenchmarkTiming();
void StartBenchmarkTiming();
void StopBenchmarkTiming();
void SetBenchmarkLabel(const std::string& str);
void SetBenchmarkBytesProcessed(int64_t bytes);
#ifdef HAVE_LIBZ
// Object-oriented wrapper around zlib.
@ -400,57 +327,6 @@ class ZLib {
} // namespace snappy
DECLARE_bool(run_microbenchmarks);
// Runs the hard-coded list of registered snappy benchmarks unless the
// --run_microbenchmarks flag is false. Emits warnings when the binary was
// built with assertions or without optimization, since timings would be
// misleading, then prints a fixed-width results header followed by one line
// per benchmark case (produced by Benchmark::Run()).
static inline void RunSpecifiedBenchmarks() {
if (!FLAGS_run_microbenchmarks) {
return;
}
std::fprintf(stderr, "Running microbenchmarks.\n");
#ifndef NDEBUG
std::fprintf(stderr,
"WARNING: Compiled with assertions enabled, will be slow.\n");
#endif
#ifndef __OPTIMIZE__
std::fprintf(stderr,
"WARNING: Compiled without optimization, will be slow.\n");
#endif
std::fprintf(stderr, "Benchmark Time(ns) CPU(ns) Iterations\n");
std::fprintf(stderr, "---------------------------------------------------\n");
// The list must be kept in sync with the BENCHMARK() registrations in
// snappy_unittest.cc; a new benchmark is silently skipped if not added here.
snappy::Benchmark_BM_UFlat->Run();
snappy::Benchmark_BM_UIOVec->Run();
snappy::Benchmark_BM_UValidate->Run();
snappy::Benchmark_BM_ZFlat->Run();
snappy::Benchmark_BM_ZFlatAll->Run();
snappy::Benchmark_BM_ZFlatIncreasingTableSize->Run();
std::fprintf(stderr, "\n");
}
#ifndef HAVE_GTEST
// Fallback for builds without Google Test: mimics gtest's RUN_ALL_TESTS()
// by calling each forward-declared Test_* function directly. Any failure
// aborts via the CHECK macros inside the tests, so reaching the end means
// every test passed; returns 0 like the gtest original.
static inline int RUN_ALL_TESTS() {
std::fprintf(stderr, "Running correctness tests.\n");
// Must be kept in sync with the TEST() definitions in snappy_unittest.cc.
snappy::Test_CorruptedTest_VerifyCorrupted();
snappy::Test_Snappy_SimpleTests();
snappy::Test_Snappy_MaxBlowup();
snappy::Test_Snappy_RandomData();
snappy::Test_Snappy_FourByteOffset();
snappy::Test_SnappyCorruption_TruncatedVarint();
snappy::Test_SnappyCorruption_UnterminatedVarint();
snappy::Test_SnappyCorruption_OverflowingVarint();
snappy::Test_Snappy_ReadPastEndOfBuffer();
snappy::Test_Snappy_FindMatchLength();
snappy::Test_Snappy_FindMatchLengthRandom();
std::fprintf(stderr, "All tests passed.\n");
return 0;
}
#endif // HAVE_GTEST
// For main().
namespace snappy {

View File

@ -35,9 +35,13 @@
#include <utility>
#include <vector>
#include "snappy-test.h"
#include "benchmark/benchmark.h"
#include "gtest/gtest.h"
#include "snappy.h"
#include "snappy-internal.h"
#include "snappy-test.h"
#include "snappy-sinksource.h"
DEFINE_int32(start_len, -1,
@ -1326,27 +1330,26 @@ TEST(Snappy, TestBenchmarkFiles) {
}
}
static void BM_UFlat(int iters, int arg) {
StopBenchmarkTiming();
void BM_UFlat(benchmark::State& state) {
// Pick file to process based on state.range(0).
int file_index = state.range(0);
// Pick file to process based on "arg"
CHECK_GE(arg, 0);
CHECK_LT(arg, ARRAYSIZE(files));
std::string contents =
ReadTestDataFile(files[arg].filename, files[arg].size_limit);
CHECK_GE(file_index, 0);
CHECK_LT(file_index, ARRAYSIZE(files));
std::string contents = ReadTestDataFile(files[file_index].filename,
files[file_index].size_limit);
std::string zcontents;
snappy::Compress(contents.data(), contents.size(), &zcontents);
char* dst = new char[contents.size()];
SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) *
static_cast<int64_t>(contents.size()));
SetBenchmarkLabel(files[arg].label);
StartBenchmarkTiming();
while (iters-- > 0) {
for (auto s : state) {
CHECK(snappy::RawUncompress(zcontents.data(), zcontents.size(), dst));
benchmark::DoNotOptimize(dst);
}
StopBenchmarkTiming();
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
static_cast<int64_t>(contents.size()));
state.SetLabel(files[file_index].label);
delete[] dst;
}
@ -1368,69 +1371,76 @@ struct SourceFiles {
size_t max_size = 0;
};
static void BM_UFlatMedley(int iters) {
void BM_UFlatMedley(benchmark::State& state) {
static const SourceFiles* const source = new SourceFiles();
std::vector<char> dst(source->max_size);
size_t processed = 0;
while (iters-- > 0) {
for (auto s : state) {
for (int i = 0; i < SourceFiles::kFiles; i++) {
CHECK(snappy::RawUncompress(source->zcontents[i].data(),
source->zcontents[i].size(), dst.data()));
processed += source->sizes[i];
benchmark::DoNotOptimize(dst);
}
}
SetBenchmarkBytesProcessed(processed);
int64_t source_sizes = 0;
for (int i = 0; i < SourceFiles::kFiles; i++) {
source_sizes += static_cast<int64_t>(source->sizes[i]);
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
source_sizes);
}
BENCHMARK(BM_UFlatMedley);
static void BM_UValidate(int iters, int arg) {
StopBenchmarkTiming();
void BM_UValidate(benchmark::State& state) {
// Pick file to process based on state.range(0).
int file_index = state.range(0);
// Pick file to process based on "arg"
CHECK_GE(arg, 0);
CHECK_LT(arg, ARRAYSIZE(files));
std::string contents =
ReadTestDataFile(files[arg].filename, files[arg].size_limit);
CHECK_GE(file_index, 0);
CHECK_LT(file_index, ARRAYSIZE(files));
std::string contents = ReadTestDataFile(files[file_index].filename,
files[file_index].size_limit);
std::string zcontents;
snappy::Compress(contents.data(), contents.size(), &zcontents);
SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) *
static_cast<int64_t>(contents.size()));
SetBenchmarkLabel(files[arg].label);
StartBenchmarkTiming();
while (iters-- > 0) {
for (auto s : state) {
CHECK(snappy::IsValidCompressedBuffer(zcontents.data(), zcontents.size()));
}
StopBenchmarkTiming();
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
static_cast<int64_t>(contents.size()));
state.SetLabel(files[file_index].label);
}
BENCHMARK(BM_UValidate)->DenseRange(0, ARRAYSIZE(files) - 1);
static void BM_UValidateMedley(int iters) {
void BM_UValidateMedley(benchmark::State& state) {
static const SourceFiles* const source = new SourceFiles();
size_t processed = 0;
while (iters-- > 0) {
for (auto s : state) {
for (int i = 0; i < SourceFiles::kFiles; i++) {
CHECK(snappy::IsValidCompressedBuffer(source->zcontents[i].data(),
source->zcontents[i].size()));
processed += source->sizes[i];
}
}
SetBenchmarkBytesProcessed(processed);
int64_t source_sizes = 0;
for (int i = 0; i < SourceFiles::kFiles; i++) {
source_sizes += static_cast<int64_t>(source->sizes[i]);
}
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
source_sizes);
}
BENCHMARK(BM_UValidateMedley);
static void BM_UIOVec(int iters, int arg) {
StopBenchmarkTiming();
void BM_UIOVec(benchmark::State& state) {
// Pick file to process based on state.range(0).
int file_index = state.range(0);
// Pick file to process based on "arg"
CHECK_GE(arg, 0);
CHECK_LT(arg, ARRAYSIZE(files));
std::string contents =
ReadTestDataFile(files[arg].filename, files[arg].size_limit);
CHECK_GE(file_index, 0);
CHECK_LT(file_index, ARRAYSIZE(files));
std::string contents = ReadTestDataFile(files[file_index].filename,
files[file_index].size_limit);
std::string zcontents;
snappy::Compress(contents.data(), contents.size(), &zcontents);
@ -1455,43 +1465,41 @@ static void BM_UIOVec(int iters, int arg) {
used_so_far += iov[i].iov_len;
}
SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) *
static_cast<int64_t>(contents.size()));
SetBenchmarkLabel(files[arg].label);
StartBenchmarkTiming();
while (iters-- > 0) {
for (auto s : state) {
CHECK(snappy::RawUncompressToIOVec(zcontents.data(), zcontents.size(), iov,
kNumEntries));
benchmark::DoNotOptimize(iov);
}
StopBenchmarkTiming();
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
static_cast<int64_t>(contents.size()));
state.SetLabel(files[file_index].label);
delete[] dst;
}
BENCHMARK(BM_UIOVec)->DenseRange(0, 4);
static void BM_UFlatSink(int iters, int arg) {
StopBenchmarkTiming();
void BM_UFlatSink(benchmark::State& state) {
// Pick file to process based on state.range(0).
int file_index = state.range(0);
// Pick file to process based on "arg"
CHECK_GE(arg, 0);
CHECK_LT(arg, ARRAYSIZE(files));
std::string contents =
ReadTestDataFile(files[arg].filename, files[arg].size_limit);
CHECK_GE(file_index, 0);
CHECK_LT(file_index, ARRAYSIZE(files));
std::string contents = ReadTestDataFile(files[file_index].filename,
files[file_index].size_limit);
std::string zcontents;
snappy::Compress(contents.data(), contents.size(), &zcontents);
char* dst = new char[contents.size()];
SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) *
static_cast<int64_t>(contents.size()));
SetBenchmarkLabel(files[arg].label);
StartBenchmarkTiming();
while (iters-- > 0) {
for (auto s : state) {
snappy::ByteArraySource source(zcontents.data(), zcontents.size());
snappy::UncheckedByteArraySink sink(dst);
CHECK(snappy::Uncompress(&source, &sink));
benchmark::DoNotOptimize(sink);
}
StopBenchmarkTiming();
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
static_cast<int64_t>(contents.size()));
state.SetLabel(files[file_index].label);
std::string s(dst, contents.size());
CHECK_EQ(contents, s);
@ -1501,41 +1509,34 @@ static void BM_UFlatSink(int iters, int arg) {
BENCHMARK(BM_UFlatSink)->DenseRange(0, ARRAYSIZE(files) - 1);
static void BM_ZFlat(int iters, int arg) {
StopBenchmarkTiming();
// Pick file to process based on "arg"
CHECK_GE(arg, 0);
CHECK_LT(arg, ARRAYSIZE(files));
std::string contents =
ReadTestDataFile(files[arg].filename, files[arg].size_limit);
void BM_ZFlat(benchmark::State& state) {
// Pick file to process based on state.range(0).
int file_index = state.range(0);
CHECK_GE(file_index, 0);
CHECK_LT(file_index, ARRAYSIZE(files));
std::string contents = ReadTestDataFile(files[file_index].filename,
files[file_index].size_limit);
char* dst = new char[snappy::MaxCompressedLength(contents.size())];
SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) *
static_cast<int64_t>(contents.size()));
StartBenchmarkTiming();
size_t zsize = 0;
while (iters-- > 0) {
for (auto s : state) {
snappy::RawCompress(contents.data(), contents.size(), dst, &zsize);
benchmark::DoNotOptimize(dst);
}
StopBenchmarkTiming();
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
static_cast<int64_t>(contents.size()));
const double compression_ratio =
static_cast<double>(zsize) / std::max<size_t>(1, contents.size());
SetBenchmarkLabel(StrFormat("%s (%.2f %%)", files[arg].label,
state.SetLabel(StrFormat("%s (%.2f %%)", files[file_index].label,
100.0 * compression_ratio));
VLOG(0) << StrFormat("compression for %s: %zd -> %zd bytes",
files[arg].label, static_cast<int>(contents.size()),
static_cast<int>(zsize));
VLOG(0) << StrFormat("compression for %s: %d -> %d bytes",
files[file_index].label, contents.size(), zsize);
delete[] dst;
}
BENCHMARK(BM_ZFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
static void BM_ZFlatAll(int iters, int arg) {
StopBenchmarkTiming();
CHECK_EQ(arg, 0);
void BM_ZFlatAll(benchmark::State& state) {
const int num_files = ARRAYSIZE(files);
std::vector<std::string> contents(num_files);
@ -1548,29 +1549,26 @@ static void BM_ZFlatAll(int iters, int arg) {
total_contents_size += contents[i].size();
}
SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) * total_contents_size);
StartBenchmarkTiming();
size_t zsize = 0;
while (iters-- > 0) {
for (auto s : state) {
for (int i = 0; i < num_files; ++i) {
snappy::RawCompress(contents[i].data(), contents[i].size(), dst[i],
&zsize);
benchmark::DoNotOptimize(dst);
}
}
StopBenchmarkTiming();
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
total_contents_size);
for (char* dst_item : dst) {
delete[] dst_item;
}
SetBenchmarkLabel(StrFormat("%d files", num_files));
state.SetLabel(StrFormat("%d files", num_files));
}
BENCHMARK(BM_ZFlatAll)->DenseRange(0, 0);
BENCHMARK(BM_ZFlatAll);
static void BM_ZFlatIncreasingTableSize(int iters, int arg) {
StopBenchmarkTiming();
CHECK_EQ(arg, 0);
void BM_ZFlatIncreasingTableSize(benchmark::State& state) {
CHECK_GT(ARRAYSIZE(files), 0);
const std::string base_content =
ReadTestDataFile(files[0].filename, files[0].size_limit);
@ -1588,28 +1586,30 @@ static void BM_ZFlatIncreasingTableSize(int iters, int arg) {
}
size_t zsize = 0;
SetBenchmarkBytesProcessed(static_cast<int64_t>(iters) * total_contents_size);
StartBenchmarkTiming();
while (iters-- > 0) {
for (auto s : state) {
for (size_t i = 0; i < contents.size(); ++i) {
snappy::RawCompress(contents[i].data(), contents[i].size(), dst[i],
&zsize);
benchmark::DoNotOptimize(dst);
}
}
StopBenchmarkTiming();
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) *
total_contents_size);
for (char* dst_item : dst) {
delete[] dst_item;
}
SetBenchmarkLabel(StrFormat("%zd tables", contents.size()));
state.SetLabel(StrFormat("%d tables", contents.size()));
}
BENCHMARK(BM_ZFlatIncreasingTableSize)->DenseRange(0, 0);
BENCHMARK(BM_ZFlatIncreasingTableSize);
} // namespace snappy
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
InitGoogle(argv[0], &argc, &argv, true);
RunSpecifiedBenchmarks();
::benchmark::RunSpecifiedBenchmarks();
if (argc >= 2) {
for (int arg = 1; arg < argc; ++arg) {