From 11e5165b98c32038fad44ee282619484ed3b80da Mon Sep 17 00:00:00 2001
From: Snappy Team
Date: Fri, 13 Nov 2020 13:12:41 +0000
Subject: [PATCH] Add a benchmark that decreased the branch prediction
 memorization by increasing the amount of independent branches executed per
 benchmark iteration.

PiperOrigin-RevId: 342242843
---
 snappy_unittest.cc | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/snappy_unittest.cc b/snappy_unittest.cc
index d14c56b..d433545 100644
--- a/snappy_unittest.cc
+++ b/snappy_unittest.cc
@@ -1284,6 +1284,33 @@ static void BM_UFlat(int iters, int arg) {
 }
 BENCHMARK(BM_UFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
 
+static void BM_UFlatMedley(testing::benchmark::State& state) {
+  constexpr int kFiles = ARRAYSIZE(files);
+  std::string zcontents[kFiles];
+  size_t sizes[kFiles];
+  size_t max_size = 0;
+  for (int i = 0; i < kFiles; i++) {
+    std::string contents =
+        ReadTestDataFile(files[i].filename, files[i].size_limit);
+    max_size = std::max(max_size, contents.size());
+    sizes[i] = contents.size();
+    snappy::Compress(contents.data(), contents.size(), &zcontents[i]);
+  }
+
+  std::vector<char> dst(max_size);
+
+  size_t processed = 0;
+  for (auto s : state) {
+    for (int i = 0; i < kFiles; i++) {
+      CHECK(snappy::RawUncompress(zcontents[i].data(), zcontents[i].size(),
+                                  dst.data()));
+      processed += sizes[i];
+    }
+  }
+  SetBenchmarkBytesProcessed(processed);
+}
+BENCHMARK(BM_UFlatMedley);
+
 static void BM_UValidate(int iters, int arg) {
   StopBenchmarkTiming();