Add a benchmark that reduces branch prediction memorization by increasing the number of independent branches executed per benchmark iteration.

PiperOrigin-RevId: 342242843
Snappy Team 2020-11-13 13:12:41 +00:00 committed by Victor Costan
parent 6835abd953
commit 11e5165b98
1 changed file with 27 additions and 0 deletions
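
For context: the existing per-file benchmarks such as BM_UFlat decompress the same input on every iteration, so the branch predictor can effectively memorize that input's branch sequence. The medley benchmark added below instead decompresses every test file once per iteration, so independent branch histories interleave between repeats of any one file. The following is a minimal illustrative sketch of that difference, not code from this commit; TestInput, DecompressOnce, and the loop drivers are hypothetical stand-ins.

#include <cstddef>
#include <string>
#include <vector>

// Hypothetical stand-ins for illustration only; not snappy APIs.
struct TestInput {
  std::string compressed;        // compressed bytes for one test file
  size_t uncompressed_size = 0;  // size of the original input
};

// Placeholder for the real decompression call under test.
void DecompressOnce(const TestInput& in, std::vector<char>* dst) {
  dst->assign(in.uncompressed_size, 0);
}

// Per-file style: the same input runs every iteration, so its branch
// sequence is easy for the predictor to learn, inflating throughput.
void RunSingleInputLoop(const TestInput& in, int iters,
                        std::vector<char>* dst) {
  for (int i = 0; i < iters; ++i) DecompressOnce(in, dst);
}

// Medley style: every iteration walks all inputs, so far more independent
// branches execute between repeats of any one input.
void RunMedleyLoop(const std::vector<TestInput>& inputs, int iters,
                   std::vector<char>* dst) {
  for (int i = 0; i < iters; ++i) {
    for (const TestInput& in : inputs) DecompressOnce(in, dst);
  }
}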


@@ -1284,6 +1284,33 @@ static void BM_UFlat(int iters, int arg) {
}
BENCHMARK(BM_UFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
static void BM_UFlatMedley(testing::benchmark::State& state) {
  constexpr int kFiles = ARRAYSIZE(files);
  std::string zcontents[kFiles];
  size_t sizes[kFiles];
  size_t max_size = 0;
  for (int i = 0; i < kFiles; i++) {
    std::string contents =
        ReadTestDataFile(files[i].filename, files[i].size_limit);
    max_size = std::max(max_size, contents.size());
    sizes[i] = contents.size();
    snappy::Compress(contents.data(), contents.size(), &zcontents[i]);
  }
  std::vector<char> dst(max_size);
  size_t processed = 0;
  for (auto s : state) {
    for (int i = 0; i < kFiles; i++) {
      CHECK(snappy::RawUncompress(zcontents[i].data(), zcontents[i].size(),
                                  dst.data()));
      processed += sizes[i];
    }
  }
  SetBenchmarkBytesProcessed(processed);
}
BENCHMARK(BM_UFlatMedley);
static void BM_UValidate(int iters, int arg) {
  StopBenchmarkTiming();