mirror of https://github.com/google/snappy.git
Add a benchmark that decreases branch prediction memorization by increasing the number of independent branches executed per benchmark iteration.
PiperOrigin-RevId: 342242843
parent 6835abd953
commit 11e5165b98
@@ -1284,6 +1284,33 @@ static void BM_UFlat(int iters, int arg) {
 }
 BENCHMARK(BM_UFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
 
+static void BM_UFlatMedley(testing::benchmark::State& state) {
+  constexpr int kFiles = ARRAYSIZE(files);
+  std::string zcontents[kFiles];
+  size_t sizes[kFiles];
+  size_t max_size = 0;
+  for (int i = 0; i < kFiles; i++) {
+    std::string contents =
+        ReadTestDataFile(files[i].filename, files[i].size_limit);
+    max_size = std::max(max_size, contents.size());
+    sizes[i] = contents.size();
+    snappy::Compress(contents.data(), contents.size(), &zcontents[i]);
+  }
+
+  std::vector<char> dst(max_size);
+
+  size_t processed = 0;
+  for (auto s : state) {
+    for (int i = 0; i < kFiles; i++) {
+      CHECK(snappy::RawUncompress(zcontents[i].data(), zcontents[i].size(),
+                                  dst.data()));
+      processed += sizes[i];
+    }
+  }
+  SetBenchmarkBytesProcessed(processed);
+}
+BENCHMARK(BM_UFlatMedley);
+
 static void BM_UValidate(int iters, int arg) {
   StopBenchmarkTiming();
 
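For readers without the snappy test harness, here is a minimal standalone sketch of the same idea, written against the open-source snappy and Google Benchmark APIs. The benchmark name BM_UncompressMedley and the synthetic inputs are illustrative stand-ins for the test-data files used in the commit, not the committed code.

// A minimal sketch of the "medley" technique: decompress several independent
// inputs inside each timed iteration so the branch predictor cannot memorize
// the branch pattern of a single input. Assumes the open-source snappy and
// Google Benchmark libraries; the synthetic inputs and the name
// BM_UncompressMedley are illustrative, not part of the commit.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <random>
#include <string>
#include <vector>

#include "benchmark/benchmark.h"
#include "snappy.h"

static void BM_UncompressMedley(benchmark::State& state) {
  constexpr int kInputs = 4;
  std::string compressed[kInputs];
  size_t sizes[kInputs];
  size_t max_size = 0;

  // Build inputs with different sizes and byte mixes so each one exercises a
  // different branch profile in the decompressor.
  std::mt19937 rng(301);
  for (int i = 0; i < kInputs; i++) {
    std::string contents((i + 1) * 16 * 1024, 'a');
    for (size_t j = 0; j < contents.size(); j += 3 + i) {
      contents[j] = static_cast<char>(rng());
    }
    max_size = std::max(max_size, contents.size());
    sizes[i] = contents.size();
    snappy::Compress(contents.data(), contents.size(), &compressed[i]);
  }

  std::vector<char> dst(max_size);
  size_t processed = 0;
  for (auto _ : state) {
    // Cycle through every input per timed iteration instead of timing one
    // input in isolation.
    for (int i = 0; i < kInputs; i++) {
      bool ok = snappy::RawUncompress(compressed[i].data(),
                                      compressed[i].size(), dst.data());
      assert(ok);
      (void)ok;
      benchmark::DoNotOptimize(dst.data());
      processed += sizes[i];
    }
  }
  state.SetBytesProcessed(static_cast<int64_t>(processed));
}
BENCHMARK(BM_UncompressMedley);

BENCHMARK_MAIN();

Per-file numbers are still covered by the existing BM_UFlat benchmark; the medley variant measures the mixed-input case where the branch pattern changes from one decompression to the next.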