mirror of https://github.com/google/snappy.git
Extend validate benchmarks over all types and also add a medley for validation.
I also made the compression happen only once per benchmark. This gives a cleaner measurement of branch-misses with "perf stat": compression naturally incurs a large number of branch misses, which was polluting the measurements. With that noise removed, the new decompression's branch-miss rate is actually much lower than initially reported, only 0.2%, and very stable, i.e. it doesn't really fluctuate with how the benchmarks are executed.

PiperOrigin-RevId: 342628576
This commit is contained in:
parent 719bed0ae2
commit e4a6e97b91
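The pattern described in the commit message is to hoist compression into one-time setup so the timed loop touches only the decompressor, keeping the compressor's branch misses out of the counters that perf stat reads. Below is a minimal standalone sketch of that idea, not the repository's benchmark harness: it uses only the public snappy API and a hand-rolled timing loop, and the inputs are illustrative stand-ins for the benchmark's test-data files.

#include <algorithm>
#include <chrono>
#include <cstdio>
#include <string>
#include <vector>

#include "snappy.h"

int main() {
  // Hypothetical stand-ins for the benchmark's test-data files.
  std::vector<std::string> inputs = {std::string(1 << 20, 'a'),
                                     std::string(1 << 20, 'b')};

  // One-time setup, outside the timed region: compress every input and
  // remember the largest uncompressed size so one output buffer can be reused.
  std::vector<std::string> zcontents(inputs.size());
  size_t max_size = 0;
  for (size_t i = 0; i < inputs.size(); ++i) {
    snappy::Compress(inputs[i].data(), inputs[i].size(), &zcontents[i]);
    max_size = std::max(max_size, inputs[i].size());
  }
  std::vector<char> dst(max_size);

  // Timed region: decompression only, so counters such as branch-misses
  // reflect the decoder rather than the compressor.
  auto start = std::chrono::steady_clock::now();
  size_t processed = 0;
  for (int iter = 0; iter < 100; ++iter) {
    for (size_t i = 0; i < zcontents.size(); ++i) {
      if (!snappy::RawUncompress(zcontents[i].data(), zcontents[i].size(),
                                 dst.data())) {
        return 1;
      }
      processed += inputs[i].size();
    }
  }
  auto micros = std::chrono::duration_cast<std::chrono::microseconds>(
                    std::chrono::steady_clock::now() - start)
                    .count();
  std::printf("uncompressed %zu bytes in %lld us\n", processed,
              static_cast<long long>(micros));
  return 0;
}

In the actual change below, the same setup work is moved into a SourceFiles struct constructed once behind a function-local static, so every medley benchmark reuses the pre-compressed inputs.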
@@ -1284,27 +1284,33 @@ static void BM_UFlat(int iters, int arg) {
 }
 BENCHMARK(BM_UFlat)->DenseRange(0, ARRAYSIZE(files) - 1);
 
-static void BM_UFlatMedley(testing::benchmark::State& state) {
-  constexpr int kFiles = ARRAYSIZE(files);
-  std::string zcontents[kFiles];
-  size_t sizes[kFiles];
-  size_t max_size = 0;
-  for (int i = 0; i < kFiles; i++) {
-    std::string contents =
-        ReadTestDataFile(files[i].filename, files[i].size_limit);
-    max_size = std::max(max_size, contents.size());
-    sizes[i] = contents.size();
-    snappy::Compress(contents.data(), contents.size(), &zcontents[i]);
-  }
+struct SourceFiles {
+  SourceFiles() {
+    for (int i = 0; i < kFiles; i++) {
+      std::string contents =
+          ReadTestDataFile(files[i].filename, files[i].size_limit);
+      max_size = std::max(max_size, contents.size());
+      sizes[i] = contents.size();
+      snappy::Compress(contents.data(), contents.size(), &zcontents[i]);
+    }
+  }
+  static constexpr int kFiles = ARRAYSIZE(files);
+  std::string zcontents[kFiles];
+  size_t sizes[kFiles];
+  size_t max_size = 0;
+};
 
-  std::vector<char> dst(max_size);
+static void BM_UFlatMedley(testing::benchmark::State& state) {
+  static const SourceFiles* const source = new SourceFiles();
+
+  std::vector<char> dst(source->max_size);
 
   size_t processed = 0;
   for (auto s : state) {
-    for (int i = 0; i < kFiles; i++) {
-      CHECK(snappy::RawUncompress(zcontents[i].data(), zcontents[i].size(),
-                                  dst.data()));
-      processed += sizes[i];
+    for (int i = 0; i < SourceFiles::kFiles; i++) {
+      CHECK(snappy::RawUncompress(source->zcontents[i].data(),
+                                  source->zcontents[i].size(), dst.data()));
+      processed += source->sizes[i];
     }
   }
   SetBenchmarkBytesProcessed(processed);
@@ -1332,7 +1338,22 @@ static void BM_UValidate(int iters, int arg) {
   }
   StopBenchmarkTiming();
 }
-BENCHMARK(BM_UValidate)->DenseRange(0, 4);
+BENCHMARK(BM_UValidate)->DenseRange(0, ARRAYSIZE(files) - 1);
+
+static void BM_UValidateMedley(testing::benchmark::State& state) {
+  static const SourceFiles* const source = new SourceFiles();
+
+  size_t processed = 0;
+  for (auto s : state) {
+    for (int i = 0; i < SourceFiles::kFiles; i++) {
+      CHECK(snappy::IsValidCompressedBuffer(source->zcontents[i].data(),
+                                            source->zcontents[i].size()));
+      processed += source->sizes[i];
+    }
+  }
+  SetBenchmarkBytesProcessed(processed);
+}
+BENCHMARK(BM_UValidateMedley);
 
 static void BM_UIOVec(int iters, int arg) {
   StopBenchmarkTiming();