// Copyright 2005 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "snappy-internal.h"
#include "snappy-sinksource.h"
#include "snappy.h"

#if !defined(SNAPPY_HAVE_BMI2)
// __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2
// specifically, but it does define __AVX2__ when AVX2 support is available.
// Fortunately, AVX2 was introduced in Haswell, just like BMI2.
//
// BMI2 is not defined as a subset of AVX2 (unlike SSSE3 and AVX above). So,
// GCC and Clang can build code with AVX2 enabled but BMI2 disabled, in which
// case issuing BMI2 instructions results in a compiler error.
#if defined(__BMI2__) || (defined(_MSC_VER) && defined(__AVX2__))
#define SNAPPY_HAVE_BMI2 1
#else
#define SNAPPY_HAVE_BMI2 0
#endif
#endif  // !defined(SNAPPY_HAVE_BMI2)

#if !defined(SNAPPY_HAVE_X86_CRC32)
#if defined(__SSE4_2__)
#define SNAPPY_HAVE_X86_CRC32 1
#else
#define SNAPPY_HAVE_X86_CRC32 0
#endif
#endif  // !defined(SNAPPY_HAVE_X86_CRC32)

#if !defined(SNAPPY_HAVE_NEON_CRC32)
#if SNAPPY_HAVE_NEON && defined(__ARM_FEATURE_CRC32)
#define SNAPPY_HAVE_NEON_CRC32 1
#else
#define SNAPPY_HAVE_NEON_CRC32 0
#endif
#endif  // !defined(SNAPPY_HAVE_NEON_CRC32)

#if SNAPPY_HAVE_BMI2 || SNAPPY_HAVE_X86_CRC32
// Please do not replace with <x86intrin.h> or with headers that assume more
// advanced SSE versions without checking with all the OWNERS.
#include <immintrin.h>
#elif SNAPPY_HAVE_NEON_CRC32
#include <arm_acle.h>
#endif

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>

namespace snappy {

namespace {

// The amount of slop bytes writers are using for unconditional copies.
constexpr int kSlopBytes = 64;

using internal::char_table;
using internal::COPY_1_BYTE_OFFSET;
using internal::COPY_2_BYTE_OFFSET;
using internal::COPY_4_BYTE_OFFSET;
using internal::kMaximumTagLength;
using internal::LITERAL;
#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
using internal::V128;
using internal::V128_Load;
using internal::V128_LoadU;
using internal::V128_Shuffle;
using internal::V128_StoreU;
using internal::V128_DupChar;
#endif

// We translate the information encoded in a tag through a lookup table to a
// format that requires fewer instructions to decode. Effectively we store
// the length minus the tag part of the offset. The least significant byte
// thus stores the length, while (length - offset) is given by
// entry - ExtractOffset(type). The nice thing is that the subtraction
// immediately sets the flags for the necessary check that offset >= length.
// This folds the cmp with sub. We engineer the long literals and copy-4 to
// always fail this check, so their presence doesn't affect the fast path.
// To prevent literals from triggering the guard against offset < length
// (offset does not apply to literals), the table gives them a spurious offset
// of 256.
inline constexpr int16_t MakeEntry(int16_t len, int16_t offset) {
  return len - (offset << 8);
}

inline constexpr int16_t LengthMinusOffset(int data, int type) {
  return type == 3   ? 0xFF                    // copy-4 (or type == 3)
         : type == 2 ? MakeEntry(data + 1, 0)  // copy-2
         : type == 1 ? MakeEntry((data & 7) + 4, data >> 3)  // copy-1
         : data < 60 ? MakeEntry(data + 1, 1)  // note spurious offset.
                     : 0xFF;                   // long literal
}

inline constexpr int16_t LengthMinusOffset(uint8_t tag) {
  return LengthMinusOffset(tag >> 2, tag & 3);
}

template <size_t... Ints>
struct index_sequence {};

template <std::size_t N, size_t... Is>
struct make_index_sequence : make_index_sequence<N - 1, N - 1, Is...> {};

template <size_t... Is>
struct make_index_sequence<0, Is...> : index_sequence<Is...> {};

template <size_t... seq>
constexpr std::array<int16_t, 256> MakeTable(index_sequence<seq...>) {
  return std::array<int16_t, 256>{LengthMinusOffset(seq)...};
}

alignas(64) const std::array<int16_t, 256> kLengthMinusOffset =
    MakeTable(make_index_sequence<256>{});
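
// Illustrative spot-checks of the encoding above (an added sketch, not in the
// original source; it assumes only what the functions above state: the two
// low tag bits select the type and the upper six bits carry `data`).
//   tag 0x00 -> literal, data = 0 < 60 -> MakeEntry(1, 1)  = 1 - 256
//   tag 0x05 -> copy-1,  data = 1      -> MakeEntry(5, 0)  = 5
//   tag 0x0A -> copy-2,  data = 2      -> MakeEntry(3, 0)  = 3
static_assert(LengthMinusOffset(/*tag=*/0x00) == 1 - 256, "literal entry");
static_assert(LengthMinusOffset(/*tag=*/0x05) == 5, "copy-1 entry");
static_assert(LengthMinusOffset(/*tag=*/0x0A) == 3, "copy-2 entry");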

// Given a table of uint16_t whose size is mask / 2 + 1, return a pointer to the
// relevant entry, if any, for the given bytes. Any hash function will do,
// but a good hash function reduces the number of collisions and thus yields
// better compression for compressible input.
//
// REQUIRES: mask is 2 * (table_size - 1), and table_size is a power of two.
inline uint16_t* TableEntry(uint16_t* table, uint32_t bytes, uint32_t mask) {
  // Our choice is quicker-and-dirtier than the typical hash function;
  // empirically, that seems beneficial. The upper bits of kMagic * bytes are a
  // higher-quality hash than the lower bits, so when using kMagic * bytes we
  // also shift right to get a higher-quality end result. There's no similar
  // issue with a CRC because all of the output bits of a CRC are equally good
  // "hashes." So, a CPU instruction for CRC, if available, tends to be a good
  // choice.
#if SNAPPY_HAVE_NEON_CRC32
  // We use mask as the second arg to the CRC function, as it's about to
  // be used anyway; it'd be equally correct to use 0 or some constant.
  // Mathematically, _mm_crc32_u32 (or similar) is a function of the
  // xor of its arguments.
  const uint32_t hash = __crc32cw(bytes, mask);
#elif SNAPPY_HAVE_X86_CRC32
  const uint32_t hash = _mm_crc32_u32(bytes, mask);
#else
  constexpr uint32_t kMagic = 0x1e35a7bd;
  const uint32_t hash = (kMagic * bytes) >> (31 - kMaxHashTableBits);
#endif
  return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
                                     (hash & mask));
}
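
// For illustration (an added note, not in the original source): with a hash
// table of 1 << 14 entries, mask == 2 * (16384 - 1) == 0x7ffe, so
// `hash & mask` is always an even byte offset and the returned pointer stays
// aligned to a uint16_t slot within the table.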

inline uint16_t* TableEntry4ByteMatch(uint16_t* table, uint32_t bytes,
                                      uint32_t mask) {
  constexpr uint32_t kMagic = 2654435761U;
  const uint32_t hash = (kMagic * bytes) >> (32 - kMaxHashTableBits);
  return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
                                     (hash & mask));
}

inline uint16_t* TableEntry8ByteMatch(uint16_t* table, uint64_t bytes,
                                      uint32_t mask) {
  constexpr uint64_t kMagic = 58295818150454627ULL;
  const uint32_t hash = (kMagic * bytes) >> (64 - kMaxHashTableBits);
  return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
                                     (hash & mask));
}

}  // namespace

size_t MaxCompressedLength(size_t source_bytes) {
  // Compressed data can be defined as:
  //    compressed := item* literal*
  //    item       := literal* copy
  //
  // The trailing literal sequence has a space blowup of at most 62/60
  // since a literal of length 60 needs one tag byte + one extra byte
  // for length information.
  //
  // Item blowup is trickier to measure. Suppose the "copy" op copies
  // 4 bytes of data. Because of a special check in the encoding code,
  // we produce a 4-byte copy only if the offset is < 65536. Therefore
  // the copy op takes 3 bytes to encode, and this type of item leads
  // to at most the 62/60 blowup for representing literals.
  //
  // Suppose the "copy" op copies 5 bytes of data. If the offset is big
  // enough, it will take 5 bytes to encode the copy op. Therefore the
  // worst case here is a one-byte literal followed by a five-byte copy.
  // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
  //
  // This last factor dominates the blowup, so the final estimate is:
  return 32 + source_bytes + source_bytes / 6;
}
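
// Worked example (an added note, not in the original source): for a 4096-byte
// input the bound is 32 + 4096 + 4096 / 6 = 4810 bytes, i.e. roughly a 17%
// worst-case expansion plus a small constant.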

namespace {

void UnalignedCopy64(const void* src, void* dst) {
  char tmp[8];
  std::memcpy(tmp, src, 8);
  std::memcpy(dst, tmp, 8);
}

void UnalignedCopy128(const void* src, void* dst) {
  // std::memcpy() gets vectorized when the appropriate compiler options are
  // used. For example, x86 compilers targeting SSE2+ will optimize to an SSE2
  // load and store.
  char tmp[16];
  std::memcpy(tmp, src, 16);
  std::memcpy(dst, tmp, 16);
}

template <bool use_16bytes_chunk>
inline void ConditionalUnalignedCopy128(const char* src, char* dst) {
  if (use_16bytes_chunk) {
    UnalignedCopy128(src, dst);
  } else {
    UnalignedCopy64(src, dst);
    UnalignedCopy64(src + 8, dst + 8);
  }
}

// Copy [src, src+(op_limit-op)) to [op, op_limit) a byte at a time. Used
// for handling COPY operations where the input and output regions may overlap.
// For example, suppose:
//    src       == "ab"
//    op        == src + 2
//    op_limit  == op + 20
// After IncrementalCopySlow(src, op, op_limit), the result will have eleven
// copies of "ab"
//    ababababababababababab
// Note that this does not match the semantics of either std::memcpy() or
// std::memmove().
inline char* IncrementalCopySlow(const char* src, char* op,
                                 char* const op_limit) {
  // TODO: Remove pragma when LLVM is aware this
  // function is only called in cold regions and when cold regions don't get
  // vectorized or unrolled.
|
|
|
#ifdef __clang__
|
|
|
|
#pragma clang loop unroll(disable)
|
|
|
|
#endif
|
2017-01-27 08:10:36 +00:00
|
|
|
while (op < op_limit) {
|
|
|
|
*op++ = *src++;
|
|
|
|
}
|
|
|
|
return op_limit;
|
|
|
|
}
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2021-06-24 17:09:34 +00:00
|
|
|

#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE

// Computes the bytes for shuffle control mask (please read comments on
// 'pattern_generation_masks' as well) for the given index_offset and
// pattern_size. For example, when the 'offset' is 6, it will generate a
// repeating pattern of size 6. So, the first 16 byte indexes will correspond to
// the pattern-bytes {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3} and the
// next 16 byte indexes will correspond to the pattern-bytes {4, 5, 0, 1, 2, 3,
// 4, 5, 0, 1, 2, 3, 4, 5, 0, 1}. These byte index sequences are generated by
// calling MakePatternMaskBytes(0, 6, index_sequence<16>()) and
// MakePatternMaskBytes(16, 6, index_sequence<16>()) respectively.
template <size_t... indexes>
inline constexpr std::array<char, sizeof...(indexes)> MakePatternMaskBytes(
    int index_offset, int pattern_size, index_sequence<indexes...>) {
  return {static_cast<char>((index_offset + indexes) % pattern_size)...};
}
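
// For illustration (an added note, not in the original source): with
// pattern_size == 3 the two calls described above would yield
//   MakePatternMaskBytes(0, 3, ...)  -> {0,1,2,0,1,2,0,1,2,0,1,2,0,1,2,0}
//   MakePatternMaskBytes(16, 3, ...) -> {1,2,0,1,2,0,1,2,0,1,2,0,1,2,0,1}
// i.e. the second mask picks up exactly where the first 16 bytes left off.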

// Computes the shuffle control mask bytes array for given pattern-sizes and
// returns an array.
template <size_t... pattern_sizes_minus_one>
inline constexpr std::array<std::array<char, sizeof(V128)>,
                            sizeof...(pattern_sizes_minus_one)>
MakePatternMaskBytesTable(int index_offset,
                          index_sequence<pattern_sizes_minus_one...>) {
  return {
      MakePatternMaskBytes(index_offset, pattern_sizes_minus_one + 1,
                           make_index_sequence</*indexes=*/sizeof(V128)>())...};
}

// This is an array of shuffle control masks that can be used as the source
// operand for PSHUFB to permute the contents of the destination XMM register
// into a repeating byte pattern.
alignas(16) constexpr std::array<std::array<char, sizeof(V128)>,
                                 16> pattern_generation_masks =
    MakePatternMaskBytesTable(
        /*index_offset=*/0,
        /*pattern_sizes_minus_one=*/make_index_sequence<16>());

// Similar to 'pattern_generation_masks', this table is used to "rotate" the
// pattern so that we can copy the *next 16 bytes* consistent with the pattern.
// Basically, pattern_reshuffle_masks is a continuation of
// pattern_generation_masks. It follows that pattern_reshuffle_masks is the
// same as pattern_generation_masks for offsets 1, 2, 4, 8 and 16.
alignas(16) constexpr std::array<std::array<char, sizeof(V128)>,
                                 16> pattern_reshuffle_masks =
    MakePatternMaskBytesTable(
        /*index_offset=*/16,
        /*pattern_sizes_minus_one=*/make_index_sequence<16>());

SNAPPY_ATTRIBUTE_ALWAYS_INLINE
static inline V128 LoadPattern(const char* src, const size_t pattern_size) {
  V128 generation_mask = V128_Load(reinterpret_cast<const V128*>(
      pattern_generation_masks[pattern_size - 1].data()));
  // Uninitialized bytes are masked out by the shuffle mask.
  // TODO: remove annotation and macro defs once MSan is fixed.
  SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(src + pattern_size, 16 - pattern_size);
  return V128_Shuffle(V128_LoadU(reinterpret_cast<const V128*>(src)),
                      generation_mask);
}

SNAPPY_ATTRIBUTE_ALWAYS_INLINE
static inline std::pair<V128 /* pattern */, V128 /* reshuffle_mask */>
LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) {
  V128 pattern = LoadPattern(src, pattern_size);

  // This mask will generate the next 16 bytes in-place. Doing so enables us to
  // write data by at most 4 V128_StoreU.
  //
  // For example, suppose pattern is:        abcdefabcdefabcd
  // Shuffling with this mask will generate: efabcdefabcdefab
  // Shuffling again will generate:          cdefabcdefabcdef
  V128 reshuffle_mask = V128_Load(reinterpret_cast<const V128*>(
      pattern_reshuffle_masks[pattern_size - 1].data()));
  return {pattern, reshuffle_mask};
}

#endif  // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE

// Fallback for when we need to copy while extending the pattern, for example
// copying 10 bytes from 3 positions back abc -> abcabcabcabca.
//
// REQUIRES: [dst - offset, dst + 64) is a valid address range.
SNAPPY_ATTRIBUTE_ALWAYS_INLINE
static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
  if (SNAPPY_PREDICT_TRUE(offset <= 16)) {
    switch (offset) {
      case 0:
        return false;
      case 1: {
        // TODO: Ideally we should memset, move back once the
        // codegen issues are fixed.
        V128 pattern = V128_DupChar(dst[-1]);
        for (int i = 0; i < 4; i++) {
          V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
        }
        return true;
      }
      case 2:
      case 4:
      case 8:
      case 16: {
        V128 pattern = LoadPattern(dst - offset, offset);
        for (int i = 0; i < 4; i++) {
          V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
        }
        return true;
      }
      default: {
        auto pattern_and_reshuffle_mask =
            LoadPatternAndReshuffleMask(dst - offset, offset);
        V128 pattern = pattern_and_reshuffle_mask.first;
        V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
        for (int i = 0; i < 4; i++) {
          V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
          pattern = V128_Shuffle(pattern, reshuffle_mask);
        }
        return true;
      }
    }
  }
#else
  if (SNAPPY_PREDICT_TRUE(offset < 16)) {
    if (SNAPPY_PREDICT_FALSE(offset == 0)) return false;
    // Extend the pattern to the first 16 bytes.
    // The simpler formulation of `dst[i - offset]` induces undefined behavior.
    for (int i = 0; i < 16; i++) dst[i] = (dst - offset)[i];
    // Find a multiple of pattern >= 16.
    static std::array<uint8_t, 16> pattern_sizes = []() {
      std::array<uint8_t, 16> res;
      for (int i = 1; i < 16; i++) res[i] = (16 / i + 1) * i;
      return res;
    }();
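    // For illustration (an added note, not in the original source): the table
    // maps offset i to the smallest multiple of i strictly greater than 16,
    // e.g. 3 -> 18, 5 -> 20, 7 -> 21, so the memcpy calls below always step
    // back by a whole number of pattern repetitions.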
    offset = pattern_sizes[offset];
    for (int i = 1; i < 4; i++) {
      std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
    }
    return true;
  }
#endif  // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE

  // Very rare.
  for (int i = 0; i < 4; i++) {
    std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
  }
  return true;
}

// Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than
// IncrementalCopySlow. buf_limit is the address past the end of the writable
// region of the buffer.
inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
                             char* const buf_limit) {
#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
  constexpr int big_pattern_size_lower_bound = 16;
#else
  constexpr int big_pattern_size_lower_bound = 8;
#endif

  // Terminology:
  //
  // slop = buf_limit - op
  // pat  = op - src
  // len  = op_limit - op
  assert(src < op);
  assert(op < op_limit);
  assert(op_limit <= buf_limit);
  // NOTE: The copy tags use 3 or 6 bits to store the copy length, so len <= 64.
  assert(op_limit - op <= 64);
  // NOTE: In practice the compressor always emits len >= 4, so it is ok to
  // assume that to optimize this function, but this is not guaranteed by the
  // compression format, so we have to also handle len < 4 in case the input
  // does not satisfy these conditions.

  size_t pattern_size = op - src;
  // The cases are split into different branches to allow the branch predictor,
  // FDO, and static prediction hints to work better. For each input we list the
  // ratio of invocations that match each condition.
  //
  // input           slop < 16   pat < 8   len > 16
  // ------------------------------------------------
  // html|html4|cp      0%         1.01%    27.73%
  // urls               0%         0.88%    14.79%
  // jpg                0%        64.29%     7.14%
  // pdf                0%         2.56%    58.06%
  // txt[1-4]           0%         0.23%     0.97%
  // pb                 0%         0.96%    13.88%
  // bin                0.01%     22.27%    41.17%
  //
  // It is very rare that we don't have enough slop for doing block copies. It
  // is also rare that we need to expand a pattern. Small patterns are common
  // for incompressible formats and for those we are plenty fast already.
  // Lengths are normally not greater than 16 but they vary depending on the
  // input. In general if we always predict len <= 16 it would be an ok
  // prediction.
  //
  // In order to be fast we want a pattern >= 16 bytes (or 8 bytes in non-SSE)
  // and an unrolled loop copying 1x 16 bytes (or 2x 8 bytes in non-SSE) at a
  // time.

  // Handle the uncommon case where pattern is less than 16 (or 8 in non-SSE)
  // bytes.
  if (pattern_size < big_pattern_size_lower_bound) {
#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
    // Load the first eight bytes into an 128-bit XMM register, then use PSHUFB
    // to permute the register's contents in-place into a repeating sequence of
    // the first "pattern_size" bytes.
    // For example, suppose:
    //    src       == "abc"
    //    op        == src + 3
    // After V128_Shuffle(), "pattern" will have five copies of "abc"
    // followed by one byte of slop: abcabcabcabcabca.
    //
    // The non-SSE fallback implementation suffers from store-forwarding stalls
    // because its loads and stores partly overlap. By expanding the pattern
    // in-place, we avoid the penalty.

    // Typically, the op_limit is the gating factor so try to simplify the loop
    // based on that.
    if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
      auto pattern_and_reshuffle_mask =
          LoadPatternAndReshuffleMask(src, pattern_size);
      V128 pattern = pattern_and_reshuffle_mask.first;
      V128 reshuffle_mask = pattern_and_reshuffle_mask.second;

      // There is at least one, and at most four 16-byte blocks. Writing four
      // conditionals instead of a loop allows FDO to layout the code with
      // respect to the actual probabilities of each length.
      // TODO: Replace with loop with trip count hint.
      V128_StoreU(reinterpret_cast<V128*>(op), pattern);

      if (op + 16 < op_limit) {
        pattern = V128_Shuffle(pattern, reshuffle_mask);
        V128_StoreU(reinterpret_cast<V128*>(op + 16), pattern);
      }
      if (op + 32 < op_limit) {
        pattern = V128_Shuffle(pattern, reshuffle_mask);
        V128_StoreU(reinterpret_cast<V128*>(op + 32), pattern);
      }
      if (op + 48 < op_limit) {
        pattern = V128_Shuffle(pattern, reshuffle_mask);
        V128_StoreU(reinterpret_cast<V128*>(op + 48), pattern);
      }
      return op_limit;
    }
    char* const op_end = buf_limit - 15;
    if (SNAPPY_PREDICT_TRUE(op < op_end)) {
      auto pattern_and_reshuffle_mask =
          LoadPatternAndReshuffleMask(src, pattern_size);
      V128 pattern = pattern_and_reshuffle_mask.first;
      V128 reshuffle_mask = pattern_and_reshuffle_mask.second;

      // This code path is relatively cold however so we save code size
      // by avoiding unrolling and vectorizing.
      //
      // TODO: Remove pragma when cold regions don't get
      // vectorized or unrolled.
#ifdef __clang__
#pragma clang loop unroll(disable)
#endif
      do {
        V128_StoreU(reinterpret_cast<V128*>(op), pattern);
        pattern = V128_Shuffle(pattern, reshuffle_mask);
        op += 16;
      } while (SNAPPY_PREDICT_TRUE(op < op_end));
    }
    return IncrementalCopySlow(op - pattern_size, op, op_limit);
#else   // !SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
    // If plenty of buffer space remains, expand the pattern to at least 8
    // bytes. The way the following loop is written, we need 8 bytes of buffer
    // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10
    // bytes if pattern_size is 2. Precisely encoding that is probably not
    // worthwhile; instead, invoke the slow path if we cannot write 11 bytes
    // (because 11 are required in the worst case).
    if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 11)) {
      while (pattern_size < 8) {
        UnalignedCopy64(src, op);
        op += pattern_size;
        pattern_size *= 2;
      }
      if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
    } else {
      return IncrementalCopySlow(src, op, op_limit);
    }
#endif  // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
  }
  assert(pattern_size >= big_pattern_size_lower_bound);
  constexpr bool use_16bytes_chunk = big_pattern_size_lower_bound == 16;

  // Copy 1x 16 bytes (or 2x 8 bytes in non-SSE) at a time. Because op - src can
  // be < 16 in non-SSE, a single UnalignedCopy128 might overwrite data in op.
  // UnalignedCopy64 is safe because expanding the pattern to at least 8 bytes
  // guarantees that op - src >= 8.
|
|
|
//
|
|
|
|
// Typically, the op_limit is the gating factor so try to simplify the loop
|
|
|
|
// based on that.
|
2020-12-09 02:27:22 +00:00
|
|
|
if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
|
2020-01-10 15:41:30 +00:00
|
|
|
// There is at least one, and at most four 16-byte blocks. Writing four
|
|
|
|
// conditionals instead of a loop allows FDO to layout the code with respect
|
|
|
|
// to the actual probabilities of each length.
|
|
|
|
// TODO: Replace with loop with trip count hint.
|
2020-12-09 02:27:22 +00:00
|
|
|
ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
|
2020-01-10 15:41:30 +00:00
|
|
|
if (op + 16 < op_limit) {
|
2020-12-09 02:27:22 +00:00
|
|
|
ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 16, op + 16);
|
2020-01-10 15:41:30 +00:00
|
|
|
}
|
|
|
|
if (op + 32 < op_limit) {
|
2020-12-09 02:27:22 +00:00
|
|
|
ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 32, op + 32);
|
2020-01-10 15:41:30 +00:00
|
|
|
}
|
|
|
|
if (op + 48 < op_limit) {
|
2020-12-09 02:27:22 +00:00
|
|
|
ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 48, op + 48);
|
2020-01-10 15:41:30 +00:00
|
|
|
}
|
|
|
|
return op_limit;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fall back to doing as much as we can with the available slop in the
|
|
|
|
// buffer. This code path is relatively cold however so we save code size by
|
|
|
|
// avoiding unrolling and vectorizing.
|
|
|
|
//
|
2019-05-13 16:30:17 +00:00
|
|
|
// TODO: Remove pragma when when cold regions don't get vectorized
|
|
|
|
// or unrolled.
|
2020-12-14 09:58:26 +00:00
|
|
|
#ifdef __clang__
|
|
|
|
#pragma clang loop unroll(disable)
|
|
|
|
#endif
|
2020-10-30 17:37:07 +00:00
|
|
|
for (char* op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
|
2020-12-09 02:27:22 +00:00
|
|
|
ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
2020-10-30 17:37:07 +00:00
|
|
|
if (op >= op_limit) return op_limit;
|
|
|
|
|
2017-01-27 08:10:36 +00:00
|
|
|
// We only take this branch if we didn't have enough slop and we can do a
|
|
|
|
// single 8 byte copy.
|
2017-07-28 21:31:04 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) {
|
2012-02-21 17:02:17 +00:00
|
|
|
UnalignedCopy64(src, op);
|
2011-03-18 17:14:15 +00:00
|
|
|
src += 8;
|
|
|
|
op += 8;
|
|
|
|
}
|
2017-01-27 08:10:36 +00:00
|
|
|
return IncrementalCopySlow(src, op, op_limit);
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
|
2013-06-14 21:42:26 +00:00
|
|
|
} // namespace
|
|
|
|
|
2018-12-04 01:27:56 +00:00
|
|
|

template <bool allow_fast_path>
static inline char* EmitLiteral(char* op, const char* literal, int len) {
  // The vast majority of copies are below 16 bytes, for which a
  // call to std::memcpy() is overkill. This fast path can sometimes
  // copy up to 15 bytes too much, but that is okay in the
  // main loop, since we have a bit to go on for both sides:
  //
  //   - The input will always have kInputMarginBytes = 15 extra
  //     available bytes, as long as we're in the main loop, and
  //     if not, allow_fast_path = false.
  //   - The output will always have 32 spare bytes (see
  //     MaxCompressedLength).
  assert(len > 0);  // Zero-length literals are disallowed
  int n = len - 1;
  if (allow_fast_path && len <= 16) {
    // Fits in tag byte
    *op++ = LITERAL | (n << 2);

    UnalignedCopy128(literal, op);
    return op + len;
  }

  if (n < 60) {
    // Fits in tag byte
    *op++ = LITERAL | (n << 2);
  } else {
    int count = (Bits::Log2Floor(n) >> 3) + 1;
    assert(count >= 1);
    assert(count <= 4);
    *op++ = LITERAL | ((59 + count) << 2);
    // Encode in upcoming bytes.
    // Write 4 bytes, though we may care about only 1 of them. The output buffer
    // is guaranteed to have at least 3 more spaces left as 'len >= 61' holds
    // here and there is a std::memcpy() of size 'len' below.
    LittleEndian::Store32(op, n);
    op += count;
  }
  // When allow_fast_path is true, we can overwrite up to 16 bytes.
  if (allow_fast_path) {
    char* destination = op;
    const char* source = literal;
    const char* end = destination + len;
    do {
      std::memcpy(destination, source, 16);
      destination += 16;
      source += 16;
    } while (destination < end);
  } else {
    std::memcpy(op, literal, len);
  }
  return op + len;
}
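
// For illustration (an added note, not in the original source): a 5-byte
// literal has n == 4, so EmitLiteral writes the single tag byte
// LITERAL | (4 << 2) followed by the 5 literal bytes. A 1000-byte literal has
// n == 999, so count == 2 and the tag byte LITERAL | ((59 + 2) << 2) is
// followed by the length n stored in two little-endian bytes, then the
// literal bytes themselves.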

template <bool len_less_than_12>
static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
  assert(len <= 64);
  assert(len >= 4);
  assert(offset < 65536);
  assert(len_less_than_12 == (len < 12));

  if (len_less_than_12) {
    uint32_t u = (len << 2) + (offset << 8);
    uint32_t copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0);
    uint32_t copy2 = COPY_2_BYTE_OFFSET - (1 << 2);
    // It turns out that offset < 2048 is a difficult-to-predict branch.
    // `perf record` shows this is the highest percentage of branch misses in
    // benchmarks. This code produces branch-free code; the data dependency
    // chain that bottlenecks the throughput is so long that a few extra
    // instructions are completely free (IPC << 6 because of data deps).
    u += offset < 2048 ? copy1 : copy2;
    LittleEndian::Store32(op, u);
    op += offset < 2048 ? 2 : 3;
  } else {
    // Write 4 bytes, though we only care about 3 of them. The output buffer
    // is required to have some slack, so the extra byte won't overrun it.
    uint32_t u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
    LittleEndian::Store32(op, u);
    op += 3;
  }
  return op;
}
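
// For illustration (an added note, not in the original source): with
// len == 7 and offset == 100 the branch-free path above takes the copy-1
// form: two bytes are written, a tag holding len - 4 == 3 plus the top three
// offset bits, followed by the low eight offset bits. With offset == 4000 the
// copy-2 form is chosen instead: a tag holding len - 1 plus a two-byte
// little-endian offset, three bytes in total.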
|
|
|
|
|
2018-12-04 01:27:56 +00:00
|
|
|
template <bool len_less_than_12>
|
|
|
|
static inline char* EmitCopy(char* op, size_t offset, size_t len) {
|
2016-06-28 18:53:11 +00:00
|
|
|
assert(len_less_than_12 == (len < 12));
|
|
|
|
if (len_less_than_12) {
|
2018-12-04 01:27:56 +00:00
|
|
|
return EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
|
2016-06-28 18:53:11 +00:00
|
|
|
} else {
|
|
|
|
// A special case for len <= 64 might help, but so far measurements suggest
|
|
|
|
// it's in the noise.
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2016-06-28 18:53:11 +00:00
|
|
|
// Emit 64 byte copies but make sure to keep at least four bytes reserved.
|
2017-07-28 21:31:04 +00:00
|
|
|
while (SNAPPY_PREDICT_FALSE(len >= 68)) {
|
2018-12-04 01:27:56 +00:00
|
|
|
op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 64);
|
2016-06-28 18:53:11 +00:00
|
|
|
len -= 64;
|
|
|
|
}
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2016-06-28 18:53:11 +00:00
|
|
|
// One or two copies will now finish the job.
|
|
|
|
if (len > 64) {
|
2018-12-04 01:27:56 +00:00
|
|
|
op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 60);
|
2016-06-28 18:53:11 +00:00
|
|
|
len -= 60;
|
|
|
|
}
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2016-06-28 18:53:11 +00:00
|
|
|
// Emit remainder.
|
2018-12-04 01:27:56 +00:00
|
|
|
if (len < 12) {
|
|
|
|
op = EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
|
|
|
|
} else {
|
|
|
|
op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, len);
|
|
|
|
}
|
2016-06-28 18:53:11 +00:00
|
|
|
return op;
|
|
|
|
}
|
|
|
|
}
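// Worked example (illustrative): a copy of length 130 is emitted as
// 64 + 60 + 6. The first loop peels off one 64-byte copy (130 >= 68),
// leaving 66; since 66 > 64, a 60-byte copy follows, leaving 6, which is
// emitted with the len < 12 fast form. The "keep at least four bytes
// reserved" rule is what prevents peeling 64 out of 66 and leaving an
// illegal 2-byte remainder.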
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t v = 0;
|
2011-03-18 17:14:15 +00:00
|
|
|
const char* limit = start + n;
|
|
|
|
if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
|
|
|
|
*result = v;
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
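// A minimal usage sketch (illustrative; `compressed` and `compressed_size`
// are hypothetical names for a buffer holding snappy-compressed data):
//
//   size_t uncompressed_length;
//   if (GetUncompressedLength(compressed, compressed_size,
//                             &uncompressed_length)) {
//     // uncompressed_length now holds the value of the varint32 prefix.
//   }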
|
|
|
|
|
2018-10-16 19:28:52 +00:00
|
|
|
namespace {
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t CalculateTableSize(uint32_t input_size) {
|
2019-07-11 00:38:22 +00:00
|
|
|
static_assert(
|
|
|
|
kMaxHashTableSize >= kMinHashTableSize,
|
|
|
|
"kMaxHashTableSize should be greater or equal to kMinHashTableSize.");
|
2018-10-16 19:28:52 +00:00
|
|
|
if (input_size > kMaxHashTableSize) {
|
|
|
|
return kMaxHashTableSize;
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
2019-07-11 00:38:22 +00:00
|
|
|
if (input_size < kMinHashTableSize) {
|
|
|
|
return kMinHashTableSize;
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
2019-01-06 19:48:31 +00:00
|
|
|
// This is equivalent to Log2Ceiling(input_size), assuming input_size > 1.
|
|
|
|
// 2 << Log2Floor(x - 1) is equivalent to 1 << (1 + Log2Floor(x - 1)).
|
|
|
|
return 2u << Bits::Log2Floor(input_size - 1);
|
2018-10-16 19:28:52 +00:00
|
|
|
}
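// Worked example (illustrative, assuming the value lies between
// kMinHashTableSize and kMaxHashTableSize): for input_size = 3000,
// Log2Floor(2999) == 11, so the function returns 2 << 11 == 4096, the
// smallest power of two that is >= 3000.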
|
|
|
|
} // namespace
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2018-10-16 19:28:52 +00:00
|
|
|
namespace internal {
|
|
|
|
WorkingMemory::WorkingMemory(size_t input_size) {
|
|
|
|
const size_t max_fragment_size = std::min(input_size, kBlockSize);
|
|
|
|
const size_t table_size = CalculateTableSize(max_fragment_size);
|
|
|
|
size_ = table_size * sizeof(*table_) + max_fragment_size +
|
|
|
|
MaxCompressedLength(max_fragment_size);
|
|
|
|
mem_ = std::allocator<char>().allocate(size_);
|
2020-04-12 20:03:50 +00:00
|
|
|
table_ = reinterpret_cast<uint16_t*>(mem_);
|
2018-10-16 19:28:52 +00:00
|
|
|
input_ = mem_ + table_size * sizeof(*table_);
|
|
|
|
output_ = input_ + max_fragment_size;
|
|
|
|
}
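// The single allocation is laid out as three consecutive regions:
//
//   mem_    -> [ hash table: table_size * sizeof(uint16_t) bytes ]
//   input_  -> [ input scratch: max_fragment_size bytes ]
//   output_ -> [ output scratch: MaxCompressedLength(max_fragment_size) bytes ]
//
// so a single allocate/deallocate pair covers everything the compressor
// needs for one fragment.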
|
|
|
|
|
|
|
|
WorkingMemory::~WorkingMemory() {
|
|
|
|
std::allocator<char>().deallocate(mem_, size_);
|
|
|
|
}
|
|
|
|
|
2020-04-12 20:03:50 +00:00
|
|
|
uint16_t* WorkingMemory::GetHashTable(size_t fragment_size,
|
2020-10-30 17:37:07 +00:00
|
|
|
int* table_size) const {
|
2018-10-16 19:28:52 +00:00
|
|
|
const size_t htsize = CalculateTableSize(fragment_size);
|
|
|
|
memset(table_, 0, htsize * sizeof(*table_));
|
2011-03-18 17:14:15 +00:00
|
|
|
*table_size = htsize;
|
2018-10-16 19:28:52 +00:00
|
|
|
return table_;
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
} // end namespace internal
|
|
|
|
|
|
|
|
// Flat array compression that does not emit the "uncompressed length"
|
|
|
|
// prefix. Compresses "input" string to the "*op" buffer.
|
|
|
|
//
|
|
|
|
// REQUIRES: "input" is at most "kBlockSize" bytes long.
|
|
|
|
// REQUIRES: "op" points to an array of memory that is at least
|
|
|
|
// "MaxCompressedLength(input.size())" in size.
|
|
|
|
// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
|
|
|
|
// REQUIRES: "table_size" is a power of two
|
|
|
|
//
|
|
|
|
// Returns an "end" pointer into "op" buffer.
|
|
|
|
// "end - op" is the compressed size of "input".
|
|
|
|
namespace internal {
|
2020-10-30 17:37:07 +00:00
|
|
|
char* CompressFragment(const char* input, size_t input_size, char* op,
|
|
|
|
uint16_t* table, const int table_size) {
|
2011-03-18 17:14:15 +00:00
|
|
|
// "ip" is the input pointer, and "op" is the output pointer.
|
|
|
|
const char* ip = input;
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(input_size <= kBlockSize);
|
2018-08-16 17:44:34 +00:00
|
|
|
assert((table_size & (table_size - 1)) == 0);  // table must be a power of two
|
2022-11-03 20:36:33 +00:00
|
|
|
const uint32_t mask = 2 * (table_size - 1);
|
2011-03-18 17:14:15 +00:00
|
|
|
const char* ip_end = input + input_size;
|
|
|
|
const char* base_ip = ip;
|
|
|
|
|
2012-01-04 13:10:46 +00:00
|
|
|
const size_t kInputMarginBytes = 15;
|
2017-07-28 21:31:04 +00:00
|
|
|
if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
|
2011-03-18 17:14:15 +00:00
|
|
|
const char* ip_limit = input + input_size - kInputMarginBytes;
|
|
|
|
|
2020-04-12 20:03:50 +00:00
|
|
|
for (uint32_t preload = LittleEndian::Load32(ip + 1);;) {
|
2020-03-31 02:46:46 +00:00
|
|
|
// Bytes in [next_emit, ip) will be emitted as literal bytes. Or
|
|
|
|
// [next_emit, ip_end) after the main loop.
|
|
|
|
const char* next_emit = ip++;
|
2020-04-12 20:03:50 +00:00
|
|
|
uint64_t data = LittleEndian::Load64(ip);
|
2011-03-18 17:14:15 +00:00
|
|
|
// The body of this loop calls EmitLiteral once and then EmitCopy one or
|
|
|
|
// more times. (The exception is that when we're close to exhausting
|
|
|
|
// the input we goto emit_remainder.)
|
|
|
|
//
|
|
|
|
// In the first iteration of this loop we're just starting, so
|
|
|
|
// there's nothing to copy, and calling EmitLiteral once is
|
|
|
|
// necessary. And we only start a new iteration when the
|
|
|
|
// current iteration has determined that a call to EmitLiteral will
|
|
|
|
// precede the next call to EmitCopy (if any).
|
|
|
|
//
|
|
|
|
// Step 1: Scan forward in the input looking for a 4-byte-long match.
|
|
|
|
// If we get close to exhausting the input then goto emit_remainder.
|
|
|
|
//
|
|
|
|
// Heuristic match skipping: If 32 bytes are scanned with no matches
|
|
|
|
// found, start looking only at every other byte. If 32 more bytes are
|
2016-04-05 09:50:26 +00:00
|
|
|
// scanned (or skipped), look at every third byte, etc. When a match is
|
|
|
|
// found, immediately go back to looking at every byte. This is a small
|
|
|
|
// loss (~5% performance, ~0.1% density) for compressible data due to more
|
2011-03-18 17:14:15 +00:00
|
|
|
// bookkeeping, but for non-compressible data (such as JPEG) it's a huge
|
|
|
|
// win since the compressor quickly "realizes" the data is incompressible
|
|
|
|
// and doesn't bother looking for matches everywhere.
|
|
|
|
//
|
|
|
|
// The "skip" variable keeps track of how many bytes there are since the
|
|
|
|
// last match; dividing it by 32 (i.e. right-shifting by five) gives the
|
|
|
|
// number of bytes to move ahead for each iteration.
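// For example (illustrative): skip starts at 32, so the stride
// (skip >> 5) is initially 1 byte. Each failed lookup adds the current
// stride to skip, so the stride grows roughly geometrically and long
// incompressible stretches are stepped over in ever larger hops. Once a
// match is emitted, the outer loop re-initializes skip to 32 and the scan
// is back to examining (nearly) every byte.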
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t skip = 32;
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
const char* candidate;
|
2020-03-25 15:24:14 +00:00
|
|
|
if (ip_limit - ip >= 16) {
|
|
|
|
auto delta = ip - base_ip;
|
2020-05-04 12:31:03 +00:00
|
|
|
for (int j = 0; j < 4; ++j) {
|
|
|
|
for (int k = 0; k < 4; ++k) {
|
2020-03-25 15:24:14 +00:00
|
|
|
int i = 4 * j + k;
|
2020-03-31 02:46:46 +00:00
|
|
|
// These for-loops are meant to be unrolled. So we can freely
|
|
|
|
// special case the first iteration to use the value already
|
|
|
|
// loaded in preload.
|
2020-05-05 16:13:04 +00:00
|
|
|
uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data);
|
2020-03-31 02:46:46 +00:00
|
|
|
assert(dword == LittleEndian::Load32(ip + i));
|
2022-11-03 20:36:33 +00:00
|
|
|
uint16_t* table_entry = TableEntry(table, dword, mask);
|
|
|
|
candidate = base_ip + *table_entry;
|
2020-03-25 15:24:14 +00:00
|
|
|
assert(candidate >= base_ip);
|
|
|
|
assert(candidate < ip + i);
|
2022-11-03 20:36:33 +00:00
|
|
|
*table_entry = delta + i;
|
2020-03-31 02:46:46 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) {
|
2020-03-25 15:24:14 +00:00
|
|
|
*op = LITERAL | (i << 2);
|
|
|
|
UnalignedCopy128(next_emit, op + 1);
|
|
|
|
ip += i;
|
|
|
|
op = op + i + 2;
|
|
|
|
goto emit_match;
|
|
|
|
}
|
|
|
|
data >>= 8;
|
|
|
|
}
|
|
|
|
data = LittleEndian::Load64(ip + 4 * j + 4);
|
|
|
|
}
|
|
|
|
ip += 16;
|
|
|
|
skip += 16;
|
|
|
|
}
|
|
|
|
while (true) {
|
2020-04-12 20:03:50 +00:00
|
|
|
assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
|
2022-11-03 20:36:33 +00:00
|
|
|
uint16_t* table_entry = TableEntry(table, data, mask);
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t bytes_between_hash_lookups = skip >> 5;
|
2016-04-05 09:50:26 +00:00
|
|
|
skip += bytes_between_hash_lookups;
|
2020-03-25 15:24:14 +00:00
|
|
|
const char* next_ip = ip + bytes_between_hash_lookups;
|
2017-07-28 21:31:04 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
|
2020-03-31 02:46:46 +00:00
|
|
|
ip = next_emit;
|
2011-03-18 17:14:15 +00:00
|
|
|
goto emit_remainder;
|
|
|
|
}
|
2022-11-03 20:36:33 +00:00
|
|
|
candidate = base_ip + *table_entry;
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(candidate >= base_ip);
|
|
|
|
assert(candidate < ip);
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2022-11-03 20:36:33 +00:00
|
|
|
*table_entry = ip - base_ip;
|
2020-04-12 20:03:50 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
|
2020-03-25 15:24:14 +00:00
|
|
|
LittleEndian::Load32(candidate))) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
data = LittleEndian::Load32(next_ip);
|
|
|
|
ip = next_ip;
|
|
|
|
}
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
// Step 2: A 4-byte match has been found. We'll later see if more
|
|
|
|
// than 4 bytes match. But, prior to the match, input
|
|
|
|
// bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(next_emit + 16 <= ip_end);
|
2018-12-04 01:27:56 +00:00
|
|
|
op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, ip - next_emit);
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
// Step 3: Call EmitCopy, and then see if another EmitCopy could
|
|
|
|
// be our next move. Repeat until we find no match for the
|
|
|
|
// input immediately after what was consumed by the last EmitCopy call.
|
|
|
|
//
|
|
|
|
// If we exit this loop normally then we need to call EmitLiteral next,
|
|
|
|
// though we don't yet know how big the literal will be. We handle that
|
|
|
|
// by proceeding to the next iteration of the main loop. We also can exit
|
|
|
|
// this loop via goto if we get close to exhausting the input.
|
2020-03-25 15:24:14 +00:00
|
|
|
emit_match:
|
2011-03-18 17:14:15 +00:00
|
|
|
do {
|
|
|
|
// We have a 4-byte match at ip, and no need to emit any
|
|
|
|
// "literal bytes" prior to ip.
|
|
|
|
const char* base = ip;
|
2016-11-28 16:49:41 +00:00
|
|
|
std::pair<size_t, bool> p =
|
2020-03-27 23:17:50 +00:00
|
|
|
FindMatchLength(candidate + 4, ip + 4, ip_end, &data);
|
2016-06-28 18:53:11 +00:00
|
|
|
size_t matched = 4 + p.first;
|
2011-03-18 17:14:15 +00:00
|
|
|
ip += matched;
|
2012-01-04 13:10:46 +00:00
|
|
|
size_t offset = base - candidate;
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(0 == memcmp(base, candidate, matched));
|
2018-12-04 01:27:56 +00:00
|
|
|
if (p.second) {
|
|
|
|
op = EmitCopy</*len_less_than_12=*/true>(op, offset, matched);
|
|
|
|
} else {
|
|
|
|
op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched);
|
|
|
|
}
|
2017-07-28 21:31:04 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
|
2011-03-18 17:14:15 +00:00
|
|
|
goto emit_remainder;
|
|
|
|
}
|
2020-03-31 02:46:46 +00:00
|
|
|
// Expect 5 bytes to match
|
|
|
|
assert((data & 0xFFFFFFFFFF) ==
|
|
|
|
(LittleEndian::Load64(ip) & 0xFFFFFFFFFF));
|
2016-06-28 18:53:11 +00:00
|
|
|
// We are now looking for a 4-byte match again. We read
|
2022-11-03 20:36:33 +00:00
|
|
|
// table[Hash(ip, mask)] for that. To improve compression,
|
2020-11-09 23:32:45 +00:00
|
|
|
// we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)].
|
2022-11-03 20:36:33 +00:00
|
|
|
*TableEntry(table, LittleEndian::Load32(ip - 1), mask) =
|
|
|
|
ip - base_ip - 1;
|
|
|
|
uint16_t* table_entry = TableEntry(table, data, mask);
|
|
|
|
candidate = base_ip + *table_entry;
|
|
|
|
*table_entry = ip - base_ip;
|
2020-03-31 02:46:46 +00:00
|
|
|
// Measurements on the benchmarks have shown the following probabilities
|
|
|
|
// for the loop to exit (i.e. the avg. number of iterations is its reciprocal).
|
|
|
|
// BM_Flat/6 txt1 p = 0.3-0.4
|
|
|
|
// BM_Flat/7 txt2 p = 0.35
|
|
|
|
// BM_Flat/8 txt3 p = 0.3-0.4
|
|
|
|
// BM_Flat/9 txt4 p = 0.34-0.4
|
|
|
|
// BM_Flat/10 pb p = 0.4
|
|
|
|
// BM_Flat/11 gaviota p = 0.1
|
|
|
|
// BM_Flat/12 cp p = 0.5
|
|
|
|
// BM_Flat/13 c p = 0.3
|
2020-04-12 20:03:50 +00:00
|
|
|
} while (static_cast<uint32_t>(data) == LittleEndian::Load32(candidate));
|
2020-03-31 02:46:46 +00:00
|
|
|
// Because the least significant 5 bytes matched, we can utilize data
|
|
|
|
// for the next iteration.
|
|
|
|
preload = data >> 8;
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-04-03 09:40:00 +00:00
|
|
|
emit_remainder:
|
|
|
|
// Emit the remaining bytes as a literal
|
|
|
|
if (ip < ip_end) {
|
|
|
|
op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip);
|
|
|
|
}
|
|
|
|
|
|
|
|
return op;
|
|
|
|
}
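// CompressFragmentDoubleHash below is a variant of CompressFragment that
// trades some compression speed for density: it keeps two hash tables,
// `table` keyed on 4-byte loads and `table2` keyed on 8-byte loads, and
// prefers the 8-byte table so that longer matches are found first. Its
// match skipping is also far more conservative (skip starts at 512 and the
// stride is skip >> 9), so it gives up on hard-to-compress regions much
// more slowly.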
|
|
|
|
|
|
|
|
char* CompressFragmentDoubleHash(const char* input, size_t input_size, char* op,
|
|
|
|
uint16_t* table, const int table_size,
|
|
|
|
uint16_t* table2, const int table_size2) {
|
2024-04-04 18:36:37 +00:00
|
|
|
(void)table_size2;
|
|
|
|
assert(table_size == table_size2);
|
2024-04-03 09:40:00 +00:00
|
|
|
// "ip" is the input pointer, and "op" is the output pointer.
|
|
|
|
const char* ip = input;
|
|
|
|
assert(input_size <= kBlockSize);
|
|
|
|
assert((table_size & (table_size - 1)) == 0);  // table must be a power of two
|
|
|
|
const uint32_t mask = 2 * (table_size - 1);
|
|
|
|
const char* ip_end = input + input_size;
|
|
|
|
const char* base_ip = ip;
|
|
|
|
|
|
|
|
const size_t kInputMarginBytes = 15;
|
|
|
|
if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
|
|
|
|
const char* ip_limit = input + input_size - kInputMarginBytes;
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
const char* next_emit = ip++;
|
|
|
|
uint64_t data = LittleEndian::Load64(ip);
|
|
|
|
uint32_t skip = 512;
|
|
|
|
|
|
|
|
const char* candidate;
|
|
|
|
uint32_t candidate_length;
|
|
|
|
while (true) {
|
|
|
|
assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
|
|
|
|
uint16_t* table_entry2 = TableEntry8ByteMatch(table2, data, mask);
|
|
|
|
uint32_t bytes_between_hash_lookups = skip >> 9;
|
|
|
|
skip++;
|
|
|
|
const char* next_ip = ip + bytes_between_hash_lookups;
|
|
|
|
if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
|
|
|
|
ip = next_emit;
|
|
|
|
goto emit_remainder;
|
|
|
|
}
|
|
|
|
candidate = base_ip + *table_entry2;
|
|
|
|
assert(candidate >= base_ip);
|
|
|
|
assert(candidate < ip);
|
|
|
|
|
|
|
|
*table_entry2 = ip - base_ip;
|
|
|
|
if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
|
|
|
|
LittleEndian::Load32(candidate))) {
|
|
|
|
candidate_length =
|
|
|
|
FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint16_t* table_entry = TableEntry4ByteMatch(table, data, mask);
|
|
|
|
candidate = base_ip + *table_entry;
|
|
|
|
assert(candidate >= base_ip);
|
|
|
|
assert(candidate < ip);
|
|
|
|
|
|
|
|
*table_entry = ip - base_ip;
|
|
|
|
if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
|
|
|
|
LittleEndian::Load32(candidate))) {
|
|
|
|
candidate_length =
|
|
|
|
FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4;
|
|
|
|
table_entry2 =
|
|
|
|
TableEntry8ByteMatch(table2, LittleEndian::Load64(ip + 1), mask);
|
|
|
|
auto candidate2 = base_ip + *table_entry2;
|
|
|
|
size_t candidate_length2 =
|
|
|
|
FindMatchLengthPlain(candidate2, ip + 1, ip_end);
|
|
|
|
if (candidate_length2 > candidate_length) {
|
|
|
|
*table_entry2 = ip - base_ip;
|
|
|
|
candidate = candidate2;
|
|
|
|
candidate_length = candidate_length2;
|
|
|
|
++ip;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
data = LittleEndian::Load64(next_ip);
|
|
|
|
ip = next_ip;
|
|
|
|
}
|
|
|
|
// Backtrack while the preceding bytes also match, extending the match backwards.
|
|
|
|
while (ip > next_emit && candidate > base_ip &&
|
|
|
|
*(ip - 1) == *(candidate - 1)) {
|
|
|
|
--ip;
|
|
|
|
--candidate;
|
|
|
|
++candidate_length;
|
|
|
|
}
|
|
|
|
*TableEntry8ByteMatch(table2, LittleEndian::Load64(ip + 1), mask) =
|
|
|
|
ip - base_ip + 1;
|
|
|
|
*TableEntry8ByteMatch(table2, LittleEndian::Load64(ip + 2), mask) =
|
|
|
|
ip - base_ip + 2;
|
|
|
|
*TableEntry4ByteMatch(table, LittleEndian::Load32(ip + 1), mask) =
|
|
|
|
ip - base_ip + 1;
|
|
|
|
// Step 2: A 4-byte or 8-byte match has been found.
|
|
|
|
// We'll later see if more than 4 bytes match. But, prior to the match,
|
|
|
|
// input bytes [next_emit, ip) are unmatched. Emit them as
|
|
|
|
// "literal bytes."
|
|
|
|
assert(next_emit + 16 <= ip_end);
|
|
|
|
if (ip - next_emit > 0) {
|
|
|
|
op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit,
|
|
|
|
ip - next_emit);
|
|
|
|
}
|
|
|
|
// Step 3: Call EmitCopy, and then see if another EmitCopy could
|
|
|
|
// be our next move. Repeat until we find no match for the
|
|
|
|
// input immediately after what was consumed by the last EmitCopy call.
|
|
|
|
//
|
|
|
|
// If we exit this loop normally then we need to call EmitLiteral next,
|
|
|
|
// though we don't yet know how big the literal will be. We handle that
|
|
|
|
// by proceeding to the next iteration of the main loop. We also can exit
|
|
|
|
// this loop via goto if we get close to exhausting the input.
|
|
|
|
do {
|
|
|
|
// We have a 4-byte match at ip, and no need to emit any
|
|
|
|
// "literal bytes" prior to ip.
|
|
|
|
const char* base = ip;
|
|
|
|
ip += candidate_length;
|
|
|
|
size_t offset = base - candidate;
|
|
|
|
if (candidate_length < 12) {
|
|
|
|
op =
|
|
|
|
EmitCopy</*len_less_than_12=*/true>(op, offset, candidate_length);
|
|
|
|
} else {
|
|
|
|
op = EmitCopy</*len_less_than_12=*/false>(op, offset,
|
|
|
|
candidate_length);
|
|
|
|
}
|
|
|
|
if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
|
|
|
|
goto emit_remainder;
|
|
|
|
}
|
|
|
|
// We are now looking for a 4-byte match again. We read
|
|
|
|
// table[Hash(ip, mask)] for that. To improve compression,
|
|
|
|
// we also update several previous table entries.
|
|
|
|
if (ip - base_ip > 7) {
|
|
|
|
*TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 7), mask) =
|
|
|
|
ip - base_ip - 7;
|
|
|
|
*TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 4), mask) =
|
|
|
|
ip - base_ip - 4;
|
|
|
|
}
|
|
|
|
*TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 3), mask) =
|
|
|
|
ip - base_ip - 3;
|
|
|
|
*TableEntry8ByteMatch(table2, LittleEndian::Load64(ip - 2), mask) =
|
|
|
|
ip - base_ip - 2;
|
|
|
|
*TableEntry4ByteMatch(table, LittleEndian::Load32(ip - 2), mask) =
|
|
|
|
ip - base_ip - 2;
|
|
|
|
*TableEntry4ByteMatch(table, LittleEndian::Load32(ip - 1), mask) =
|
|
|
|
ip - base_ip - 1;
|
|
|
|
|
|
|
|
uint16_t* table_entry =
|
|
|
|
TableEntry8ByteMatch(table2, LittleEndian::Load64(ip), mask);
|
|
|
|
candidate = base_ip + *table_entry;
|
|
|
|
*table_entry = ip - base_ip;
|
|
|
|
if (LittleEndian::Load32(ip) == LittleEndian::Load32(candidate)) {
|
|
|
|
candidate_length =
|
|
|
|
FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
table_entry =
|
|
|
|
TableEntry4ByteMatch(table, LittleEndian::Load32(ip), mask);
|
|
|
|
candidate = base_ip + *table_entry;
|
|
|
|
*table_entry = ip - base_ip;
|
|
|
|
if (LittleEndian::Load32(ip) == LittleEndian::Load32(candidate)) {
|
|
|
|
candidate_length =
|
|
|
|
FindMatchLengthPlain(candidate + 4, ip + 4, ip_end) + 4;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
} while (true);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-10-30 17:37:07 +00:00
|
|
|
emit_remainder:
|
2011-03-18 17:14:15 +00:00
|
|
|
// Emit the remaining bytes as a literal
|
2020-03-31 02:46:46 +00:00
|
|
|
if (ip < ip_end) {
|
|
|
|
op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip);
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return op;
|
|
|
|
}
|
|
|
|
} // end namespace internal
|
|
|
|
|
2024-01-13 00:27:32 +00:00
|
|
|
static inline void Report(int token, const char* algorithm, size_t
|
|
|
|
compressed_size, size_t uncompressed_size) {
|
2020-05-05 16:13:04 +00:00
|
|
|
// TODO: Switch to [[maybe_unused]] when we can assume C++17.
|
2024-01-13 00:27:32 +00:00
|
|
|
(void)token;
|
2020-05-05 16:13:04 +00:00
|
|
|
(void)algorithm;
|
|
|
|
(void)compressed_size;
|
|
|
|
(void)uncompressed_size;
|
|
|
|
}
|
2017-02-01 16:34:26 +00:00
|
|
|
|
2011-03-18 17:14:15 +00:00
|
|
|
// Signature of output types needed by decompression code.
|
|
|
|
// The decompression code is templatized on a type that obeys this
|
|
|
|
// signature so that we do not pay virtual function call overhead in
|
|
|
|
// the middle of a tight decompression loop.
|
|
|
|
//
|
|
|
|
// class DecompressionWriter {
|
|
|
|
// public:
|
|
|
|
// // Called before decompression
|
|
|
|
// void SetExpectedLength(size_t length);
|
|
|
|
//
|
2020-02-12 18:04:58 +00:00
|
|
|
// // For performance a writer may choose to donate the cursor variable to the
|
|
|
|
// // decompression function. The decompression will inject it in all its
|
|
|
|
// // function calls to the writer. Keeping the important output cursor as a
|
|
|
|
// // function-local stack variable allows the compiler to keep it in a
|
|
|
|
// // register, which greatly aids performance by avoiding loads and stores of
|
|
|
|
// // this variable in the fast path loop iterations.
|
|
|
|
// T GetOutputPtr() const;
|
|
|
|
//
|
|
|
|
// // At end of decompression the loop donates the ownership of the cursor
|
|
|
|
// // variable back to the writer by calling this function.
|
|
|
|
// void SetOutputPtr(T op);
|
|
|
|
//
|
2011-03-18 17:14:15 +00:00
|
|
|
// // Called after decompression
|
|
|
|
// bool CheckLength() const;
|
|
|
|
//
|
|
|
|
// // Called repeatedly during decompression
|
2020-02-12 18:04:58 +00:00
|
|
|
// // Each function gets a pointer to the op (output pointer), which the writer
|
|
|
|
// // can use and update. Note it's important that these functions get fully
|
|
|
|
// // inlined so that no actual address of the local variable needs to be
|
|
|
|
// // taken.
|
|
|
|
// bool Append(const char* ip, size_t length, T* op);
|
2020-04-12 20:03:50 +00:00
|
|
|
// bool AppendFromSelf(uint32_t offset, size_t length, T* op);
|
2011-03-18 17:14:15 +00:00
|
|
|
//
|
2013-06-30 19:24:03 +00:00
|
|
|
// // The rules for how TryFastAppend differs from Append are somewhat
|
|
|
|
// // convoluted:
|
2011-11-23 11:14:17 +00:00
|
|
|
// //
|
2013-06-30 19:24:03 +00:00
|
|
|
// // - TryFastAppend is allowed to decline (return false) at any
|
|
|
|
// // time, for any reason -- just "return false" would be
|
|
|
|
// // a perfectly legal implementation of TryFastAppend.
|
|
|
|
// // The intention is for TryFastAppend to allow a fast path
|
|
|
|
// // in the common case of a small append.
|
|
|
|
// // - TryFastAppend is allowed to read up to <available> bytes
|
|
|
|
// // from the input buffer, whereas Append is allowed to read
|
|
|
|
// // <length>. However, if it returns true, it must leave
|
|
|
|
// // at least five (kMaximumTagLength) bytes in the input buffer
|
|
|
|
// // afterwards, so that there is always enough space to read the
|
|
|
|
// // next tag without checking for a refill.
|
|
|
|
// // - TryFastAppend must always decline (return false)
|
|
|
|
// // if <length> is 61 or more, as in this case the literal length is not
|
|
|
|
// // decoded fully. In practice, this should not be a big problem,
|
|
|
|
// // as it is unlikely that one would implement a fast path accepting
|
|
|
|
// // this much data.
|
2011-11-23 11:14:17 +00:00
|
|
|
// //
|
2020-02-12 18:04:58 +00:00
|
|
|
// bool TryFastAppend(const char* ip, size_t available, size_t length, T* op);
|
2011-11-23 11:14:17 +00:00
|
|
|
// };
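// A minimal sketch of a type obeying this signature (hypothetical and
// deliberately simplified; it omits the bounds checks a real writer must
// perform, and uses T = char*):
//
//   class SketchWriter {
//    public:
//     explicit SketchWriter(char* dst) : start_(dst), op_(dst) {}
//     void SetExpectedLength(size_t len) { expected_ = len; }
//     char* GetOutputPtr() const { return op_; }
//     void SetOutputPtr(char* op) { op_ = op; }
//     bool CheckLength() const {
//       return static_cast<size_t>(op_ - start_) == expected_;
//     }
//     bool Append(const char* ip, size_t len, char** op) {
//       std::memcpy(*op, ip, len);
//       *op += len;
//       return true;
//     }
//     bool TryFastAppend(const char*, size_t, size_t, char**) {
//       return false;  // Declining is always legal.
//     }
//     bool AppendFromSelf(uint32_t offset, size_t len, char** op) {
//       if (offset == 0 || static_cast<size_t>(*op - start_) < offset)
//         return false;
//       const char* src = *op - offset;
//       for (size_t i = 0; i < len; ++i) (*op)[i] = src[i];  // may overlap
//       *op += len;
//       return true;
//     }
//    private:
//     char* start_;
//     char* op_;
//     size_t expected_ = 0;
//   };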
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2021-10-28 04:39:14 +00:00
|
|
|
static inline uint32_t ExtractLowBytes(const uint32_t& v, int n) {
|
2019-01-04 04:39:15 +00:00
|
|
|
assert(n >= 0);
|
|
|
|
assert(n <= 4);
|
2019-04-04 15:17:45 +00:00
|
|
|
#if SNAPPY_HAVE_BMI2
|
2018-12-12 15:14:02 +00:00
|
|
|
return _bzhi_u32(v, 8 * n);
|
|
|
|
#else
|
2020-04-12 20:03:50 +00:00
|
|
|
// This needs to be wider than uint32_t; otherwise `mask << 32` will be
|
2018-08-28 15:47:31 +00:00
|
|
|
// undefined.
|
2020-04-12 20:03:50 +00:00
|
|
|
uint64_t mask = 0xffffffff;
|
2018-12-12 15:14:02 +00:00
|
|
|
return v & ~(mask << (8 * n));
|
|
|
|
#endif
|
2018-08-28 15:47:31 +00:00
|
|
|
}
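// For example, ExtractLowBytes(0x12345678, 2) == 0x5678: with n == 2 only
// the low 16 bits survive, whether they are kept via _bzhi_u32(v, 16) or
// via v & ~(0xffffffffull << 16).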
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2020-04-12 20:03:50 +00:00
|
|
|
static inline bool LeftShiftOverflows(uint8_t value, uint32_t shift) {
|
2019-01-08 21:26:32 +00:00
|
|
|
assert(shift < 32);
|
2020-04-12 20:03:50 +00:00
|
|
|
static const uint8_t masks[] = {
|
2019-01-08 19:31:10 +00:00
|
|
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
|
|
|
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
|
|
|
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
|
|
|
|
0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe};
|
2019-01-08 00:52:08 +00:00
|
|
|
return (value & masks[shift]) != 0;
|
|
|
|
}
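// For example, LeftShiftOverflows(0x80, 24) is false (0x80 << 24 ==
// 0x80000000 still fits in 32 bits), while LeftShiftOverflows(0x80, 25) is
// true because bit 7 would be shifted past bit 31.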
|
|
|
|
|
2020-12-10 00:30:54 +00:00
|
|
|
inline bool Copy64BytesWithPatternExtension(ptrdiff_t dst, size_t offset) {
|
2020-12-15 04:14:06 +00:00
|
|
|
// TODO: Switch to [[maybe_unused]] when we can assume C++17.
|
|
|
|
(void)dst;
|
2020-12-10 00:30:54 +00:00
|
|
|
return offset != 0;
|
|
|
|
}
|
|
|
|
|
2022-06-09 14:13:38 +00:00
|
|
|
// Copies between size bytes and 64 bytes from src to dst. size cannot exceed
|
|
|
|
// 64. More than size bytes, but never exceeding 64, might be copied if doing
|
2022-06-09 14:28:22 +00:00
|
|
|
// so gives better performance. [src, src + size) must not overlap with
|
|
|
|
// [dst, dst + size), but [src, src + 64) may overlap with [dst, dst + 64).
|
2022-06-09 14:13:38 +00:00
|
|
|
void MemCopy64(char* dst, const void* src, size_t size) {
|
2022-10-11 16:00:34 +00:00
|
|
|
// Always copy this many bytes. If that's below size then copy the full 64.
|
2022-06-09 14:13:38 +00:00
|
|
|
constexpr int kShortMemCopy = 32;
|
|
|
|
|
2022-10-11 16:00:34 +00:00
|
|
|
assert(size <= 64);
|
2022-06-09 14:13:38 +00:00
|
|
|
assert(std::less_equal<const void*>()(static_cast<const char*>(src) + size,
|
|
|
|
dst) ||
|
|
|
|
std::less_equal<const void*>()(dst + size, src));
|
|
|
|
|
|
|
|
// We know that src and dst are at least size bytes apart. However, because we
|
|
|
|
// might copy more than size bytes, the copy still might overlap past size.
|
2022-10-11 16:00:34 +00:00
|
|
|
// E.g. if src and dst appear consecutively in memory (src + size >= dst).
|
|
|
|
// TODO: Investigate wider copies on other platforms.
|
|
|
|
#if defined(__x86_64__) && defined(__AVX__)
|
|
|
|
assert(kShortMemCopy <= 32);
|
|
|
|
__m256i data = _mm256_lddqu_si256(static_cast<const __m256i *>(src));
|
|
|
|
_mm256_storeu_si256(reinterpret_cast<__m256i *>(dst), data);
|
|
|
|
// Profiling shows that nearly all copies are short.
|
|
|
|
if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) {
|
|
|
|
data = _mm256_lddqu_si256(static_cast<const __m256i *>(src) + 1);
|
|
|
|
_mm256_storeu_si256(reinterpret_cast<__m256i *>(dst) + 1, data);
|
|
|
|
}
|
|
|
|
#else
|
2022-06-09 14:13:38 +00:00
|
|
|
std::memmove(dst, src, kShortMemCopy);
|
|
|
|
// Profiling shows that nearly all copies are short.
|
|
|
|
if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) {
|
|
|
|
std::memmove(dst + kShortMemCopy,
|
|
|
|
static_cast<const uint8_t*>(src) + kShortMemCopy,
|
2022-10-11 16:00:34 +00:00
|
|
|
64 - kShortMemCopy);
|
2022-06-09 14:13:38 +00:00
|
|
|
}
|
2022-10-11 16:00:34 +00:00
|
|
|
#endif
|
2020-12-10 00:30:54 +00:00
|
|
|
}
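// Usage note: even when size < 64, callers must guarantee 64 readable bytes
// at src and 64 writable bytes at dst, since the short path above always
// moves kShortMemCopy bytes and the long path moves a full 64. The slop
// regions maintained by the decompressor provide exactly that guarantee.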
|
|
|
|
|
2022-06-09 14:13:38 +00:00
|
|
|
void MemCopy64(ptrdiff_t dst, const void* src, size_t size) {
|
2020-12-15 04:14:06 +00:00
|
|
|
// TODO: Switch to [[maybe_unused]] when we can assume C++17.
|
|
|
|
(void)dst;
|
|
|
|
(void)src;
|
|
|
|
(void)size;
|
|
|
|
}
|
2020-12-10 00:30:54 +00:00
|
|
|
|
2023-01-23 17:50:52 +00:00
|
|
|
void ClearDeferred(const void** deferred_src, size_t* deferred_length,
|
|
|
|
uint8_t* safe_source) {
|
|
|
|
*deferred_src = safe_source;
|
|
|
|
*deferred_length = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void DeferMemCopy(const void** deferred_src, size_t* deferred_length,
|
|
|
|
const void* src, size_t length) {
|
|
|
|
*deferred_src = src;
|
|
|
|
*deferred_length = length;
|
|
|
|
}
|
|
|
|
|
2020-12-10 00:30:54 +00:00
|
|
|
SNAPPY_ATTRIBUTE_ALWAYS_INLINE
|
2022-10-05 08:26:41 +00:00
|
|
|
inline size_t AdvanceToNextTagARMOptimized(const uint8_t** ip_p, size_t* tag) {
|
2021-07-02 07:52:56 +00:00
|
|
|
const uint8_t*& ip = *ip_p;
|
|
|
|
// This section is crucial for the throughput of the decompression loop.
|
|
|
|
// The latency of an iteration is fundamentally constrained by the
|
|
|
|
// following data chain on ip.
|
|
|
|
// ip -> c = Load(ip) -> delta1 = (c & 3) -> ip += delta1 or delta2
|
|
|
|
// delta2 = ((c >> 2) + 1) ip++
|
|
|
|
// This is different from the X86 optimizations because ARM has a conditional add
|
|
|
|
// instruction (csinc), which removes several register moves.
|
|
|
|
const size_t tag_type = *tag & 3;
|
|
|
|
const bool is_literal = (tag_type == 0);
|
2021-08-17 08:36:43 +00:00
|
|
|
if (is_literal) {
|
|
|
|
size_t next_literal_tag = (*tag >> 2) + 1;
|
|
|
|
*tag = ip[next_literal_tag];
|
|
|
|
ip += next_literal_tag + 1;
|
|
|
|
} else {
|
|
|
|
*tag = ip[tag_type];
|
|
|
|
ip += tag_type + 1;
|
|
|
|
}
|
2021-07-02 07:52:56 +00:00
|
|
|
return tag_type;
|
|
|
|
}
|
|
|
|
|
|
|
|
SNAPPY_ATTRIBUTE_ALWAYS_INLINE
|
2022-10-05 08:26:41 +00:00
|
|
|
inline size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) {
|
2020-12-10 00:30:54 +00:00
|
|
|
const uint8_t*& ip = *ip_p;
|
|
|
|
// This section is crucial for the throughput of the decompression loop.
|
|
|
|
// The latency of an iteration is fundamentally constrained by the
|
|
|
|
// following data chain on ip.
|
|
|
|
// ip -> c = Load(ip) -> ip1 = ip + 1 + (c & 3) -> ip = ip1 or ip2
|
|
|
|
// ip2 = ip + 2 + (c >> 2)
|
|
|
|
// This amounts to 8 cycles.
|
|
|
|
// 5 (load) + 1 (c & 3) + 1 (lea ip1, [ip + (c & 3) + 1]) + 1 (cmov)
|
|
|
|
size_t literal_len = *tag >> 2;
|
|
|
|
size_t tag_type = *tag;
|
|
|
|
bool is_literal;
|
2022-01-24 09:05:38 +00:00
|
|
|
#if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__)
|
2020-12-10 00:30:54 +00:00
|
|
|
// TODO clang misses the fact that the (c & 3) already correctly
|
|
|
|
// sets the zero flag.
|
|
|
|
asm("and $3, %k[tag_type]\n\t"
|
2022-11-09 05:32:52 +00:00
|
|
|
: [tag_type] "+r"(tag_type), "=@ccz"(is_literal)
|
|
|
|
:: "cc");
|
2020-12-10 00:30:54 +00:00
|
|
|
#else
|
|
|
|
tag_type &= 3;
|
|
|
|
is_literal = (tag_type == 0);
|
|
|
|
#endif
|
|
|
|
// TODO
|
|
|
|
// This code is subtle. Loading the values first and then doing the cmov has
|
|
|
|
// lower latency than doing the cmov on ip and then loading. However, clang would
|
|
|
|
// reorder the loads in an optimization pass; volatile prevents this transformation.
|
|
|
|
// Note that we have enough slop bytes (64) that the loads are always valid.
|
|
|
|
size_t tag_literal =
|
|
|
|
static_cast<const volatile uint8_t*>(ip)[1 + literal_len];
|
|
|
|
size_t tag_copy = static_cast<const volatile uint8_t*>(ip)[tag_type];
|
|
|
|
*tag = is_literal ? tag_literal : tag_copy;
|
|
|
|
const uint8_t* ip_copy = ip + 1 + tag_type;
|
|
|
|
const uint8_t* ip_literal = ip + 2 + literal_len;
|
|
|
|
ip = is_literal ? ip_literal : ip_copy;
|
|
|
|
#if defined(__GNUC__) && defined(__x86_64__)
|
|
|
|
// TODO Clang is "optimizing" zero-extension (a totally free
|
|
|
|
// operation); this means that after the cmov of tag, it emits another movzb
|
|
|
|
// tag, byte(tag). It really matters as it's on the critical chain. This dummy
|
|
|
|
// asm persuades clang to do the zero-extension at the load (where it's automatic),
|
|
|
|
// removing the expensive movzb.
|
|
|
|
asm("" ::"r"(tag_copy));
|
|
|
|
#endif
|
|
|
|
return tag_type;
|
|
|
|
}
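// A straightforward reference version of the two tuned helpers above (the
// name below is illustrative only; the decoder uses the optimized variants).
// Given that *tag holds the current tag byte and ip points just past it, skip
// the bytes the tag keeps inline in the stream and load the next tag byte.
// Copy-4 is not special-cased here; as in the tuned versions, the main loop
// bails out before its result would be used.
inline size_t AdvanceToNextTagReferenceSketch(const uint8_t** ip_p,
                                              size_t* tag) {
  const uint8_t*& ip = *ip_p;
  const size_t tag_type = *tag & 3;
  // Literals keep their (length) data bytes inline; copy-1/copy-2 keep a 1-
  // or 2-byte offset inline. The next tag follows immediately after.
  const size_t inline_bytes = (tag_type == 0) ? (*tag >> 2) + 1 : tag_type;
  *tag = ip[inline_bytes];
  ip += inline_bytes + 1;
  return tag_type;
}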
|
|
|
|
|
|
|
|
// Extracts the offset for copy-1 and copy-2; returns 0 for literals or copy-4.
|
|
|
|
inline uint32_t ExtractOffset(uint32_t val, size_t tag_type) {
|
2021-07-29 13:26:45 +00:00
|
|
|
// For x86 non-static storage works better. For ARM static storage is better.
|
|
|
|
// TODO: Once the array is recognized as a register, improve the
|
|
|
|
// readability for x86.
|
|
|
|
#if defined(__x86_64__)
|
|
|
|
constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
|
|
|
|
uint16_t result;
|
|
|
|
memcpy(&result,
|
|
|
|
reinterpret_cast<const char*>(&kExtractMasksCombined) + 2 * tag_type,
|
|
|
|
sizeof(result));
|
|
|
|
return val & result;
|
2021-08-06 06:46:53 +00:00
|
|
|
#elif defined(__aarch64__)
|
|
|
|
constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
|
2021-08-13 23:24:49 +00:00
|
|
|
return val & static_cast<uint32_t>(
|
|
|
|
(kExtractMasksCombined >> (tag_type * 16)) & 0xFFFF);
|
2021-07-29 13:26:45 +00:00
|
|
|
#else
|
|
|
|
static constexpr uint32_t kExtractMasks[4] = {0, 0xFF, 0xFFFF, 0};
|
|
|
|
return val & kExtractMasks[tag_type];
|
|
|
|
#endif
|
2020-12-10 00:30:54 +00:00
|
|
|
}
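// Worked example of the masking above (a restatement, not extra decoder
// logic): only copy-1 and copy-2 carry offset bytes in the loaded word.
//   ExtractOffset(val, /*tag_type=*/0) == 0              (literal)
//   ExtractOffset(val, /*tag_type=*/1) == (val & 0xFF)   (copy-1, 1 offset byte)
//   ExtractOffset(val, /*tag_type=*/2) == (val & 0xFFFF) (copy-2, 2 offset bytes)
//   ExtractOffset(val, /*tag_type=*/3) == 0              (copy-4, handled elsewhere)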
|
|
|
|
|
2020-11-14 15:27:36 +00:00
|
|
|
// Core decompression loop, when there is enough data available.
|
|
|
|
// Decompresses the input buffer [ip, ip_limit) into the output buffer
|
|
|
|
// [op, op_limit_min_slop). Returns when either we are too close to the end
|
|
|
|
// of the input buffer, we exceed op_limit_min_slop, or an exceptional
|
|
|
|
// tag is encountered (literal of length > 60) or a copy-4.
|
|
|
|
// Returns {ip, op} at the points it stopped decoding.
|
|
|
|
// TODO This function probably does not need to be inlined, as it
|
|
|
|
// should decode large chunks at a time. This allows runtime dispatch to
|
|
|
|
// implementations based on CPU capability (BMI2 / perhaps 32 / 64 byte memcpy).
|
2020-12-10 00:30:54 +00:00
|
|
|
template <typename T>
|
|
|
|
std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless(
|
|
|
|
const uint8_t* ip, const uint8_t* ip_limit, ptrdiff_t op, T op_base,
|
|
|
|
ptrdiff_t op_limit_min_slop) {
|
2023-01-23 17:50:52 +00:00
|
|
|
// If deferred_src is invalid, point it here.
|
|
|
|
uint8_t safe_source[64];
|
|
|
|
const void* deferred_src;
|
|
|
|
size_t deferred_length;
|
|
|
|
ClearDeferred(&deferred_src, &deferred_length, safe_source);
|
|
|
|
|
2020-12-10 00:30:54 +00:00
|
|
|
// We unroll the inner loop twice so we need twice the spare room.
|
|
|
|
op_limit_min_slop -= kSlopBytes;
|
|
|
|
if (2 * (kSlopBytes + 1) < ip_limit - ip && op < op_limit_min_slop) {
|
|
|
|
const uint8_t* const ip_limit_min_slop = ip_limit - 2 * kSlopBytes - 1;
|
2020-11-14 15:27:36 +00:00
|
|
|
ip++;
|
|
|
|
// ip points just past the tag and we touch at most kSlopBytes
|
|
|
|
// in an iteration.
|
2020-12-10 00:30:54 +00:00
|
|
|
size_t tag = ip[-1];
|
2021-08-03 07:36:20 +00:00
|
|
|
#if defined(__clang__) && defined(__aarch64__)
|
|
|
|
// Workaround for https://bugs.llvm.org/show_bug.cgi?id=51317
|
|
|
|
// when loading 1 byte, clang for aarch64 doesn't realize that it (ldrb)
|
|
|
|
// comes with free zero-extension, so clang generates another
|
|
|
|
// 'and xn, xm, 0xff' before it uses that as the offset. This 'and' is
|
|
|
|
// redundant and can be removed by adding this dummy asm, which gives
|
|
|
|
// clang a hint that we're doing the zero-extension at the load.
|
|
|
|
asm("" ::"r"(tag));
|
|
|
|
#endif
|
2020-11-14 15:27:36 +00:00
|
|
|
do {
|
2020-12-10 00:30:54 +00:00
|
|
|
// The throughput is limited by instructions; unrolling the inner loop
|
|
|
|
// twice reduces the number of instructions checking limits and also
|
|
|
|
// leads to reduced mov's.
|
2022-11-18 21:51:44 +00:00
|
|
|
|
2023-01-23 17:50:52 +00:00
|
|
|
SNAPPY_PREFETCH(ip + 128);
|
2020-12-10 00:30:54 +00:00
|
|
|
for (int i = 0; i < 2; i++) {
|
|
|
|
const uint8_t* old_ip = ip;
|
|
|
|
assert(tag == ip[-1]);
|
|
|
|
// For literals tag_type = 0, hence we will always obtain 0 from
|
|
|
|
// ExtractOffset. For literals the offset will thus be kLiteralOffset.
|
2023-03-16 16:20:43 +00:00
|
|
|
ptrdiff_t len_minus_offset = kLengthMinusOffset[tag];
|
|
|
|
uint32_t next;
|
2021-07-02 07:52:56 +00:00
|
|
|
#if defined(__aarch64__)
|
|
|
|
size_t tag_type = AdvanceToNextTagARMOptimized(&ip, &tag);
|
2023-03-16 16:20:43 +00:00
|
|
|
// We never need more than 16 bits. Doing a Load16 allows the compiler
|
|
|
|
// to elide the masking operation in ExtractOffset.
|
|
|
|
next = LittleEndian::Load16(old_ip);
|
2021-07-02 07:52:56 +00:00
|
|
|
#else
|
|
|
|
size_t tag_type = AdvanceToNextTagX86Optimized(&ip, &tag);
|
2023-03-16 16:20:43 +00:00
|
|
|
next = LittleEndian::Load32(old_ip);
|
2021-07-02 07:52:56 +00:00
|
|
|
#endif
|
2023-03-16 16:20:43 +00:00
|
|
|
size_t len = len_minus_offset & 0xFF;
|
|
|
|
ptrdiff_t extracted = ExtractOffset(next, tag_type);
|
|
|
|
ptrdiff_t len_min_offset = len_minus_offset - extracted;
|
|
|
|
if (SNAPPY_PREDICT_FALSE(len_minus_offset > extracted)) {
|
2020-12-10 00:30:54 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(len & 0x80)) {
|
|
|
|
// Exceptional case (long literal or copy 4).
|
|
|
|
// Actually doing the copy here is negatively impacting the main
|
|
|
|
// loop due to the compiler incorrectly allocating a register for
|
|
|
|
// this fallback. Hence we just break.
|
|
|
|
break_loop:
|
|
|
|
ip = old_ip;
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
// Only copy-1 or copy-2 tags can get here.
|
|
|
|
assert(tag_type == 1 || tag_type == 2);
|
2023-01-23 17:50:52 +00:00
|
|
|
std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len;
|
2020-12-10 00:30:54 +00:00
|
|
|
// Guard against copies before the buffer start.
|
2023-01-23 17:50:52 +00:00
|
|
|
// Execute any deferred MemCopy since we write to dst here.
|
|
|
|
MemCopy64(op_base + op, deferred_src, deferred_length);
|
|
|
|
op += deferred_length;
|
|
|
|
ClearDeferred(&deferred_src, &deferred_length, safe_source);
|
2020-12-10 00:30:54 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(delta < 0 ||
|
|
|
|
!Copy64BytesWithPatternExtension(
|
|
|
|
op_base + op, len - len_min_offset))) {
|
|
|
|
goto break_loop;
|
|
|
|
}
|
2023-01-23 17:50:52 +00:00
|
|
|
// We aren't deferring this copy so add length right away.
|
2020-12-10 00:30:54 +00:00
|
|
|
op += len;
|
|
|
|
continue;
|
2020-11-14 15:27:36 +00:00
|
|
|
}
|
2023-01-23 17:50:52 +00:00
|
|
|
std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len;
|
2020-12-10 00:30:54 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(delta < 0)) {
|
|
|
|
// Due to the spurious offset that literals have, this will trigger
|
|
|
|
// at the start of a block when op is still smaller than 256.
|
|
|
|
if (tag_type != 0) goto break_loop;
|
2023-01-23 17:50:52 +00:00
|
|
|
MemCopy64(op_base + op, deferred_src, deferred_length);
|
|
|
|
op += deferred_length;
|
|
|
|
DeferMemCopy(&deferred_src, &deferred_length, old_ip, len);
|
2020-12-10 00:30:54 +00:00
|
|
|
continue;
|
2020-12-09 02:27:22 +00:00
|
|
|
}
|
2020-12-10 00:30:54 +00:00
|
|
|
|
|
|
|
// For copies we need to copy from op_base + delta, for literals
|
|
|
|
// we need to copy from old_ip (the input) instead of from the output.
|
|
|
|
const void* from =
|
|
|
|
tag_type ? reinterpret_cast<void*>(op_base + delta) : old_ip;
|
2023-01-23 17:50:52 +00:00
|
|
|
MemCopy64(op_base + op, deferred_src, deferred_length);
|
|
|
|
op += deferred_length;
|
|
|
|
DeferMemCopy(&deferred_src, &deferred_length, from, len);
|
2020-11-14 15:27:36 +00:00
|
|
|
}
|
2023-01-23 17:50:52 +00:00
|
|
|
} while (ip < ip_limit_min_slop &&
|
2023-07-12 17:12:01 +00:00
|
|
|
static_cast<ptrdiff_t>(op + deferred_length) < op_limit_min_slop);
|
2020-12-10 00:30:54 +00:00
|
|
|
exit:
|
2020-11-14 15:27:36 +00:00
|
|
|
ip--;
|
|
|
|
assert(ip <= ip_limit);
|
|
|
|
}
|
2023-01-23 17:50:52 +00:00
|
|
|
// If we deferred a copy then we can perform it now. If we are up to date then we
|
|
|
|
// might not have enough slop bytes and could run past the end.
|
|
|
|
if (deferred_length) {
|
|
|
|
MemCopy64(op_base + op, deferred_src, deferred_length);
|
|
|
|
op += deferred_length;
|
|
|
|
ClearDeferred(&deferred_src, &deferred_length, safe_source);
|
|
|
|
}
|
2020-11-14 15:27:36 +00:00
|
|
|
return {ip, op};
|
|
|
|
}
|
|
|
|
|
2011-03-18 17:14:15 +00:00
|
|
|
// Helper class for decompression
|
|
|
|
class SnappyDecompressor {
|
|
|
|
private:
|
2020-10-30 17:37:07 +00:00
|
|
|
Source* reader_; // Underlying source of bytes to decompress
|
|
|
|
const char* ip_; // Points to next buffered byte
|
|
|
|
const char* ip_limit_; // Points just past buffered bytes
|
2020-02-12 18:04:58 +00:00
|
|
|
// If ip < ip_limit_min_maxtaglen_ it's safe to read kMaximumTagLength from the
|
|
|
|
// buffer.
|
|
|
|
const char* ip_limit_min_maxtaglen_;
|
2020-10-30 17:37:07 +00:00
|
|
|
uint32_t peeked_; // Bytes peeked from reader (need to skip)
|
|
|
|
bool eof_; // Hit end of input without an error?
|
|
|
|
char scratch_[kMaximumTagLength]; // See RefillTag().
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
// Ensure that all of the tag metadata for the next tag is available
|
|
|
|
// in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
|
|
|
|
// if (ip_limit_ - ip_ < 5).
|
|
|
|
//
|
|
|
|
// Returns true on success, false on error or end of input.
|
|
|
|
bool RefillTag();
|
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
void ResetLimit(const char* ip) {
|
|
|
|
ip_limit_min_maxtaglen_ =
|
|
|
|
ip_limit_ - std::min<ptrdiff_t>(ip_limit_ - ip, kMaximumTagLength - 1);
|
|
|
|
}
|
|
|
|
|
2011-03-18 17:14:15 +00:00
|
|
|
public:
|
|
|
|
explicit SnappyDecompressor(Source* reader)
|
2020-10-30 17:37:07 +00:00
|
|
|
: reader_(reader), ip_(NULL), ip_limit_(NULL), peeked_(0), eof_(false) {}
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
~SnappyDecompressor() {
|
|
|
|
// Advance past any bytes we peeked at from the reader
|
|
|
|
reader_->Skip(peeked_);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns true iff we have hit the end of the input without an error.
|
2020-10-30 17:37:07 +00:00
|
|
|
bool eof() const { return eof_; }
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
// Read the uncompressed length stored at the start of the compressed data.
|
2018-08-16 17:44:34 +00:00
|
|
|
// On success, stores the length in *result and returns true.
|
2011-03-18 17:14:15 +00:00
|
|
|
// On failure, returns false.
|
2020-04-12 20:03:50 +00:00
|
|
|
bool ReadUncompressedLength(uint32_t* result) {
|
2020-10-30 17:37:07 +00:00
|
|
|
assert(ip_ == NULL); // Must not have read anything yet
|
2011-03-18 17:14:15 +00:00
|
|
|
// Length is encoded in 1..5 bytes
|
|
|
|
*result = 0;
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t shift = 0;
|
2011-03-18 17:14:15 +00:00
|
|
|
while (true) {
|
|
|
|
if (shift >= 32) return false;
|
|
|
|
size_t n;
|
|
|
|
const char* ip = reader_->Peek(&n);
|
|
|
|
if (n == 0) return false;
|
|
|
|
const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
|
|
|
|
reader_->Skip(1);
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t val = c & 0x7f;
|
|
|
|
if (LeftShiftOverflows(static_cast<uint8_t>(val), shift)) return false;
|
2016-01-04 11:52:15 +00:00
|
|
|
*result |= val << shift;
|
2011-03-18 17:14:15 +00:00
|
|
|
if (c < 128) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
shift += 7;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
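// Worked example of the 1..5 byte length prefix (illustrative bytes, not
// taken from any particular stream): each byte contributes its low 7 bits,
// and a set high bit means another byte follows.
//   0x40           -> 64
//   0xFE 0xFF 0x07 -> 0x7E + (0x7F << 7) + (0x07 << 14) = 131070
//   A fifth byte of 0x10 or more would shift past 32 bits and is rejected.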
|
|
|
|
|
|
|
|
// Process the next item found in the input.
|
|
|
|
// Returns true if successful, false on error or end of input.
|
|
|
|
template <class Writer>
|
Ensure DecompressAllTags starts on a 32-byte boundary + 16 bytes.
First of all, I'm sorry about this ugly hack. I hope the following long
explanation is enough to justify it.
We have observed that, in some conditions, the results for dataset number 10
(pb) in the zippy benchmark can show a >20% regression on Skylake CPUs.
In order to diagnose this, we profiled the benchmark looking at hot functions
(99% of the time is spent on DecompressAllTags), then looked at the generated
code to see if there was any difference. In order to discard a minor difference
we observed in register allocation we replaced zippy.cc with a pre-built assembly
file so it was the same in both variants, and we still were able to reproduce the
regression.
After discarding a regression caused by the compiler, we dug a bit further
and noticed that the alignment of the function in the final binary was
different. Both were aligned to a 16-byte boundary, but the slower one was also
(by chance) aligned to a 32-byte boundary. A regression caused by alignment
differences would explain why I could reproduce it consistently on the same CitC
client, but not others: slight differences in the sources can cause the resulting
binary to have different layout.
Here are some detailed benchmark results before/after the fix. Note how fixing
the alignment makes the difference between baseline and experiment go away, but
regular 32-byte alignment puts both variants in the same ballpark as the
original regression:
Original (note BM_UCord_10 and BM_UDataBuffer_10 around the -24% line):
BASELINE
BM_UCord/10 2938 2932 24194 3.767GB/s pb
BM_UDataBuffer/10 3008 3004 23316 3.677GB/s pb
EXPERIMENT
BM_UCord/10 3797 3789 18512 2.915GB/s pb
BM_UDataBuffer/10 4024 4016 17543 2.750GB/s pb
Aligning DecompressAllTags to a 32-byte boundary:
BASELINE
BM_UCord/10 3872 3862 18035 2.860GB/s pb
BM_UDataBuffer/10 4010 3998 17591 2.763GB/s pb
EXPERIMENT
BM_UCord/10 3884 3876 18126 2.850GB/s pb
BM_UDataBuffer/10 4037 4027 17199 2.743GB/s pb
Aligning DecompressAllTags to a 32-byte boundary + 16 bytes (this patch):
BASELINE
BM_UCord/10 3103 3095 22642 3.569GB/s pb
BM_UDataBuffer/10 3186 3177 21947 3.476GB/s pb
EXPERIMENT
BM_UCord/10 3104 3095 22632 3.569GB/s pb
BM_UDataBuffer/10 3167 3159 22076 3.496GB/s pb
This change forces the "good" alignment for DecompressAllTags which, if
anything, should make benchmark results more stable (and maybe we'll improve
some unlucky application!).
2018-02-03 02:38:30 +00:00
|
|
|
#if defined(__GNUC__) && defined(__x86_64__)
|
|
|
|
__attribute__((aligned(32)))
|
|
|
|
#endif
|
2020-10-30 17:37:07 +00:00
|
|
|
void
|
|
|
|
DecompressAllTags(Writer* writer) {
|
2011-03-18 17:14:15 +00:00
|
|
|
const char* ip = ip_;
|
2020-02-12 18:04:58 +00:00
|
|
|
ResetLimit(ip);
|
|
|
|
auto op = writer->GetOutputPtr();
|
2011-12-05 21:27:26 +00:00
|
|
|
// We could have put this refill fragment only at the beginning of the loop.
|
|
|
|
// However, duplicating it at the end of each branch gives the compiler more
|
|
|
|
// scope to optimize the <ip_limit_ - ip> expression based on the local
|
|
|
|
// context, which overall increases speed.
|
2020-02-12 18:04:58 +00:00
|
|
|
#define MAYBE_REFILL() \
|
|
|
|
if (SNAPPY_PREDICT_FALSE(ip >= ip_limit_min_maxtaglen_)) { \
|
|
|
|
ip_ = ip; \
|
|
|
|
if (SNAPPY_PREDICT_FALSE(!RefillTag())) goto exit; \
|
|
|
|
ip = ip_; \
|
|
|
|
ResetLimit(ip); \
|
|
|
|
} \
|
2020-04-12 20:03:50 +00:00
|
|
|
preload = static_cast<uint8_t>(*ip)
|
2020-02-12 18:04:58 +00:00
|
|
|
|
|
|
|
// At the start of the for loop below the least significant byte of preload
|
|
|
|
// contains the tag.
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t preload;
|
2011-12-05 21:27:26 +00:00
|
|
|
MAYBE_REFILL();
|
2020-10-30 17:37:07 +00:00
|
|
|
for (;;) {
|
2020-11-14 15:27:36 +00:00
|
|
|
{
|
2020-12-10 00:30:54 +00:00
|
|
|
ptrdiff_t op_limit_min_slop;
|
2020-11-14 15:27:36 +00:00
|
|
|
auto op_base = writer->GetBase(&op_limit_min_slop);
|
|
|
|
if (op_base) {
|
|
|
|
auto res =
|
|
|
|
DecompressBranchless(reinterpret_cast<const uint8_t*>(ip),
|
|
|
|
reinterpret_cast<const uint8_t*>(ip_limit_),
|
2020-12-10 00:30:54 +00:00
|
|
|
op - op_base, op_base, op_limit_min_slop);
|
2020-11-14 15:27:36 +00:00
|
|
|
ip = reinterpret_cast<const char*>(res.first);
|
2020-12-10 00:30:54 +00:00
|
|
|
op = op_base + res.second;
|
2020-11-14 15:27:36 +00:00
|
|
|
MAYBE_REFILL();
|
|
|
|
}
|
|
|
|
}
|
2020-04-12 20:03:50 +00:00
|
|
|
const uint8_t c = static_cast<uint8_t>(preload);
|
2020-02-12 18:04:58 +00:00
|
|
|
ip++;
|
Speed up decompression by caching ip_.
It is seemingly hard for the compiler to understand that ip_, the current input
pointer into the compressed data stream, can not alias on anything else, and
thus using it directly will incur memory traffic as it cannot be kept in a
register. The code already knew about this and cached it into a local
variable, but since Step() only decoded one tag, it had to move ip_ back into
place between every tag. This seems to have cost us a significant amount of
performance, so changing Step() into a function that decodes as much as it can
before it saves ip_ back and returns. (Note that Step() was already inlined,
so it is not the manual inlining that buys the performance here.)
The wins are about 3-6% for Core 2, 6-13% on Core i7 and 5-12% on Opteron
(for plain array-to-array decompression, in 64-bit opt mode).
There is a tiny difference in the behavior here; if an invalid literal is
encountered (ie., the writer refuses the Append() operation), ip_ will now
point to the byte past the tag byte, instead of where the literal was
originally thought to end. However, we don't use ip_ for anything after
DecompressAllTags() has returned, so this should not change external behavior
in any way.
Microbenchmark results for Core i7, 64-bit (Opteron results are similar):
Benchmark Time(ns) CPU(ns) Iterations
---------------------------------------------------
BM_UFlat/0 79134 79110 8835 1.2GB/s html [ +6.2%]
BM_UFlat/1 786126 786096 891 851.8MB/s urls [+10.0%]
BM_UFlat/2 9948 9948 69125 11.9GB/s jpg [ -1.3%]
BM_UFlat/3 31999 31998 21898 2.7GB/s pdf [ +6.5%]
BM_UFlat/4 318909 318829 2204 1.2GB/s html4 [ +6.5%]
BM_UFlat/5 31384 31390 22363 747.5MB/s cp [ +9.2%]
BM_UFlat/6 14037 14034 49858 757.7MB/s c [+10.6%]
BM_UFlat/7 4612 4612 151395 769.5MB/s lsp [ +9.5%]
BM_UFlat/8 1203174 1203007 582 816.3MB/s xls [+19.3%]
BM_UFlat/9 253869 253955 2757 571.1MB/s txt1 [+11.4%]
BM_UFlat/10 219292 219290 3194 544.4MB/s txt2 [+12.1%]
BM_UFlat/11 672135 672131 1000 605.5MB/s txt3 [+11.2%]
BM_UFlat/12 902512 902492 776 509.2MB/s txt4 [+12.5%]
BM_UFlat/13 372110 371998 1881 1.3GB/s bin [ +5.8%]
BM_UFlat/14 50407 50407 10000 723.5MB/s sum [+13.5%]
BM_UFlat/15 5699 5701 100000 707.2MB/s man [+12.4%]
BM_UFlat/16 83448 83424 8383 1.3GB/s pb [ +5.7%]
BM_UFlat/17 256958 256963 2723 684.1MB/s gaviota [ +7.9%]
BM_UValidate/0 42795 42796 16351 2.2GB/s html [+25.8%]
BM_UValidate/1 490672 490622 1427 1.3GB/s urls [+22.7%]
BM_UValidate/2 237 237 2950297 499.0GB/s jpg [+24.9%]
BM_UValidate/3 14610 14611 47901 6.0GB/s pdf [+26.8%]
BM_UValidate/4 171973 171990 4071 2.2GB/s html4 [+25.7%]
git-svn-id: https://snappy.googlecode.com/svn/trunk@38 03e5f5b5-db94-4691-08a0-1a8bf15f6143
2011-06-02 17:59:40 +00:00
|
|
|
|
2017-01-27 08:10:36 +00:00
|
|
|
// Ratio of iterations that have LITERAL vs non-LITERAL for different
|
|
|
|
// inputs.
|
|
|
|
//
|
|
|
|
// input LITERAL NON_LITERAL
|
|
|
|
// -----------------------------------
|
|
|
|
// html|html4|cp 23% 77%
|
|
|
|
// urls 36% 64%
|
|
|
|
// jpg 47% 53%
|
|
|
|
// pdf 19% 81%
|
|
|
|
// txt[1-4] 25% 75%
|
|
|
|
// pb 24% 76%
|
|
|
|
// bin 24% 76%
|
2017-07-28 21:31:04 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
|
2012-01-04 13:10:46 +00:00
|
|
|
size_t literal_length = (c >> 2) + 1u;
|
2020-02-12 18:04:58 +00:00
|
|
|
if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length, &op)) {
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(literal_length < 61);
|
2011-11-23 11:14:17 +00:00
|
|
|
ip += literal_length;
|
2019-05-13 16:30:17 +00:00
|
|
|
// NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
|
In the fast path for decompressing literals, instead of checking
whether there's 16 bytes free and then checking right afterwards
(when having subtracted the literal size) that there are now
5 bytes free, just check once for 21 bytes. This skips a compare
and a branch; although it is easily predictable, it is still
a few cycles on a fast path that we would like to get rid of.
Benchmarking this yields very confusing results. On open-source
GCC 4.8.1 on Haswell, we get exactly the expected results; the
benchmarks where we hit the fast path for literals (in particular
the two HTML benchmarks and the protobuf benchmark) give very nice
speedups, and the others are not really affected.
However, benchmarks with Google's GCC branch on other hardware
are much less clear. It seems that we have a weak loss in some cases
(and the wins for the “typical” win cases are not nearly as clear),
but that it depends on microarchitecture and plain luck in how we run
the benchmark. Looking at the generated assembler, it seems that
the removal of the if causes other large-scale changes in how the
function is laid out, which makes it likely that this is just bad luck.
Thus, we should keep this change, even though its exact current impact is
unclear; it's a sensible change per se, and dropping it on the basis of
microoptimization for a given compiler (or even branch of a compiler)
would seem like a bad strategy in the long run.
Microbenchmark results (all in 64-bit, opt mode):
Nehalem, Google GCC:
Benchmark Base (ns) New (ns) Improvement
------------------------------------------------------------------------------
BM_UFlat/0 76747 75591 1.3GB/s html +1.5%
BM_UFlat/1 765756 757040 886.3MB/s urls +1.2%
BM_UFlat/2 10867 10893 10.9GB/s jpg -0.2%
BM_UFlat/3 124 131 1.4GB/s jpg_200 -5.3%
BM_UFlat/4 31663 31596 2.8GB/s pdf +0.2%
BM_UFlat/5 314162 308176 1.2GB/s html4 +1.9%
BM_UFlat/6 29668 29746 790.6MB/s cp -0.3%
BM_UFlat/7 12958 13386 796.4MB/s c -3.2%
BM_UFlat/8 3596 3682 966.0MB/s lsp -2.3%
BM_UFlat/9 1019193 1033493 953.3MB/s xls -1.4%
BM_UFlat/10 239 247 775.3MB/s xls_200 -3.2%
BM_UFlat/11 236411 240271 606.9MB/s txt1 -1.6%
BM_UFlat/12 206639 209768 571.2MB/s txt2 -1.5%
BM_UFlat/13 627803 635722 641.4MB/s txt3 -1.2%
BM_UFlat/14 845932 857816 538.2MB/s txt4 -1.4%
BM_UFlat/15 402107 391670 1.2GB/s bin +2.7%
BM_UFlat/16 283 279 683.6MB/s bin_200 +1.4%
BM_UFlat/17 46070 46815 781.5MB/s sum -1.6%
BM_UFlat/18 5053 5163 782.0MB/s man -2.1%
BM_UFlat/19 79721 76581 1.4GB/s pb +4.1%
BM_UFlat/20 251158 252330 697.5MB/s gaviota -0.5%
Sum of all benchmarks 4966150 4980396 -0.3%
Sandy Bridge, Google GCC:
Benchmark Base (ns) New (ns) Improvement
------------------------------------------------------------------------------
BM_UFlat/0 42850 42182 2.3GB/s html +1.6%
BM_UFlat/1 525660 515816 1.3GB/s urls +1.9%
BM_UFlat/2 7173 7283 16.3GB/s jpg -1.5%
BM_UFlat/3 92 91 2.1GB/s jpg_200 +1.1%
BM_UFlat/4 15147 14872 5.9GB/s pdf +1.8%
BM_UFlat/5 199936 192116 2.0GB/s html4 +4.1%
BM_UFlat/6 12796 12443 1.8GB/s cp +2.8%
BM_UFlat/7 6588 6400 1.6GB/s c +2.9%
BM_UFlat/8 2010 1951 1.8GB/s lsp +3.0%
BM_UFlat/9 761124 763049 1.3GB/s xls -0.3%
BM_UFlat/10 186 189 1016.1MB/s xls_200 -1.6%
BM_UFlat/11 159354 158460 918.6MB/s txt1 +0.6%
BM_UFlat/12 139732 139950 856.1MB/s txt2 -0.2%
BM_UFlat/13 429917 425027 961.7MB/s txt3 +1.2%
BM_UFlat/14 585255 587324 785.8MB/s txt4 -0.4%
BM_UFlat/15 276186 266173 1.8GB/s bin +3.8%
BM_UFlat/16 205 207 925.5MB/s bin_200 -1.0%
BM_UFlat/17 24925 24935 1.4GB/s sum -0.0%
BM_UFlat/18 2632 2576 1.5GB/s man +2.2%
BM_UFlat/19 40546 39108 2.8GB/s pb +3.7%
BM_UFlat/20 175803 168209 1048.9MB/s gaviota +4.5%
Sum of all benchmarks 3408117 3368361 +1.2%
Haswell, upstream GCC 4.8.1:
Benchmark Base (ns) New (ns) Improvement
------------------------------------------------------------------------------
BM_UFlat/0 46308 40641 2.3GB/s html +13.9%
BM_UFlat/1 513385 514706 1.3GB/s urls -0.3%
BM_UFlat/2 6197 6151 19.2GB/s jpg +0.7%
BM_UFlat/3 61 61 3.0GB/s jpg_200 +0.0%
BM_UFlat/4 13551 13429 6.5GB/s pdf +0.9%
BM_UFlat/5 198317 190243 2.0GB/s html4 +4.2%
BM_UFlat/6 14768 12560 1.8GB/s cp +17.6%
BM_UFlat/7 6453 6447 1.6GB/s c +0.1%
BM_UFlat/8 1991 1980 1.8GB/s lsp +0.6%
BM_UFlat/9 766947 770424 1.2GB/s xls -0.5%
BM_UFlat/10 170 169 1.1GB/s xls_200 +0.6%
BM_UFlat/11 164350 163554 888.7MB/s txt1 +0.5%
BM_UFlat/12 145444 143830 832.1MB/s txt2 +1.1%
BM_UFlat/13 437849 438413 929.2MB/s txt3 -0.1%
BM_UFlat/14 603587 605309 759.8MB/s txt4 -0.3%
BM_UFlat/15 249799 248067 1.9GB/s bin +0.7%
BM_UFlat/16 191 188 1011.4MB/s bin_200 +1.6%
BM_UFlat/17 26064 24778 1.4GB/s sum +5.2%
BM_UFlat/18 2620 2601 1.5GB/s man +0.7%
BM_UFlat/19 44551 37373 3.0GB/s pb +19.2%
BM_UFlat/20 165408 164584 1.0GB/s gaviota +0.5%
Sum of all benchmarks 3408011 3385508 +0.7%
git-svn-id: https://snappy.googlecode.com/svn/trunk@78 03e5f5b5-db94-4691-08a0-1a8bf15f6143
2013-06-30 19:24:03 +00:00
|
|
|
// will not return true unless there's already at least five spare
|
|
|
|
// bytes in addition to the literal.
|
2020-04-12 20:03:50 +00:00
|
|
|
preload = static_cast<uint8_t>(*ip);
|
2011-11-23 11:14:17 +00:00
|
|
|
continue;
|
|
|
|
}
|
2017-07-28 21:31:04 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
|
2011-06-03 20:47:14 +00:00
|
|
|
// Long literal.
|
2012-01-04 13:10:46 +00:00
|
|
|
const size_t literal_length_length = literal_length - 60;
|
2011-06-03 20:47:14 +00:00
|
|
|
literal_length =
|
2018-12-12 15:14:02 +00:00
|
|
|
ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) +
|
|
|
|
1;
|
2011-06-03 20:47:14 +00:00
|
|
|
ip += literal_length_length;
|
|
|
|
}
|
|
|
|
|
2012-01-04 13:10:46 +00:00
|
|
|
size_t avail = ip_limit_ - ip;
|
2011-06-02 17:59:40 +00:00
|
|
|
while (avail < literal_length) {
|
2020-02-12 18:04:58 +00:00
|
|
|
if (!writer->Append(ip, avail, &op)) goto exit;
|
2011-06-02 17:59:40 +00:00
|
|
|
literal_length -= avail;
|
|
|
|
reader_->Skip(peeked_);
|
|
|
|
size_t n;
|
|
|
|
ip = reader_->Peek(&n);
|
|
|
|
avail = n;
|
|
|
|
peeked_ = avail;
|
2020-02-12 18:04:58 +00:00
|
|
|
if (avail == 0) goto exit;
|
2011-06-02 17:59:40 +00:00
|
|
|
ip_limit_ = ip + avail;
|
2020-02-12 18:04:58 +00:00
|
|
|
ResetLimit(ip);
|
2011-06-02 17:59:40 +00:00
|
|
|
}
|
2020-02-12 18:04:58 +00:00
|
|
|
if (!writer->Append(ip, literal_length, &op)) goto exit;
|
2011-06-02 17:59:40 +00:00
|
|
|
ip += literal_length;
|
2011-12-05 21:27:26 +00:00
|
|
|
MAYBE_REFILL();
|
2011-06-02 17:59:40 +00:00
|
|
|
} else {
|
2020-02-07 14:38:49 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE((c & 3) == COPY_4_BYTE_OFFSET)) {
|
|
|
|
const size_t copy_offset = LittleEndian::Load32(ip);
|
|
|
|
const size_t length = (c >> 2) + 1;
|
|
|
|
ip += 4;
|
2020-02-12 18:04:58 +00:00
|
|
|
|
|
|
|
if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
|
2020-02-07 14:38:49 +00:00
|
|
|
} else {
|
2021-07-29 13:26:45 +00:00
|
|
|
const ptrdiff_t entry = kLengthMinusOffset[c];
|
2020-02-12 18:04:58 +00:00
|
|
|
preload = LittleEndian::Load32(ip);
|
2020-04-12 20:03:50 +00:00
|
|
|
const uint32_t trailer = ExtractLowBytes(preload, c & 3);
|
|
|
|
const uint32_t length = entry & 0xff;
|
2020-12-09 02:27:22 +00:00
|
|
|
assert(length > 0);
|
2020-02-07 14:38:49 +00:00
|
|
|
|
|
|
|
// copy_offset/256 is encoded in bits 8..10. By just fetching
|
|
|
|
// those bits, we get copy_offset (since the bit-field starts at
|
|
|
|
// bit 8).
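// In other words (a sketch of the arithmetic, assuming kLengthMinusOffset[c]
// == length - offset_from_tag):
//   trailer - entry + length
//     == trailer - (length - offset_from_tag) + length
//     == trailer + offset_from_tag,
// i.e. the offset byte(s) read from the stream plus the bits taken from the
// tag. For copy-2 the tag carries no offset bits, so this reduces to trailer.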
|
2020-12-10 00:30:54 +00:00
|
|
|
const uint32_t copy_offset = trailer - entry + length;
|
2020-02-12 18:04:58 +00:00
|
|
|
if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
|
|
|
|
|
|
|
|
ip += (c & 3);
|
|
|
|
// By using the result of the previous load we reduce the critical
|
|
|
|
// dependency chain of ip to 4 cycles.
|
|
|
|
preload >>= (c & 3) * 8;
|
|
|
|
if (ip < ip_limit_min_maxtaglen_) continue;
|
2011-06-02 17:59:40 +00:00
|
|
|
}
|
2011-12-05 21:27:26 +00:00
|
|
|
MAYBE_REFILL();
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
}
|
2011-12-05 21:27:26 +00:00
|
|
|
#undef MAYBE_REFILL
|
2020-02-12 18:04:58 +00:00
|
|
|
exit:
|
|
|
|
writer->SetOutputPtr(op);
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2020-11-14 15:27:36 +00:00
|
|
|
constexpr uint32_t CalculateNeeded(uint8_t tag) {
|
2020-11-19 13:42:42 +00:00
|
|
|
return ((tag & 3) == 0 && tag >= (60 * 4))
|
|
|
|
? (tag >> 2) - 58
|
|
|
|
: (0x05030201 >> ((tag * 8) & 31)) & 0xFF;
|
2020-11-14 15:27:36 +00:00
|
|
|
}
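// The values this yields (implied by the expression above): 1 byte for a
// short literal tag, 2 for copy-1, 3 for copy-2, 5 for copy-4, and 2..5 for
// the long-literal tags 60..63 (the tag byte plus 1..4 length bytes).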
|
|
|
|
|
2020-11-19 13:42:42 +00:00
|
|
|
#if __cplusplus >= 201402L
|
2020-11-14 15:27:36 +00:00
|
|
|
constexpr bool VerifyCalculateNeeded() {
|
|
|
|
for (int i = 0; i < 256; i++) {
|
|
|
|
if (CalculateNeeded(i) != (char_table[i] >> 11) + 1) return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure CalculateNeeded is correct by verifying it against the established
|
|
|
|
// table encoding the number of added bytes needed.
|
|
|
|
static_assert(VerifyCalculateNeeded(), "");
|
2020-11-19 13:42:42 +00:00
|
|
|
#endif // c++14
|
2020-11-14 15:27:36 +00:00
|
|
|
|
2011-03-18 17:14:15 +00:00
|
|
|
bool SnappyDecompressor::RefillTag() {
|
|
|
|
const char* ip = ip_;
|
|
|
|
if (ip == ip_limit_) {
|
|
|
|
// Fetch a new fragment from the reader
|
2020-10-30 17:37:07 +00:00
|
|
|
reader_->Skip(peeked_); // All peeked bytes are used up
|
2011-03-18 17:14:15 +00:00
|
|
|
size_t n;
|
|
|
|
ip = reader_->Peek(&n);
|
|
|
|
peeked_ = n;
|
2017-01-27 08:10:36 +00:00
|
|
|
eof_ = (n == 0);
|
|
|
|
if (eof_) return false;
|
2011-03-18 17:14:15 +00:00
|
|
|
ip_limit_ = ip + n;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Read the tag character
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(ip < ip_limit_);
|
2011-03-18 17:14:15 +00:00
|
|
|
const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
|
2020-11-14 15:27:36 +00:00
|
|
|
// At this point make sure that the data for the next tag is consecutive.
|
|
|
|
// For copy 1 this means the next 2 bytes (tag and 1 byte offset)
|
|
|
|
// For copy 2 the next 3 bytes (tag and 2 byte offset)
|
|
|
|
// For copy 4 the next 5 bytes (tag and 4 byte offset)
|
|
|
|
// For all small literals we only need 1 byte, but for literals 60...63 the
|
|
|
|
// length is encoded in 1...4 extra bytes.
|
|
|
|
const uint32_t needed = CalculateNeeded(c);
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(needed <= sizeof(scratch_));
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
// Read more bytes from reader if needed
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t nbuf = ip_limit_ - ip;
|
2011-03-18 17:14:15 +00:00
|
|
|
if (nbuf < needed) {
|
|
|
|
// Stitch together bytes from ip and reader to form the word
|
|
|
|
// contents. We store the needed bytes in "scratch_". They
|
|
|
|
// will be consumed immediately by the caller since we do not
|
|
|
|
// read more than we need.
|
2020-04-12 00:01:01 +00:00
|
|
|
std::memmove(scratch_, ip, nbuf);
|
2011-03-18 17:14:15 +00:00
|
|
|
reader_->Skip(peeked_); // All peeked bytes are used up
|
|
|
|
peeked_ = 0;
|
|
|
|
while (nbuf < needed) {
|
|
|
|
size_t length;
|
|
|
|
const char* src = reader_->Peek(&length);
|
|
|
|
if (length == 0) return false;
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t to_add = std::min<uint32_t>(needed - nbuf, length);
|
2020-04-12 00:01:01 +00:00
|
|
|
std::memcpy(scratch_ + nbuf, src, to_add);
|
2011-03-18 17:14:15 +00:00
|
|
|
nbuf += to_add;
|
|
|
|
reader_->Skip(to_add);
|
|
|
|
}
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(nbuf == needed);
|
2011-03-18 17:14:15 +00:00
|
|
|
ip_ = scratch_;
|
|
|
|
ip_limit_ = scratch_ + needed;
|
2013-06-30 19:24:03 +00:00
|
|
|
} else if (nbuf < kMaximumTagLength) {
|
2011-03-18 17:14:15 +00:00
|
|
|
// Have enough bytes, but move into scratch_ so that we do not
|
|
|
|
// read past end of input
|
2020-04-12 00:01:01 +00:00
|
|
|
std::memmove(scratch_, ip, nbuf);
|
2011-03-18 17:14:15 +00:00
|
|
|
reader_->Skip(peeked_); // All peeked bytes are used up
|
|
|
|
peeked_ = 0;
|
|
|
|
ip_ = scratch_;
|
|
|
|
ip_limit_ = scratch_ + nbuf;
|
|
|
|
} else {
|
|
|
|
// Pass pointer to buffer returned by reader_.
|
|
|
|
ip_ = ip;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
template <typename Writer>
|
2013-06-12 19:51:15 +00:00
|
|
|
static bool InternalUncompress(Source* r, Writer* writer) {
|
2011-03-18 17:14:15 +00:00
|
|
|
// Read the uncompressed length from the front of the compressed input
|
|
|
|
SnappyDecompressor decompressor(r);
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t uncompressed_len = 0;
|
2011-03-18 17:14:15 +00:00
|
|
|
if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
|
2017-02-01 16:34:26 +00:00
|
|
|
|
|
|
|
return InternalUncompressAllTags(&decompressor, writer, r->Available(),
|
|
|
|
uncompressed_len);
|
2012-01-08 17:55:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
template <typename Writer>
|
|
|
|
static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
|
2020-10-30 17:37:07 +00:00
|
|
|
Writer* writer, uint32_t compressed_len,
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t uncompressed_len) {
|
2024-01-13 00:27:32 +00:00
|
|
|
int token = 0;
|
|
|
|
Report(token, "snappy_uncompress", compressed_len, uncompressed_len);
|
2017-02-01 16:34:26 +00:00
|
|
|
|
2011-03-18 17:14:15 +00:00
|
|
|
writer->SetExpectedLength(uncompressed_len);
|
|
|
|
|
|
|
|
// Process the entire input
|
2012-01-08 17:55:48 +00:00
|
|
|
decompressor->DecompressAllTags(writer);
|
2015-06-22 14:03:28 +00:00
|
|
|
writer->Flush();
|
2012-01-08 17:55:48 +00:00
|
|
|
return (decompressor->eof() && writer->CheckLength());
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
|
2020-04-12 20:03:50 +00:00
|
|
|
bool GetUncompressedLength(Source* source, uint32_t* result) {
|
2011-03-18 17:14:15 +00:00
|
|
|
SnappyDecompressor decompressor(source);
|
|
|
|
return decompressor.ReadUncompressedLength(result);
|
|
|
|
}
|
|
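A minimal usage sketch (not part of snappy.cc; the helper name DecompressToBuffer and the buffer handling are illustrative) showing how the flat-buffer GetUncompressedLength() overload declared in snappy.h is typically paired with RawUncompress():
// Illustrative sketch only (not part of snappy.cc): size a buffer from the
// length prefix, then decompress into it.
#include <string>
#include <vector>
#include "snappy.h"

bool DecompressToBuffer(const std::string& compressed, std::vector<char>* out) {
  size_t uncompressed_len = 0;
  if (!snappy::GetUncompressedLength(compressed.data(), compressed.size(),
                                     &uncompressed_len)) {
    return false;  // Corrupt or truncated length prefix.
  }
  out->resize(uncompressed_len);
  return snappy::RawUncompress(compressed.data(), compressed.size(),
                               out->data());
}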
|
|
|
2024-05-21 19:25:25 +00:00
|
|
|
size_t Compress(Source* reader, Sink* writer) {
|
|
|
|
return Compress(reader, writer, CompressionOptions{});
|
|
|
|
}
|
|
|
|
|
2024-04-03 09:40:00 +00:00
|
|
|
size_t Compress(Source* reader, Sink* writer, CompressionOptions options) {
|
2024-04-04 18:36:37 +00:00
|
|
|
assert(options.level == 1 || options.level == 2);
|
2024-01-13 00:27:32 +00:00
|
|
|
int token = 0;
|
2011-03-18 17:14:15 +00:00
|
|
|
size_t written = 0;
|
2012-01-04 13:10:46 +00:00
|
|
|
size_t N = reader->Available();
|
2017-02-01 16:34:26 +00:00
|
|
|
const size_t uncompressed_size = N;
|
2011-03-18 17:14:15 +00:00
|
|
|
char ulength[Varint::kMax32];
|
|
|
|
char* p = Varint::Encode32(ulength, N);
|
2020-10-30 17:37:07 +00:00
|
|
|
writer->Append(ulength, p - ulength);
|
2011-03-18 17:14:15 +00:00
|
|
|
written += (p - ulength);
|
|
|
|
|
2018-10-16 19:28:52 +00:00
|
|
|
internal::WorkingMemory wmem(N);
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
while (N > 0) {
|
|
|
|
// Get next block to compress (without copying if possible)
|
|
|
|
size_t fragment_size;
|
|
|
|
const char* fragment = reader->Peek(&fragment_size);
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(fragment_size != 0); // premature end of input
|
2017-03-13 19:46:43 +00:00
|
|
|
const size_t num_to_read = std::min(N, kBlockSize);
|
2011-03-18 17:14:15 +00:00
|
|
|
size_t bytes_read = fragment_size;
|
|
|
|
|
2012-01-04 13:10:46 +00:00
|
|
|
size_t pending_advance = 0;
|
2011-03-18 17:14:15 +00:00
|
|
|
if (bytes_read >= num_to_read) {
|
|
|
|
// Buffer returned by reader is large enough
|
|
|
|
pending_advance = num_to_read;
|
|
|
|
fragment_size = num_to_read;
|
|
|
|
} else {
|
2018-10-16 19:28:52 +00:00
|
|
|
char* scratch = wmem.GetScratchInput();
|
2020-04-12 00:01:01 +00:00
|
|
|
std::memcpy(scratch, fragment, bytes_read);
|
2011-03-18 17:14:15 +00:00
|
|
|
reader->Skip(bytes_read);
|
|
|
|
|
|
|
|
while (bytes_read < num_to_read) {
|
|
|
|
fragment = reader->Peek(&fragment_size);
|
2017-03-13 19:46:43 +00:00
|
|
|
size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
|
2020-04-12 00:01:01 +00:00
|
|
|
std::memcpy(scratch + bytes_read, fragment, n);
|
2011-03-18 17:14:15 +00:00
|
|
|
bytes_read += n;
|
|
|
|
reader->Skip(n);
|
|
|
|
}
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(bytes_read == num_to_read);
|
2018-10-16 19:28:52 +00:00
|
|
|
fragment = scratch;
|
2011-03-18 17:14:15 +00:00
|
|
|
fragment_size = num_to_read;
|
|
|
|
}
|
2012-05-22 09:32:50 +00:00
|
|
|
assert(fragment_size == num_to_read);
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
// Get encoding table for compression
|
|
|
|
int table_size;
|
2020-04-12 20:03:50 +00:00
|
|
|
uint16_t* table = wmem.GetHashTable(num_to_read, &table_size);
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
// Compress input_fragment and append to dest
|
2024-01-19 16:14:10 +00:00
|
|
|
int max_output = MaxCompressedLength(num_to_read);
|
2018-10-16 19:28:52 +00:00
|
|
|
|
|
|
|
// Since we encode kBlockSize regions followed by a region
|
|
|
|
// which is <= kBlockSize in length, a previously allocated
|
|
|
|
// scratch_output[] region is big enough for this iteration.
|
2024-01-19 16:14:10 +00:00
|
|
|
// Need a scratch buffer for the output, in case the byte sink doesn't
|
|
|
|
// have room for us directly.
|
2018-10-16 19:28:52 +00:00
|
|
|
char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput());
|
2024-04-03 09:40:00 +00:00
|
|
|
char* end = nullptr;
|
|
|
|
if (options.level == 1) {
|
|
|
|
end = internal::CompressFragment(fragment, fragment_size, dest, table,
|
|
|
|
table_size);
|
|
|
|
} else if (options.level == 2) {
|
|
|
|
end = internal::CompressFragmentDoubleHash(
|
|
|
|
fragment, fragment_size, dest, table, table_size >> 1,
|
|
|
|
table + (table_size >> 1), table_size >> 1);
|
|
|
|
}
|
2011-03-18 17:14:15 +00:00
|
|
|
writer->Append(dest, end - dest);
|
|
|
|
written += (end - dest);
|
|
|
|
|
|
|
|
N -= num_to_read;
|
|
|
|
reader->Skip(pending_advance);
|
|
|
|
}
|
|
|
|
|
2024-01-13 00:27:32 +00:00
|
|
|
Report(token, "snappy_compress", written, uncompressed_size);
|
2011-03-18 17:14:15 +00:00
|
|
|
return written;
|
|
|
|
}
|
|
|
|
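For reference, a minimal sketch of driving this Source/Sink entry point directly, much as RawCompress() below does; the helper name and the choice of level 2 are illustrative:
// Illustrative sketch only (not part of snappy.cc).
#include <string>
#include "snappy-sinksource.h"
#include "snappy.h"

size_t CompressWithLevel2(const char* input, size_t input_length,
                          std::string* output) {
  // Reserve the worst-case output size, then shrink to what was written.
  output->resize(snappy::MaxCompressedLength(input_length));
  snappy::ByteArraySource reader(input, input_length);
  snappy::UncheckedByteArraySink writer(&(*output)[0]);
  snappy::CompressionOptions options;
  options.level = 2;  // Selects the double-hash fragment compressor above.
  size_t compressed_size = snappy::Compress(&reader, &writer, options);
  output->resize(compressed_size);
  return compressed_size;
}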
|
2013-06-13 16:19:52 +00:00
|
|
|
// -----------------------------------------------------------------------
|
|
|
|
// IOVec interfaces
|
|
|
|
// -----------------------------------------------------------------------
|
|
|
|
|
2022-09-26 17:23:33 +00:00
|
|
|
// A `Source` implementation that yields the contents of an `iovec` array. Note
|
|
|
|
// that `total_size` is the total number of bytes to be read from the elements
|
|
|
|
// of `iov` (_not_ the total number of elements in `iov`).
|
|
|
|
class SnappyIOVecReader : public Source {
|
|
|
|
public:
|
|
|
|
SnappyIOVecReader(const struct iovec* iov, size_t total_size)
|
|
|
|
: curr_iov_(iov),
|
|
|
|
curr_pos_(total_size > 0 ? reinterpret_cast<const char*>(iov->iov_base)
|
|
|
|
: nullptr),
|
|
|
|
curr_size_remaining_(total_size > 0 ? iov->iov_len : 0),
|
|
|
|
total_size_remaining_(total_size) {
|
|
|
|
// Skip empty leading `iovec`s.
|
|
|
|
if (total_size > 0 && curr_size_remaining_ == 0) Advance();
|
|
|
|
}
|
|
|
|
|
2023-12-29 18:43:26 +00:00
|
|
|
~SnappyIOVecReader() override = default;
|
2022-09-26 17:23:33 +00:00
|
|
|
|
2023-12-29 18:43:26 +00:00
|
|
|
size_t Available() const override { return total_size_remaining_; }
|
2022-09-26 17:23:33 +00:00
|
|
|
|
2023-12-29 18:43:26 +00:00
|
|
|
const char* Peek(size_t* len) override {
|
2022-09-26 17:23:33 +00:00
|
|
|
*len = curr_size_remaining_;
|
|
|
|
return curr_pos_;
|
|
|
|
}
|
|
|
|
|
2023-12-29 18:43:26 +00:00
|
|
|
void Skip(size_t n) override {
|
2022-09-26 17:23:33 +00:00
|
|
|
while (n >= curr_size_remaining_ && n > 0) {
|
|
|
|
n -= curr_size_remaining_;
|
|
|
|
Advance();
|
|
|
|
}
|
|
|
|
curr_size_remaining_ -= n;
|
|
|
|
total_size_remaining_ -= n;
|
|
|
|
curr_pos_ += n;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
// Advances to the next nonempty `iovec` and updates related variables.
|
|
|
|
void Advance() {
|
|
|
|
do {
|
|
|
|
assert(total_size_remaining_ >= curr_size_remaining_);
|
|
|
|
total_size_remaining_ -= curr_size_remaining_;
|
|
|
|
if (total_size_remaining_ == 0) {
|
|
|
|
curr_pos_ = nullptr;
|
|
|
|
curr_size_remaining_ = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
++curr_iov_;
|
|
|
|
curr_pos_ = reinterpret_cast<const char*>(curr_iov_->iov_base);
|
|
|
|
curr_size_remaining_ = curr_iov_->iov_len;
|
|
|
|
} while (curr_size_remaining_ == 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
// The `iovec` currently being read.
|
|
|
|
const struct iovec* curr_iov_;
|
|
|
|
// The location in `curr_iov_` currently being read.
|
|
|
|
const char* curr_pos_;
|
|
|
|
// The amount of unread data in `curr_iov_`.
|
|
|
|
size_t curr_size_remaining_;
|
|
|
|
// The amount of unread data in the entire input array.
|
|
|
|
size_t total_size_remaining_;
|
|
|
|
};
|
|
|
|
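A minimal sketch of the public CompressFromIOVec() entry point (defined further down) that this reader serves; it assumes a POSIX <sys/uio.h> iovec, and the two string chunks are illustrative:
// Illustrative sketch only (not part of snappy.cc).
#include <string>
#include <sys/uio.h>
#include "snappy.h"

std::string CompressTwoChunks(const std::string& a, const std::string& b) {
  struct iovec iov[2];
  iov[0].iov_base = const_cast<char*>(a.data());
  iov[0].iov_len = a.size();
  iov[1].iov_base = const_cast<char*>(b.data());
  iov[1].iov_len = b.size();
  std::string compressed;
  snappy::CompressFromIOVec(iov, 2, &compressed);
  return compressed;
}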
|
2013-06-13 16:19:52 +00:00
|
|
|
// A type that writes to an iovec.
|
|
|
|
// Note that this is not a "ByteSink", but a type that matches the
|
|
|
|
// Writer template argument to SnappyDecompressor::DecompressAllTags().
|
|
|
|
class SnappyIOVecWriter {
|
|
|
|
private:
|
2018-08-08 01:39:54 +00:00
|
|
|
// output_iov_end_ is set to iov + count and used to determine when
|
|
|
|
// the end of the iovs is reached.
|
|
|
|
const struct iovec* output_iov_end_;
|
2013-06-13 16:19:52 +00:00
|
|
|
|
2018-08-17 19:02:02 +00:00
|
|
|
#if !defined(NDEBUG)
|
|
|
|
const struct iovec* output_iov_;
|
|
|
|
#endif // !defined(NDEBUG)
|
|
|
|
|
2018-08-08 01:39:54 +00:00
|
|
|
// Current iov that is being written into.
|
|
|
|
const struct iovec* curr_iov_;
|
|
|
|
|
|
|
|
// Pointer to current iov's write location.
|
|
|
|
char* curr_iov_output_;
|
|
|
|
|
|
|
|
// Remaining bytes to write into curr_iov_output.
|
|
|
|
size_t curr_iov_remaining_;
|
2013-06-13 16:19:52 +00:00
|
|
|
|
|
|
|
// Total bytes decompressed into output_iov_ so far.
|
|
|
|
size_t total_written_;
|
|
|
|
|
|
|
|
// Maximum number of bytes that will be decompressed into output_iov_.
|
|
|
|
size_t output_limit_;
|
|
|
|
|
2018-08-08 01:39:54 +00:00
|
|
|
static inline char* GetIOVecPointer(const struct iovec* iov, size_t offset) {
|
|
|
|
return reinterpret_cast<char*>(iov->iov_base) + offset;
|
2013-06-13 16:19:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
public:
|
|
|
|
// Does not take ownership of iov. iov must be valid during the
|
|
|
|
// entire lifetime of the SnappyIOVecWriter.
|
|
|
|
inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
|
2018-08-17 19:02:02 +00:00
|
|
|
: output_iov_end_(iov + iov_count),
|
|
|
|
#if !defined(NDEBUG)
|
|
|
|
output_iov_(iov),
|
|
|
|
#endif // !defined(NDEBUG)
|
2018-08-08 01:39:54 +00:00
|
|
|
curr_iov_(iov),
|
|
|
|
curr_iov_output_(iov_count ? reinterpret_cast<char*>(iov->iov_base)
|
|
|
|
: nullptr),
|
|
|
|
curr_iov_remaining_(iov_count ? iov->iov_len : 0),
|
2013-06-13 16:19:52 +00:00
|
|
|
total_written_(0),
|
2020-10-30 17:37:07 +00:00
|
|
|
output_limit_(-1) {
|
2013-06-13 16:19:52 +00:00
|
|
|
}
|
|
|
|
|
2020-10-30 17:37:07 +00:00
|
|
|
inline void SetExpectedLength(size_t len) { output_limit_ = len; }
|
|
|
|
|
|
|
|
inline bool CheckLength() const { return total_written_ == output_limit_; }
|
2013-06-13 16:19:52 +00:00
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool Append(const char* ip, size_t len, char**) {
|
2013-06-13 16:19:52 +00:00
|
|
|
if (total_written_ + len > output_limit_) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-08-08 01:39:54 +00:00
|
|
|
return AppendNoCheck(ip, len);
|
|
|
|
}
|
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
char* GetOutputPtr() { return nullptr; }
|
2020-12-10 00:30:54 +00:00
|
|
|
char* GetBase(ptrdiff_t*) { return nullptr; }
|
2020-05-05 16:13:04 +00:00
|
|
|
void SetOutputPtr(char* op) {
|
|
|
|
// TODO: Switch to [[maybe_unused]] when we can assume C++17.
|
|
|
|
(void)op;
|
|
|
|
}
|
2020-02-12 18:04:58 +00:00
|
|
|
|
2018-08-08 01:39:54 +00:00
|
|
|
inline bool AppendNoCheck(const char* ip, size_t len) {
|
2013-06-13 16:19:52 +00:00
|
|
|
while (len > 0) {
|
2018-08-08 01:39:54 +00:00
|
|
|
if (curr_iov_remaining_ == 0) {
|
2013-06-13 16:19:52 +00:00
|
|
|
// This iovec is full. Go to the next one.
|
2018-08-08 01:39:54 +00:00
|
|
|
if (curr_iov_ + 1 >= output_iov_end_) {
|
2013-06-13 16:19:52 +00:00
|
|
|
return false;
|
|
|
|
}
|
2018-08-08 01:39:54 +00:00
|
|
|
++curr_iov_;
|
|
|
|
curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
|
|
|
|
curr_iov_remaining_ = curr_iov_->iov_len;
|
2013-06-13 16:19:52 +00:00
|
|
|
}
|
|
|
|
|
2018-08-08 01:39:54 +00:00
|
|
|
const size_t to_write = std::min(len, curr_iov_remaining_);
|
2020-04-12 00:01:01 +00:00
|
|
|
std::memcpy(curr_iov_output_, ip, to_write);
|
2018-08-08 01:39:54 +00:00
|
|
|
curr_iov_output_ += to_write;
|
|
|
|
curr_iov_remaining_ -= to_write;
|
2013-06-13 16:19:52 +00:00
|
|
|
total_written_ += to_write;
|
|
|
|
ip += to_write;
|
|
|
|
len -= to_write;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool TryFastAppend(const char* ip, size_t available, size_t len,
|
|
|
|
char**) {
|
2013-06-13 16:19:52 +00:00
|
|
|
const size_t space_left = output_limit_ - total_written_;
|
2013-06-30 19:24:03 +00:00
|
|
|
if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
|
2018-08-08 01:39:54 +00:00
|
|
|
curr_iov_remaining_ >= 16) {
|
2013-06-13 16:19:52 +00:00
|
|
|
// Fast path, used for the majority (about 95%) of invocations.
|
2018-08-08 01:39:54 +00:00
|
|
|
UnalignedCopy128(ip, curr_iov_output_);
|
|
|
|
curr_iov_output_ += len;
|
|
|
|
curr_iov_remaining_ -= len;
|
2013-06-13 16:19:52 +00:00
|
|
|
total_written_ += len;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool AppendFromSelf(size_t offset, size_t len, char**) {
|
2018-08-08 01:39:54 +00:00
|
|
|
// See SnappyArrayWriter::AppendFromSelf for an explanation of
|
|
|
|
// the "offset - 1u" trick.
|
|
|
|
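// (Illustrative note: the subtraction is unsigned, so offset == 0 wraps
// around to SIZE_MAX; this one comparison therefore rejects both a zero
// offset and an offset that reaches back before the output produced so far.)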
if (offset - 1u >= total_written_) {
|
2013-06-13 16:19:52 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
const size_t space_left = output_limit_ - total_written_;
|
|
|
|
if (len > space_left) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Locate the iovec from which we need to start the copy.
|
2018-08-08 01:39:54 +00:00
|
|
|
const iovec* from_iov = curr_iov_;
|
|
|
|
size_t from_iov_offset = curr_iov_->iov_len - curr_iov_remaining_;
|
2013-06-13 16:19:52 +00:00
|
|
|
while (offset > 0) {
|
|
|
|
if (from_iov_offset >= offset) {
|
|
|
|
from_iov_offset -= offset;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
offset -= from_iov_offset;
|
2018-08-08 01:39:54 +00:00
|
|
|
--from_iov;
|
2018-08-17 19:02:02 +00:00
|
|
|
#if !defined(NDEBUG)
|
2018-08-08 01:39:54 +00:00
|
|
|
assert(from_iov >= output_iov_);
|
2018-08-17 19:02:02 +00:00
|
|
|
#endif // !defined(NDEBUG)
|
2018-08-08 01:39:54 +00:00
|
|
|
from_iov_offset = from_iov->iov_len;
|
2013-06-13 16:19:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Copy <len> bytes starting from the iovec pointed to by from_iov to
|
|
|
|
// the current iovec.
|
|
|
|
while (len > 0) {
|
2018-08-08 01:39:54 +00:00
|
|
|
assert(from_iov <= curr_iov_);
|
|
|
|
if (from_iov != curr_iov_) {
|
|
|
|
const size_t to_copy =
|
|
|
|
std::min(from_iov->iov_len - from_iov_offset, len);
|
|
|
|
AppendNoCheck(GetIOVecPointer(from_iov, from_iov_offset), to_copy);
|
2013-06-13 16:19:52 +00:00
|
|
|
len -= to_copy;
|
|
|
|
if (len > 0) {
|
2018-08-08 01:39:54 +00:00
|
|
|
++from_iov;
|
2013-06-13 16:19:52 +00:00
|
|
|
from_iov_offset = 0;
|
|
|
|
}
|
|
|
|
} else {
|
2018-08-08 01:39:54 +00:00
|
|
|
size_t to_copy = curr_iov_remaining_;
|
2013-06-13 16:19:52 +00:00
|
|
|
if (to_copy == 0) {
|
|
|
|
// This iovec is full. Go to the next one.
|
2018-08-08 01:39:54 +00:00
|
|
|
if (curr_iov_ + 1 >= output_iov_end_) {
|
2013-06-13 16:19:52 +00:00
|
|
|
return false;
|
|
|
|
}
|
2018-08-08 01:39:54 +00:00
|
|
|
++curr_iov_;
|
|
|
|
curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
|
|
|
|
curr_iov_remaining_ = curr_iov_->iov_len;
|
2013-06-13 16:19:52 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (to_copy > len) {
|
|
|
|
to_copy = len;
|
|
|
|
}
|
2020-12-09 02:27:22 +00:00
|
|
|
assert(to_copy > 0);
|
2018-08-08 01:39:54 +00:00
|
|
|
|
|
|
|
IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset),
|
|
|
|
curr_iov_output_, curr_iov_output_ + to_copy,
|
|
|
|
curr_iov_output_ + curr_iov_remaining_);
|
|
|
|
curr_iov_output_ += to_copy;
|
|
|
|
curr_iov_remaining_ -= to_copy;
|
2013-06-13 16:19:52 +00:00
|
|
|
from_iov_offset += to_copy;
|
|
|
|
total_written_ += to_copy;
|
|
|
|
len -= to_copy;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2015-06-22 14:03:28 +00:00
|
|
|
inline void Flush() {}
|
2013-06-13 16:19:52 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
|
|
|
|
const struct iovec* iov, size_t iov_cnt) {
|
|
|
|
ByteArraySource reader(compressed, compressed_length);
|
|
|
|
return RawUncompressToIOVec(&reader, iov, iov_cnt);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
|
|
|
|
size_t iov_cnt) {
|
|
|
|
SnappyIOVecWriter output(iov, iov_cnt);
|
|
|
|
return InternalUncompress(compressed, &output);
|
|
|
|
}
|
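A minimal sketch of scattering decompressed output across two caller-owned buffers with the flat-buffer overload above; the helper name and the assumption that len0 + len1 covers the uncompressed size are illustrative:
// Illustrative sketch only (not part of snappy.cc).
#include <sys/uio.h>
#include "snappy.h"

bool UncompressToTwoBuffers(const char* compressed, size_t compressed_length,
                            char* buf0, size_t len0, char* buf1, size_t len1) {
  struct iovec iov[2];
  iov[0].iov_base = buf0;
  iov[0].iov_len = len0;
  iov[1].iov_base = buf1;
  iov[1].iov_len = len1;
  return snappy::RawUncompressToIOVec(compressed, compressed_length, iov, 2);
}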
|
|
|
|
2011-03-18 17:14:15 +00:00
|
|
|
// -----------------------------------------------------------------------
|
|
|
|
// Flat array interfaces
|
|
|
|
// -----------------------------------------------------------------------
|
|
|
|
|
|
|
|
// A type that writes to a flat array.
|
|
|
|
// Note that this is not a "ByteSink", but a type that matches the
|
Speed up decompression by caching ip_.
It is seemingly hard for the compiler to understand that ip_, the current input
pointer into the compressed data stream, cannot alias anything else, so
using it directly incurs memory traffic because it cannot be kept in a
register. The code already knew about this and cached it in a local
variable, but since Step() only decoded one tag, it had to move ip_ back into
place between every tag. This seems to have cost us a significant amount of
performance, so Step() has been changed into a function that decodes as much
as it can before saving ip_ back and returning. (Note that Step() was already
inlined, so it is not the manual inlining that buys the performance here.)
The wins are about 3-6% for Core 2, 6-13% on Core i7 and 5-12% on Opteron
(for plain array-to-array decompression, in 64-bit opt mode).
There is a tiny difference in the behavior here; if an invalid literal is
encountered (i.e., the writer refuses the Append() operation), ip_ will now
point to the byte past the tag byte, instead of where the literal was
originally thought to end. However, we don't use ip_ for anything after
DecompressAllTags() has returned, so this should not change external behavior
in any way.
Microbenchmark results for Core i7, 64-bit (Opteron results are similar):
Benchmark Time(ns) CPU(ns) Iterations
---------------------------------------------------
BM_UFlat/0 79134 79110 8835 1.2GB/s html [ +6.2%]
BM_UFlat/1 786126 786096 891 851.8MB/s urls [+10.0%]
BM_UFlat/2 9948 9948 69125 11.9GB/s jpg [ -1.3%]
BM_UFlat/3 31999 31998 21898 2.7GB/s pdf [ +6.5%]
BM_UFlat/4 318909 318829 2204 1.2GB/s html4 [ +6.5%]
BM_UFlat/5 31384 31390 22363 747.5MB/s cp [ +9.2%]
BM_UFlat/6 14037 14034 49858 757.7MB/s c [+10.6%]
BM_UFlat/7 4612 4612 151395 769.5MB/s lsp [ +9.5%]
BM_UFlat/8 1203174 1203007 582 816.3MB/s xls [+19.3%]
BM_UFlat/9 253869 253955 2757 571.1MB/s txt1 [+11.4%]
BM_UFlat/10 219292 219290 3194 544.4MB/s txt2 [+12.1%]
BM_UFlat/11 672135 672131 1000 605.5MB/s txt3 [+11.2%]
BM_UFlat/12 902512 902492 776 509.2MB/s txt4 [+12.5%]
BM_UFlat/13 372110 371998 1881 1.3GB/s bin [ +5.8%]
BM_UFlat/14 50407 50407 10000 723.5MB/s sum [+13.5%]
BM_UFlat/15 5699 5701 100000 707.2MB/s man [+12.4%]
BM_UFlat/16 83448 83424 8383 1.3GB/s pb [ +5.7%]
BM_UFlat/17 256958 256963 2723 684.1MB/s gaviota [ +7.9%]
BM_UValidate/0 42795 42796 16351 2.2GB/s html [+25.8%]
BM_UValidate/1 490672 490622 1427 1.3GB/s urls [+22.7%]
BM_UValidate/2 237 237 2950297 499.0GB/s jpg [+24.9%]
BM_UValidate/3 14610 14611 47901 6.0GB/s pdf [+26.8%]
BM_UValidate/4 171973 171990 4071 2.2GB/s html4 [+25.7%]
git-svn-id: https://snappy.googlecode.com/svn/trunk@38 03e5f5b5-db94-4691-08a0-1a8bf15f6143
2011-06-02 17:59:40 +00:00
|
|
|
// Writer template argument to SnappyDecompressor::DecompressAllTags().
|
2011-03-18 17:14:15 +00:00
|
|
|
class SnappyArrayWriter {
|
|
|
|
private:
|
|
|
|
char* base_;
|
|
|
|
char* op_;
|
|
|
|
char* op_limit_;
|
2020-02-12 18:04:58 +00:00
|
|
|
// If op < op_limit_min_slop_ then it's safe to unconditionally write
|
|
|
|
// kSlopBytes starting at op.
|
|
|
|
char* op_limit_min_slop_;
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
public:
|
|
|
|
inline explicit SnappyArrayWriter(char* dst)
|
|
|
|
: base_(dst),
|
2015-06-22 14:10:47 +00:00
|
|
|
op_(dst),
|
2020-02-12 18:04:58 +00:00
|
|
|
op_limit_(dst),
|
|
|
|
op_limit_min_slop_(dst) {} // Safe default see invariant.
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
inline void SetExpectedLength(size_t len) {
|
|
|
|
op_limit_ = op_ + len;
|
2020-02-12 18:04:58 +00:00
|
|
|
// Prevent pointer from being past the buffer.
|
|
|
|
op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, len);
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
|
2020-10-30 17:37:07 +00:00
|
|
|
inline bool CheckLength() const { return op_ == op_limit_; }
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
char* GetOutputPtr() { return op_; }
|
2020-12-10 00:30:54 +00:00
|
|
|
char* GetBase(ptrdiff_t* op_limit_min_slop) {
|
|
|
|
*op_limit_min_slop = op_limit_min_slop_ - base_;
|
2020-11-14 15:27:36 +00:00
|
|
|
return base_;
|
|
|
|
}
|
2020-02-12 18:04:58 +00:00
|
|
|
void SetOutputPtr(char* op) { op_ = op; }
|
|
|
|
|
|
|
|
inline bool Append(const char* ip, size_t len, char** op_p) {
|
|
|
|
char* op = *op_p;
|
2012-01-04 13:10:46 +00:00
|
|
|
const size_t space_left = op_limit_ - op;
|
2020-02-12 18:04:58 +00:00
|
|
|
if (space_left < len) return false;
|
2020-04-12 00:01:01 +00:00
|
|
|
std::memcpy(op, ip, len);
|
2020-02-12 18:04:58 +00:00
|
|
|
*op_p = op + len;
|
2011-11-23 11:14:17 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool TryFastAppend(const char* ip, size_t available, size_t len,
|
|
|
|
char** op_p) {
|
|
|
|
char* op = *op_p;
|
2012-01-04 13:10:46 +00:00
|
|
|
const size_t space_left = op_limit_ - op;
|
2013-06-30 19:24:03 +00:00
|
|
|
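// (Illustrative note: with kMaximumTagLength == 5, the 16 + kMaximumTagLength
// bound folds the old "16 bytes free for the copy" and "5 bytes free for the
// next tag" checks into a single 21-byte comparison.)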
if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
|
2011-11-23 11:14:17 +00:00
|
|
|
// Fast path, used for the majority (about 95%) of invocations.
|
2017-01-27 08:10:36 +00:00
|
|
|
UnalignedCopy128(ip, op);
|
2020-02-12 18:04:58 +00:00
|
|
|
*op_p = op + len;
|
2011-11-23 11:14:17 +00:00
|
|
|
return true;
|
2011-03-18 17:14:15 +00:00
|
|
|
} else {
|
2011-11-23 11:14:17 +00:00
|
|
|
return false;
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-13 19:47:34 +00:00
|
|
|
SNAPPY_ATTRIBUTE_ALWAYS_INLINE
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
|
2020-12-09 02:27:22 +00:00
|
|
|
assert(len > 0);
|
2020-02-12 18:04:58 +00:00
|
|
|
char* const op = *op_p;
|
2020-05-05 16:13:04 +00:00
|
|
|
assert(op >= base_);
|
2020-02-12 18:04:58 +00:00
|
|
|
char* const op_end = op + len;
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2013-07-29 11:06:44 +00:00
|
|
|
// Check if we try to append from before the start of the buffer.
|
2020-05-05 16:13:04 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - base_) < offset))
|
|
|
|
return false;
|
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
|
|
|
|
op >= op_limit_min_slop_ || offset < len)) {
|
|
|
|
if (op_end > op_limit_ || offset == 0) return false;
|
|
|
|
*op_p = IncrementalCopy(op - offset, op, op_end, op_limit_);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
std::memmove(op, op - offset, kSlopBytes);
|
|
|
|
*op_p = op_end;
|
2011-03-18 17:14:15 +00:00
|
|
|
return true;
|
|
|
|
}
|
2015-06-22 14:03:28 +00:00
|
|
|
inline size_t Produced() const {
|
2017-01-27 08:10:36 +00:00
|
|
|
assert(op_ >= base_);
|
2015-06-22 14:03:28 +00:00
|
|
|
return op_ - base_;
|
|
|
|
}
|
|
|
|
inline void Flush() {}
|
2011-03-18 17:14:15 +00:00
|
|
|
};
|
|
|
|
|
2020-05-05 16:13:04 +00:00
|
|
|
bool RawUncompress(const char* compressed, size_t compressed_length,
|
|
|
|
char* uncompressed) {
|
|
|
|
ByteArraySource reader(compressed, compressed_length);
|
2011-03-18 17:14:15 +00:00
|
|
|
return RawUncompress(&reader, uncompressed);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool RawUncompress(Source* compressed, char* uncompressed) {
|
|
|
|
SnappyArrayWriter output(uncompressed);
|
2013-06-12 19:51:15 +00:00
|
|
|
return InternalUncompress(compressed, &output);
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
|
2020-05-05 16:13:04 +00:00
|
|
|
bool Uncompress(const char* compressed, size_t compressed_length,
|
|
|
|
std::string* uncompressed) {
|
2011-03-18 17:14:15 +00:00
|
|
|
size_t ulength;
|
2020-05-05 16:13:04 +00:00
|
|
|
if (!GetUncompressedLength(compressed, compressed_length, &ulength)) {
|
2011-03-18 17:14:15 +00:00
|
|
|
return false;
|
|
|
|
}
|
2013-06-12 19:51:15 +00:00
|
|
|
// On 32-bit builds: max_size() < kuint32max. Check for that instead
|
|
|
|
// of crashing (e.g., consider externally specified compressed data).
|
|
|
|
if (ulength > uncompressed->max_size()) {
|
2011-03-18 17:14:15 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
STLStringResizeUninitialized(uncompressed, ulength);
|
2020-05-05 16:13:04 +00:00
|
|
|
return RawUncompress(compressed, compressed_length,
|
|
|
|
string_as_array(uncompressed));
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
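A minimal round-trip sketch of the std::string convenience APIs defined in this file; the helper name RoundTrip is illustrative:
// Illustrative sketch only (not part of snappy.cc).
#include <cassert>
#include <string>
#include "snappy.h"

void RoundTrip(const std::string& input) {
  std::string compressed;
  snappy::Compress(input.data(), input.size(), &compressed);

  std::string restored;
  if (snappy::Uncompress(compressed.data(), compressed.size(), &restored)) {
    assert(restored == input);
  }
}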
|
|
|
|
|
|
|
// A Writer that drops everything on the floor and just does validation
|
|
|
|
class SnappyDecompressionValidator {
|
|
|
|
private:
|
|
|
|
size_t expected_;
|
|
|
|
size_t produced_;
|
|
|
|
|
|
|
|
public:
|
2020-10-30 17:37:07 +00:00
|
|
|
inline SnappyDecompressionValidator() : expected_(0), produced_(0) {}
|
|
|
|
inline void SetExpectedLength(size_t len) { expected_ = len; }
|
2020-02-12 18:04:58 +00:00
|
|
|
size_t GetOutputPtr() { return produced_; }
|
2020-12-10 00:30:54 +00:00
|
|
|
size_t GetBase(ptrdiff_t* op_limit_min_slop) {
|
|
|
|
*op_limit_min_slop = std::numeric_limits<ptrdiff_t>::max() - kSlopBytes + 1;
|
|
|
|
return 1;
|
|
|
|
}
|
2020-02-12 18:04:58 +00:00
|
|
|
void SetOutputPtr(size_t op) { produced_ = op; }
|
2020-10-30 17:37:07 +00:00
|
|
|
inline bool CheckLength() const { return expected_ == produced_; }
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool Append(const char* ip, size_t len, size_t* produced) {
|
2020-05-05 16:13:04 +00:00
|
|
|
// TODO: Switch to [[maybe_unused]] when we can assume C++17.
|
|
|
|
(void)ip;
|
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
*produced += len;
|
|
|
|
return *produced <= expected_;
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool TryFastAppend(const char* ip, size_t available, size_t length,
|
|
|
|
size_t* produced) {
|
2020-05-05 16:13:04 +00:00
|
|
|
// TODO: Switch to [[maybe_unused]] when we can assume C++17.
|
|
|
|
(void)ip;
|
|
|
|
(void)available;
|
|
|
|
(void)length;
|
|
|
|
(void)produced;
|
|
|
|
|
2011-11-23 11:14:17 +00:00
|
|
|
return false;
|
|
|
|
}
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool AppendFromSelf(size_t offset, size_t len, size_t* produced) {
|
2013-07-29 11:06:44 +00:00
|
|
|
// See SnappyArrayWriter::AppendFromSelf for an explanation of
|
|
|
|
// the "offset - 1u" trick.
|
2020-02-12 18:04:58 +00:00
|
|
|
if (*produced <= offset - 1u) return false;
|
|
|
|
*produced += len;
|
|
|
|
return *produced <= expected_;
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
2015-06-22 14:03:28 +00:00
|
|
|
inline void Flush() {}
|
2011-03-18 17:14:15 +00:00
|
|
|
};
|
|
|
|
|
2020-05-05 16:13:04 +00:00
|
|
|
bool IsValidCompressedBuffer(const char* compressed, size_t compressed_length) {
|
|
|
|
ByteArraySource reader(compressed, compressed_length);
|
2011-03-18 17:14:15 +00:00
|
|
|
SnappyDecompressionValidator writer;
|
2013-06-12 19:51:15 +00:00
|
|
|
return InternalUncompress(&reader, &writer);
|
2011-03-18 17:14:15 +00:00
|
|
|
}
|
|
|
|
|
2015-06-22 14:03:28 +00:00
|
|
|
bool IsValidCompressed(Source* compressed) {
|
|
|
|
SnappyDecompressionValidator writer;
|
|
|
|
return InternalUncompress(compressed, &writer);
|
|
|
|
}
|
|
|
|
|
2024-05-21 19:25:25 +00:00
|
|
|
void RawCompress(const char* input, size_t input_length, char* compressed,
|
|
|
|
size_t* compressed_length) {
|
|
|
|
RawCompress(input, input_length, compressed, compressed_length,
|
|
|
|
CompressionOptions{});
|
|
|
|
}
|
|
|
|
|
2020-10-30 17:37:07 +00:00
|
|
|
void RawCompress(const char* input, size_t input_length, char* compressed,
|
2024-04-03 09:40:00 +00:00
|
|
|
size_t* compressed_length, CompressionOptions options) {
|
2011-03-18 17:14:15 +00:00
|
|
|
ByteArraySource reader(input, input_length);
|
|
|
|
UncheckedByteArraySink writer(compressed);
|
2024-04-03 09:40:00 +00:00
|
|
|
Compress(&reader, &writer, options);
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
// Compute how many bytes were added
|
|
|
|
*compressed_length = (writer.CurrentDestination() - compressed);
|
|
|
|
}
|
|
|
|
|
2024-05-21 19:25:25 +00:00
|
|
|
void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length,
|
|
|
|
char* compressed, size_t* compressed_length) {
|
|
|
|
RawCompressFromIOVec(iov, uncompressed_length, compressed, compressed_length,
|
|
|
|
CompressionOptions{});
|
|
|
|
}
|
|
|
|
|
2022-09-26 17:23:33 +00:00
|
|
|
void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length,
|
2024-04-03 09:40:00 +00:00
|
|
|
char* compressed, size_t* compressed_length,
|
|
|
|
CompressionOptions options) {
|
2022-09-26 17:23:33 +00:00
|
|
|
SnappyIOVecReader reader(iov, uncompressed_length);
|
|
|
|
UncheckedByteArraySink writer(compressed);
|
2024-04-03 09:40:00 +00:00
|
|
|
Compress(&reader, &writer, options);
|
2022-09-26 17:23:33 +00:00
|
|
|
|
|
|
|
// Compute how many bytes were added.
|
|
|
|
*compressed_length = writer.CurrentDestination() - compressed;
|
|
|
|
}
|
|
|
|
|
2024-05-21 19:25:25 +00:00
|
|
|
size_t Compress(const char* input, size_t input_length,
|
|
|
|
std::string* compressed) {
|
|
|
|
return Compress(input, input_length, compressed, CompressionOptions{});
|
|
|
|
}
|
|
|
|
|
2024-04-03 09:40:00 +00:00
|
|
|
size_t Compress(const char* input, size_t input_length, std::string* compressed,
|
|
|
|
CompressionOptions options) {
|
2011-03-18 17:14:15 +00:00
|
|
|
// Pre-grow the buffer to the max length of the compressed output
|
2016-05-26 21:51:33 +00:00
|
|
|
STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length));
|
2011-03-18 17:14:15 +00:00
|
|
|
|
|
|
|
size_t compressed_length;
|
|
|
|
RawCompress(input, input_length, string_as_array(compressed),
|
2024-04-03 09:40:00 +00:00
|
|
|
&compressed_length, options);
|
2022-09-26 17:23:33 +00:00
|
|
|
compressed->erase(compressed_length);
|
|
|
|
return compressed_length;
|
|
|
|
}
|
|
|
|
|
2024-05-21 19:25:25 +00:00
|
|
|
size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt,
|
|
|
|
std::string* compressed) {
|
|
|
|
return CompressFromIOVec(iov, iov_cnt, compressed, CompressionOptions{});
|
|
|
|
}
|
|
|
|
|
2022-09-26 17:23:33 +00:00
|
|
|
size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt,
|
2024-04-03 09:40:00 +00:00
|
|
|
std::string* compressed, CompressionOptions options) {
|
2022-09-26 17:23:33 +00:00
|
|
|
// Compute the number of bytes to be compressed.
|
|
|
|
size_t uncompressed_length = 0;
|
2022-10-08 01:11:32 +00:00
|
|
|
for (size_t i = 0; i < iov_cnt; ++i) {
|
2022-09-26 17:23:33 +00:00
|
|
|
uncompressed_length += iov[i].iov_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Pre-grow the buffer to the max length of the compressed output.
|
|
|
|
STLStringResizeUninitialized(compressed, MaxCompressedLength(
|
|
|
|
uncompressed_length));
|
|
|
|
|
|
|
|
size_t compressed_length;
|
|
|
|
RawCompressFromIOVec(iov, uncompressed_length, string_as_array(compressed),
|
2024-04-03 09:40:00 +00:00
|
|
|
&compressed_length, options);
|
2022-09-26 17:23:33 +00:00
|
|
|
compressed->erase(compressed_length);
|
2011-03-18 17:14:15 +00:00
|
|
|
return compressed_length;
|
|
|
|
}
|
|
|
|
|
2015-06-22 14:03:28 +00:00
|
|
|
// -----------------------------------------------------------------------
|
|
|
|
// Sink interface
|
|
|
|
// -----------------------------------------------------------------------
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2015-06-22 14:03:28 +00:00
|
|
|
// A type that decompresses into a Sink. The template parameter
|
|
|
|
// Allocator must export one method "char* Allocate(int size);", which
|
|
|
|
// allocates a buffer of "size" and appends that to the destination.
|
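// (Illustrative note: the writer below also calls allocator_.Flush(size) from
// its own Flush(), so in practice the Allocator additionally needs a
// "void Flush(size_t size);" method; see SnappySinkAllocator further down.)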
|
|
|
template <typename Allocator>
|
|
|
|
class SnappyScatteredWriter {
|
|
|
|
Allocator allocator_;
|
|
|
|
|
|
|
|
// We need random access into the data generated so far. Therefore
|
|
|
|
// we keep track of all of the generated data as an array of blocks.
|
|
|
|
// All of the blocks except the last have length kBlockSize.
|
2016-11-28 16:49:41 +00:00
|
|
|
std::vector<char*> blocks_;
|
2015-06-22 14:03:28 +00:00
|
|
|
size_t expected_;
|
|
|
|
|
|
|
|
// Total size of all fully generated blocks so far
|
|
|
|
size_t full_size_;
|
|
|
|
|
|
|
|
// Pointer into current output block
|
2020-10-30 17:37:07 +00:00
|
|
|
char* op_base_; // Base of output block
|
|
|
|
char* op_ptr_; // Pointer to next unfilled byte in block
|
|
|
|
char* op_limit_; // Pointer just past block
|
2020-02-12 18:04:58 +00:00
|
|
|
// If op < op_limit_min_slop_ then it's safe to unconditionally write
|
|
|
|
// kSlopBytes starting at op.
|
|
|
|
char* op_limit_min_slop_;
|
2015-06-22 14:03:28 +00:00
|
|
|
|
2020-10-30 17:37:07 +00:00
|
|
|
inline size_t Size() const { return full_size_ + (op_ptr_ - op_base_); }
|
2015-06-22 14:03:28 +00:00
|
|
|
|
|
|
|
bool SlowAppend(const char* ip, size_t len);
|
|
|
|
bool SlowAppendFromSelf(size_t offset, size_t len);
|
|
|
|
|
|
|
|
public:
|
|
|
|
inline explicit SnappyScatteredWriter(const Allocator& allocator)
|
|
|
|
: allocator_(allocator),
|
|
|
|
full_size_(0),
|
|
|
|
op_base_(NULL),
|
|
|
|
op_ptr_(NULL),
|
2020-05-21 21:30:02 +00:00
|
|
|
op_limit_(NULL),
|
2020-10-30 17:37:07 +00:00
|
|
|
op_limit_min_slop_(NULL) {}
|
2020-02-12 18:04:58 +00:00
|
|
|
char* GetOutputPtr() { return op_ptr_; }
|
2020-12-10 00:30:54 +00:00
|
|
|
char* GetBase(ptrdiff_t* op_limit_min_slop) {
|
|
|
|
*op_limit_min_slop = op_limit_min_slop_ - op_base_;
|
2020-11-14 15:27:36 +00:00
|
|
|
return op_base_;
|
|
|
|
}
|
2020-02-12 18:04:58 +00:00
|
|
|
void SetOutputPtr(char* op) { op_ptr_ = op; }
|
2015-06-22 14:03:28 +00:00
|
|
|
|
|
|
|
inline void SetExpectedLength(size_t len) {
|
|
|
|
assert(blocks_.empty());
|
|
|
|
expected_ = len;
|
|
|
|
}
|
|
|
|
|
2020-10-30 17:37:07 +00:00
|
|
|
inline bool CheckLength() const { return Size() == expected_; }
|
2015-06-22 14:03:28 +00:00
|
|
|
|
|
|
|
// Return the number of bytes actually uncompressed so far
|
2020-10-30 17:37:07 +00:00
|
|
|
inline size_t Produced() const { return Size(); }
|
2015-06-22 14:03:28 +00:00
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool Append(const char* ip, size_t len, char** op_p) {
|
|
|
|
char* op = *op_p;
|
|
|
|
size_t avail = op_limit_ - op;
|
2015-06-22 14:03:28 +00:00
|
|
|
if (len <= avail) {
|
|
|
|
// Fast path
|
2020-04-12 00:01:01 +00:00
|
|
|
std::memcpy(op, ip, len);
|
2020-02-12 18:04:58 +00:00
|
|
|
*op_p = op + len;
|
2015-06-22 14:03:28 +00:00
|
|
|
return true;
|
|
|
|
} else {
|
2020-02-12 18:04:58 +00:00
|
|
|
op_ptr_ = op;
|
|
|
|
bool res = SlowAppend(ip, len);
|
|
|
|
*op_p = op_ptr_;
|
|
|
|
return res;
|
2015-06-22 14:03:28 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool TryFastAppend(const char* ip, size_t available, size_t length,
|
|
|
|
char** op_p) {
|
|
|
|
char* op = *op_p;
|
2015-06-22 14:03:28 +00:00
|
|
|
const int space_left = op_limit_ - op;
|
|
|
|
if (length <= 16 && available >= 16 + kMaximumTagLength &&
|
|
|
|
space_left >= 16) {
|
|
|
|
// Fast path, used for the majority (about 95%) of invocations.
|
2017-01-27 08:10:36 +00:00
|
|
|
UnalignedCopy128(ip, op);
|
2020-02-12 18:04:58 +00:00
|
|
|
*op_p = op + length;
|
2015-06-22 14:03:28 +00:00
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-02-12 18:04:58 +00:00
|
|
|
inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
|
|
|
|
char* op = *op_p;
|
2020-05-05 16:13:04 +00:00
|
|
|
assert(op >= op_base_);
|
2020-02-12 18:04:58 +00:00
|
|
|
// Check if we try to append from before the start of the buffer.
|
|
|
|
if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
|
2020-10-30 17:37:07 +00:00
|
|
|
static_cast<size_t>(op - op_base_) < offset ||
|
|
|
|
op >= op_limit_min_slop_ || offset < len)) {
|
2020-02-12 18:04:58 +00:00
|
|
|
if (offset == 0) return false;
|
2020-05-05 16:13:04 +00:00
|
|
|
if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - op_base_) < offset ||
|
2020-11-02 17:46:36 +00:00
|
|
|
op + len > op_limit_)) {
|
2020-02-12 18:04:58 +00:00
|
|
|
op_ptr_ = op;
|
|
|
|
bool res = SlowAppendFromSelf(offset, len);
|
|
|
|
*op_p = op_ptr_;
|
|
|
|
return res;
|
|
|
|
}
|
2020-11-02 17:46:36 +00:00
|
|
|
*op_p = IncrementalCopy(op - offset, op, op + len, op_limit_);
|
2017-01-27 08:10:36 +00:00
|
|
|
return true;
|
2015-06-22 14:03:28 +00:00
|
|
|
}
|
2020-02-12 18:04:58 +00:00
|
|
|
// Fast path
|
|
|
|
char* const op_end = op + len;
|
|
|
|
std::memmove(op, op - offset, kSlopBytes);
|
|
|
|
*op_p = op_end;
|
|
|
|
return true;
|
2015-06-22 14:03:28 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Called at the end of decompression. We ask the allocator to
|
|
|
|
// write all blocks to the sink.
|
|
|
|
inline void Flush() { allocator_.Flush(Produced()); }
|
|
|
|
};
|
|
|
|
|
2020-10-30 17:37:07 +00:00
|
|
|
template <typename Allocator>
|
2015-06-22 14:03:28 +00:00
|
|
|
bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
|
|
|
|
size_t avail = op_limit_ - op_ptr_;
|
|
|
|
while (len > avail) {
|
|
|
|
// Completely fill this block
|
2020-04-12 00:01:01 +00:00
|
|
|
std::memcpy(op_ptr_, ip, avail);
|
2015-06-22 14:03:28 +00:00
|
|
|
op_ptr_ += avail;
|
|
|
|
assert(op_limit_ - op_ptr_ == 0);
|
|
|
|
full_size_ += (op_ptr_ - op_base_);
|
|
|
|
len -= avail;
|
|
|
|
ip += avail;
|
|
|
|
|
|
|
|
// Bounds check
|
2020-02-12 18:04:58 +00:00
|
|
|
if (full_size_ + len > expected_) return false;
|
2015-06-22 14:03:28 +00:00
|
|
|
|
|
|
|
// Make new block
|
2017-03-13 19:46:43 +00:00
|
|
|
size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
|
2015-06-22 14:03:28 +00:00
|
|
|
op_base_ = allocator_.Allocate(bsize);
|
|
|
|
op_ptr_ = op_base_;
|
|
|
|
op_limit_ = op_base_ + bsize;
|
2020-02-12 18:04:58 +00:00
|
|
|
op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, bsize);
|
|
|
|
|
2015-06-22 14:03:28 +00:00
|
|
|
blocks_.push_back(op_base_);
|
|
|
|
avail = bsize;
|
|
|
|
}
|
|
|
|
|
2020-04-12 00:01:01 +00:00
|
|
|
std::memcpy(op_ptr_, ip, len);
|
2015-06-22 14:03:28 +00:00
|
|
|
op_ptr_ += len;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-10-30 17:37:07 +00:00
|
|
|
template <typename Allocator>
|
2015-06-22 14:03:28 +00:00
|
|
|
bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
|
|
|
|
size_t len) {
|
|
|
|
// Overflow check
|
|
|
|
// See SnappyArrayWriter::AppendFromSelf for an explanation of
|
|
|
|
// the "offset - 1u" trick.
|
|
|
|
const size_t cur = Size();
|
|
|
|
if (offset - 1u >= cur) return false;
|
|
|
|
if (expected_ - cur < len) return false;
|
|
|
|
|
|
|
|
// Currently we shouldn't ever hit this path because Compress() chops the
|
|
|
|
// input into blocks and does not create cross-block copies. However, it is
|
|
|
|
// nice if we do not rely on that, since we can get better compression if we
|
|
|
|
// allow cross-block copies and thus might want to change the compressor in
|
|
|
|
// the future.
|
2020-02-12 18:04:58 +00:00
|
|
|
// TODO: Replace this with a properly optimized path. This is not
|
|
|
|
// triggered right now, but it is so slow that it would regress
|
|
|
|
// performance unacceptably if it were.
|
2015-06-22 14:03:28 +00:00
|
|
|
size_t src = cur - offset;
|
2020-02-12 18:04:58 +00:00
|
|
|
char* op = op_ptr_;
|
2015-06-22 14:03:28 +00:00
|
|
|
while (len-- > 0) {
|
2020-10-30 17:37:07 +00:00
|
|
|
char c = blocks_[src >> kBlockLog][src & (kBlockSize - 1)];
|
2020-02-12 18:04:58 +00:00
|
|
|
if (!Append(&c, 1, &op)) {
|
|
|
|
op_ptr_ = op;
|
|
|
|
return false;
|
|
|
|
}
|
2015-06-22 14:03:28 +00:00
|
|
|
src++;
|
|
|
|
}
|
2020-02-12 18:04:58 +00:00
|
|
|
op_ptr_ = op;
|
2015-06-22 14:03:28 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
class SnappySinkAllocator {
|
|
|
|
public:
|
2020-10-30 17:37:07 +00:00
|
|
|
explicit SnappySinkAllocator(Sink* dest) : dest_(dest) {}
|
2015-06-22 14:03:28 +00:00
|
|
|
|
|
|
|
char* Allocate(int size) {
|
|
|
|
Datablock block(new char[size], size);
|
|
|
|
blocks_.push_back(block);
|
|
|
|
return block.data;
|
|
|
|
}
|
2011-03-18 17:14:15 +00:00
|
|
|
|
2015-06-22 14:03:28 +00:00
|
|
|
// We flush only at the end, because the writer wants
|
|
|
|
// random access to the blocks and once we hand the
|
|
|
|
// block over to the sink, we can't access it anymore.
|
|
|
|
// Also we don't write more than has been actually written
|
|
|
|
// to the blocks.
|
|
|
|
void Flush(size_t size) {
|
|
|
|
size_t size_written = 0;
|
2020-05-04 12:31:03 +00:00
|
|
|
for (Datablock& block : blocks_) {
|
|
|
|
size_t block_size = std::min<size_t>(block.size, size - size_written);
|
|
|
|
dest_->AppendAndTakeOwnership(block.data, block_size,
|
2015-06-22 14:03:28 +00:00
|
|
|
&SnappySinkAllocator::Deleter, NULL);
|
|
|
|
size_written += block_size;
|
|
|
|
}
|
|
|
|
blocks_.clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
struct Datablock {
|
|
|
|
char* data;
|
|
|
|
size_t size;
|
|
|
|
Datablock(char* p, size_t s) : data(p), size(s) {}
|
|
|
|
};
|
|
|
|
|
|
|
|
static void Deleter(void* arg, const char* bytes, size_t size) {
|
2020-05-05 16:13:04 +00:00
|
|
|
// TODO: Switch to [[maybe_unused]] when we can assume C++17.
|
|
|
|
(void)arg;
|
|
|
|
(void)size;
|
|
|
|
|
2015-06-22 14:03:28 +00:00
|
|
|
delete[] bytes;
|
|
|
|
}
|
|
|
|
|
|
|
|
Sink* dest_;
|
2016-11-28 16:49:41 +00:00
|
|
|
std::vector<Datablock> blocks_;
|
2015-06-22 14:03:28 +00:00
|
|
|
|
|
|
|
// Note: copying this object is allowed
|
|
|
|
};
|
|
|
|
|
|
|
|
size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
|
|
|
|
SnappySinkAllocator allocator(uncompressed);
|
|
|
|
SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
|
|
|
|
InternalUncompress(compressed, &writer);
|
|
|
|
return writer.Produced();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Uncompress(Source* compressed, Sink* uncompressed) {
|
|
|
|
// Read the uncompressed length from the front of the compressed input
|
|
|
|
SnappyDecompressor decompressor(compressed);
|
2020-04-12 20:03:50 +00:00
|
|
|
uint32_t uncompressed_len = 0;
|
2015-06-22 14:03:28 +00:00
|
|
|
if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
char c;
|
|
|
|
size_t allocated_size;
|
2020-10-30 17:37:07 +00:00
|
|
|
char* buf = uncompressed->GetAppendBufferVariable(1, uncompressed_len, &c, 1,
|
|
|
|
&allocated_size);
|
2015-06-22 14:03:28 +00:00
|
|
|
|
2017-02-01 16:34:26 +00:00
|
|
|
const size_t compressed_len = compressed->Available();
|
2015-06-22 14:03:28 +00:00
|
|
|
// If we can get a flat buffer, then use it, otherwise do block by block
|
|
|
|
// uncompression
|
|
|
|
if (allocated_size >= uncompressed_len) {
|
|
|
|
SnappyArrayWriter writer(buf);
|
2017-02-01 16:34:26 +00:00
|
|
|
bool result = InternalUncompressAllTags(&decompressor, &writer,
|
|
|
|
compressed_len, uncompressed_len);
|
2015-06-22 14:03:28 +00:00
|
|
|
uncompressed->Append(buf, writer.Produced());
|
|
|
|
return result;
|
|
|
|
} else {
|
|
|
|
SnappySinkAllocator allocator(uncompressed);
|
|
|
|
SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
|
2017-02-01 16:34:26 +00:00
|
|
|
return InternalUncompressAllTags(&decompressor, &writer, compressed_len,
|
|
|
|
uncompressed_len);
|
2015-06-22 14:03:28 +00:00
|
|
|
}
|
|
|
|
}
|
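A minimal sketch of the Source/Sink decompression entry point above, using the byte-array adapters from snappy-sinksource.h; the caller is assumed to have sized output from GetUncompressedLength():
// Illustrative sketch only (not part of snappy.cc).
#include "snappy-sinksource.h"
#include "snappy.h"

bool UncompressViaSink(const char* compressed, size_t compressed_length,
                       char* output) {
  snappy::ByteArraySource source(compressed, compressed_length);
  snappy::UncheckedByteArraySink sink(output);
  return snappy::Uncompress(&source, &sink);
}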
|
|
|
|
2018-08-16 17:44:34 +00:00
|
|
|
} // namespace snappy
|