// Copyright 2008 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Internals shared between the Snappy implementation and its unittest.
#ifndef THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
#define THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_

#include <utility>

#include "snappy-stubs-internal.h"
#if SNAPPY_HAVE_SSSE3
// Please do not replace with <x86intrin.h> or with headers that assume more
// advanced SSE versions without checking with all the OWNERS.
#include <emmintrin.h>
#include <tmmintrin.h>
#endif

#if SNAPPY_HAVE_NEON
#include <arm_neon.h>
#endif

#if SNAPPY_HAVE_SSSE3 || SNAPPY_HAVE_NEON
#define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 1
#else
#define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 0
#endif

namespace snappy {
namespace internal {

#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
#if SNAPPY_HAVE_SSSE3
using V128 = __m128i;
#elif SNAPPY_HAVE_NEON
using V128 = uint8x16_t;
#endif
// Load 128 bits of integer data. `src` must be 16-byte aligned.
inline V128 V128_Load(const V128* src);
// Load 128 bits of integer data. `src` does not need to be aligned.
inline V128 V128_LoadU(const V128* src);
// Store 128 bits of integer data. `dst` does not need to be aligned.
inline void V128_StoreU(V128* dst, V128 val);
// Shuffle packed 8-bit integers using a shuffle mask.
// Each packed integer in the shuffle mask must be in [0,16).
inline V128 V128_Shuffle(V128 input, V128 shuffle_mask);
// Constructs V128 with 16 chars |c|.
inline V128 V128_DupChar(char c);
#if SNAPPY_HAVE_SSSE3
inline V128 V128_Load(const V128* src) { return _mm_load_si128(src); }
inline V128 V128_LoadU(const V128* src) { return _mm_loadu_si128(src); }
inline void V128_StoreU(V128* dst, V128 val) { _mm_storeu_si128(dst, val); }
inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
  return _mm_shuffle_epi8(input, shuffle_mask);
}
inline V128 V128_DupChar(char c) { return _mm_set1_epi8(c); }
#elif SNAPPY_HAVE_NEON
inline V128 V128_Load(const V128* src) {
  return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
}

inline V128 V128_LoadU(const V128* src) {
  return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
}

inline void V128_StoreU(V128* dst, V128 val) {
  vst1q_u8(reinterpret_cast<uint8_t*>(dst), val);
}

inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
  assert(vminvq_u8(shuffle_mask) >= 0 && vmaxvq_u8(shuffle_mask) <= 15);
  return vqtbl1q_u8(input, shuffle_mask);
}

inline V128 V128_DupChar(char c) { return vdupq_n_u8(c); }
#endif
#endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
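// A minimal usage sketch of the wrappers above (illustrative only; not part
// of the library). A zero shuffle mask broadcasts byte 0 of the input to all
// 16 lanes on both SSSE3 (_mm_shuffle_epi8) and NEON (vqtbl1q_u8). The local
// names here are hypothetical:
//
//   alignas(16) unsigned char buf[16] = {7, 1, 2, 3};
//   V128 mask = V128_DupChar(0);
//   V128 v = V128_Load(reinterpret_cast<const V128*>(buf));
//   v = V128_Shuffle(v, mask);  // every lane now holds buf[0] == 7
//   V128_StoreU(reinterpret_cast<V128*>(buf), v);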
// Working memory performs a single allocation to hold all scratch space
// required for compression.
class WorkingMemory {
 public:
  explicit WorkingMemory(size_t input_size);
  ~WorkingMemory();

  // Allocates and clears a hash table using memory in "*this",
  // stores the number of buckets in "*table_size" and returns a pointer to
  // the base of the hash table.
  uint16_t* GetHashTable(size_t fragment_size, int* table_size) const;
  char* GetScratchInput() const { return input_; }
  char* GetScratchOutput() const { return output_; }

 private:
  char* mem_;        // the allocated memory, never nullptr
  size_t size_;      // the size of the allocated memory, never 0
  uint16_t* table_;  // the pointer to the hashtable
  char* input_;      // the pointer to the input scratch buffer
  char* output_;     // the pointer to the output scratch buffer

  // No copying
  WorkingMemory(const WorkingMemory&);
  void operator=(const WorkingMemory&);
};
// Flat array compression that does not emit the "uncompressed length"
// prefix. Compresses "input" string to the "*op" buffer.
//
// REQUIRES: "input_length <= kBlockSize"
// REQUIRES: "op" points to an array of memory that is at least
// "MaxCompressedLength(input_length)" in size.
// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
// REQUIRES: "table_size" is a power of two
//
// Returns an "end" pointer into "op" buffer.
// "end - op" is the compressed size of "input".
char* CompressFragment(const char* input, size_t input_length, char* op,
                       uint16_t* table, const int table_size);
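// A minimal sketch of how the pieces above fit together (illustrative only;
// the real driver lives in snappy.cc, which also emits the uncompressed
// length prefix and splits the input into fragments of at most kBlockSize):
//
//   WorkingMemory wmem(input_size);
//   int table_size;
//   uint16_t* table = wmem.GetHashTable(fragment_size, &table_size);
//   char* end = CompressFragment(fragment, fragment_size, op, table,
//                                table_size);
//   size_t compressed_size = end - op;  // "end" points one past the output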
// Find the largest n such that
//
//   s1[0,n-1] == s2[0,n-1]
//   and n <= (s2_limit - s2).
//
// Return make_pair(n, n < 8).
// Does not read *s2_limit or beyond.
// Does not read *(s1 + (s2_limit - s2)) or beyond.
// Requires that s2_limit >= s2.
//
// In addition, populate *data with the next 5 bytes from the end of the
// match. This is only done if 8 bytes are available (s2_limit - s2 >= 8).
// The point is that on some architectures this can be done faster in this
// routine than by a subsequent load from s2 + n.
//
// Separate implementation for 64-bit, little-endian CPUs.
#if !SNAPPY_IS_BIG_ENDIAN && \
    (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || \
     defined(ARCH_ARM))
static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
                                                      const char* s2,
                                                      const char* s2_limit,
                                                      uint64_t* data) {
  assert(s2_limit >= s2);
  size_t matched = 0;
  // This block isn't necessary for correctness; we could just start looping
  // immediately. As an optimization though, it is useful. It creates some
  // fairly common code paths that determine, without extra effort, whether
  // the match length is less than 8. In short, we are hoping to avoid a
  // conditional branch, and perhaps get better code layout from the C++
  // compiler.
  if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
    uint64_t a1 = UNALIGNED_LOAD64(s1);
    uint64_t a2 = UNALIGNED_LOAD64(s2);
    if (SNAPPY_PREDICT_TRUE(a1 != a2)) {
      // This code is critical for performance. The reason is that it
      // determines how much to advance `ip` (s2). This obviously depends on
      // both the loads from the `candidate` (s1) and `ip`. Furthermore the
      // next `candidate` depends on the advanced `ip` calculated here through
      // a load, hash and new candidate hash lookup (a lot of cycles). This
      // makes s1 (i.e. `candidate`) the variable that limits throughput. This
      // is the reason we go through hoops to have this function update `data`
      // for the next iteration. The straightforward code would use *data,
      // given by
      //
      //   *data = UNALIGNED_LOAD64(s2 + matched_bytes)  (latency of 5 cycles),
      //
      // as input for the hash table lookup to find the next candidate.
      // However this forces the load onto the data dependency chain of s1,
      // because matched_bytes directly depends on s1. However matched_bytes
      // is 0..7, so we can also calculate *data by
      //
      //   *data = AlignRight(UNALIGNED_LOAD64(s2), UNALIGNED_LOAD64(s2 + 8),
      //                      matched_bytes);
      //
      // The loads do not depend on s1 anymore and are thus off the
      // bottleneck. The straightforward implementation on x86_64 would be to
      // use
      //
      //   shrd rax, rdx, cl  (cl being matched_bytes * 8)
      //
      // unfortunately shrd with a variable shift has a 4 cycle latency. So
      // this only wins 1 cycle. The BMI2 shrx instruction is a 1 cycle
      // variable shift instruction but can only shift 64 bits. If we focus on
      // just obtaining the least significant 4 bytes, we can obtain this by
      //
      //   *data = ConditionalMove(matched_bytes < 4, UNALIGNED_LOAD64(s2),
      //                           UNALIGNED_LOAD64(s2 + 4) >>
      //                               ((matched_bytes & 3) * 8));
      //
      // Written like above this is not a big win: the conditional move would
      // be a cmp followed by a cmov (2 cycles) followed by a shift (1 cycle).
      // However matched_bytes < 4 is equal to
      // static_cast<uint32_t>(xorval) != 0. Written that way, the conditional
      // move (2 cycles) can execute in parallel with FindLSBSetNonZero64
      // (tzcnt), which takes 3 cycles.
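      //
      // For example: if a1 and a2 first differ in byte 3, the lowest set bit
      // of xorval falls in bits 24..31, so matched_bytes == shift >> 3 == 3.
      // static_cast<uint32_t>(xorval) is nonzero, so a2 keeps the load from
      // s2, and a2 >> (shift & 24) == a2 >> 24 produces the bytes starting at
      // s2 + 3, i.e. the bytes following the end of the match. If instead the
      // first difference is in byte 5, the low 32 bits of xorval are zero,
      // a2 is replaced by a3 = UNALIGNED_LOAD64(s2 + 4), shift & 24 == 8, and
      // a3 >> 8 produces the bytes starting at s2 + 5.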
      uint64_t xorval = a1 ^ a2;
      int shift = Bits::FindLSBSetNonZero64(xorval);
      size_t matched_bytes = shift >> 3;
      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
#ifndef __x86_64__
      a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
#else
      // Ideally this would just be
      //
      //   a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
      //
      // However clang correctly infers that the above statement participates
      // in a critical data dependency chain and thus, unfortunately, refuses
      // to use a conditional move (it's tuned to cut data dependencies). In
      // this case there is a longer parallel chain anyway AND this will be
      // fairly unpredictable.
      asm("testl %k2, %k2\n\t"
          "cmovzq %1, %0\n\t"
          : "+r"(a2)
          : "r"(a3), "r"(xorval)
          : "cc");
#endif
      *data = a2 >> (shift & (3 * 8));
      return std::pair<size_t, bool>(matched_bytes, true);
    } else {
      matched = 8;
      s2 += 8;
    }
  }
  SNAPPY_PREFETCH(s1 + 64);
  SNAPPY_PREFETCH(s2 + 64);

  // Find out how long the match is. We loop over the data 64 bits at a
  // time until we find a 64-bit block that doesn't match; then we find
  // the first non-matching bit and use that to calculate the total
  // length of the match.
  while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
    uint64_t a1 = UNALIGNED_LOAD64(s1 + matched);
    uint64_t a2 = UNALIGNED_LOAD64(s2);
    if (a1 == a2) {
      s2 += 8;
      matched += 8;
    } else {
      uint64_t xorval = a1 ^ a2;
      int shift = Bits::FindLSBSetNonZero64(xorval);
      size_t matched_bytes = shift >> 3;
      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
#ifndef __x86_64__
      a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
#else
      asm("testl %k2, %k2\n\t"
          "cmovzq %1, %0\n\t"
          : "+r"(a2)
          : "r"(a3), "r"(xorval)
          : "cc");
#endif
      *data = a2 >> (shift & (3 * 8));
      matched += matched_bytes;
      assert(matched >= 8);
      return std::pair<size_t, bool>(matched, false);
    }
  }

  while (SNAPPY_PREDICT_TRUE(s2 < s2_limit)) {
    if (s1[matched] == *s2) {
      ++s2;
      ++matched;
    } else {
      if (s2 <= s2_limit - 8) {
        *data = UNALIGNED_LOAD64(s2);
      }
      return std::pair<size_t, bool>(matched, matched < 8);
    }
  }
  return std::pair<size_t, bool>(matched, matched < 8);
}
#else
static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
                                                      const char* s2,
                                                      const char* s2_limit,
                                                      uint64_t* data) {
  // Implementation based on the x86-64 version, above.
  assert(s2_limit >= s2);
  int matched = 0;

  while (s2 <= s2_limit - 4 &&
         UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
    s2 += 4;
    matched += 4;
  }
  if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
    uint32_t x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
    int matching_bits = Bits::FindLSBSetNonZero(x);
    matched += matching_bits >> 3;
    s2 += matching_bits >> 3;
  } else {
    while ((s2 < s2_limit) && (s1[matched] == *s2)) {
      ++s2;
      ++matched;
    }
  }
  if (s2 <= s2_limit - 8) *data = LittleEndian::Load64(s2);
  return std::pair<size_t, bool>(matched, matched < 8);
}
#endif
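// Illustrative example of the FindMatchLength contract (not part of the
// library):
//
//   uint64_t data;
//   std::pair<size_t, bool> r = FindMatchLength(s1, s2, s2_limit, &data);
//   size_t n = r.first;           // s1[0, n-1] == s2[0, n-1]
//   bool short_match = r.second;  // equivalent to n < 8
//
// When at least 8 bytes were available (s2_limit - s2 >= 8), the low bytes
// of `data` hold the next bytes from the end of the match (at least the next
// 5 are valid), saving the caller a dependent reload from s2 + n.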
static inline size_t FindMatchLengthPlain(const char* s1, const char* s2,
                                          const char* s2_limit) {
  // Implementation based on the x86-64 version, above.
  assert(s2_limit >= s2);
  int matched = 0;

  while (s2 <= s2_limit - 8 &&
         UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
    s2 += 8;
    matched += 8;
  }
  if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 8) {
    uint64_t x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
    int matching_bits = Bits::FindLSBSetNonZero64(x);
    matched += matching_bits >> 3;
    s2 += matching_bits >> 3;
  } else {
    while ((s2 < s2_limit) && (s1[matched] == *s2)) {
      ++s2;
      ++matched;
    }
  }
  return matched;
}
// Lookup tables for decompression code. Give --snappy_dump_decompression_table
// to the unit test to recompute char_table.
enum {
  LITERAL = 0,
  COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
  COPY_2_BYTE_OFFSET = 2,
  COPY_4_BYTE_OFFSET = 3
};
static const int kMaximumTagLength = 5; // COPY_4_BYTE_OFFSET plus the actual offset.
// Data stored per entry in lookup table:
//      Range   Bits-used       Description
//      ------------------------------------
//      1..64   0..7            Literal/copy length encoded in opcode byte
//      0..7    8..10           Copy offset encoded in opcode byte / 256
//      0..4    11..13          Extra bytes after opcode
//
// We use eight bits for the length even though 7 would have sufficed,
// for efficiency reasons:
//      (1) Extracting a byte is faster than a bit-field
//      (2) It properly aligns copy offset so we do not need a <<8
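//
// For example, tag byte 0x01 is COPY_1_BYTE_OFFSET with length 4 and one
// trailing offset byte, so its entry decodes as:
//
//   char_table[0x01] == 0x0804
//   length      = 0x0804 & 0xff          == 4
//   offset/256  = (0x0804 >> 8) & 0x07   == 0
//   extra bytes = (0x0804 >> 11) & 0x07  == 1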
static constexpr uint16_t char_table[256] = {
    // clang-format off
    0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
    0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
    0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
    0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
    0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
    0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
    0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
    0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
    0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
    0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
    0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
    0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
    0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
    0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
    0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
    0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
    0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
    0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
    0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
    0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
    0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
    0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
    0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
    0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
    0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
    0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
    0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
    0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
    0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
    0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
    0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
    0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040,
    // clang-format on
};
} // end namespace internal
} // end namespace snappy
#endif // THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_