Add SNAPPY_ prefix to PREDICT_{TRUE,FALSE} macros.

Authored by jueminyang on 2017-07-28 14:31:04 -07:00; committed by Victor Costan
parent be6dc3db83
commit 71b8f86887
4 changed files with 24 additions and 23 deletions


@@ -95,7 +95,7 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
// uncommon code paths that determine, without extra effort, whether the match
// length is less than 8. In short, we are hoping to avoid a conditional
// branch, and perhaps get better code layout from the C++ compiler.
-if (PREDICT_TRUE(s2 <= s2_limit - 8)) {
+if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
uint64 a1 = UNALIGNED_LOAD64(s1);
uint64 a2 = UNALIGNED_LOAD64(s2);
if (a1 != a2) {
@@ -111,7 +111,7 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
// time until we find a 64-bit block that doesn't match; then we find
// the first non-matching bit and use that to calculate the total
// length of the match.
-while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
+while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
s2 += 8;
matched += 8;
@@ -123,7 +123,7 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
return std::pair<size_t, bool>(matched, false);
}
}
-while (PREDICT_TRUE(s2 < s2_limit)) {
+while (SNAPPY_PREDICT_TRUE(s2 < s2_limit)) {
if (s1[matched] == *s2) {
++s2;
++matched;

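The comments in the hunks above describe FindMatchLength's strategy: compare the inputs eight bytes at a time, and at the first 64-bit block that differs, locate the first non-matching bit to get the exact match length. A rough standalone sketch of that bit trick, assuming a little-endian target and GCC/Clang builtins (an illustration, not the snappy implementation verbatim):

#include <cstdint>
#include <cstring>

// Returns how many leading bytes of two 8-byte blocks are equal. XOR has a set
// bit exactly where the blocks differ; on a little-endian host the lowest set
// bit falls in the first mismatching byte, so counting trailing zeros and
// dividing by 8 yields that byte's index.
static inline int MatchingBytesInBlock(const char* a, const char* b) {
  uint64_t x, y;
  std::memcpy(&x, a, sizeof(x));  // unaligned 64-bit loads
  std::memcpy(&y, b, sizeof(y));
  const uint64_t diff = x ^ y;
  if (diff == 0) return 8;        // whole block matches
  return __builtin_ctzll(diff) >> 3;
}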

@@ -73,11 +73,11 @@
// Static prediction hints.
#ifdef HAVE_BUILTIN_EXPECT
-#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
-#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#else
-#define PREDICT_FALSE(x) x
-#define PREDICT_TRUE(x) x
+#define SNAPPY_PREDICT_FALSE(x) x
+#define SNAPPY_PREDICT_TRUE(x) x
#endif
// This is only used for recomputing the tag byte table used during

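The renamed macros keep the same semantics as before: with __builtin_expect available they mark a condition as almost always true or almost always false so the compiler can favor the hot path; otherwise they expand to the bare condition. A minimal usage sketch (the function is hypothetical, not from the snappy sources):

#include <cstddef>
#include <stdexcept>

#ifndef SNAPPY_PREDICT_FALSE
// Mirrors the definition above; assumes a compiler with __builtin_expect.
#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
#endif

// Bounds-checked read whose failure branch is annotated as unlikely, so the
// generated code keeps the common path as the fall-through.
inline char ReadByteChecked(const char* buf, size_t size, size_t index) {
  if (SNAPPY_PREDICT_FALSE(index >= size)) {
    throw std::out_of_range("index past end of buffer");
  }
  return buf[index];
}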

@@ -524,7 +524,7 @@ class LogMessage {
// and ones that are always active.
#define CRASH_UNLESS(condition) \
-PREDICT_TRUE(condition) ? (void)0 : \
+SNAPPY_PREDICT_TRUE(condition) ? (void)0 : \
snappy::LogMessageVoidify() & snappy::LogMessageCrash()
#ifdef _MSC_VER

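CRASH_UNLESS above uses the usual "voidify" idiom: both arms of ?: must have the same type, and LogMessageVoidify's operator& (lower precedence than << but higher than ?:) turns the streamed crash message into void to match (void)0. A stripped-down sketch of the pattern with placeholder classes; the real LogMessageCrash and LogMessageVoidify in snappy's test header differ in detail, and the real macro also wraps the condition in SNAPPY_PREDICT_TRUE:

#include <cstdlib>
#include <iostream>

// Placeholder logger: streams to stderr and aborts when destroyed.
class CrashLogger {
 public:
  CrashLogger& operator<<(const char* msg) {
    std::cerr << msg;
    return *this;
  }
  ~CrashLogger() {
    std::cerr << std::endl;
    std::abort();
  }
};

// operator& binds more loosely than << but tighter than ?:, and returns void
// so the false arm of the conditional matches the (void)0 true arm.
class Voidify {
 public:
  void operator&(const CrashLogger&) {}
};

#define CRASH_UNLESS_SKETCH(condition) \
  (condition) ? (void)0 : Voidify() & CrashLogger()

// Usage: CRASH_UNLESS_SKETCH(ptr != nullptr) << "ptr must not be null";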

@@ -175,7 +175,7 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
// copying 2x 8 bytes at a time.
// Handle the uncommon case where pattern is less than 8 bytes.
-if (PREDICT_FALSE(pattern_size < 8)) {
+if (SNAPPY_PREDICT_FALSE(pattern_size < 8)) {
// Expand pattern to at least 8 bytes. The worse case scenario in terms of
// buffer usage is when the pattern is size 3. ^ is the original position
// of op. x are irrelevant bytes copied by the last UnalignedCopy64.
@@ -185,13 +185,13 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
// abcabcabcabcxxxxx
// ^
// The last x is 14 bytes after ^.
-if (PREDICT_TRUE(op <= buf_limit - 14)) {
+if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 14)) {
while (pattern_size < 8) {
UnalignedCopy64(src, op);
op += pattern_size;
pattern_size *= 2;
}
-if (PREDICT_TRUE(op >= op_limit)) return op_limit;
+if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
} else {
return IncrementalCopySlow(src, op, op_limit);
}
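The two hunks above cover the short-pattern path of IncrementalCopy: when the copy offset (the pattern length) is under 8 bytes, each UnalignedCopy64 appends 8 bytes of pattern, and advancing op by pattern_size doubles the usable pattern, until it is at least 8 bytes long and the main 2x-8-byte loop can take over. A self-contained sketch of just that doubling step, assuming op == src + pattern_size and enough slop after op (snappy checks op <= buf_limit - 14 first):

#include <cstddef>
#include <cstring>

// Stand-in for UnalignedCopy64: copies 8 bytes with no alignment requirement.
static inline void Copy64(const char* src, char* dst) { std::memcpy(dst, src, 8); }

// Widens a repeating pattern of pattern_size (< 8) bytes that immediately
// precedes op, returning the new op once at least 8 pattern bytes exist.
static inline char* ExpandPattern(const char* src, char* op, size_t pattern_size) {
  while (pattern_size < 8) {
    Copy64(src, op);     // append 8 bytes of pattern (relies on buffer slop)
    op += pattern_size;  // advance by the current pattern length...
    pattern_size *= 2;   // ...which doubles the pattern that now precedes op
  }
  return op;
}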
@@ -207,11 +207,11 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
UnalignedCopy64(src + 8, op + 8);
src += 16;
op += 16;
-if (PREDICT_TRUE(op >= op_limit)) return op_limit;
+if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
}
// We only take this branch if we didn't have enough slop and we can do a
// single 8 byte copy.
-if (PREDICT_FALSE(op <= buf_limit - 8)) {
+if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) {
UnalignedCopy64(src, op);
src += 8;
op += 8;
@@ -273,7 +273,7 @@ static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len,
assert(offset < 65536);
assert(len_less_than_12 == (len < 12));
-if (len_less_than_12 && PREDICT_TRUE(offset < 2048)) {
+if (len_less_than_12 && SNAPPY_PREDICT_TRUE(offset < 2048)) {
// offset fits in 11 bits. The 3 highest go in the top of the first byte,
// and the rest go in the second byte.
*op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0);
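The EmitCopyAtMost64 hunk above touches the "copy with 1-byte offset" emitter: for copies of 4 to 11 bytes with an offset below 2048, the tag byte holds the copy type in its low 2 bits, (len - 4) in bits 2..4, and the top 3 bits of the 11-bit offset in bits 5..7, while the low 8 offset bits follow in the next byte. A small sketch of that encoding, with kCopy1ByteOffset standing in for snappy's COPY_1_BYTE_OFFSET (element type 01 in the format):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Snappy element type for a copy with a 1-byte offset (low tag bits 01).
constexpr uint8_t kCopy1ByteOffset = 1;

// Emits the two-byte encoding described above and returns the advanced op.
static inline char* EmitCopyLen4To11(char* op, size_t offset, size_t len) {
  assert(len >= 4 && len < 12);
  assert(offset < 2048);
  *op++ = static_cast<char>(kCopy1ByteOffset + ((len - 4) << 2) +
                            ((offset >> 3) & 0xe0));
  *op++ = static_cast<char>(offset & 0xff);
  return op;
}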
@@ -298,7 +298,7 @@ static inline char* EmitCopy(char* op, size_t offset, size_t len,
// it's in the noise.
// Emit 64 byte copies but make sure to keep at least four bytes reserved.
-while (PREDICT_FALSE(len >= 68)) {
+while (SNAPPY_PREDICT_FALSE(len >= 68)) {
op = EmitCopyAtMost64(op, offset, 64, false);
len -= 64;
}
@@ -427,7 +427,7 @@ char* CompressFragment(const char* input,
const char* next_emit = ip;
const size_t kInputMarginBytes = 15;
-if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
+if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
const char* ip_limit = input + input_size - kInputMarginBytes;
for (uint32 next_hash = Hash(++ip, shift); ; ) {
@@ -468,7 +468,7 @@ char* CompressFragment(const char* input,
uint32 bytes_between_hash_lookups = skip >> 5;
skip += bytes_between_hash_lookups;
next_ip = ip + bytes_between_hash_lookups;
-if (PREDICT_FALSE(next_ip > ip_limit)) {
+if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
goto emit_remainder;
}
next_hash = Hash(next_ip, shift);
@@ -477,8 +477,8 @@ char* CompressFragment(const char* input,
assert(candidate < ip);
table[hash] = ip - base_ip;
-} while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
-UNALIGNED_LOAD32(candidate)));
+} while (SNAPPY_PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
+UNALIGNED_LOAD32(candidate)));
// Step 2: A 4-byte match has been found. We'll later see if more
// than 4 bytes match. But, prior to the match, input
@@ -509,7 +509,7 @@ char* CompressFragment(const char* input,
assert(0 == memcmp(base, candidate, matched));
op = EmitCopy(op, offset, matched, p.second);
next_emit = ip;
-if (PREDICT_FALSE(ip >= ip_limit)) {
+if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
goto emit_remainder;
}
// We are now looking for a 4-byte match again. We read
@@ -700,7 +700,7 @@ class SnappyDecompressor {
// txt[1-4] 25% 75%
// pb 24% 76%
// bin 24% 76%
-if (PREDICT_FALSE((c & 0x3) == LITERAL)) {
+if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
size_t literal_length = (c >> 2) + 1u;
if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
assert(literal_length < 61);
@@ -710,7 +710,7 @@ class SnappyDecompressor {
// bytes in addition to the literal.
continue;
}
-if (PREDICT_FALSE(literal_length >= 61)) {
+if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
// Long literal.
const size_t literal_length_length = literal_length - 60;
literal_length =
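In the decompressor hunks above, a tag byte whose low two bits are 00 is a literal; (c >> 2) + 1 gives the literal length directly when it comes out at 60 or less, and values of 61 to 64 instead mean the real length minus one follows in the next 1 to 4 bytes, little-endian. A sketch of that decode step; the helper name and interface are illustrative, not snappy's:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Decodes the literal length for tag byte c (low two bits == 00). ip points
// just past the tag byte; *extra_bytes reports how many length bytes follow.
static inline size_t DecodeLiteralLength(uint8_t c, const char* ip,
                                         size_t* extra_bytes) {
  size_t len = (c >> 2) + 1u;
  if (len <= 60) {            // short literal: length lives in the tag byte
    *extra_bytes = 0;
    return len;
  }
  const size_t n = len - 60;  // 1..4 little-endian length bytes follow
  uint32_t stored = 0;
  std::memcpy(&stored, ip, n);  // assumes a little-endian host
  *extra_bytes = n;
  return static_cast<size_t>(stored) + 1u;
}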
@@ -1354,7 +1354,8 @@ class SnappyScatteredWriter {
char* const op_end = op_ptr_ + len;
// See SnappyArrayWriter::AppendFromSelf for an explanation of
// the "offset - 1u" trick.
-if (PREDICT_TRUE(offset - 1u < op_ptr_ - op_base_ && op_end <= op_limit_)) {
+if (SNAPPY_PREDICT_TRUE(offset - 1u < op_ptr_ - op_base_ &&
+op_end <= op_limit_)) {
// Fast path: src and dst in current block.
op_ptr_ = IncrementalCopy(op_ptr_ - offset, op_ptr_, op_end, op_limit_);
return true;
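The comment above defers to SnappyArrayWriter for the "offset - 1u" trick: with unsigned arithmetic, an offset of zero wraps around to a huge value, so a single comparison rejects both a zero offset and an offset that reaches back past the start of the produced output. A tiny illustration:

#include <cstddef>

// Equivalent to (offset != 0 && offset <= produced), but as one unsigned
// comparison: when offset == 0, offset - 1u wraps to SIZE_MAX and the test fails.
static inline bool CopyOffsetIsValid(size_t offset, size_t produced) {
  return offset - 1u < produced;
}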