mirror of https://github.com/google/snappy.git
Fix public issue r57: Fix most warnings with -Wall, mostly signed/unsigned
warnings. There are still some in the unit test, but the main .cc file should
be clean. We haven't enabled -Wall for the default build, since the unit test
is still not clean.

This also fixes a real bug in the open-source implementation of
ReadFileToStringOrDie(); it would not detect errors correctly.

I had to go through some pains to avoid performance loss as the types were
changed; I think there might still be some loss on 32-bit if and only if LFS
is enabled (i.e., size_t is 64-bit), but for regular 32-bit and 64-bit I can't
see any losses, and I've diffed the generated GCC assembler between the old
and new code without seeing any significant differences. If anything, it's
ever so slightly faster.

This may or may not enable compression of very large blocks (>2^32 bytes)
when size_t is 64-bit, but I haven't checked, and it is still not a supported
case.

git-svn-id: https://snappy.googlecode.com/svn/trunk@56 03e5f5b5-db94-4691-08a0-1a8bf15f6143
commit d9068ee301 (parent 0755c81519)
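The bulk of the change is mechanical: variables holding sizes, offsets, and lengths move from int/uint32 to size_t so that comparisons against size_t values stop tripping -Wsign-compare. A minimal sketch, not snappy code, of the warning class and the shape of the fix (Available and kBlockSize here are hypothetical stand-ins for the real reader API and constants); compile with g++ -Wall -c to see the warning:

#include <cstddef>

static size_t Available() { return 70000; }     // stand-in for a reader API
static const size_t kBlockSize = 1 << 15;

size_t Before() {
  int N = Available();      // size_t silently narrowed to int
  if (N >= kBlockSize)      // warning: comparison between signed and unsigned
    return kBlockSize;
  return N;
}

size_t After() {
  size_t N = Available();   // keep the whole computation unsigned
  if (N >= kBlockSize)      // same logic, no warning
    return kBlockSize;
  return N;
}

On typical targets the compiler emits essentially the same instructions for both versions, which is consistent with the assembler diff described in the commit message.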
@@ -86,10 +86,9 @@ using namespace std;
 // version (anyone who wants to regenerate it can just do the call
 // themselves within main()).
 #define DEFINE_bool(flag_name, default_value, description) \
-  bool FLAGS_ ## flag_name = default_value;
+  bool FLAGS_ ## flag_name = default_value
 #define DECLARE_bool(flag_name) \
-  extern bool FLAGS_ ## flag_name;
+  extern bool FLAGS_ ## flag_name
-#define REGISTER_MODULE_INITIALIZER(name, code)
 
 namespace snappy {
 
@@ -353,7 +353,6 @@ int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
   // compression.
   err = deflate(&comp_stream_, flush_mode);
 
-  const uLong source_bytes_consumed = *sourceLen - comp_stream_.avail_in;
   *sourceLen = comp_stream_.avail_in;
 
   if ((err == Z_STREAM_END || err == Z_OK)
@@ -397,7 +396,6 @@ int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
 int ZLib::Compress(Bytef *dest, uLongf *destLen,
                    const Bytef *source, uLong sourceLen) {
   int err;
-  const uLongf orig_destLen = *destLen;
   if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
                                Z_FINISH)) != Z_OK )
     return err;
 
@@ -135,7 +135,7 @@ namespace File {
     while (!feof(fp)) {
      char buf[4096];
      size_t ret = fread(buf, 1, 4096, fp);
-     if (ret == -1) {
+     if (ret == 0 && ferror(fp)) {
        perror("fread");
        exit(1);
      }
 
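The hunk above is the real bug the commit message mentions: fread() returns size_t, an unsigned type, so the old ret == -1 test could never be true and I/O errors went undetected; on error fread() returns a short count and sets the stream's error flag. A standalone sketch of the corrected loop (a rewrite for illustration, not the exact snappy-test helper):

#include <cstdio>
#include <cstdlib>
#include <string>

std::string ReadAll(FILE* fp) {
  std::string data;
  while (!feof(fp)) {
    char buf[4096];
    size_t ret = fread(buf, 1, sizeof(buf), fp);
    if (ret == 0 && ferror(fp)) {   // was: ret == -1, never true for size_t
      perror("fread");
      exit(1);
    }
    data.append(buf, ret);
  }
  return data;
}

int main() {
  std::string contents = ReadAll(stdin);
  fprintf(stderr, "read %zu bytes\n", contents.size());
  return 0;
}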
snappy.cc (51 changes)
@@ -194,13 +194,13 @@ static inline char* EmitLiteral(char* op,
   return op + len;
 }
 
-static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
+static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
   DCHECK_LE(len, 64);
   DCHECK_GE(len, 4);
   DCHECK_LT(offset, 65536);
 
   if ((len < 12) && (offset < 2048)) {
-    int len_minus_4 = len - 4;
+    size_t len_minus_4 = len - 4;
     assert(len_minus_4 < 8);   // Must fit in 3 bits
     *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
     *op++ = offset & 0xff;
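The two *op++ stores above pack a short copy into two bytes. A worked example of that packing, assuming COPY_1_BYTE_OFFSET is the tag value 1 as in the snappy format description: bits 0-1 hold the tag, bits 2-4 hold length-4 (so lengths 4-11), bits 5-7 hold offset>>8 (so offsets below 2048), and the second byte holds the low eight offset bits.

#include <cassert>
#include <cstddef>

int main() {
  const unsigned char COPY_1_BYTE_OFFSET = 1;   // assumed tag value
  const size_t offset = 1000;                   // 0x3E8, fits in 11 bits
  const size_t len = 7;                         // 4 <= len < 12

  // Pack exactly as the hunk above does.
  const unsigned char b0 =
      COPY_1_BYTE_OFFSET | ((len - 4) << 2) | ((offset >> 8) << 5);
  const unsigned char b1 = offset & 0xff;
  assert(b0 == 0x6D && b1 == 0xE8);

  // Unpacking reverses the three fields.
  assert((b0 & 0x3) == COPY_1_BYTE_OFFSET);
  assert(static_cast<size_t>(((b0 >> 2) & 0x7) + 4) == len);
  assert(static_cast<size_t>(((b0 >> 5) << 8) | b1) == offset);
  return 0;
}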
@@ -212,7 +212,7 @@ static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
   return op;
 }
 
-static inline char* EmitCopy(char* op, int offset, int len) {
+static inline char* EmitCopy(char* op, size_t offset, int len) {
   // Emit 64 byte copies but make sure to keep at least four bytes reserved
   while (len >= 68) {
     op = EmitCopyLessThan64(op, offset, 64);
@@ -249,7 +249,7 @@ uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
   // compression, and if the input is short, we won't need that
   // many hash table entries anyway.
   assert(kMaxHashTableSize >= 256);
-  int htsize = 256;
+  size_t htsize = 256;
   while (htsize < kMaxHashTableSize && htsize < input_size) {
     htsize <<= 1;
   }
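Changing htsize to size_t lets the loop above compare directly against input_size (itself a size_t) without a warning; the sizing logic is unchanged: the smallest power of two that is at least 256 and at least input_size, clamped to kMaxHashTableSize. A self-contained check of that behaviour (the constant reproduced from snappy.h):

#include <cassert>
#include <cstddef>

static const size_t kMaxHashTableSize = 1 << 14;

size_t HashTableSize(size_t input_size) {
  size_t htsize = 256;
  while (htsize < kMaxHashTableSize && htsize < input_size) {
    htsize <<= 1;
  }
  return htsize;
}

int main() {
  assert(HashTableSize(100) == 256);       // never below the 256 floor
  assert(HashTableSize(1000) == 1024);     // rounds up to a power of two
  assert(HashTableSize(1 << 20) == kMaxHashTableSize);   // clamped
  return 0;
}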
@@ -304,14 +304,14 @@ char* CompressFragment(const char* input,
   CHECK_LE(input_size, kBlockSize);
   CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
   const int shift = 32 - Bits::Log2Floor(table_size);
-  DCHECK_EQ(kuint32max >> shift, table_size - 1);
+  DCHECK_EQ(static_cast<int>(kuint32max >> shift), table_size - 1);
   const char* ip_end = input + input_size;
   const char* base_ip = ip;
   // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
   // [next_emit, ip_end) after the main loop.
   const char* next_emit = ip;
 
-  const int kInputMarginBytes = 15;
+  const size_t kInputMarginBytes = 15;
   if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
     const char* ip_limit = input + input_size - kInputMarginBytes;
 
@@ -387,7 +387,7 @@ char* CompressFragment(const char* input,
       const char* base = ip;
       int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
       ip += matched;
-      int offset = base - candidate;
+      size_t offset = base - candidate;
       DCHECK_EQ(0, memcmp(base, candidate, matched));
       op = EmitCopy(op, offset, matched);
       // We could immediately start working at ip now, but to improve
@@ -435,8 +435,8 @@ char* CompressFragment(const char* input,
 //   bool CheckLength() const;
 //
 //   // Called repeatedly during decompression
-//   bool Append(const char* ip, uint32 length);
-//   bool AppendFromSelf(uint32 offset, uint32 length);
+//   bool Append(const char* ip, size_t length);
+//   bool AppendFromSelf(uint32 offset, size_t length);
 //
 //   // The difference between TryFastAppend and Append is that TryFastAppend
 //   // is allowed to read up to <available> bytes from the input buffer,
@@ -453,7 +453,7 @@ char* CompressFragment(const char* input,
 //   // decoded fully. In practice, this should not be a big problem,
 //   // as it is unlikely that one would implement a fast path accepting
 //   // this much data.
-//   bool TryFastAppend(const char* ip, uint32 available, uint32 length);
+//   bool TryFastAppend(const char* ip, size_t available, size_t length);
 // };
 
 // -----------------------------------------------------------------------
@@ -601,7 +601,6 @@ static void ComputeTable() {
     CHECK_EQ(dst[i], char_table[i]);
   }
 }
-REGISTER_MODULE_INITIALIZER(snappy, ComputeTable());
 #endif /* !NDEBUG */
 
 // Helper class for decompression
@@ -686,7 +685,7 @@ class SnappyDecompressor {
       const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
 
       if ((c & 0x3) == LITERAL) {
-        uint32 literal_length = (c >> 2) + 1;
+        size_t literal_length = (c >> 2) + 1u;
        if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
          DCHECK_LT(literal_length, 61);
          ip += literal_length;
@@ -695,13 +694,13 @@ class SnappyDecompressor {
        }
        if (PREDICT_FALSE(literal_length >= 61)) {
          // Long literal.
-         const uint32 literal_length_length = literal_length - 60;
+         const size_t literal_length_length = literal_length - 60;
          literal_length =
              (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
          ip += literal_length_length;
        }
 
-       uint32 avail = ip_limit_ - ip;
+       size_t avail = ip_limit_ - ip;
        while (avail < literal_length) {
          if (!writer->Append(ip, avail)) return;
          literal_length -= avail;
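For readers following the decode path above: the tag byte's low two bits select the element type and the rest encode a length, with lengths of 61 and up spilling into 1-4 trailing bytes. A worked example of the long-literal branch, with Load32LE standing in for LittleEndian::Load32 and the wordmask table reproduced from snappy.cc (tag values assumed, LITERAL == 0):

#include <cassert>
#include <cstddef>
#include <cstdint>

static const uint32_t wordmask[] = {
  0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
};

// Stand-in for LittleEndian::Load32.
static uint32_t Load32LE(const uint8_t* p) {
  return static_cast<uint32_t>(p[0]) |
         (static_cast<uint32_t>(p[1]) << 8) |
         (static_cast<uint32_t>(p[2]) << 16) |
         (static_cast<uint32_t>(p[3]) << 24);
}

int main() {
  // A 1000-byte literal: tag (c >> 2) == 61 means "length - 1 follows in
  // two little-endian bytes"; 999 == 0x03E7. The padding bytes keep the
  // unconditional 32-bit load in bounds, as in the real decoder.
  const uint8_t stream[] = { 61 << 2, 0xE7, 0x03, 0x00, 0x00 };
  const uint8_t* ip = stream;

  const uint8_t c = *ip++;
  assert((c & 0x3) == 0);                  // LITERAL tag
  size_t literal_length = (c >> 2) + 1;    // 62: signals a long literal
  if (literal_length >= 61) {
    const size_t literal_length_length = literal_length - 60;   // 2 bytes
    literal_length = (Load32LE(ip) & wordmask[literal_length_length]) + 1;
    ip += literal_length_length;
  }
  assert(literal_length == 1000);
  return 0;
}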
@@ -825,7 +824,7 @@ bool GetUncompressedLength(Source* source, uint32* result) {
 
 size_t Compress(Source* reader, Sink* writer) {
   size_t written = 0;
-  int N = reader->Available();
+  size_t N = reader->Available();
   char ulength[Varint::kMax32];
   char* p = Varint::Encode32(ulength, N);
   writer->Append(ulength, p-ulength);
@@ -840,10 +839,10 @@ size_t Compress(Source* reader, Sink* writer) {
     size_t fragment_size;
     const char* fragment = reader->Peek(&fragment_size);
     DCHECK_NE(fragment_size, 0) << ": premature end of input";
-    const int num_to_read = min(N, kBlockSize);
+    const size_t num_to_read = min(N, kBlockSize);
     size_t bytes_read = fragment_size;
 
-    int pending_advance = 0;
+    size_t pending_advance = 0;
     if (bytes_read >= num_to_read) {
       // Buffer returned by reader is large enough
       pending_advance = num_to_read;
@@ -931,9 +930,9 @@ class SnappyArrayWriter {
     return op_ == op_limit_;
   }
 
-  inline bool Append(const char* ip, uint32 len) {
+  inline bool Append(const char* ip, size_t len) {
     char* op = op_;
-    const int space_left = op_limit_ - op;
+    const size_t space_left = op_limit_ - op;
     if (space_left < len) {
       return false;
     }
@@ -942,9 +941,9 @@ class SnappyArrayWriter {
     return true;
   }
 
-  inline bool TryFastAppend(const char* ip, uint32 available, uint32 len) {
+  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
     char* op = op_;
-    const int space_left = op_limit_ - op;
+    const size_t space_left = op_limit_ - op;
     if (len <= 16 && available >= 16 && space_left >= 16) {
       // Fast path, used for the majority (about 95%) of invocations.
       UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
@@ -956,9 +955,9 @@ class SnappyArrayWriter {
     }
   }
 
-  inline bool AppendFromSelf(uint32 offset, uint32 len) {
+  inline bool AppendFromSelf(size_t offset, size_t len) {
     char* op = op_;
-    const int space_left = op_limit_ - op;
+    const size_t space_left = op_limit_ - op;
 
     if (op - base_ <= offset - 1u) {  // -1u catches offset==0
       return false;
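The offset - 1u comparison kept in this hunk (now against size_t operands) relies on unsigned wraparound: subtracting 1u sends offset == 0 to the largest unsigned value, so a single compare rejects both a zero offset and an offset reaching back past the start of the output. Isolated for illustration:

#include <cassert>
#include <cstddef>

bool ValidCopy(size_t produced, size_t offset) {
  // Equivalent to: offset != 0 && offset <= produced, in one compare.
  return produced > offset - 1u;
}

int main() {
  assert(!ValidCopy(10, 0));    // 0 - 1u wraps to SIZE_MAX: rejected
  assert( ValidCopy(10, 1));
  assert( ValidCopy(10, 10));
  assert(!ValidCopy(10, 11));   // farther back than bytes produced
  return 0;
}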
@@ -1022,14 +1021,14 @@ class SnappyDecompressionValidator {
   inline bool CheckLength() const {
     return expected_ == produced_;
   }
-  inline bool Append(const char* ip, uint32 len) {
+  inline bool Append(const char* ip, size_t len) {
     produced_ += len;
     return produced_ <= expected_;
   }
-  inline bool TryFastAppend(const char* ip, uint32 available, uint32 length) {
+  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
     return false;
   }
-  inline bool AppendFromSelf(uint32 offset, uint32 len) {
+  inline bool AppendFromSelf(size_t offset, size_t len) {
     if (produced_ <= offset - 1u) return false;  // -1u catches offset==0
     produced_ += len;
     return produced_ <= expected_;
snappy.h (4 changes)
@@ -144,10 +144,10 @@ namespace snappy {
 // decompression code should not rely on this guarantee since older
 // compression code may not obey it.
 static const int kBlockLog = 15;
-static const int kBlockSize = 1 << kBlockLog;
+static const size_t kBlockSize = 1 << kBlockLog;
 
 static const int kMaxHashTableBits = 14;
-static const int kMaxHashTableSize = 1 << kMaxHashTableBits;
+static const size_t kMaxHashTableSize = 1 << kMaxHashTableBits;
 
 }  // end namespace snappy
 
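kBlockSize becoming a size_t in snappy.h is what lets Compress() above write min(N, kBlockSize) once N is a size_t: std::min is a template over a single type, so the arguments must agree, and a cast would just reintroduce the signed/unsigned noise. A sketch of the pairing (simplified, with N hardcoded instead of reader->Available()):

#include <algorithm>
#include <cstddef>

static const int kBlockLog = 15;
static const size_t kBlockSize = 1 << kBlockLog;  // was: static const int

int main() {
  size_t N = 70000;  // was: int N = reader->Available();
  // With the old int kBlockSize, std::min(N, kBlockSize) would fail to
  // deduce a common type.
  const size_t num_to_read = std::min(N, kBlockSize);
  return num_to_read == kBlockSize ? 0 : 1;
}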
@@ -300,7 +300,7 @@ static bool Uncompress(const string& compressed, CompressorType comp,
                              reinterpret_cast<const Bytef*>(compressed.data()),
                              compressed.size());
       CHECK_EQ(Z_OK, ret);
-      CHECK_EQ(destlen, size);
+      CHECK_EQ(static_cast<uLongf>(size), destlen);
       break;
     }
 #endif  // ZLIB_VERSION
@@ -316,7 +316,7 @@ static bool Uncompress(const string& compressed, CompressorType comp,
                                     &destlen,
                                     NULL);
       CHECK_EQ(LZO_E_OK, ret);
-      CHECK_EQ(destlen, size);
+      CHECK_EQ(static_cast<lzo_uint>(size), destlen);
       break;
     }
 #endif  // LZO_VERSION