diff --git a/Makefile b/Makefile
index 9b8ec13e8d..c1cb613af8 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ INSTALL_PATH ?= $(CURDIR)
 
 # OPT ?= -O2 -DNDEBUG     # (A) Production use (optimized mode)
 # OPT ?= -g2              # (B) Debug mode, w/ full line-level debugging symbols
-OPT ?= -O2 -g2 -DNDEBUG  # (C) Profiling mode: opt, but w/debugging symbols
+OPT ?= -O2 -g2 -DNDEBUG -Wall  # (C) Profiling mode: opt, but w/debugging symbols
 
 #-----------------------------------------------
 # detect what platform we're building on
diff --git a/db/corruption_test.cc b/db/corruption_test.cc
index b963fe1b77..4b1fce7411 100644
--- a/db/corruption_test.cc
+++ b/db/corruption_test.cc
@@ -82,7 +82,7 @@ class CorruptionTest {
   }
 
   void Check(int min_expected, int max_expected) {
-    int next_expected = 0;
+    unsigned int next_expected = 0;
     int missed = 0;
     int bad_keys = 0;
     int bad_values = 0;
@@ -123,7 +123,7 @@ class CorruptionTest {
     FileType type;
     std::string fname;
     int picked_number = -1;
-    for (int i = 0; i < filenames.size(); i++) {
+    for (unsigned int i = 0; i < filenames.size(); i++) {
      if (ParseFileName(filenames[i], &number, &type) &&
          type == filetype &&
          int(number) > picked_number) {  // Pick latest file
diff --git a/db/db_bench.cc b/db/db_bench.cc
index ee402227ca..19be99f1a2 100644
--- a/db/db_bench.cc
+++ b/db/db_bench.cc
@@ -149,7 +149,7 @@ static bool FLAGS_use_fsync = false;
 static bool FLAGS_disable_wal = false;
 
 // The total number of levels
-static int FLAGS_num_levels = 7;
+static unsigned int FLAGS_num_levels = 7;
 
 // Target level-0 file size for compaction
 static int FLAGS_target_file_size_base = 2 * 1048576;
@@ -191,7 +191,7 @@ static enum leveldb::CompressionType FLAGS_compression_type =
 
 // Allows compression for levels 0 and 1 to be disabled when
 // other levels are compressed
-static int FLAGS_min_level_to_compress = -1;
+static unsigned int FLAGS_min_level_to_compress = -1;
 
 static int FLAGS_table_cache_numshardbits = 4;
@@ -218,13 +218,11 @@ extern bool useMmapWrite;
 
 namespace leveldb {
 
-namespace {
-
 // Helper for quickly generating random data.
 class RandomGenerator {
  private:
   std::string data_;
-  int pos_;
+  unsigned int pos_;
 
  public:
   RandomGenerator() {
@@ -252,11 +250,11 @@ class RandomGenerator {
   }
 };
 
 static Slice TrimSpace(Slice s) {
-  int start = 0;
+  unsigned int start = 0;
   while (start < s.size() && isspace(s[start])) {
     start++;
   }
-  int limit = s.size();
+  unsigned int limit = s.size();
   while (limit > start && isspace(s[limit-1])) {
     limit--;
   }
@@ -446,8 +444,6 @@ struct ThreadState {
   }
 };
 
-}  // namespace
-
 class Benchmark {
  private:
   Cache* cache_;
@@ -534,6 +530,9 @@ class Benchmark {
                                  strlen(text), &compressed);
         name = "BZip2";
         break;
+      case kNoCompression:
+        assert(false); // cannot happen
+        break;
     }
 
     if (!result) {
@@ -611,7 +610,7 @@ class Benchmark {
       heap_counter_(0) {
     std::vector<std::string> files;
     FLAGS_env->GetChildren(FLAGS_db, &files);
-    for (int i = 0; i < files.size(); i++) {
+    for (unsigned int i = 0; i < files.size(); i++) {
       if (Slice(files[i]).starts_with("heap-")) {
         FLAGS_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
       }
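
Note: the kNoCompression arm added to the benchmark's compression switch above is a -Wall fix: g++'s -Wswitch (part of -Wall) warns when a switch over an enum omits an enumerator and has no default. A minimal sketch of the pattern, with an illustrative enum standing in for leveldb::CompressionType:

    #include <cassert>

    enum CompressionType { kNoCompression, kSnappyCompression,
                           kZlibCompression, kBZip2Compression };

    const char* CompressionName(CompressionType type) {
      switch (type) {
        case kSnappyCompression: return "Snappy";
        case kZlibCompression:   return "Zlib";
        case kBZip2Compression:  return "BZip2";
        case kNoCompression:      // now handled, so -Wswitch stays quiet
          assert(false);          // callers never pass this value
          break;
      }
      return "";
    }
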
diff --git a/db/db_impl.cc b/db/db_impl.cc
index 77e3822493..d89cbdf8cb 100644
--- a/db/db_impl.cc
+++ b/db/db_impl.cc
@@ -160,7 +160,7 @@ Options SanitizeOptions(const std::string& dbname,
   }
   if (src.compression_per_level != NULL) {
     result.compression_per_level = new CompressionType[src.num_levels];
-    for (unsigned int i = 0; i < src.num_levels; i++) {
+    for (int i = 0; i < src.num_levels; i++) {
       result.compression_per_level[i] = src.compression_per_level[i];
     }
   }
@@ -191,10 +191,11 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
       disable_delete_obsolete_files_(false),
       delete_obsolete_files_last_run_(0),
       stall_level0_slowdown_(0),
-      stall_leveln_slowdown_(0),
       stall_memtable_compaction_(0),
       stall_level0_num_files_(0),
+      stall_leveln_slowdown_(0),
       started_at_(options.env->NowMicros()),
+      flush_on_destroy_(false),
       delayed_writes_(0) {
   mem_->Ref();
@@ -227,7 +228,10 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
 
 DBImpl::~DBImpl() {
   // Wait for background work to finish
-  mutex_.Lock();
+  if (flush_on_destroy_) {
+    FlushMemTable(FlushOptions());
+  }
+  mutex_.Lock();
   shutting_down_.Release_Store(this);  // Any non-NULL value is ok
   while (bg_compaction_scheduled_ || bg_logstats_scheduled_) {
     bg_cv_.Wait();
@@ -316,7 +320,7 @@ void DBImpl::FindObsoleteFiles(DeletionState& deletion_state) {
   // delete_obsolete_files_period_micros.
   if (options_.delete_obsolete_files_period_micros != 0) {
     const uint64_t now_micros = env_->NowMicros();
-    if (delete_obsolete_files_last_run_ +
+    if (delete_obsolete_files_last_run_ +
         options_.delete_obsolete_files_period_micros > now_micros) {
       return;
     }
@@ -422,7 +426,6 @@ void DBImpl::DeleteObsoleteFiles() {
   std::set<uint64_t> live;
   std::vector<std::string> allfiles;
   std::vector<uint64_t> files_to_evict;
-  uint64_t filenumber, lognumber, prevlognumber;
   FindObsoleteFiles(deletion_state);
   PurgeObsoleteFiles(deletion_state);
   EvictObsoleteFiles(deletion_state);
@@ -1084,7 +1087,7 @@ void DBImpl::AllocateCompactionOutputFileNumbers(CompactionState* compact) {
   assert(compact != NULL);
   assert(compact->builder == NULL);
   int filesNeeded = compact->compaction->num_input_files(1);
-  for (unsigned i = 0; i < filesNeeded; i++) {
+  for (int i = 0; i < filesNeeded; i++) {
     uint64_t file_number = versions_->NewFileNumber();
     pending_outputs_.insert(file_number);
     compact->allocated_file_numbers.push_back(file_number);
@@ -1324,8 +1327,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
         ikey.sequence < compact->smallest_snapshot) {
       // If the user has specified a compaction filter, then invoke
       // it. If this key is not visible via any snapshot and the
-      // return value of the compaction filter is true and then
-      // drop this key from the output.
+      // return value of the compaction filter is true and then
+      // drop this key from the output.
       drop = options_.CompactionFilter(compact->compaction->level(),
                                        ikey.user_key, value,
                                        &compaction_filter_value);
@@ -1605,6 +1608,10 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
     // into mem_.
     {
       mutex_.Unlock();
+      if (options.disableWAL) {
+        flush_on_destroy_ = true;
+      }
+
       if (!options.disableWAL) {
         status = log_->AddRecord(WriteBatchInternal::Contents(updates));
         if (status.ok() && options.sync) {
@@ -1731,7 +1738,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
         allow_delay = false;  // Do not delay a single write more than once
         //Log(options_.info_log,
         //    "delaying write %llu usecs for level0_slowdown_writes_trigger\n",
-        //    delayed);
+        //    (long long unsigned int)delayed);
         mutex_.Lock();
         delayed_writes_++;
       } else if (!force &&
@@ -1770,7 +1777,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
         allow_delay = false;  // Do not delay a single write more than once
         Log(options_.info_log,
             "delaying write %llu usecs for rate limits with max score %.2f\n",
-            delayed, score);
+            (long long unsigned int)delayed, score);
         mutex_.Lock();
       } else {
         // Attempt to switch to a new memtable and trigger compaction of old
diff --git a/db/db_impl.h b/db/db_impl.h
index 0a7f05f93f..a7321c13a0 100644
--- a/db/db_impl.h
+++ b/db/db_impl.h
@@ -52,7 +52,7 @@ class DBImpl : public DB {
   virtual Status Flush(const FlushOptions& options);
   virtual Status DisableFileDeletions();
   virtual Status EnableFileDeletions();
-  virtual Status GetLiveFiles(std::vector<std::string>&,
+  virtual Status GetLiveFiles(std::vector<std::string>&,
                               uint64_t* manifest_file_size);
 
   // Extra methods (for testing) that are not in the public DB interface
@@ -233,6 +233,8 @@ class DBImpl : public DB {
   // Time at which this instance was started.
   const uint64_t started_at_;
 
+  bool flush_on_destroy_; // Used when disableWAL is true.
+
   // Per level compaction stats.  stats_[level] stores the stats for
   // compactions that produced data for the specified "level".
   struct CompactionStats {
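
Note: the flush_on_destroy_ flag (set in DBImpl::Write when a batch arrives with disableWAL, checked in ~DBImpl) means data written without the write-ahead log now survives a clean close, which is what the updated WAL and FLUSH tests below assert. A minimal usage sketch against this tree's public API (error handling reduced to asserts; the database path is arbitrary):

    #include <cassert>
    #include <string>
    #include "leveldb/db.h"

    void WriteWithoutWal() {
      leveldb::Options options;
      options.create_if_missing = true;
      leveldb::DB* db = NULL;
      assert(leveldb::DB::Open(options, "/tmp/waltest", &db).ok());

      leveldb::WriteOptions wo;
      wo.disableWAL = true;              // skip the write-ahead log
      db->Put(wo, "foo", "v1");
      delete db;                         // ~DBImpl now flushes the memtable

      assert(leveldb::DB::Open(options, "/tmp/waltest", &db).ok());
      std::string value;
      assert(db->Get(leveldb::ReadOptions(), "foo", &value).ok());
      assert(value == "v1");             // present despite disableWAL
      delete db;
    }
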
diff --git a/db/db_test.cc b/db/db_test.cc
index 7a2256e49d..0b5c968757 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -44,7 +44,7 @@ static std::string RandomString(Random* rnd, int len) {
   return r;
 }
 
-namespace {
+namespace anon {
 class AtomicCounter {
  private:
   port::Mutex mu_;
@@ -79,9 +79,9 @@ class SpecialEnv : public EnvWrapper {
   port::AtomicPointer non_writable_;
 
   bool count_random_reads_;
-  AtomicCounter random_read_counter_;
+  anon::AtomicCounter random_read_counter_;
 
-  AtomicCounter sleep_counter_;
+  anon::AtomicCounter sleep_counter_;
 
   explicit SpecialEnv(Env* base) : EnvWrapper(base) {
     delay_sstable_sync_.Release_Store(NULL);
@@ -137,9 +137,9 @@ class SpecialEnv : public EnvWrapper {
     class CountingFile : public RandomAccessFile {
      private:
       RandomAccessFile* target_;
-      AtomicCounter* counter_;
+      anon::AtomicCounter* counter_;
      public:
-      CountingFile(RandomAccessFile* target, AtomicCounter* counter)
+      CountingFile(RandomAccessFile* target, anon::AtomicCounter* counter)
           : target_(target), counter_(counter) {
       }
       virtual ~CountingFile() { delete target_; }
@@ -310,7 +310,7 @@ class DBTest {
     }
 
     // Check reverse iteration results are the reverse of forward results
-    int matched = 0;
+    unsigned int matched = 0;
     for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
       ASSERT_LT(matched, forward.size());
       ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
@@ -858,8 +858,8 @@ TEST(DBTest, WAL) {
   ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1"));
 
   Reopen();
-  ASSERT_EQ("NOT_FOUND", Get("foo"));
-  ASSERT_EQ("NOT_FOUND", Get("bar"));
+  ASSERT_EQ("v1", Get("foo"));
+  ASSERT_EQ("v1", Get("bar"));
 
   writeOpt.disableWAL = false;
   ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v2"));
@@ -867,10 +867,9 @@ TEST(DBTest, WAL) {
   ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v2"));
 
   Reopen();
-  // We garantee the 'bar' will be there
-  // because its put has WAL enabled.
-  // But 'foo' may or may not be there.
+  // Both values should be present.
   ASSERT_EQ("v2", Get("bar"));
+  ASSERT_EQ("v2", Get("foo"));
 
   writeOpt.disableWAL = true;
   ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v3"));
@@ -878,9 +877,9 @@ TEST(DBTest, WAL) {
   ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v3"));
 
   Reopen();
-  // 'foo' should be there because its put
-  // has WAL enabled.
+  // Again, both values should be present.
ASSERT_EQ("v3", Get("foo")); + ASSERT_EQ("v3", Get("bar")); } TEST(DBTest, CheckLock) { @@ -895,13 +894,13 @@ TEST(DBTest, FLUSH) { WriteOptions writeOpt = WriteOptions(); writeOpt.disableWAL = true; ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v1")); - // this will not flush the last 2 writes + // this will now also flush the last 2 writes dbfull()->Flush(FlushOptions()); ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1")); Reopen(); ASSERT_EQ("v1", Get("foo")); - ASSERT_EQ("NOT_FOUND", Get("bar")); + ASSERT_EQ("v1", Get("bar")); writeOpt.disableWAL = true; ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v2")); @@ -1201,12 +1200,12 @@ static int cfilter_count; static std::string NEW_VALUE = "NewValue"; static bool keep_filter(int level, const Slice& key, const Slice& value, Slice** new_value) { - cfilter_count++; + cfilter_count++; return false; } static bool delete_filter(int level, const Slice& key, const Slice& value, Slice** new_value) { - cfilter_count++; + cfilter_count++; return true; } static bool change_filter(int level, const Slice& key, @@ -1223,8 +1222,8 @@ TEST(DBTest, CompactionFilter) { options.CompactionFilter = keep_filter; Reopen(&options); - // Write 100K+1 keys, these are written to a few files - // in L0. We do this so that the current snapshot points + // Write 100K+1 keys, these are written to a few files + // in L0. We do this so that the current snapshot points // to the 100001 key.The compaction filter is not invoked // on keys that are visible via a snapshot because we // anyways cannot delete it. @@ -1324,8 +1323,8 @@ TEST(DBTest, CompactionFilterWithValueChange) { options.CompactionFilter = change_filter; Reopen(&options); - // Write 100K+1 keys, these are written to a few files - // in L0. We do this so that the current snapshot points + // Write 100K+1 keys, these are written to a few files + // in L0. We do this so that the current snapshot points // to the 100001 key.The compaction filter is not invoked // on keys that are visible via a snapshot because we // anyways cannot delete it. 
@@ -2028,7 +2027,7 @@ TEST(DBTest, SnapshotFiles) {
     dbfull()->GetLiveFiles(files, &manifest_size);
 
     // CURRENT, MANIFEST, *.sst files
-    ASSERT_EQ(files.size(), 3);
+    ASSERT_EQ(files.size(), 3U);
 
     uint64_t number = 0;
     FileType type;
@@ -2251,7 +2250,7 @@ static void MTThreadBody(void* arg) {
       ASSERT_EQ(k, key);
       ASSERT_GE(w, 0);
       ASSERT_LT(w, kNumThreads);
-      ASSERT_LE(c, reinterpret_cast<uintptr_t>(
+      ASSERT_LE((unsigned int)c, reinterpret_cast<uintptr_t>(
           t->state->counter[w].Acquire_Load()));
     }
   }
diff --git a/db/dbformat_test.cc b/db/dbformat_test.cc
index 5d82f5d313..8309454475 100644
--- a/db/dbformat_test.cc
+++ b/db/dbformat_test.cc
@@ -54,8 +54,8 @@ TEST(FormatTest, InternalKey_EncodeDecode) {
     (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1,
     (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1 };
-  for (int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
-    for (int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
+  for (unsigned int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) {
+    for (unsigned int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) {
       TestKey(keys[k], seq[s], kTypeValue);
       TestKey("hello", 1, kTypeDeletion);
     }
diff --git a/db/filename_test.cc b/db/filename_test.cc
index 47353d6c9a..d0b4f94cce 100644
--- a/db/filename_test.cc
+++ b/db/filename_test.cc
@@ -35,7 +35,7 @@ TEST(FileNameTest, Parse) {
     { "LOG.old", 0, kInfoLogFile },
     { "18446744073709551615.log", 18446744073709551615ull, kLogFile },
   };
-  for (int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
+  for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
     std::string f = cases[i].fname;
     ASSERT_TRUE(ParseFileName(f, &number, &type)) << f;
     ASSERT_EQ(cases[i].type, type) << f;
@@ -67,7 +67,7 @@ TEST(FileNameTest, Parse) {
     "100.",
     "100.lop"
   };
-  for (int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
+  for (unsigned int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) {
     std::string f = errors[i];
     ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f;
   };
@@ -81,37 +81,37 @@ TEST(FileNameTest, Construction) {
   fname = CurrentFileName("foo");
   ASSERT_EQ("foo/", std::string(fname.data(), 4));
   ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(0, number);
+  ASSERT_EQ(0U, number);
   ASSERT_EQ(kCurrentFile, type);
 
   fname = LockFileName("foo");
   ASSERT_EQ("foo/", std::string(fname.data(), 4));
   ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(0, number);
+  ASSERT_EQ(0U, number);
   ASSERT_EQ(kDBLockFile, type);
 
   fname = LogFileName("foo", 192);
   ASSERT_EQ("foo/", std::string(fname.data(), 4));
   ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(192, number);
+  ASSERT_EQ(192U, number);
   ASSERT_EQ(kLogFile, type);
 
   fname = TableFileName("bar", 200);
   ASSERT_EQ("bar/", std::string(fname.data(), 4));
   ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(200, number);
+  ASSERT_EQ(200U, number);
   ASSERT_EQ(kTableFile, type);
 
   fname = DescriptorFileName("bar", 100);
   ASSERT_EQ("bar/", std::string(fname.data(), 4));
   ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(100, number);
+  ASSERT_EQ(100U, number);
   ASSERT_EQ(kDescriptorFile, type);
 
   fname = TempFileName("tmp", 999);
   ASSERT_EQ("tmp/", std::string(fname.data(), 4));
   ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type));
-  ASSERT_EQ(999, number);
+  ASSERT_EQ(999U, number);
   ASSERT_EQ(kTempFile, type);
 }
diff --git a/db/log_reader.cc b/db/log_reader.cc
index b35f115aad..ddd620246b 100644
--- a/db/log_reader.cc
+++ b/db/log_reader.cc
@@ -178,7 +178,7 @@ void Reader::ReportDrop(size_t bytes, const Status& reason) {
 
 unsigned int Reader::ReadPhysicalRecord(Slice* result) {
   while (true) {
-    if (buffer_.size() < kHeaderSize) {
+    if (buffer_.size() < (size_t)kHeaderSize) {
       if (!eof_) {
         // Last read was a full read, so this is a trailer to skip
         buffer_.clear();
@@ -189,7 +189,7 @@ unsigned int Reader::ReadPhysicalRecord(Slice* result) {
         ReportDrop(kBlockSize, status);
         eof_ = true;
         return kEof;
-      } else if (buffer_.size() < kBlockSize) {
+      } else if (buffer_.size() < (size_t)kBlockSize) {
         eof_ = true;
       }
       continue;
diff --git a/db/log_test.cc b/db/log_test.cc
index 4c5cf87573..b6a7336827 100644
--- a/db/log_test.cc
+++ b/db/log_test.cc
@@ -276,7 +276,7 @@ TEST(LogTest, MarginalTrailer) {
   // Make a trailer that is exactly the same length as an empty record.
   const int n = kBlockSize - 2*kHeaderSize;
   Write(BigString("foo", n));
-  ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
+  ASSERT_EQ((unsigned int)(kBlockSize - kHeaderSize), WrittenBytes());
   Write("");
   Write("bar");
   ASSERT_EQ(BigString("foo", n), Read());
@@ -289,19 +289,19 @@ TEST(LogTest, MarginalTrailer2) {
   // Make a trailer that is exactly the same length as an empty record.
   const int n = kBlockSize - 2*kHeaderSize;
   Write(BigString("foo", n));
-  ASSERT_EQ(kBlockSize - kHeaderSize, WrittenBytes());
+  ASSERT_EQ((unsigned int)(kBlockSize - kHeaderSize), WrittenBytes());
   Write("bar");
   ASSERT_EQ(BigString("foo", n), Read());
   ASSERT_EQ("bar", Read());
   ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(0, DroppedBytes());
+  ASSERT_EQ(0U, DroppedBytes());
   ASSERT_EQ("", ReportMessage());
 }
 
 TEST(LogTest, ShortTrailer) {
   const int n = kBlockSize - 2*kHeaderSize + 4;
   Write(BigString("foo", n));
-  ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
+  ASSERT_EQ((unsigned int)(kBlockSize - kHeaderSize + 4), WrittenBytes());
   Write("");
   Write("bar");
   ASSERT_EQ(BigString("foo", n), Read());
@@ -313,7 +313,7 @@ TEST(LogTest, ShortTrailer) {
 TEST(LogTest, AlignedEof) {
   const int n = kBlockSize - 2*kHeaderSize + 4;
   Write(BigString("foo", n));
-  ASSERT_EQ(kBlockSize - kHeaderSize + 4, WrittenBytes());
+  ASSERT_EQ((unsigned int)(kBlockSize - kHeaderSize + 4), WrittenBytes());
   ASSERT_EQ(BigString("foo", n), Read());
   ASSERT_EQ("EOF", Read());
 }
@@ -337,7 +337,7 @@ TEST(LogTest, ReadError) {
   Write("foo");
   ForceError();
   ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(kBlockSize, DroppedBytes());
+  ASSERT_EQ((unsigned int)kBlockSize, DroppedBytes());
   ASSERT_EQ("OK", MatchError("read error"));
 }
 
@@ -347,7 +347,7 @@ TEST(LogTest, BadRecordType) {
   Write("foo");
   IncrementByte(6, 100);
   FixChecksum(0, 3);
   ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3, DroppedBytes());
+  ASSERT_EQ(3U, DroppedBytes());
   ASSERT_EQ("OK", MatchError("unknown record type"));
 }
 
@@ -355,7 +355,7 @@ TEST(LogTest, TruncatedTrailingRecord) {
   Write("foo");
   ShrinkSize(4);   // Drop all payload as well as a header byte
   ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(kHeaderSize - 1, DroppedBytes());
+  ASSERT_EQ((unsigned int)(kHeaderSize - 1), DroppedBytes());
   ASSERT_EQ("OK", MatchError("truncated record at end of file"));
 }
 
@@ -363,7 +363,7 @@ TEST(LogTest, BadLength) {
   Write("foo");
   ShrinkSize(1);
   ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(kHeaderSize + 2, DroppedBytes());
+  ASSERT_EQ((unsigned int)(kHeaderSize + 2), DroppedBytes());
   ASSERT_EQ("OK", MatchError("bad record length"));
 }
 
@@ -371,7 +371,7 @@ TEST(LogTest, ChecksumMismatch) {
   Write("foo");
   IncrementByte(0, 10);
   ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(10, DroppedBytes());
+  ASSERT_EQ(10U, DroppedBytes());
   ASSERT_EQ("OK", MatchError("checksum mismatch"));
 }
 
@@ -380,7 +380,7 @@ TEST(LogTest, UnexpectedMiddleType) {
   SetByte(6, kMiddleType);
   FixChecksum(0, 3);
   ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3, DroppedBytes());
+  ASSERT_EQ(3U, DroppedBytes());
   ASSERT_EQ("OK", MatchError("missing start"));
 }
 
@@ -389,7 +389,7 @@ TEST(LogTest, UnexpectedLastType) {
   SetByte(6, kLastType);
   FixChecksum(0, 3);
   ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3, DroppedBytes());
+  ASSERT_EQ(3U, DroppedBytes());
   ASSERT_EQ("OK", MatchError("missing start"));
 }
 
@@ -400,7 +400,7 @@ TEST(LogTest, UnexpectedFullType) {
   FixChecksum(0, 3);
   ASSERT_EQ("bar", Read());
   ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3, DroppedBytes());
+  ASSERT_EQ(3U, DroppedBytes());
   ASSERT_EQ("OK", MatchError("partial record without end"));
 }
 
@@ -411,7 +411,7 @@ TEST(LogTest, UnexpectedFirstType) {
   FixChecksum(0, 3);
   ASSERT_EQ(BigString("bar", 100000), Read());
   ASSERT_EQ("EOF", Read());
-  ASSERT_EQ(3, DroppedBytes());
+  ASSERT_EQ(3U, DroppedBytes());
   ASSERT_EQ("OK", MatchError("partial record without end"));
 }
diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc
index c78f4b4fb1..ecd992389e 100644
--- a/db/skiplist_test.cc
+++ b/db/skiplist_test.cc
@@ -61,9 +61,9 @@ TEST(SkipTest, InsertAndLookup) {
 
   for (int i = 0; i < R; i++) {
     if (list.Contains(i)) {
-      ASSERT_EQ(keys.count(i), 1);
+      ASSERT_EQ(keys.count(i), 1U);
     } else {
-      ASSERT_EQ(keys.count(i), 0);
+      ASSERT_EQ(keys.count(i), 0U);
     }
   }
 
@@ -195,7 +195,7 @@ class ConcurrentTest {
   }
 
   State() {
-    for (int k = 0; k < K; k++) {
+    for (unsigned int k = 0; k < K; k++) {
       Set(k, 0);
     }
   }
@@ -225,7 +225,7 @@ class ConcurrentTest {
   void ReadStep(Random* rnd) {
     // Remember the initial committed state of the skiplist.
     State initial_state;
-    for (int k = 0; k < K; k++) {
+    for (unsigned int k = 0; k < K; k++) {
       initial_state.Set(k, current_.Get(k));
     }
 
@@ -249,8 +249,8 @@ class ConcurrentTest {
 
         // Note that generation 0 is never inserted, so it is ok if
         // <*,0,*> is missing.
-        ASSERT_TRUE((gen(pos) == 0) ||
-                    (gen(pos) > initial_state.Get(key(pos)))
+        ASSERT_TRUE((gen(pos) == 0U) ||
+                    (gen(pos) > (uint64_t)initial_state.Get(key(pos)))
                     ) << "key: " << key(pos)
                       << "; gen: " << gen(pos)
                       << "; initgen: "
diff --git a/db/version_set.cc b/db/version_set.cc
index 22a3d9366b..211cc41238 100644
--- a/db/version_set.cc
+++ b/db/version_set.cc
@@ -455,11 +455,16 @@ int Version::PickLevelForMemTableOutput(
 }
 
 // Store in "*inputs" all files in "level" that overlap [begin,end]
+// If hint_index is specified, then it points to a file in the
+// overlapping range.
+// If file_index is non-NULL, it is set to the index of an overlapping file.
 void Version::GetOverlappingInputs(
     int level,
     const InternalKey* begin,
     const InternalKey* end,
-    std::vector<FileMetaData*>* inputs) {
+    std::vector<FileMetaData*>* inputs,
+    int hint_index,
+    int* file_index) {
   inputs->clear();
   Slice user_begin, user_end;
   if (begin != NULL) {
@@ -470,7 +475,8 @@ void Version::GetOverlappingInputs(
   }
   const Comparator* user_cmp = vset_->icmp_.user_comparator();
   if (begin != NULL && end != NULL && level > 0) {
-    GetOverlappingInputsBinarySearch(level, user_begin, user_end, inputs);
+    GetOverlappingInputsBinarySearch(level, user_begin, user_end, inputs,
+                                     hint_index, file_index);
     return;
   }
   for (size_t i = 0; i < files_[level].size(); ) {
@@ -495,6 +501,8 @@ void Version::GetOverlappingInputs(
         inputs->clear();
         i = 0;
       }
+    } else if (file_index) {
+      *file_index = i;
     }
   }
 }
@@ -508,14 +516,24 @@ void Version::GetOverlappingInputsBinarySearch(
     int level,
     const Slice& user_begin,
     const Slice& user_end,
-    std::vector<FileMetaData*>* inputs) {
+    std::vector<FileMetaData*>* inputs,
+    int hint_index,
+    int* file_index) {
   assert(level > 0);
   int min = 0;
   int mid = 0;
   int max = files_[level].size() -1;
   bool foundOverlap = false;
   const Comparator* user_cmp = vset_->icmp_.user_comparator();
-  while (min <= max) {
+
+  // if the caller already knows the index of a file that has overlap,
+  // then we can skip the binary search.
+  if (hint_index != -1) {
+    mid = hint_index;
+    foundOverlap = true;
+  }
+
+  while (!foundOverlap && min <= max) {
     mid = (min + max)/2;
     FileMetaData* f = files_[level][mid];
     const Slice file_start = f->smallest.user_key();
@@ -534,6 +552,10 @@ void Version::GetOverlappingInputsBinarySearch(
   if (!foundOverlap) {
     return;
   }
+  // returns the index where an overlap is found
+  if (file_index) {
+    *file_index = mid;
+  }
   ExtendOverlappingInputs(level, user_begin, user_end, inputs, mid);
 }
 
@@ -548,13 +570,21 @@ void Version::ExtendOverlappingInputs(
     std::vector<FileMetaData*>* inputs,
     int midIndex) {
 
-  // assert that the file at midIndex overlaps with the range
   const Comparator* user_cmp = vset_->icmp_.user_comparator();
-  assert(midIndex < files_[level].size());
-  assert((user_cmp->Compare(files_[level][midIndex]->largest.user_key(),
-                            user_begin) >= 0) ||
-         (user_cmp->Compare(files_[level][midIndex]->smallest.user_key(),
-                            user_end) <= 0));
+#ifndef NDEBUG
+  {
+    // assert that the file at midIndex overlaps with the range
+    assert(midIndex < files_[level].size());
+    FileMetaData* f = files_[level][midIndex];
+    const Slice fstart = f->smallest.user_key();
+    const Slice flimit = f->largest.user_key();
+    if (user_cmp->Compare(fstart, user_begin) >= 0) {
+      assert(user_cmp->Compare(fstart, user_end) <= 0);
+    } else {
+      assert(user_cmp->Compare(flimit, user_begin) >= 0);
+    }
+  }
+#endif
 
   // check backwards from 'mid' to lower indices
   for (size_t i = midIndex; i < files_[level].size(); i--) {
@@ -864,11 +894,11 @@ VersionSet::VersionSet(const std::string& dbname,
       last_sequence_(0),
       log_number_(0),
       prev_log_number_(0),
+      num_levels_(options_->num_levels),
       descriptor_file_(NULL),
       descriptor_log_(NULL),
       dummy_versions_(this),
       current_(NULL),
-      num_levels_(options_->num_levels),
       compactions_in_progress_(options_->num_levels),
       current_version_number_(0) {
   compact_pointer_ = new std::string[options_->num_levels];
@@ -940,9 +970,8 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
 
   // process all requests in the queue
   ManifestWriter* last_writer = &w;
-  ManifestWriter* first = manifest_writers_.front();
   assert(!manifest_writers_.empty());
-  assert(first == &w);
+  assert(manifest_writers_.front() == &w);
   std::deque<ManifestWriter*>::iterator iter =
       manifest_writers_.begin();
   for (; iter != manifest_writers_.end(); ++iter) {
     last_writer = *iter;
@@ -1014,7 +1043,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
     prev_log_number_ = edit->prev_log_number_;
 
   } else {
-    Log(options_->info_log, "Error in committing version %d",
+    Log(options_->info_log, "Error in committing version %ld",
         v->GetVersionNumber());
     delete v;
     if (!new_manifest_file.empty()) {
@@ -1732,10 +1761,13 @@ Compaction* VersionSet::PickCompactionBySize(int level) {
     }
     // Do not pick this file if its parents at level+1 are being compacted.
    // Maybe we can avoid redoing this work in SetupOtherInputs
-    if (ParentFilesInCompaction(f, level)) {
+    int parent_index = -1;
+    if (ParentFilesInCompaction(f, level, &parent_index)) {
       continue;
     }
     c->inputs_[0].push_back(f);
+    c->base_index_ = i;
+    c->parent_index_ = parent_index;
     break;
   }
 
@@ -1748,7 +1780,7 @@ Compaction* VersionSet::PickCompactionBySize(int level) {
 
 Compaction* VersionSet::PickCompaction() {
   Compaction* c = NULL;
-  int level;
+  int level = -1;
 
   // compute the compactions needed. It is better to do it here
   // and also in LogAndApply(), otherwise the values could be stale.
@@ -1795,7 +1827,8 @@ Compaction* VersionSet::PickCompaction() {
       current_->GetOverlappingInputs(0, &smallest, &largest, &more);
       for (unsigned int i = 0; i < more.size(); i++) {
         FileMetaData* f = more[i];
-        if (!f->being_compacted && !ParentFilesInCompaction(f, level)) {
+        if (!f->being_compacted &&
+            !ParentFilesInCompaction(f, level, &c->parent_index_)) {
           c->inputs_[0].push_back(f);
         }
       }
@@ -1814,9 +1847,11 @@ Compaction* VersionSet::PickCompaction() {
 }
 
 // Returns true if any one of the parent files are being compacted
-bool VersionSet::ParentFilesInCompaction(FileMetaData* f, int level) {
+bool VersionSet::ParentFilesInCompaction(FileMetaData* f, int level,
+                                         int* parent_index) {
   std::vector<FileMetaData*> inputs;
-  current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest, &inputs);
+  current_->GetOverlappingInputs(level+1, &f->smallest, &f->largest,
+                                 &inputs, *parent_index, parent_index);
   return FilesInCompaction(inputs);
 }
 
@@ -1835,7 +1870,8 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
   InternalKey smallest, largest;
   GetRange(c->inputs_[0], &smallest, &largest);
 
-  current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1]);
+  current_->GetOverlappingInputs(level+1, &smallest, &largest, &c->inputs_[1],
+                                 c->parent_index_, &c->parent_index_);
 
   // Get entire range covered by compaction
   InternalKey all_start, all_limit;
@@ -1845,7 +1881,8 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
   // changing the number of "level+1" files we pick up.
   if (!c->inputs_[1].empty()) {
     std::vector<FileMetaData*> expanded0;
-    current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0);
+    current_->GetOverlappingInputs(level, &all_start, &all_limit, &expanded0,
+                                   c->base_index_, NULL);
     const int64_t inputs0_size = TotalFileSize(c->inputs_[0]);
     const int64_t inputs1_size = TotalFileSize(c->inputs_[1]);
     const int64_t expanded0_size = TotalFileSize(expanded0);
@@ -1857,7 +1894,8 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
       GetRange(expanded0, &new_start, &new_limit);
       std::vector<FileMetaData*> expanded1;
       current_->GetOverlappingInputs(level+1, &new_start, &new_limit,
-                                     &expanded1);
+                                     &expanded1, c->parent_index_,
+                                     &c->parent_index_);
       if (expanded1.size() == c->inputs_[1].size() &&
           !FilesInCompaction(expanded1)) {
         Log(options_->info_log,
@@ -1947,7 +1985,9 @@ Compaction::Compaction(int level, uint64_t target_file_size,
       seek_compaction_(seek_compaction),
       grandparent_index_(0),
       seen_key_(false),
-      overlapped_bytes_(0) {
+      overlapped_bytes_(0),
+      base_index_(-1),
+      parent_index_(-1) {
   edit_ = new VersionEdit(number_levels_);
   level_ptrs_ = new size_t[number_levels_];
   for (int i = 0; i < number_levels_; i++) {
diff --git a/db/version_set.h b/db/version_set.h
index a71d61b2a9..0bf6ce626d 100644
--- a/db/version_set.h
+++ b/db/version_set.h
@@ -88,13 +88,17 @@ class Version {
       int level,
       const InternalKey* begin,         // NULL means before all keys
       const InternalKey* end,           // NULL means after all keys
-      std::vector<FileMetaData*>* inputs);
+      std::vector<FileMetaData*>* inputs,
+      int hint_index = -1,              // index of overlap file
+      int* file_index = NULL);          // return index of overlap file
 
   void GetOverlappingInputsBinarySearch(
       int level,
       const Slice& begin,               // NULL means before all keys
       const Slice& end,                 // NULL means after all keys
-      std::vector<FileMetaData*>* inputs);
+      std::vector<FileMetaData*>* inputs,
+      int hint_index,                   // index of overlap file
+      int* file_index);                 // return index of overlap file
 
   void ExtendOverlappingInputs(
       int level,
@@ -496,6 +500,8 @@ class Compaction {
   bool seen_key_;             // Some output key has been seen
   int64_t overlapped_bytes_;  // Bytes of overlap between current output
                               // and grandparent files
+  int base_index_;    // index of the file in files_[level_]
+  int parent_index_;  // index of some file with same range in files_[level_+1]
 
   // State for implementing IsBaseLevelForKey
 
diff --git a/db/version_set_test.cc b/db/version_set_test.cc
index 501e34d133..75c558a71d 100644
--- a/db/version_set_test.cc
+++ b/db/version_set_test.cc
@@ -17,7 +17,7 @@ class FindFileTest {
   FindFileTest() : disjoint_sorted_files_(true) { }
 
   ~FindFileTest() {
-    for (int i = 0; i < files_.size(); i++) {
+    for (unsigned int i = 0; i < files_.size(); i++) {
       delete files_[i];
     }
   }
diff --git a/db/write_batch_test.cc b/db/write_batch_test.cc
index 9064e3d85e..71a5e89ccb 100644
--- a/db/write_batch_test.cc
+++ b/db/write_batch_test.cc
@@ -22,6 +22,7 @@ static std::string PrintContents(WriteBatch* b) {
   Iterator* iter = mem->NewIterator();
   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
     ParsedInternalKey ikey;
+    memset((void *)&ikey, 0, sizeof(ikey));
     ASSERT_TRUE(ParseInternalKey(iter->key(), &ikey));
     switch (ikey.type) {
       case kTypeValue:
@@ -66,7 +67,7 @@ TEST(WriteBatchTest, Multiple) {
   batch.Delete(Slice("box"));
   batch.Put(Slice("baz"), Slice("boo"));
   WriteBatchInternal::SetSequence(&batch, 100);
-  ASSERT_EQ(100, WriteBatchInternal::Sequence(&batch));
+  ASSERT_EQ(100U, WriteBatchInternal::Sequence(&batch));
   ASSERT_EQ(3, WriteBatchInternal::Count(&batch));
   ASSERT_EQ("Put(baz, boo)@102"
             "Delete(box)@101"
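
Note: the hint_index/file_index plumbing above lets compaction carry the index of a known-overlapping file from one GetOverlappingInputs call to the next and skip the binary search entirely. A simplified, self-contained sketch of that protocol (FileRange and FindOverlap are illustrative stand-ins, not this tree's types):

    #include <string>
    #include <vector>

    struct FileRange { std::string smallest, largest; };

    // Returns the index of a file whose [smallest,largest] range contains key,
    // or -1. If the caller already knows an overlapping index, it passes it as
    // hint_index and the binary search is skipped, as in GetOverlappingInputs.
    int FindOverlap(const std::vector<FileRange>& files,
                    const std::string& key, int hint_index) {
      if (hint_index != -1) {
        return hint_index;                    // reuse the cached result
      }
      int min = 0;
      int max = static_cast<int>(files.size()) - 1;
      while (min <= max) {
        int mid = (min + max) / 2;
        if (key < files[mid].smallest) {
          max = mid - 1;
        } else if (key > files[mid].largest) {
          min = mid + 1;
        } else {
          return mid;                         // store and pass back as the hint
        }
      }
      return -1;
    }
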
diff --git a/helpers/memenv/memenv_test.cc b/helpers/memenv/memenv_test.cc
index a44310fed8..d8a5626109 100644
--- a/helpers/memenv/memenv_test.cc
+++ b/helpers/memenv/memenv_test.cc
@@ -36,7 +36,7 @@ TEST(MemEnvTest, Basics) {
   ASSERT_TRUE(!env_->FileExists("/dir/non_existent"));
   ASSERT_TRUE(!env_->GetFileSize("/dir/non_existent", &file_size).ok());
   ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(0, children.size());
+  ASSERT_EQ(0U, children.size());
 
   // Create a file.
   ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
@@ -45,9 +45,9 @@ TEST(MemEnvTest, Basics) {
   // Check that the file exists.
   ASSERT_TRUE(env_->FileExists("/dir/f"));
   ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(0, file_size);
+  ASSERT_EQ(0U, file_size);
   ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(1, children.size());
+  ASSERT_EQ(1U, children.size());
   ASSERT_EQ("f", children[0]);
 
   // Write to the file.
@@ -57,7 +57,7 @@ TEST(MemEnvTest, Basics) {
 
   // Check for expected size.
   ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
-  ASSERT_EQ(3, file_size);
+  ASSERT_EQ(3U, file_size);
 
   // Check that renaming works.
   ASSERT_TRUE(!env_->RenameFile("/dir/non_existent", "/dir/g").ok());
@@ -65,7 +65,7 @@ TEST(MemEnvTest, Basics) {
   ASSERT_TRUE(!env_->FileExists("/dir/f"));
   ASSERT_TRUE(env_->FileExists("/dir/g"));
   ASSERT_OK(env_->GetFileSize("/dir/g", &file_size));
-  ASSERT_EQ(3, file_size);
+  ASSERT_EQ(3U, file_size);
 
   // Check that opening non-existent file fails.
   SequentialFile* seq_file;
@@ -80,7 +80,7 @@ TEST(MemEnvTest, Basics) {
   ASSERT_OK(env_->DeleteFile("/dir/g"));
   ASSERT_TRUE(!env_->FileExists("/dir/g"));
   ASSERT_OK(env_->GetChildren("/dir", &children));
-  ASSERT_EQ(0, children.size());
+  ASSERT_EQ(0U, children.size());
   ASSERT_OK(env_->DeleteDir("/dir"));
 }
 
@@ -106,10 +106,10 @@ TEST(MemEnvTest, ReadWrite) {
   ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Read "world".
   ASSERT_EQ(0, result.compare("world"));
   ASSERT_OK(seq_file->Read(1000, &result, scratch));  // Try reading past EOF.
-  ASSERT_EQ(0, result.size());
+  ASSERT_EQ(0U, result.size());
   ASSERT_OK(seq_file->Skip(100));  // Try to skip past end of file.
   ASSERT_OK(seq_file->Read(1000, &result, scratch));
-  ASSERT_EQ(0, result.size());
+  ASSERT_EQ(0U, result.size());
   delete seq_file;
 
   // Random reads.
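
Note: the 0 -> 0U rewrites in these assertions are one recurring -Wall fix: ASSERT_EQ compares its arguments with ==, and an int literal against a size_t such as children.size() trips -Wsign-compare. The U suffix makes both sides unsigned, as in this standalone illustration:

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> children;
      if (children.size() == 0U) {   // unsigned vs. unsigned: no warning
        std::printf("no children\n");
      }
      return 0;
    }
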
diff --git a/table/filter_block_test.cc b/table/filter_block_test.cc
index 3a2a07cf53..cadb273b02 100644
--- a/table/filter_block_test.cc
+++ b/table/filter_block_test.cc
@@ -29,7 +29,7 @@ class TestHashFilter : public FilterPolicy {
 
   virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const {
     uint32_t h = Hash(key.data(), key.size(), 1);
-    for (int i = 0; i + 4 <= filter.size(); i += 4) {
+    for (unsigned int i = 0; i + 4 <= filter.size(); i += 4) {
       if (h == DecodeFixed32(filter.data() + i)) {
         return true;
       }
diff --git a/table/table.cc b/table/table.cc
index ad739e5463..59117ddcbe 100644
--- a/table/table.cc
+++ b/table/table.cc
@@ -238,7 +238,6 @@ Status Table::InternalGet(const ReadOptions& options, const Slice& k,
       !filter->KeyMayMatch(handle.offset(), k)) {
     // Not found
   } else {
-    Slice handle = iiter->value();
     bool didIO = false;
     Iterator* block_iter = BlockReader(this, options, iiter->value(),
                                        &didIO);
diff --git a/table/table_test.cc b/table/table_test.cc
index c78c4689da..04b8bdeb74 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -75,7 +75,7 @@ static void Increment(const Comparator* cmp, std::string* key) {
 }
 
 // An STL comparator that uses a Comparator
-namespace {
+namespace anon {
 struct STLLessThan {
   const Comparator* cmp;
@@ -134,13 +134,13 @@ class StringSource: public RandomAccessFile {
   std::string contents_;
 };
 
-typedef std::map<std::string, std::string, STLLessThan> KVMap;
+typedef std::map<std::string, std::string, anon::STLLessThan> KVMap;
 
 // Helper class for tests to unify the interface between
 // BlockBuilder/TableBuilder and Block/Table.
 class Constructor {
  public:
-  explicit Constructor(const Comparator* cmp) : data_(STLLessThan(cmp)) { }
+  explicit Constructor(const Comparator* cmp) : data_(anon::STLLessThan(cmp)) { }
   virtual ~Constructor() { }
 
   void Add(const std::string& key, const Slice& value) {
@@ -464,7 +464,7 @@ static std::vector<TestArgs> Generate_Arg_List()
   for(int i =0; i < test_type_len; i++)
     for (int j =0; j < reverse_compare_len; j++)
       for (int k =0; k < restart_interval_len; k++)
-        for (int n =0; n < compression_types.size(); n++) {
+        for (unsigned int n =0; n < compression_types.size(); n++) {
           TestArgs one_arg;
          one_arg.type = test_type[i];
          one_arg.reverse_compare = reverse_compare[j];
@@ -690,7 +690,7 @@ class Harness {
 // Test the empty key
 TEST(Harness, SimpleEmptyKey) {
   std::vector<TestArgs> args = Generate_Arg_List();
-  for (int i = 0; i < args.size(); i++) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     Init(args[i]);
     Random rnd(test::RandomSeed() + 1);
     Add("", "v");
@@ -700,7 +700,7 @@ TEST(Harness, SimpleEmptyKey) {
 
 TEST(Harness, SimpleSingle) {
   std::vector<TestArgs> args = Generate_Arg_List();
-  for (int i = 0; i < args.size(); i++) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     Init(args[i]);
     Random rnd(test::RandomSeed() + 2);
     Add("abc", "v");
@@ -710,7 +710,7 @@ TEST(Harness, SimpleSingle) {
 
 TEST(Harness, SimpleMulti) {
   std::vector<TestArgs> args = Generate_Arg_List();
-  for (int i = 0; i < args.size(); i++) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     Init(args[i]);
     Random rnd(test::RandomSeed() + 3);
     Add("abc", "v");
@@ -722,7 +722,7 @@ TEST(Harness, SimpleMulti) {
 
 TEST(Harness, SimpleSpecialKey) {
   std::vector<TestArgs> args = Generate_Arg_List();
-  for (int i = 0; i < args.size(); i++) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     Init(args[i]);
     Random rnd(test::RandomSeed() + 4);
     Add("\xff\xff", "v3");
@@ -732,7 +732,7 @@ TEST(Harness, SimpleSpecialKey) {
 
 TEST(Harness, Randomized) {
   std::vector<TestArgs> args = Generate_Arg_List();
-  for (int i = 0; i < args.size(); i++) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     Init(args[i]);
     Random rnd(test::RandomSeed() + 5);
     for (int num_entries = 0; num_entries < 2000;
diff --git a/tools/db_stress.cc b/tools/db_stress.cc
index 833ad2c38a..7ed1986535 100644
--- a/tools/db_stress.cc
+++ b/tools/db_stress.cc
@@ -109,7 +109,7 @@ static int FLAGS_level0_stop_writes_trigger = 12;
 static int FLAGS_level0_slowdown_writes_trigger = 8;
 
 // Ratio of reads to writes (expressed as a percentage)
-static int FLAGS_readwritepercent = 10;
+static unsigned int FLAGS_readwritepercent = 10;
 
 // Option to disable compation triggered by read.
 static int FLAGS_disable_seek_compaction = false;
@@ -225,7 +225,6 @@ class Stats {
     double bytes_mb = bytes_ / 1048576.0;
     double rate = bytes_mb / elapsed;
     double throughput = (double)done_/elapsed;
-    long percent_writes = (writes_ * 100) / done_;
 
     fprintf(stdout, "%-12s: ", name);
     fprintf(stdout, "%.3f micros/op %ld ops/sec\n",
@@ -395,7 +394,7 @@ class StressTest {
       db_(NULL) {
     std::vector<std::string> files;
     FLAGS_env->GetChildren(FLAGS_db, &files);
-    for (int i = 0; i < files.size(); i++) {
+    for (unsigned int i = 0; i < files.size(); i++) {
      if (Slice(files[i]).starts_with("heap-")) {
         FLAGS_env->DeleteFile(std::string(FLAGS_db) + "/" + files[i]);
       }
@@ -445,12 +444,12 @@ class StressTest {
       }
     }
 
-    for (int i = 1; i < n; i++) {
+    for (unsigned int i = 1; i < n; i++) {
       threads[0]->stats.Merge(threads[i]->stats);
     }
     threads[0]->stats.Report("Stress Test");
 
-    for (int i = 0; i < n; i++) {
+    for (unsigned int i = 0; i < n; i++) {
       delete threads[i];
       threads[i] = NULL;
     }
@@ -588,7 +587,7 @@ class StressTest {
 
   static void PrintKeyValue(uint32_t key, const char *value, size_t sz) {
     if (!FLAGS_verbose) return;
-    fprintf(stdout, "%u ==> (%u) ", key, sz);
+    fprintf(stdout, "%u ==> (%u) ", key, (unsigned int)sz);
     for (size_t i=0; i<sz; i++) {
@@ ... @@ class StressTest {
     assert(value_sz >= sizeof(uint32_t));
     *((uint32_t*)v) = rand;
-    char c = (char) rand;
     for (size_t i=sizeof(uint32_t); i < value_sz; i++) {
       v[i] = (char)(rand ^ i);
     }
@@ -610,13 +608,13 @@ class StressTest {
     fprintf(stdout, "LevelDB version     : %d.%d\n", kMajorVersion,
             kMinorVersion);
     fprintf(stdout, "Number of threads   : %d\n", FLAGS_threads);
-    fprintf(stdout, "Ops per thread      : %ld\n", FLAGS_ops_per_thread);
-    fprintf(stdout, "Read percentage     : %ld\n", FLAGS_readwritepercent);
+    fprintf(stdout, "Ops per thread      : %d\n", FLAGS_ops_per_thread);
+    fprintf(stdout, "Read percentage     : %d\n", FLAGS_readwritepercent);
     fprintf(stdout, "Max key             : %ld\n", FLAGS_max_key);
-    fprintf(stdout, "Num keys per lock   : %ld\n",
+    fprintf(stdout, "Num keys per lock   : %d\n",
             1 << FLAGS_log2_keys_per_lock);
 
-    char* compression;
+    char* compression = "";
     switch (FLAGS_compression_type) {
       case leveldb::kNoCompression:
         compression = (char *)std::string("none").c_str();
@@ -698,7 +696,6 @@ int main(int argc, char** argv) {
   std::string default_db_path;
 
   for (int i = 1; i < argc; i++) {
-    double d;
     int n;
     uint32_t u;
     long l;
diff --git a/tools/sst_dump.cc b/tools/sst_dump.cc
index 374377eff1..bb1be35d26 100644
--- a/tools/sst_dump.cc
+++ b/tools/sst_dump.cc
@@ -85,7 +85,7 @@ static void print_help() {
 
 int main(int argc, char** argv) {
-  const char* dir_or_file;
+  const char* dir_or_file = NULL;
   uint64_t read_num = -1;
   std::string command;
 
diff --git a/util/arena_test.cc b/util/arena_test.cc
index 63d1778034..d5c33d75b7 100644
--- a/util/arena_test.cc
+++ b/util/arena_test.cc
@@ -40,7 +40,7 @@ TEST(ArenaTest, Simple) {
       r = arena.Allocate(s);
     }
 
-    for (int b = 0; b < s; b++) {
+    for (unsigned int b = 0; b < s; b++) {
       // Fill the "i"th allocation with a known bit pattern
       r[b] = i % 256;
     }
@@ -51,12 +51,12 @@ TEST(ArenaTest, Simple) {
       ASSERT_LE(arena.MemoryUsage(), bytes * 1.10);
     }
   }
-  for (int i = 0; i < allocated.size(); i++) {
+  for (unsigned int i = 0; i < allocated.size(); i++) {
     size_t num_bytes = allocated[i].first;
     const char* p = allocated[i].second;
-    for (int b = 0; b < num_bytes; b++) {
+    for (unsigned int b = 0; b < num_bytes; b++) {
       // Check the "i"th allocation for the known bit pattern
-      ASSERT_EQ(int(p[b]) & 0xff, i % 256);
+      ASSERT_EQ(int(p[b]) & 0xff, (int)(i % 256));
     }
   }
 }
diff --git a/util/auto_split_logger.h b/util/auto_split_logger.h
index 89429b0b4f..0a76977711 100644
--- a/util/auto_split_logger.h
+++ b/util/auto_split_logger.h
@@ -33,8 +33,11 @@ class AutoSplitLogger : public Logger {
  public:
   AutoSplitLogger(Env* env, const std::string& dbname,
                   const std::string& db_log_dir, size_t log_max_size):
-    env_(env), dbname_(dbname), db_log_dir_(db_log_dir),
-    MAX_LOG_FILE_SIZE(log_max_size), status_(Status::OK()) {
+    dbname_(dbname),
+    db_log_dir_(db_log_dir),
+    env_(env),
+    MAX_LOG_FILE_SIZE(log_max_size),
+    status_(Status::OK()) {
     env->GetAbsolutePath(dbname, &db_absolute_path_);
     log_fname_ = InfoLogFileName(dbname_, db_absolute_path_, db_log_dir_);
     InitLogger();
@@ -67,7 +70,7 @@ class AutoSplitLogger : public Logger {
       logger_ = NULL;
     }
     if (logger_->GetLogFileSize() ==
-        Logger::DO_NOT_SUPPORT_GET_LOG_FILE_SIZE) {
+        (size_t)Logger::DO_NOT_SUPPORT_GET_LOG_FILE_SIZE) {
       status_ = Status::NotSupported(
           "The underlying logger doesn't support GetLogFileSize()");
     }
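
Note: the reshuffled initializer lists here and in util/options.cc and db/version_set.cc address -Wreorder: members are always constructed in declaration order, so -Wall warns when the mem-initializer list is written in a different order. An illustrative class (not the real AutoSplitLogger) showing the warning-free form:

    #include <string>

    class SplitLoggerLike {
      std::string dbname_;       // declared (and constructed) first
      std::string db_log_dir_;   // second
      void* env_;                // third
     public:
      // Initializers listed in declaration order, so -Wreorder stays quiet.
      SplitLoggerLike(void* env, const std::string& dbname,
                      const std::string& db_log_dir)
          : dbname_(dbname), db_log_dir_(db_log_dir), env_(env) {}
    };
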
diff --git a/util/bloom.cc b/util/bloom.cc
index 24c17f2f43..1421053be1 100644
--- a/util/bloom.cc
+++ b/util/bloom.cc
@@ -58,7 +58,7 @@ class BloomFilterPolicy : public FilterPolicy {
     dst->resize(init_size + bytes, 0);
     dst->push_back(static_cast<char>(k_));  // Remember # of probes in filter
     char* array = &(*dst)[init_size];
-    for (size_t i = 0; i < n; i++) {
+    for (size_t i = 0; i < (size_t)n; i++) {
       // Use double-hashing to generate a sequence of hash values.
       // See analysis in [Kirsch,Mitzenmacher 2006].
       uint32_t h = hash_func_(keys[i]);
diff --git a/util/bloom_test.cc b/util/bloom_test.cc
index 4a6ea1b7c8..d29a0b201d 100644
--- a/util/bloom_test.cc
+++ b/util/bloom_test.cc
@@ -125,7 +125,7 @@ TEST(BloomTest, VaryingLengths) {
     }
     Build();
 
-    ASSERT_LE(FilterSize(), (length * 10 / 8) + 40) << length;
+    ASSERT_LE(FilterSize(), (size_t)((length * 10 / 8) + 40)) << length;
 
     // All added keys must match
     for (int i = 0; i < length; i++) {
diff --git a/util/cache.cc b/util/cache.cc
index 6dfa4d121b..79e5cc9bd0 100644
--- a/util/cache.cc
+++ b/util/cache.cc
@@ -116,7 +116,6 @@ class HandleTable {
       LRUHandle* h = list_[i];
       while (h != NULL) {
         LRUHandle* next = h->next_hash;
-        Slice key = h->key();
         uint32_t hash = h->hash;
         LRUHandle** ptr = &new_list[hash & (new_length - 1)];
         h->next_hash = *ptr;
@@ -268,7 +267,6 @@ void LRUCache::Erase(const Slice& key, uint32_t hash) {
 }
 
 static int kNumShardBits = 4;  // default values, can be overridden
-static int kNumShards = 1 << kNumShardBits;
 
 class ShardedLRUCache : public Cache {
  private:
diff --git a/util/cache_test.cc b/util/cache_test.cc
index 43716715a8..78e9e59e22 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -83,28 +83,28 @@ TEST(CacheTest, HitAndMiss) {
   ASSERT_EQ(201, Lookup(200));
   ASSERT_EQ(-1, Lookup(300));
 
-  ASSERT_EQ(1, deleted_keys_.size());
+  ASSERT_EQ(1U, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
   ASSERT_EQ(101, deleted_values_[0]);
 }
 
 TEST(CacheTest, Erase) {
   Erase(200);
-  ASSERT_EQ(0, deleted_keys_.size());
+  ASSERT_EQ(0U, deleted_keys_.size());
 
   Insert(100, 101);
   Insert(200, 201);
   Erase(100);
   ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(1, deleted_keys_.size());
+  ASSERT_EQ(1U, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
   ASSERT_EQ(101, deleted_values_[0]);
 
   Erase(100);
   ASSERT_EQ(-1, Lookup(100));
   ASSERT_EQ(201, Lookup(200));
-  ASSERT_EQ(1, deleted_keys_.size());
+  ASSERT_EQ(1U, deleted_keys_.size());
 }
 
 TEST(CacheTest, EntriesArePinned) {
@@ -115,19 +115,19 @@ TEST(CacheTest, EntriesArePinned) {
   Insert(100, 102);
   Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
   ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
-  ASSERT_EQ(0, deleted_keys_.size());
+  ASSERT_EQ(0U, deleted_keys_.size());
 
   cache_->Release(h1);
-  ASSERT_EQ(1, deleted_keys_.size());
+  ASSERT_EQ(1U, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[0]);
   ASSERT_EQ(101, deleted_values_[0]);
 
   Erase(100);
   ASSERT_EQ(-1, Lookup(100));
-  ASSERT_EQ(1, deleted_keys_.size());
+  ASSERT_EQ(1U, deleted_keys_.size());
 
   cache_->Release(h2);
-  ASSERT_EQ(2, deleted_keys_.size());
+  ASSERT_EQ(2U, deleted_keys_.size());
   ASSERT_EQ(100, deleted_keys_[1]);
   ASSERT_EQ(102, deleted_values_[1]);
 }
diff --git a/util/coding.cc b/util/coding.cc
index dbd7a6545c..9148713e0a 100644
--- a/util/coding.cc
+++ b/util/coding.cc
@@ -79,7 +79,7 @@ void PutVarint32(std::string* dst, uint32_t v) {
 }
 
 char* EncodeVarint64(char* dst, uint64_t v) {
-  static const int B = 128;
+  static const unsigned int B = 128;
   unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
   while (v >= B) {
     *(ptr++) = (v & (B-1)) | B;
diff --git a/util/coding_test.cc b/util/coding_test.cc
index 2c52b17b60..465a88cffc 100644
--- a/util/coding_test.cc
+++ b/util/coding_test.cc
@@ -55,7 +55,7 @@ TEST(Coding, Fixed64) {
 TEST(Coding, EncodingOutput) {
   std::string dst;
   PutFixed32(&dst, 0x04030201);
-  ASSERT_EQ(4, dst.size());
+  ASSERT_EQ(4U, dst.size());
   ASSERT_EQ(0x01, static_cast<int>(dst[0]));
   ASSERT_EQ(0x02, static_cast<int>(dst[1]));
   ASSERT_EQ(0x03, static_cast<int>(dst[2]));
@@ -63,7 +63,7 @@ TEST(Coding, EncodingOutput) {
   dst.clear();
   PutFixed64(&dst, 0x0807060504030201ull);
-  ASSERT_EQ(8, dst.size());
+  ASSERT_EQ(8U, dst.size());
   ASSERT_EQ(0x01, static_cast<int>(dst[0]));
   ASSERT_EQ(0x02, static_cast<int>(dst[1]));
   ASSERT_EQ(0x03, static_cast<int>(dst[2]));
@@ -112,13 +112,13 @@ TEST(Coding, Varint64) {
   };
 
   std::string s;
-  for (int i = 0; i < values.size(); i++) {
+  for (unsigned int i = 0; i < values.size(); i++) {
     PutVarint64(&s, values[i]);
   }
 
   const char* p = s.data();
   const char* limit = p + s.size();
-  for (int i = 0; i < values.size(); i++) {
+  for (unsigned int i = 0; i < values.size(); i++) {
     ASSERT_TRUE(p < limit);
     uint64_t actual;
     const char* start = p;
@@ -143,7 +143,7 @@ TEST(Coding, Varint32Truncation) {
   std::string s;
   PutVarint32(&s, large_value);
   uint32_t result;
-  for (int len = 0; len < s.size() - 1; len++) {
+  for (unsigned int len = 0; len < s.size() - 1; len++) {
     ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + len, &result) == NULL);
   }
   ASSERT_TRUE(GetVarint32Ptr(s.data(), s.data() + s.size(), &result) != NULL);
@@ -162,7 +162,7 @@ TEST(Coding, Varint64Truncation) {
   std::string s;
   PutVarint64(&s, large_value);
   uint64_t result;
-  for (int len = 0; len < s.size() - 1; len++) {
+  for (unsigned int len = 0; len < s.size() - 1; len++) {
     ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + len, &result) == NULL);
   }
   ASSERT_TRUE(GetVarint64Ptr(s.data(), s.data() + s.size(), &result) != NULL);
diff --git a/util/crc32c_test.cc b/util/crc32c_test.cc
index 4b957ee120..28ae5b79fe 100644
--- a/util/crc32c_test.cc
+++ b/util/crc32c_test.cc
@@ -15,20 +15,20 @@ TEST(CRC, StandardResults) {
   char buf[32];
 
   memset(buf, 0, sizeof(buf));
-  ASSERT_EQ(0x8a9136aa, Value(buf, sizeof(buf)));
+  ASSERT_EQ(0x8a9136aaU, Value(buf, sizeof(buf)));
 
   memset(buf, 0xff, sizeof(buf));
-  ASSERT_EQ(0x62a8ab43, Value(buf, sizeof(buf)));
+  ASSERT_EQ(0x62a8ab43U, Value(buf, sizeof(buf)));
 
   for (int i = 0; i < 32; i++) {
     buf[i] = i;
   }
-  ASSERT_EQ(0x46dd794e, Value(buf, sizeof(buf)));
+  ASSERT_EQ(0x46dd794eU, Value(buf, sizeof(buf)));
 
   for (int i = 0; i < 32; i++) {
     buf[i] = 31 - i;
   }
-  ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
+  ASSERT_EQ(0x113fdb5cU, Value(buf, sizeof(buf)));
 
   unsigned char data[48] = {
     0x01, 0xc0, 0x00, 0x00,
diff --git a/util/env_test.cc b/util/env_test.cc
index b72cb44384..dcc1457e72 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -61,7 +61,7 @@ TEST(EnvPosixTest, RunMany) {
   Env::Default()->SleepForMicroseconds(kDelayMicros);
   void* cur = last_id.Acquire_Load();
-  ASSERT_EQ(4, reinterpret_cast<uintptr_t>(cur));
+  ASSERT_EQ(4U, reinterpret_cast<uintptr_t>(cur));
 }
 
 struct State {
diff --git a/util/ldb_cmd.cc b/util/ldb_cmd.cc
index a8e5079907..8e932a5e45 100644
--- a/util/ldb_cmd.cc
+++ b/util/ldb_cmd.cc
@@ -12,7 +12,7 @@ const char* LDBCommand::HEX_ARG = "--hex";
 
 Compactor::Compactor(std::string& db_name, std::vector<std::string>& args) :
   LDBCommand(db_name, args), null_from_(true), null_to_(true), hex_(false) {
-  for (int i = 0; i < args.size(); i++) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     std::string& arg = args.at(i);
     if (arg.find(FROM_ARG) == 0) {
       null_from_ = false;
@@ -68,10 +68,15 @@ const char* DBDumper::STATS_ARG = "--stats";
 const char* DBDumper::HEX_OUTPUT_ARG = "--output_hex";
 
 DBDumper::DBDumper(std::string& db_name, std::vector<std::string>& args) :
-  LDBCommand(db_name, args), null_from_(true), null_to_(true), hex_(false),
-  count_only_(false), print_stats_(false), max_keys_(-1),
-  hex_output_(false) {
-  for (int i = 0; i < args.size(); i++) {
+  LDBCommand(db_name, args),
+  null_from_(true),
+  null_to_(true),
+  max_keys_(-1),
+  count_only_(false),
+  print_stats_(false),
+  hex_(false),
+  hex_output_(false) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     std::string& arg = args.at(i);
     if (arg.find(FROM_ARG) == 0) {
       null_from_ = false;
@@ -154,12 +159,12 @@ void DBDumper::DoCommand() {
     if (!count_only_) {
       if (hex_output_) {
         std::string str = iter->key().ToString();
-        for (int i = 0; i < str.length(); ++i) {
+        for (unsigned int i = 0; i < str.length(); ++i) {
          fprintf(stdout, "%X", str[i]);
         }
         fprintf(stdout, " ==> ");
         str = iter->value().ToString();
-        for (int i = 0; i < str.length(); ++i) {
+        for (unsigned int i = 0; i < str.length(); ++i) {
          fprintf(stdout, "%X", str[i]);
         }
         fprintf(stdout, "\n");
@@ -183,7 +188,7 @@ ReduceDBLevels::ReduceDBLevels(std::string& db_name,
   : LDBCommand(db_name, args),
     new_levels_(-1),
     print_old_levels_(false) {
-  for (int i = 0; i < args.size(); i++) {
+  for (unsigned int i = 0; i < args.size(); i++) {
     std::string& arg = args.at(i);
     if (arg.find(NEW_LEVLES_ARG) == 0) {
       new_levels_ = atoi(arg.substr(strlen(NEW_LEVLES_ARG)).c_str());
diff --git a/util/ldb_cmd.h b/util/ldb_cmd.h
index dc4ab00545..213f092a62 100644
--- a/util/ldb_cmd.h
+++ b/util/ldb_cmd.h
@@ -143,7 +143,7 @@ public:
 
   static std::string HexToString(const std::string& str) {
     std::string parsed;
-    for (int i = 0; i < str.length();) {
+    for (unsigned int i = 0; i < str.length();) {
       int c;
       sscanf(str.c_str() + i, "%2X", &c);
       parsed.push_back(c);
diff --git a/util/logging.cc b/util/logging.cc
index 22cf278512..9fb4630458 100644
--- a/util/logging.cc
+++ b/util/logging.cc
@@ -61,7 +61,7 @@ bool ConsumeDecimalNumber(Slice* in, uint64_t* val) {
     char c = (*in)[0];
     if (c >= '0' && c <= '9') {
       ++digits;
-      const int delta = (c - '0');
+      const unsigned int delta = (c - '0');
       static const uint64_t kMaxUint64 = ~static_cast<uint64_t>(0);
       if (v > kMaxUint64/10 ||
           (v == kMaxUint64/10 && delta > kMaxUint64%10)) {
diff --git a/util/options.cc b/util/options.cc
index 64948f34bc..b992f2824c 100644
--- a/util/options.cc
+++ b/util/options.cc
@@ -44,12 +44,12 @@ Options::Options()
       db_stats_log_interval(1800),
       db_log_dir(""),
       disable_seek_compaction(false),
-      no_block_cache(false),
-      table_cache_numshardbits(4),
+      delete_obsolete_files_period_micros(0),
       max_background_compactions(1),
       max_log_file_size(0),
-      delete_obsolete_files_period_micros(0),
       rate_limit(0.0),
+      no_block_cache(false),
+      table_cache_numshardbits(4),
       CompactionFilter(NULL) {
 }
 
@@ -64,7 +64,7 @@ Options::Dump(
   Log(log,"                       Options.env: %p", env);
   Log(log,"                  Options.info_log: %p", info_log);
   Log(log,"         Options.write_buffer_size: %zd", write_buffer_size);
-  Log(log,"   Options.max_write_buffer_number: %zd", max_write_buffer_number);
+  Log(log,"   Options.max_write_buffer_number: %d", max_write_buffer_number);
   Log(log,"            Options.max_open_files: %d", max_open_files);
   Log(log,"               Options.block_cache: %p", block_cache);
   if (block_cache) {
@@ -74,7 +74,7 @@ Options::Dump(
   Log(log,"                Options.block_size: %zd", block_size);
   Log(log,"    Options.block_restart_interval: %d", block_restart_interval);
   if (compression_per_level != NULL) {
-    for (unsigned int i = 0; i < num_levels; i++){
+    for (int i = 0; i < num_levels; i++){
       Log(log,"           Options.compression[%d]: %d", i,
           compression_per_level[i]);
     }
@@ -86,8 +86,8 @@ Options::Dump(
   Log(log,"                Options.num_levels: %d", num_levels);
   Log(log,"           Options.disableDataSync: %d", disableDataSync);
   Log(log,"                 Options.use_fsync: %d", use_fsync);
-  Log(log,"         Options.max_log_file_size: %d", max_log_file_size);
-  Log(log,"    Options.db_stats_log_interval: %d",
+  Log(log,"         Options.max_log_file_size: %ld", max_log_file_size);
+  Log(log,"    Options.db_stats_log_interval: %d",
       db_stats_log_interval);
   Log(log,"   Options.compression_opts.window_bits: %d",
       compression_opts.window_bits);
diff --git a/util/testharness.cc b/util/testharness.cc
index eb1bdd554a..2060fca611 100644
--- a/util/testharness.cc
+++ b/util/testharness.cc
@@ -38,7 +38,7 @@ int RunAllTests() {
   int num = 0;
   if (tests != NULL) {
-    for (int i = 0; i < tests->size(); i++) {
+    for (unsigned int i = 0; i < tests->size(); i++) {
       const Test& t = (*tests)[i];
       if (matcher != NULL) {
         std::string name = t.base;
diff --git a/util/testutil.cc b/util/testutil.cc
index 538d09516d..0b110edc6b 100644
--- a/util/testutil.cc
+++ b/util/testutil.cc
@@ -40,7 +40,7 @@ extern Slice CompressibleString(Random* rnd, double compressed_fraction,
 
   // Duplicate the random data until we have filled "len" bytes
   dst->clear();
-  while (dst->size() < len) {
+  while (dst->size() < (unsigned int)len) {
     dst->append(raw_data);
  }
  dst->resize(len);
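
Note: another recurring fix in this change is casting 64-bit values before handing them to printf-style loggers, e.g. (long long unsigned int)delayed in db_impl.cc and %ld for max_log_file_size above: %llu requires exactly an unsigned long long, and uint64_t maps to unsigned long on some platforms, so -Wall's -Wformat flags the uncast call. A standalone illustration:

    #include <cstdio>
    #include <stdint.h>

    int main() {
      uint64_t delayed = 1234;
      // Without the cast, -Wformat may warn wherever uint64_t is not
      // unsigned long long; the cast makes the call portable.
      std::printf("delaying write %llu usecs\n",
                  (long long unsigned int)delayed);
      return 0;
    }
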