mirror of https://github.com/facebook/rocksdb.git
Fix clang analyzer errors
Summary:
Fixing errors reported by clang static analyzer.
* Removing some unused variables.
* Adding assertions to fix false positives reported by clang analyzer.
* Adding `__clang_analyzer__` macro to suppress false positive warnings.

Test Plan: USE_CLANG=1 OPT=-g make analyze -j64

Reviewers: andrewkr, sdong

Reviewed By: sdong

Subscribers: andrewkr, dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D60549
parent 61dbfbb6ce
commit 296545a2c7
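The summary above describes two recurring techniques in this change: adding an assert() so the analyzer can rule out a code path it would otherwise flag, and wrapping analyzer-confusing code in an #ifndef __clang_analyzer__ guard so the analyzer never sees it while regular builds compile it unchanged. The following standalone sketch illustrates both patterns; the Node, head, Push, and ReadValue names are illustrative only (loosely modeled on the FlushScheduler hunk below) and are not part of the commit.

// Illustrative sketch only -- not code from this commit.
#include <atomic>
#include <cassert>

struct Node {
  int value;
  Node* next;
};

std::atomic<Node*> head{nullptr};

int ReadValue(Node* node) {
  // Pattern 1: assert() states an invariant the analyzer cannot prove on its
  // own, so it stops reporting a null-dereference false positive here.
  assert(node != nullptr);
  return node->value;
}

void Push(int value) {
// Pattern 2: hide the code from the analyzer entirely. The analyzer thinks
// the new node leaks because it does not model the lock-free list taking
// ownership of it.
#ifndef __clang_analyzer__
  Node* node = new Node{value, head.load(std::memory_order_relaxed)};
  while (!head.compare_exchange_strong(node->next, node,
                                       std::memory_order_relaxed,
                                       std::memory_order_relaxed)) {
    // compare_exchange_strong reloads node->next on failure, so just retry.
  }
#endif  // __clang_analyzer__
}

Because __clang_analyzer__ is defined only while the clang static analyzer runs, the guarded code is still compiled into every normal build.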
@@ -292,9 +292,6 @@ Status FlushJob::WriteLevel0Table(const autovector<MemTable*>& mems,
  }
  base->Unref();

  // re-acquire the most current version
  base = cfd_->current();

  // Note that if file_size is zero, the file has been deleted and
  // should not be added to the manifest.
  if (s.ok() && meta->fd.GetFileSize() > 0) {
@@ -20,6 +20,8 @@ void FlushScheduler::ScheduleFlush(ColumnFamilyData* cfd) {
  }
#endif  // NDEBUG
  cfd->Ref();
// Suppress false positive clang analyzer warnings.
#ifndef __clang_analyzer__
  Node* node = new Node{cfd, head_.load(std::memory_order_relaxed)};
  while (!head_.compare_exchange_strong(
      node->next, node, std::memory_order_relaxed, std::memory_order_relaxed)) {
@@ -28,6 +30,7 @@ void FlushScheduler::ScheduleFlush(ColumnFamilyData* cfd) {
    // inter-thread synchronization, so we don't even need release
    // semantics for this CAS
  }
#endif  // __clang_analyzer__
}

ColumnFamilyData* FlushScheduler::TakeNextColumnFamily() {
@@ -91,7 +91,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch,
  Slice fragment;
  while (true) {
    uint64_t physical_record_offset = end_of_buffer_offset_ - buffer_.size();
    size_t drop_size;
    size_t drop_size = 0;
    const unsigned int record_type = ReadPhysicalRecord(&fragment, &drop_size);
    switch (record_type) {
      case kFullType:
@@ -199,10 +199,11 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch,
    scratch->clear();
    return false;
  }
  if (record_type == kBadRecordLen)
  if (record_type == kBadRecordLen) {
    ReportCorruption(drop_size, "bad record length");
  else
  } else {
    ReportCorruption(drop_size, "checksum mismatch");
  }
  if (in_fragmented_record) {
    ReportCorruption(scratch->size(), "error in middle of record");
    in_fragmented_record = false;
@@ -26,6 +26,7 @@ Status MergeHelper::TimedFullMerge(const MergeOperator* merge_operator,
  assert(merge_operator != nullptr);

  if (operands.size() == 0) {
    assert(value != nullptr && result != nullptr);
    result->assign(value->data(), value->size());
    return Status::OK();
  }
@@ -2746,7 +2746,7 @@ Status VersionSet::Recover(
    }
  }

  for (auto builder : builders) {
  for (auto& builder : builders) {
    delete builder.second;
  }
@@ -73,7 +73,7 @@ void BlockIter::Prev() {
  const CachedPrevEntry& current_prev_entry =
      prev_entries_[prev_entries_idx_];

  const char* key_ptr = current_prev_entry.key_ptr;
  const char* key_ptr = nullptr;
  if (current_prev_entry.key_ptr != nullptr) {
    // The key is not delta encoded and stored in the data block
    key_ptr = current_prev_entry.key_ptr;
@@ -1052,6 +1052,7 @@ InternalIterator* BlockBasedTable::NewIndexIterator(
  Status s;
  s = CreateIndexReader(&index_reader);
  if (s.ok()) {
    assert(index_reader != nullptr);
    s = block_cache->Insert(key, index_reader, index_reader->usable_size(),
                            &DeleteCachedIndexEntry, &cache_handle);
  }
@@ -1062,6 +1063,9 @@ InternalIterator* BlockBasedTable::NewIndexIterator(
    RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usable_size);
    RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usable_size);
  } else {
    if (index_reader != nullptr) {
      delete index_reader;
    }
    RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
    // make sure if something goes wrong, index_reader shall remain intact.
    if (input_iter != nullptr) {
@@ -1189,7 +1193,8 @@ InternalIterator* BlockBasedTable::NewDataBlockIterator(
  }

  InternalIterator* iter;
  if (s.ok() && block.value != nullptr) {
  if (s.ok()) {
    assert(block.value != nullptr);
    iter = block.value->NewIterator(&rep->internal_comparator, input_iter);
    if (block.cache_handle != nullptr) {
      iter->RegisterCleanup(&ReleaseCachedEntry, block_cache,
@@ -1198,6 +1203,7 @@ InternalIterator* BlockBasedTable::NewDataBlockIterator(
      iter->RegisterCleanup(&DeleteHeldResource<Block>, block.value, nullptr);
    }
  } else {
    assert(block.value == nullptr);
    if (input_iter != nullptr) {
      input_iter->SetStatus(s);
      iter = input_iter;
@@ -136,6 +136,7 @@ class BlockPrefixIndex::Builder {
      assert(prefixes_per_bucket[i]->next == nullptr);
      buckets[i] = prefixes_per_bucket[i]->start_block;
    } else {
      assert(total_block_array_entries > 0);
      assert(prefixes_per_bucket[i] != nullptr);
      buckets[i] = EncodeIndex(offset);
      block_array_buffer[offset] = num_blocks;
@@ -1765,7 +1765,6 @@ TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) {
    ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
    ASSERT_EQ(props.GetCacheBytesWrite(),
              table_options.block_cache->GetUsage());
    last_cache_bytes_read = props.GetCacheBytesRead();
  }
  // release the iterator so that the block cache can reset correctly.
  iter.reset();
@@ -34,6 +34,9 @@
// Sometimes it's desirable to build Google Test by compiling a single file.
// This file serves this purpose.

// Suppress clang analyzer warnings.
#ifndef __clang_analyzer__

// This line ensures that gtest.h can be compiled on its own, even
// when it's fused.
#include "gtest/gtest.h"
@@ -109,7 +112,6 @@
#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_


namespace testing {

// This helper class can be used to mock out Google Test failure reporting
@@ -10255,3 +10257,5 @@ const char* TypedTestCasePState::VerifyRegisteredTestNames(

} // namespace internal
} // namespace testing

#endif // __clang_analyzer__
@@ -230,6 +230,9 @@ class SanityTestBloomFilter : public SanityTest {

namespace {
bool RunSanityTests(const std::string& command, const std::string& path) {
  bool result = true;
// Suppress false positive clang static anaylzer warnings.
#ifndef __clang_analyzer__
  std::vector<SanityTest*> sanity_tests = {
      new SanityTestBasic(path),
      new SanityTestSpecialComparator(path),
@@ -248,7 +251,6 @@ bool RunSanityTests(const std::string& command, const std::string& path) {
  } else {
    fprintf(stderr, "Verifying...\n");
  }
  bool result = true;
  for (auto sanity_test : sanity_tests) {
    Status s;
    fprintf(stderr, "%s -- ", sanity_test->Name().c_str());
@@ -266,6 +268,7 @@ bool RunSanityTests(const std::string& command, const std::string& path) {

    delete sanity_test;
  }
#endif // __clang_analyzer__
  return result;
}
} // namespace
@@ -201,6 +201,7 @@ TEST_F(DynamicBloomTest, perf) {
  }
  ASSERT_EQ(count, num_keys);
  elapsed = timer.ElapsedNanos();
  assert(count > 0);
  fprintf(stderr, "standard bloom, avg query latency %" PRIu64 "\n",
          elapsed / count);
@@ -227,6 +228,7 @@ TEST_F(DynamicBloomTest, perf) {
  }

  elapsed = timer.ElapsedNanos();
  assert(count > 0);
  fprintf(stderr,
          "blocked bloom(enable locality), avg query latency %" PRIu64 "\n",
          elapsed / count);
@@ -756,6 +756,7 @@ Status RocksDBOptionsParser::VerifyBlockBasedTableFactory(
  if (base_tf == nullptr) {
    return Status::OK();
  }
  assert(file_tf != nullptr);

  const auto& base_opt = base_tf->table_options();
  const auto& file_opt = file_tf->table_options();
@@ -57,6 +57,8 @@ class IDChecker : public ThreadLocalPtr {

} // anonymous namespace

// Suppress false positive clang analyzer warnings.
#ifndef __clang_analyzer__
TEST_F(ThreadLocalTest, UniqueIdTest) {
  port::Mutex mu;
  port::CondVar cv(&mu);
@@ -103,6 +105,7 @@ TEST_F(ThreadLocalTest, UniqueIdTest) {
  // After exit, id sequence in queue:
  // 3, 1, 2, 0
}
#endif // __clang_analyzer__

TEST_F(ThreadLocalTest, SequentialReadWriteTest) {
  // global id list carries over 3, 1, 2, 0
@@ -226,6 +226,8 @@ class BaseDeltaIterator : public Iterator {
  bool BaseValid() const { return base_iterator_->Valid(); }
  bool DeltaValid() const { return delta_iterator_->Valid(); }
  void UpdateCurrent() {
// Suppress false positive clang analyzer warnings.
#ifndef __clang_analyzer__
    while (true) {
      WriteEntry delta_entry;
      if (DeltaValid()) {
@@ -275,6 +277,7 @@ class BaseDeltaIterator : public Iterator {
    }

    AssertInvariants();
#endif // __clang_analyzer__
  }

  bool forward_;