mirror of https://github.com/facebook/rocksdb.git
Fix clang analyzer errors
Summary: Fixing errors reported by the clang static analyzer.
* Removing some unused variables.
* Adding assertions to fix false positives reported by the clang analyzer.
* Adding the `__clang_analyzer__` macro to suppress false-positive warnings.

Test Plan: USE_CLANG=1 OPT=-g make analyze -j64

Reviewers: andrewkr, sdong

Reviewed By: sdong

Subscribers: andrewkr, dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D60549
This commit is contained in:
parent 61dbfbb6ce
commit 296545a2c7
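The two suppression techniques named in the summary follow a small, repeatable pattern. Below is a minimal, illustrative C++ sketch (ReadFirst and DoWork are made-up names, not RocksDB functions); it relies only on the fact that the clang static analyzer defines __clang_analyzer__ while it runs:

#include <cassert>

// Pattern 1: assert the invariant the analyzer cannot see, so a
// "null dereference" false positive disappears and the assumption is
// also checked in debug builds.
int ReadFirst(const int* values) {
  assert(values != nullptr);
  return values[0];
}

// Pattern 2: hide code that trips a false positive behind
// __clang_analyzer__, a macro defined only while the analyzer runs,
// so normal compilation is unaffected.
void DoWork() {
#ifndef __clang_analyzer__
  // ... code the analyzer misreports goes here ...
#endif  // __clang_analyzer__
}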
@@ -292,9 +292,6 @@ Status FlushJob::WriteLevel0Table(const autovector<MemTable*>& mems,
     }
     base->Unref();
 
-    // re-acquire the most current version
-    base = cfd_->current();
-
     // Note that if file_size is zero, the file has been deleted and
     // should not be added to the manifest.
     if (s.ok() && meta->fd.GetFileSize() > 0) {
@@ -20,6 +20,8 @@ void FlushScheduler::ScheduleFlush(ColumnFamilyData* cfd) {
   }
 #endif  // NDEBUG
   cfd->Ref();
+// Suppress false positive clang analyzer warnings.
+#ifndef __clang_analyzer__
   Node* node = new Node{cfd, head_.load(std::memory_order_relaxed)};
   while (!head_.compare_exchange_strong(
       node->next, node, std::memory_order_relaxed, std::memory_order_relaxed)) {
@@ -28,6 +30,7 @@ void FlushScheduler::ScheduleFlush(ColumnFamilyData* cfd) {
     // inter-thread synchronization, so we don't even need release
     // semantics for this CAS
   }
+#endif  // __clang_analyzer__
 }
 
 ColumnFamilyData* FlushScheduler::TakeNextColumnFamily() {
@@ -91,7 +91,7 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch,
   Slice fragment;
   while (true) {
     uint64_t physical_record_offset = end_of_buffer_offset_ - buffer_.size();
-    size_t drop_size;
+    size_t drop_size = 0;
     const unsigned int record_type = ReadPhysicalRecord(&fragment, &drop_size);
     switch (record_type) {
       case kFullType:
@@ -199,10 +199,11 @@ bool Reader::ReadRecord(Slice* record, std::string* scratch,
           scratch->clear();
           return false;
         }
-        if (record_type == kBadRecordLen)
+        if (record_type == kBadRecordLen) {
           ReportCorruption(drop_size, "bad record length");
-        else
+        } else {
           ReportCorruption(drop_size, "checksum mismatch");
+        }
         if (in_fragmented_record) {
           ReportCorruption(scratch->size(), "error in middle of record");
           in_fragmented_record = false;
@@ -26,6 +26,7 @@ Status MergeHelper::TimedFullMerge(const MergeOperator* merge_operator,
   assert(merge_operator != nullptr);
 
   if (operands.size() == 0) {
+    assert(value != nullptr && result != nullptr);
     result->assign(value->data(), value->size());
     return Status::OK();
   }
@@ -2746,7 +2746,7 @@ Status VersionSet::Recover(
     }
   }
 
-  for (auto builder : builders) {
+  for (auto& builder : builders) {
     delete builder.second;
   }
 
@@ -73,7 +73,7 @@ void BlockIter::Prev() {
     const CachedPrevEntry& current_prev_entry =
         prev_entries_[prev_entries_idx_];
 
-    const char* key_ptr = current_prev_entry.key_ptr;
+    const char* key_ptr = nullptr;
     if (current_prev_entry.key_ptr != nullptr) {
       // The key is not delta encoded and stored in the data block
       key_ptr = current_prev_entry.key_ptr;
@@ -1052,6 +1052,7 @@ InternalIterator* BlockBasedTable::NewIndexIterator(
     Status s;
     s = CreateIndexReader(&index_reader);
     if (s.ok()) {
+      assert(index_reader != nullptr);
      s = block_cache->Insert(key, index_reader, index_reader->usable_size(),
                              &DeleteCachedIndexEntry, &cache_handle);
     }
@@ -1062,6 +1063,9 @@ InternalIterator* BlockBasedTable::NewIndexIterator(
       RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usable_size);
       RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usable_size);
     } else {
+      if (index_reader != nullptr) {
+        delete index_reader;
+      }
       RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
       // make sure if something goes wrong, index_reader shall remain intact.
       if (input_iter != nullptr) {
@@ -1189,7 +1193,8 @@ InternalIterator* BlockBasedTable::NewDataBlockIterator(
   }
 
   InternalIterator* iter;
-  if (s.ok() && block.value != nullptr) {
+  if (s.ok()) {
+    assert(block.value != nullptr);
     iter = block.value->NewIterator(&rep->internal_comparator, input_iter);
     if (block.cache_handle != nullptr) {
       iter->RegisterCleanup(&ReleaseCachedEntry, block_cache,
@@ -1198,6 +1203,7 @@ InternalIterator* BlockBasedTable::NewDataBlockIterator(
       iter->RegisterCleanup(&DeleteHeldResource<Block>, block.value, nullptr);
     }
   } else {
+    assert(block.value == nullptr);
     if (input_iter != nullptr) {
       input_iter->SetStatus(s);
       iter = input_iter;
@@ -136,6 +136,7 @@ class BlockPrefixIndex::Builder {
         assert(prefixes_per_bucket[i]->next == nullptr);
         buckets[i] = prefixes_per_bucket[i]->start_block;
       } else {
+        assert(total_block_array_entries > 0);
         assert(prefixes_per_bucket[i] != nullptr);
         buckets[i] = EncodeIndex(offset);
         block_array_buffer[offset] = num_blocks;
@@ -1765,7 +1765,6 @@ TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) {
     ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
     ASSERT_EQ(props.GetCacheBytesWrite(),
               table_options.block_cache->GetUsage());
-    last_cache_bytes_read = props.GetCacheBytesRead();
   }
   // release the iterator so that the block cache can reset correctly.
   iter.reset();
@@ -34,6 +34,9 @@
 // Sometimes it's desirable to build Google Test by compiling a single file.
 // This file serves this purpose.
 
+// Suppress clang analyzer warnings.
+#ifndef __clang_analyzer__
+
 // This line ensures that gtest.h can be compiled on its own, even
 // when it's fused.
 #include "gtest/gtest.h"
@@ -109,7 +112,6 @@
 #ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
 #define GTEST_INCLUDE_GTEST_GTEST_SPI_H_
 
-
 namespace testing {
 
 // This helper class can be used to mock out Google Test failure reporting
@@ -10255,3 +10257,5 @@ const char* TypedTestCasePState::VerifyRegisteredTestNames(
 
 }  // namespace internal
 }  // namespace testing
+
+#endif  // __clang_analyzer__
@@ -230,6 +230,9 @@ class SanityTestBloomFilter : public SanityTest {
 
 namespace {
 bool RunSanityTests(const std::string& command, const std::string& path) {
+  bool result = true;
+// Suppress false positive clang static anaylzer warnings.
+#ifndef __clang_analyzer__
   std::vector<SanityTest*> sanity_tests = {
       new SanityTestBasic(path),
       new SanityTestSpecialComparator(path),
@@ -248,7 +251,6 @@ bool RunSanityTests(const std::string& command, const std::string& path) {
   } else {
     fprintf(stderr, "Verifying...\n");
   }
-  bool result = true;
   for (auto sanity_test : sanity_tests) {
     Status s;
     fprintf(stderr, "%s -- ", sanity_test->Name().c_str());
@@ -266,6 +268,7 @@ bool RunSanityTests(const std::string& command, const std::string& path) {
 
     delete sanity_test;
   }
+#endif  // __clang_analyzer__
   return result;
 }
 }  // namespace
@@ -201,6 +201,7 @@ TEST_F(DynamicBloomTest, perf) {
     }
     ASSERT_EQ(count, num_keys);
     elapsed = timer.ElapsedNanos();
+    assert(count > 0);
     fprintf(stderr, "standard bloom, avg query latency %" PRIu64 "\n",
             elapsed / count);
 
@@ -227,6 +228,7 @@ TEST_F(DynamicBloomTest, perf) {
     }
 
     elapsed = timer.ElapsedNanos();
+    assert(count > 0);
     fprintf(stderr,
             "blocked bloom(enable locality), avg query latency %" PRIu64 "\n",
             elapsed / count);
@@ -756,6 +756,7 @@ Status RocksDBOptionsParser::VerifyBlockBasedTableFactory(
   if (base_tf == nullptr) {
     return Status::OK();
   }
+  assert(file_tf != nullptr);
 
   const auto& base_opt = base_tf->table_options();
   const auto& file_opt = file_tf->table_options();
@@ -57,6 +57,8 @@ class IDChecker : public ThreadLocalPtr {
 
 }  // anonymous namespace
 
+// Suppress false positive clang analyzer warnings.
+#ifndef __clang_analyzer__
 TEST_F(ThreadLocalTest, UniqueIdTest) {
   port::Mutex mu;
   port::CondVar cv(&mu);
@@ -103,6 +105,7 @@ TEST_F(ThreadLocalTest, UniqueIdTest) {
   // After exit, id sequence in queue:
   // 3, 1, 2, 0
 }
+#endif  // __clang_analyzer__
 
 TEST_F(ThreadLocalTest, SequentialReadWriteTest) {
   // global id list carries over 3, 1, 2, 0
@@ -226,6 +226,8 @@ class BaseDeltaIterator : public Iterator {
   bool BaseValid() const { return base_iterator_->Valid(); }
   bool DeltaValid() const { return delta_iterator_->Valid(); }
   void UpdateCurrent() {
+// Suppress false positive clang analyzer warnings.
+#ifndef __clang_analyzer__
     while (true) {
       WriteEntry delta_entry;
       if (DeltaValid()) {
@@ -275,6 +277,7 @@ class BaseDeltaIterator : public Iterator {
     }
 
     AssertInvariants();
+#endif  // __clang_analyzer__
   }
 
   bool forward_;