Make clang-analyzer happy (#5821)

Summary:
clang-analyzer has uncovered a bunch of places where the code is relying
on pointers being valid and one case (in VectorIterator) where a moved-from
object is being used:

In file included from db/range_tombstone_fragmenter.cc:17:
./util/vector_iterator.h:23:18: warning: Method called on moved-from object 'keys' of type 'std::vector'
        current_(keys.size()) {
                 ^~~~~~~~~~~
1 warning generated.
utilities/persistent_cache/block_cache_tier_file.cc:39:14: warning: Called C++ object pointer is null
  Status s = env->NewRandomAccessFile(filepath, file, opt);
             ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
utilities/persistent_cache/block_cache_tier_file.cc:47:19: warning: Called C++ object pointer is null
  Status status = env_->GetFileSize(Path(), size);
                  ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
utilities/persistent_cache/block_cache_tier_file.cc:290:14: warning: Called C++ object pointer is null
  Status s = env_->FileExists(Path());
             ^~~~~~~~~~~~~~~~~~~~~~~~
utilities/persistent_cache/block_cache_tier_file.cc:363:35: warning: Called C++ object pointer is null
    CacheWriteBuffer* const buf = alloc_->Allocate();
                                  ^~~~~~~~~~~~~~~~~~
utilities/persistent_cache/block_cache_tier_file.cc:399:41: warning: Called C++ object pointer is null
  const uint64_t file_off = buf_doff_ * alloc_->BufferSize();
                                        ^~~~~~~~~~~~~~~~~~~~
utilities/persistent_cache/block_cache_tier_file.cc:463:33: warning: Called C++ object pointer is null
  size_t start_idx = lba.off_ / alloc_->BufferSize();
                                ^~~~~~~~~~~~~~~~~~~~
utilities/persistent_cache/block_cache_tier_file.cc:515:5: warning: Called C++ object pointer is null
    alloc_->Deallocate(bufs_[i]);
    ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
7 warnings generated.
ar: creating librocksdb_debug.a
utilities/memory/memory_test.cc:68:25: warning: Called C++ object pointer is null
      cache_set->insert(db->GetDBOptions().row_cache.get());
                        ^~~~~~~~~~~~~~~~~~
1 warning generated.

The patch fixes these by adding assertions and explicitly passing in zero
when initializing VectorIterator::current_ (which preserves the existing
behavior).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5821

Test Plan: Ran make check and make analyze to make sure the warnings have disappeared.

Differential Revision: D17455949

Pulled By: ltamasi

fbshipit-source-id: 363619618ea649a0674287f9f3b3393e390571ee
This commit is contained in:
Levi Tamasi 2019-09-18 15:22:46 -07:00 committed by Facebook Github Bot
parent 2389aa2da9
commit 2cbb61eadb
3 changed files with 15 additions and 1 deletions

View File

@@ -20,7 +20,7 @@ class VectorIterator : public InternalIterator {
: keys_(std::move(keys)),
values_(std::move(values)),
indexed_cmp_(icmp, &keys_),
current_(keys.size()) {
current_(0) {
assert(keys_.size() == values_.size());
indices_.reserve(keys_.size());

View File

@@ -57,6 +57,8 @@ class MemoryTest : public testing::Test {
cache_set->clear();
for (auto* db : dbs) {
assert(db);
// Cache from DBImpl
StackableDB* sdb = dynamic_cast<StackableDB*>(db);
DBImpl* db_impl = dynamic_cast<DBImpl*>(sdb ? sdb->GetBaseDB() : db);

View File

@@ -34,6 +34,8 @@ Status NewWritableCacheFile(Env* const env, const std::string& filepath,
Status NewRandomAccessCacheFile(Env* const env, const std::string& filepath,
std::unique_ptr<RandomAccessFile>* file,
const bool use_direct_reads = true) {
assert(env);
EnvOptions opt;
opt.use_direct_reads = use_direct_reads;
Status s = env->NewRandomAccessFile(filepath, file, opt);
@@ -44,6 +46,8 @@ Status NewRandomAccessCacheFile(Env* const env, const std::string& filepath,
// BlockCacheFile
//
Status BlockCacheFile::Delete(uint64_t* size) {
assert(env_);
Status status = env_->GetFileSize(Path(), size);
if (!status.ok()) {
return status;
@@ -287,6 +291,8 @@ bool WriteableCacheFile::Create(const bool /*enable_direct_writes*/,
ROCKS_LOG_DEBUG(log_, "Creating new cache %s (max size is %d B)",
Path().c_str(), max_size_);
assert(env_);
Status s = env_->FileExists(Path());
if (s.ok()) {
ROCKS_LOG_WARN(log_, "File %s already exists. %s", Path().c_str(),
@@ -359,6 +365,8 @@ bool WriteableCacheFile::ExpandBuffer(const size_t size) {
// expand the buffer until there is enough space to write `size` bytes
assert(free < size);
assert(alloc_);
while (free < size) {
CacheWriteBuffer* const buf = alloc_->Allocate();
if (!buf) {
@@ -394,6 +402,7 @@ void WriteableCacheFile::DispatchBuffer() {
assert(eof_ || buf_doff_ < buf_woff_);
assert(buf_doff_ < bufs_.size());
assert(file_);
assert(alloc_);
auto* buf = bufs_[buf_doff_];
const uint64_t file_off = buf_doff_ * alloc_->BufferSize();
@@ -453,6 +462,7 @@ bool WriteableCacheFile::ReadBuffer(const LBA& lba, char* data) {
rwlock_.AssertHeld();
assert(lba.off_ < disk_woff_);
assert(alloc_);
// we read from the buffers like reading from a flat file. The list of buffers
// are treated as contiguous stream of data
@@ -511,6 +521,8 @@ void WriteableCacheFile::Close() {
}
void WriteableCacheFile::ClearBuffers() {
assert(alloc_);
for (size_t i = 0; i < bufs_.size(); ++i) {
alloc_->Deallocate(bufs_[i]);
}