From a08d6f18f05c188a36d54c5f8209ef50fd082903 Mon Sep 17 00:00:00 2001
From: mrambacher
Date: Wed, 16 Sep 2020 15:45:30 -0700
Subject: [PATCH] Add more tests to ASSERT_STATUS_CHECKED (#7367)

Summary:
db_options_test
options_file_test
auto_roll_logger_test
options_util_test
persistent_cache_test

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7367

Reviewed By: jay-zhuang

Differential Revision: D23712520

Pulled By: zhichao-cao

fbshipit-source-id: 99b331e357f5d6a6aabee89d1bd933002cbb3908
---
 Makefile                                      |  5 ++
 db/builder.cc                                 | 11 ++-
 db/compaction/compaction_job.cc               | 19 +++--
 db/db_impl/db_impl.cc                         | 14 ++--
 db/db_impl/db_impl_write.cc                   |  4 +
 db/db_options_test.cc                         | 77 ++++++++++---------
 db/db_test_util.cc                            |  2 +-
 db/event_helpers.cc                           |  2 +
 db/options_file_test.cc                       |  4 +-
 include/rocksdb/status.h                      |  2 +-
 logging/auto_roll_logger.cc                   |  9 ++-
 logging/auto_roll_logger.h                    |  3 +-
 logging/auto_roll_logger_test.cc              |  4 +-
 table/persistent_cache_helper.cc              |  9 ++-
 utilities/options/options_util_test.cc        |  6 +-
 utilities/persistent_cache/block_cache_tier.h |  2 +-
 .../persistent_cache/persistent_cache_test.cc |  2 +-
 .../persistent_cache/persistent_cache_test.h  |  6 +-
 .../persistent_cache/volatile_tier_impl.cc    |  6 +-
 19 files changed, 108 insertions(+), 79 deletions(-)

diff --git a/Makefile b/Makefile
index 0eaf9b408b..8917dd8021 100644
--- a/Makefile
+++ b/Makefile
@@ -581,6 +581,8 @@ ifdef ASSERT_STATUS_CHECKED
   coding_test \
   crc32c_test \
   dbformat_test \
+  db_options_test \
+  options_file_test \
   defer_test \
   filename_test \
   dynamic_bloom_test \
@@ -588,6 +590,7 @@ ifdef ASSERT_STATUS_CHECKED
   env_test \
   env_logger_test \
   event_logger_test \
+  auto_roll_logger_test \
   file_indexer_test \
   hash_table_test \
   hash_test \
@@ -616,6 +619,8 @@ ifdef ASSERT_STATUS_CHECKED
   filelock_test \
   timer_queue_test \
   timer_test \
+  options_util_test \
+  persistent_cache_test \
   util_merge_operators_test \
   block_cache_trace_analyzer_test \
   block_cache_tracer_test \
diff --git a/db/builder.cc b/db/builder.cc
index a8bfff2647..9e530bfcaa 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -98,7 +98,6 @@ Status BuildTable(
   const size_t kReportFlushIOStatsEvery = 1048576;
   uint64_t paranoid_hash = 0;
   Status s;
-  IOStatus io_s;
   meta->fd.file_size = 0;
   iter->SeekToFirst();
   std::unique_ptr<CompactionRangeDelAggregator> range_del_agg(
@@ -131,7 +130,7 @@ Status BuildTable(
       bool use_direct_writes = file_options.use_direct_writes;
       TEST_SYNC_POINT_CALLBACK("BuildTable:create_file", &use_direct_writes);
 #endif  // !NDEBUG
-      io_s = NewWritableFile(fs, fname, &file, file_options);
+      IOStatus io_s = NewWritableFile(fs, fname, &file, file_options);
       s = io_s;
       if (io_status->ok()) {
         *io_status = io_s;
@@ -229,9 +228,8 @@ Status BuildTable(
     } else {
       s = builder->Finish();
     }
-    io_s = builder->io_status();
     if (io_status->ok()) {
-      *io_status = io_s;
+      *io_status = builder->io_status();
     }
 
     if (s.ok() && !empty) {
@@ -313,17 +311,18 @@ Status BuildTable(
 
   if (!s.ok() || meta->fd.GetFileSize() == 0) {
     constexpr IODebugContext* dbg = nullptr;
 
-    fs->DeleteFile(fname, IOOptions(), dbg);
+    Status ignored = fs->DeleteFile(fname, IOOptions(), dbg);
 
     assert(blob_file_additions || blob_file_paths.empty());
 
     if (blob_file_additions) {
       for (const std::string& blob_file_path : blob_file_paths) {
-        fs->DeleteFile(blob_file_path, IOOptions(), dbg);
+        ignored = fs->DeleteFile(blob_file_path, IOOptions(), dbg);
       }
 
       blob_file_additions->clear();
     }
+    ignored.PermitUncheckedError();
   }
 
   if (meta->fd.GetFileSize() == 0) {
diff --git a/db/compaction/compaction_job.cc b/db/compaction/compaction_job.cc
index 672d40b96b..9168e47f7d 100644
--- a/db/compaction/compaction_job.cc
+++ b/db/compaction/compaction_job.cc
@@ -978,13 +978,11 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
     // going to be 1.2MB and max_output_file_size = 1MB, prefer to have 0.6MB
     // and 0.6MB instead of 1MB and 0.2MB)
     bool output_file_ended = false;
-    Status input_status;
     if (sub_compact->compaction->output_level() != 0 &&
         sub_compact->current_output_file_size >=
            sub_compact->compaction->max_output_file_size()) {
       // (1) this key terminates the file. For historical reasons, the iterator
       // status before advancing will be given to FinishCompactionOutputFile().
-      input_status = input->status();
       output_file_ended = true;
     }
     TEST_SYNC_POINT_CALLBACK(
@@ -1011,7 +1009,6 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
         // (2) this key belongs to the next file. For historical reasons, the
         // iterator status after advancing will be given to
         // FinishCompactionOutputFile().
-        input_status = input->status();
         output_file_ended = true;
       }
     }
@@ -1021,9 +1018,9 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
         next_key = &c_iter->key();
       }
       CompactionIterationStats range_del_out_stats;
-      status =
-          FinishCompactionOutputFile(input_status, sub_compact, &range_del_agg,
-                                     &range_del_out_stats, next_key);
+      status = FinishCompactionOutputFile(input->status(), sub_compact,
+                                          &range_del_agg, &range_del_out_stats,
+                                          next_key);
       RecordDroppedKeys(range_del_out_stats,
                         &sub_compact->compaction_job_stats);
     }
@@ -1079,7 +1076,7 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
     CompactionIterationStats range_del_out_stats;
     Status s = FinishCompactionOutputFile(status, sub_compact, &range_del_agg,
                                           &range_del_out_stats);
-    if (status.ok()) {
+    if (!s.ok() && status.ok()) {
       status = s;
     }
     RecordDroppedKeys(range_del_out_stats, &sub_compact->compaction_job_stats);
@@ -1374,6 +1371,9 @@ Status CompactionJob::FinishCompactionOutputFile(
   }
   if (sub_compact->io_status.ok()) {
     sub_compact->io_status = io_s;
+    // Since this error is really a copy of the
+    // "normal" status, it does not also need to be checked
+    sub_compact->io_status.PermitUncheckedError();
   }
   sub_compact->outfile.reset();
 
@@ -1432,7 +1432,10 @@ Status CompactionJob::FinishCompactionOutputFile(
   auto sfm =
       static_cast<SstFileManagerImpl*>(db_options_.sst_file_manager.get());
   if (sfm && meta != nullptr && meta->fd.GetPathId() == 0) {
-    sfm->OnAddFile(fname);
+    Status add_s = sfm->OnAddFile(fname);
+    if (!add_s.ok() && s.ok()) {
+      s = add_s;
+    }
     if (sfm->IsMaxAllowedSpaceReached()) {
       // TODO(ajkr): should we return OK() if max space was reached by the final
       // compaction output file (similarly to how flush works when full)?
diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc
index 87fdf9f920..20d218b793 100644
--- a/db/db_impl/db_impl.cc
+++ b/db/db_impl/db_impl.cc
@@ -466,7 +466,8 @@ void DBImpl::CancelAllBackgroundWork(bool wait) {
       if (!cfd->IsDropped() && cfd->initialized() && !cfd->mem()->IsEmpty()) {
         cfd->Ref();
         mutex_.Unlock();
-        FlushMemTable(cfd, FlushOptions(), FlushReason::kShutDown);
+        Status s = FlushMemTable(cfd, FlushOptions(), FlushReason::kShutDown);
+        s.PermitUncheckedError();  //**TODO: What to do on error?
         mutex_.Lock();
         cfd->UnrefAndTryDelete();
       }
@@ -969,8 +970,8 @@ Status DBImpl::SetOptions(
       new_options = *cfd->GetLatestMutableCFOptions();
       // Append new version to recompute compaction score.
       VersionEdit dummy_edit;
-      versions_->LogAndApply(cfd, new_options, &dummy_edit, &mutex_,
-                             directories_.GetDbDir());
+      s = versions_->LogAndApply(cfd, new_options, &dummy_edit, &mutex_,
+                                 directories_.GetDbDir());
       // Trigger possible flush/compactions. This has to be before we persist
       // options to file, otherwise there will be a deadlock with writer
       // thread.
@@ -1020,7 +1021,7 @@ Status DBImpl::SetDBOptions(
 
   MutableDBOptions new_options;
   Status s;
-  Status persist_options_status;
+  Status persist_options_status = Status::OK();
   bool wal_changed = false;
   WriteContext write_context;
   {
@@ -1125,6 +1126,10 @@ Status DBImpl::SetDBOptions(
       persist_options_status = WriteOptionsFile(
           false /*need_mutex_lock*/, false /*need_enter_write_thread*/);
       write_thread_.ExitUnbatched(&w);
+    } else {
+      // To get here, we must have had invalid options and will not attempt to
+      // persist the options, which means the status is "OK/Uninitialized".
+      persist_options_status.PermitUncheckedError();
     }
   }
   ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions(), inputs:");
@@ -2480,7 +2485,6 @@ Status DBImpl::CreateColumnFamilyImpl(const ColumnFamilyOptions& cf_options,
                                       const std::string& column_family_name,
                                       ColumnFamilyHandle** handle) {
   Status s;
-  Status persist_options_status;
   *handle = nullptr;
 
   DBOptions db_options =
diff --git a/db/db_impl/db_impl_write.cc b/db/db_impl/db_impl_write.cc
index aeefd4195e..633090be9c 100644
--- a/db/db_impl/db_impl_write.cc
+++ b/db/db_impl/db_impl_write.cc
@@ -1805,6 +1805,10 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
   NotifyOnMemTableSealed(cfd, memtable_info);
   mutex_.Lock();
 #endif  // ROCKSDB_LITE
+  // It is possible that we got here without checking the value of io_s, but
+  // that is okay.  If we did, it most likely means that s was already an
+  // error.  In any case, ignore any unchecked error for io_s here.
+  io_s.PermitUncheckedError();
   return s;
 }
 
diff --git a/db/db_options_test.cc b/db/db_options_test.cc
index f167037803..ec538342da 100644
--- a/db/db_options_test.cc
+++ b/db/db_options_test.cc
@@ -37,9 +37,10 @@ class DBOptionsTest : public DBTestBase {
 
     ConfigOptions config_options;
     config_options.delimiter = "; ";
 
-    GetStringFromMutableDBOptions(config_options, MutableDBOptions(options),
-                                  &options_str);
-    StringToMap(options_str, &mutable_map);
+    EXPECT_OK(GetStringFromMutableDBOptions(
+        config_options, MutableDBOptions(options), &options_str));
+    EXPECT_OK(StringToMap(options_str, &mutable_map));
+
     return mutable_map;
   }
@@ -50,9 +51,9 @@ class DBOptionsTest : public DBTestBase {
     config_options.delimiter = "; ";
 
     std::unordered_map<std::string, std::string> mutable_map;
-    GetStringFromMutableCFOptions(config_options, MutableCFOptions(options),
-                                  &options_str);
-    StringToMap(options_str, &mutable_map);
+    EXPECT_OK(GetStringFromMutableCFOptions(
+        config_options, MutableCFOptions(options), &options_str));
+    EXPECT_OK(StringToMap(options_str, &mutable_map));
 
     return mutable_map;
   }
@@ -134,7 +135,7 @@ TEST_F(DBOptionsTest, SetBytesPerSync) {
   WriteOptions write_opts;
   // should sync approximately 40MB/1MB ~= 40 times.
   for (i = 0; i < 40; i++) {
-    Put(Key(i), kValue, write_opts);
+    ASSERT_OK(Put(Key(i), kValue, write_opts));
   }
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
   ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@@ -150,7 +151,7 @@ TEST_F(DBOptionsTest, SetBytesPerSync) {
   // should sync approximately 40MB*2/8MB ~= 10 times.
   // data will be 40*2MB because of previous Puts too.
   for (i = 0; i < 40; i++) {
-    Put(Key(i), kValue, write_opts);
+    ASSERT_OK(Put(Key(i), kValue, write_opts));
   }
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
   ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@@ -181,7 +182,7 @@ TEST_F(DBOptionsTest, SetWalBytesPerSync) {
   const std::string kValue(kValueSize, 'v');
   int i = 0;
   for (; i < 10; i++) {
-    Put(Key(i), kValue);
+    ASSERT_OK(Put(Key(i), kValue));
   }
   // Do not flush. If we flush here, SwitchWAL will reuse old WAL file since its
   // empty and will not get the new wal_bytes_per_sync value.
@@ -192,7 +193,7 @@ TEST_F(DBOptionsTest, SetWalBytesPerSync) {
   counter = 0;
   i = 0;
   for (; i < 10; i++) {
-    Put(Key(i), kValue);
+    ASSERT_OK(Put(Key(i), kValue));
   }
   ASSERT_GT(counter, 0);
   ASSERT_GT(low_bytes_per_sync, 0);
@@ -227,9 +228,9 @@ TEST_F(DBOptionsTest, WritableFileMaxBufferSize) {
   for (; i < 3; i++) {
     ASSERT_OK(Put("foo", ToString(i)));
     ASSERT_OK(Put("bar", ToString(i)));
-    Flush();
+    ASSERT_OK(Flush());
   }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(unmatch_cnt, 0);
   ASSERT_GE(match_cnt, 11);
@@ -245,9 +246,9 @@ TEST_F(DBOptionsTest, WritableFileMaxBufferSize) {
   for (; i < 3; i++) {
     ASSERT_OK(Put("foo", ToString(i)));
     ASSERT_OK(Put("bar", ToString(i)));
-    Flush();
+    ASSERT_OK(Flush());
   }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(unmatch_cnt, 0);
   ASSERT_GE(match_cnt, 11);
 }
@@ -283,14 +284,14 @@ TEST_F(DBOptionsTest, EnableAutoCompactionAndTriggerStall) {
     DestroyAndReopen(options);
     int i = 0;
     for (; i < 1024; i++) {
-      Put(Key(i), kValue);
+      ASSERT_OK(Put(Key(i), kValue));
     }
-    Flush();
+    ASSERT_OK(Flush());
     for (; i < 1024 * 2; i++) {
-      Put(Key(i), kValue);
+      ASSERT_OK(Put(Key(i), kValue));
     }
-    Flush();
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(Flush());
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
     ASSERT_EQ(2, NumTableFilesAtLevel(0));
     uint64_t l0_size = SizeAtLevel(0);
@@ -312,7 +313,7 @@ TEST_F(DBOptionsTest, EnableAutoCompactionAndTriggerStall) {
       break;
     }
     Reopen(options);
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
     ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
     ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());
 
@@ -359,7 +360,7 @@ TEST_F(DBOptionsTest, EnableAutoCompactionAndTriggerStall) {
     TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:3");
 
     // Background compaction executed.
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
     ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped());
     ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay());
   }
@@ -376,12 +377,12 @@ TEST_F(DBOptionsTest, SetOptionsMayTriggerCompaction) {
     // Need to insert two keys to avoid trivial move.
     ASSERT_OK(Put("foo", ToString(i)));
     ASSERT_OK(Put("bar", ToString(i)));
-    Flush();
+    ASSERT_OK(Flush());
   }
   ASSERT_EQ("3", FilesPerLevel());
   ASSERT_OK(
       dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "3"}}));
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ("0,1", FilesPerLevel());
 }
@@ -503,7 +504,7 @@ TEST_F(DBOptionsTest, MaxTotalWalSizeChange) {
   ASSERT_OK(dbfull()->SetDBOptions({{"max_total_wal_size", "10"}}));
 
   for (size_t cf = 0; cf < handles_.size(); ++cf) {
-    dbfull()->TEST_WaitForFlushMemTable(handles_[cf]);
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf]));
     ASSERT_EQ("1", FilesPerLevel(static_cast<int>(cf)));
   }
 }
@@ -716,13 +717,13 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
 
   // No files should be compacted as ttl is set to 1 hour.
   ASSERT_EQ(dbfull()->GetOptions().ttl, 3600);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ(NumTableFilesAtLevel(0), 10);
 
   // Set ttl to 1 minute. So all files should get deleted.
   ASSERT_OK(dbfull()->SetOptions({{"ttl", "60"}}));
   ASSERT_EQ(dbfull()->GetOptions().ttl, 60);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(NumTableFilesAtLevel(0), 0);
 
@@ -738,7 +739,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
     for (int j = 0; j < 10; j++) {
      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
     }
-    Flush();
+    ASSERT_OK(Flush());
   }
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(NumTableFilesAtLevel(0), 10);
@@ -746,7 +747,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
   // No files should be compacted as max_table_files_size is set to 500 KB.
   ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
             500 << 10);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ(NumTableFilesAtLevel(0), 10);
 
   // Set max_table_files_size to 12 KB. So only 1 file should remain now.
@@ -754,7 +755,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
       {{"compaction_options_fifo", "{max_table_files_size=12288;}"}}));
   ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.max_table_files_size,
             12 << 10);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(NumTableFilesAtLevel(0), 1);
 
@@ -770,7 +771,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
     for (int j = 0; j < 10; j++) {
       ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
     }
-    Flush();
+    ASSERT_OK(Flush());
   }
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(NumTableFilesAtLevel(0), 10);
@@ -779,7 +780,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
   // allow_compaction is false
   ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
             false);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ(NumTableFilesAtLevel(0), 10);
 
   // Set allow_compaction to true. So number of files should be between 1 and 5.
@@ -787,7 +788,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
       {{"compaction_options_fifo", "{allow_compaction=true;}"}}));
   ASSERT_EQ(dbfull()->GetOptions().compaction_options_fifo.allow_compaction,
             true);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_GE(NumTableFilesAtLevel(0), 1);
   ASSERT_LE(NumTableFilesAtLevel(0), 5);
@@ -808,14 +809,14 @@ TEST_F(DBOptionsTest, CompactionReadaheadSizeChange) {
   ASSERT_OK(dbfull()->SetDBOptions({{"compaction_readahead_size", "256"}}));
   ASSERT_EQ(256, dbfull()->GetDBOptions().compaction_readahead_size);
   for (int i = 0; i < 1024; i++) {
-    Put(Key(i), kValue);
+    ASSERT_OK(Put(Key(i), kValue));
   }
-  Flush();
+  ASSERT_OK(Flush());
   for (int i = 0; i < 1024 * 2; i++) {
-    Put(Key(i), kValue);
+    ASSERT_OK(Put(Key(i), kValue));
   }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(256, env_->compaction_readahead_size_);
   Close();
 }
@@ -834,7 +835,7 @@ TEST_F(DBOptionsTest, FIFOTtlBackwardCompatible) {
     for (int j = 0; j < 10; j++) {
       ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
     }
-    Flush();
+    ASSERT_OK(Flush());
   }
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(NumTableFilesAtLevel(0), 10);
diff --git a/db/db_test_util.cc b/db/db_test_util.cc
index fae4be18ac..8effdb5f7d 100644
--- a/db/db_test_util.cc
+++ b/db/db_test_util.cc
@@ -666,7 +666,7 @@ void DBTestBase::Reopen(const Options& options) {
 
 void DBTestBase::Close() {
   for (auto h : handles_) {
-    db_->DestroyColumnFamilyHandle(h);
+    EXPECT_OK(db_->DestroyColumnFamilyHandle(h));
   }
   handles_.clear();
   delete db_;
diff --git a/db/event_helpers.cc b/db/event_helpers.cc
index 0b92d569a9..968f18f080 100644
--- a/db/event_helpers.cc
+++ b/db/event_helpers.cc
@@ -162,6 +162,7 @@ void EventHelpers::LogAndNotifyTableFileCreationFinished(
     for (auto& listener : listeners) {
       listener->OnTableFileCreated(info);
     }
+    info.status.PermitUncheckedError();
 #else
     (void)listeners;
     (void)db_name;
@@ -199,6 +200,7 @@ void EventHelpers::LogAndNotifyTableFileDeletion(
   for (auto& listener : listeners) {
     listener->OnTableFileDeleted(info);
   }
+  info.status.PermitUncheckedError();
 #else
   (void)file_path;
   (void)dbname;
diff --git a/db/options_file_test.cc b/db/options_file_test.cc
index 00427de8ab..7ad8464277 100644
--- a/db/options_file_test.cc
+++ b/db/options_file_test.cc
@@ -25,7 +25,7 @@ void UpdateOptionsFiles(DB* db,
                         std::unordered_set<std::string>* filename_history,
                         int* options_files_count) {
   std::vector<std::string> filenames;
-  db->GetEnv()->GetChildren(db->GetName(), &filenames);
+  EXPECT_OK(db->GetEnv()->GetChildren(db->GetName(), &filenames));
   uint64_t number;
   FileType type;
   *options_files_count = 0;
@@ -42,7 +42,7 @@ void VerifyOptionsFileName(
     DB* db, const std::unordered_set<std::string>& past_filenames) {
   std::vector<std::string> filenames;
   std::unordered_set<std::string> current_filenames;
-  db->GetEnv()->GetChildren(db->GetName(), &filenames);
+  EXPECT_OK(db->GetEnv()->GetChildren(db->GetName(), &filenames));
   uint64_t number;
   FileType type;
   for (auto filename : filenames) {
diff --git a/include/rocksdb/status.h b/include/rocksdb/status.h
index b9151471a2..dc76c7ce71 100644
--- a/include/rocksdb/status.h
+++ b/include/rocksdb/status.h
@@ -38,7 +38,7 @@ class Status {
   ~Status() {
 #ifdef ROCKSDB_ASSERT_STATUS_CHECKED
     if (!checked_) {
-      fprintf(stderr, "Failed to check Status\n");
+      fprintf(stderr, "Failed to check Status %p\n", this);
       port::PrintStack();
       abort();
     }
   }
diff --git a/logging/auto_roll_logger.cc b/logging/auto_roll_logger.cc
index adfe9cc895..3533724ba3 100644
--- a/logging/auto_roll_logger.cc
+++ b/logging/auto_roll_logger.cc
@@ -45,8 +45,8 @@ AutoRollLogger::AutoRollLogger(Env* env, const std::string& dbname,
     RollLogFile();
   }
   GetExistingFiles();
-  ResetLogger();
-  if (status_.ok()) {
+  s = ResetLogger();
+  if (s.ok() && status_.ok()) {
     status_ = TrimOldLogFiles();
   }
 }
@@ -86,7 +86,10 @@ void AutoRollLogger::RollLogFile() {
                                  dbname_, now, db_absolute_path_, db_log_dir_);
     now++;
   } while (env_->FileExists(old_fname).ok());
-  env_->RenameFile(log_fname_, old_fname);
+  Status s = env_->RenameFile(log_fname_, old_fname);
+  if (!s.ok()) {
+    // What should we do on error?
+  }
   old_log_files_.push(old_fname);
 }
 
diff --git a/logging/auto_roll_logger.h b/logging/auto_roll_logger.h
index 5d8c414138..2b63fc9d9c 100644
--- a/logging/auto_roll_logger.h
+++ b/logging/auto_roll_logger.h
@@ -69,8 +69,9 @@ class AutoRollLogger : public Logger {
 
   virtual ~AutoRollLogger() {
     if (logger_ && !closed_) {
-      logger_->Close();
+      logger_->Close().PermitUncheckedError();
     }
+    status_.PermitUncheckedError();
   }
 
   using Logger::GetInfoLogLevel;
diff --git a/logging/auto_roll_logger_test.cc b/logging/auto_roll_logger_test.cc
index 6e72fb90ba..520d9de235 100644
--- a/logging/auto_roll_logger_test.cc
+++ b/logging/auto_roll_logger_test.cc
@@ -71,7 +71,7 @@ class AutoRollLoggerTest : public testing::Test {
     std::string deleteCmd = "rm -rf " + kTestDir;
 #endif
     ASSERT_TRUE(system(deleteCmd.c_str()) == 0);
-    Env::Default()->CreateDir(kTestDir);
+    ASSERT_OK(Env::Default()->CreateDir(kTestDir));
   }
 
   void RollLogFileBySizeTest(AutoRollLogger* logger, size_t log_max_size,
@@ -567,7 +567,7 @@ static std::vector<std::string> GetOldFileNames(const std::string& path) {
   const std::string fname = path.substr(path.find_last_of("/") + 1);
 
   std::vector<std::string> children;
-  Env::Default()->GetChildren(dirname, &children);
+  EXPECT_OK(Env::Default()->GetChildren(dirname, &children));
 
   // We know that the old log files are named [path]
   // Return all entities that match the pattern
diff --git a/table/persistent_cache_helper.cc b/table/persistent_cache_helper.cc
index 8797c9b9b0..21d3bf734c 100644
--- a/table/persistent_cache_helper.cc
+++ b/table/persistent_cache_helper.cc
@@ -21,7 +21,8 @@ void PersistentCacheHelper::InsertRawPage(
                                 cache_options.key_prefix.size(), handle,
                                 cache_key);
   // insert content to cache
-  cache_options.persistent_cache->Insert(key, data, size);
+  cache_options.persistent_cache->Insert(key, data, size)
+      .PermitUncheckedError();
 }
 
 void PersistentCacheHelper::InsertUncompressedPage(
@@ -39,8 +40,10 @@ void PersistentCacheHelper::InsertUncompressedPage(
                                 cache_options.key_prefix.size(), handle,
                                 cache_key);
   // insert block contents to page cache
-  cache_options.persistent_cache->Insert(key, contents.data.data(),
-                                         contents.data.size());
+  cache_options.persistent_cache
+      ->Insert(key, contents.data.data(), contents.data.size())
+      .PermitUncheckedError();
+  ;
 }
 
 Status PersistentCacheHelper::LookupRawPage(
diff --git a/utilities/options/options_util_test.cc b/utilities/options/options_util_test.cc
index 9bd736c1f9..515eacc56e 100644
--- a/utilities/options/options_util_test.cc
+++ b/utilities/options/options_util_test.cc
@@ -58,7 +58,8 @@ TEST_F(OptionsUtilTest, SaveAndLoad) {
   }
 
   const std::string kFileName = "OPTIONS-123456";
-  PersistRocksDBOptions(db_opt, cf_names, cf_opts, kFileName, fs_.get());
+  ASSERT_OK(
+      PersistRocksDBOptions(db_opt, cf_names, cf_opts, kFileName, fs_.get()));
 
   DBOptions loaded_db_opt;
   std::vector<ColumnFamilyDescriptor> loaded_cf_descs;
@@ -120,7 +121,8 @@ TEST_F(OptionsUtilTest, SaveAndLoadWithCacheCheck) {
   cf_names.push_back("cf_plain_table_sample");
   // Saving DB in file
   const std::string kFileName = "OPTIONS-LOAD_CACHE_123456";
-  PersistRocksDBOptions(db_opt, cf_names, cf_opts, kFileName, fs_.get());
+  ASSERT_OK(
+      PersistRocksDBOptions(db_opt, cf_names, cf_opts, kFileName, fs_.get()));
 
   DBOptions loaded_db_opt;
   std::vector<ColumnFamilyDescriptor> loaded_cf_descs;
diff --git a/utilities/persistent_cache/block_cache_tier.h b/utilities/persistent_cache/block_cache_tier.h
index ae0c13fdbe..13b04f95a7 100644
--- a/utilities/persistent_cache/block_cache_tier.h
+++ b/utilities/persistent_cache/block_cache_tier.h
@@ -53,7 +53,7 @@ class BlockCacheTier : public PersistentCacheTier {
 
   virtual ~BlockCacheTier() {
     // Close is re-entrant so we can call close even if it is already closed
-    Close();
+    Close().PermitUncheckedError();
     assert(!insert_th_.joinable());
   }
 
diff --git a/utilities/persistent_cache/persistent_cache_test.cc b/utilities/persistent_cache/persistent_cache_test.cc
index 52fb66af59..5dc090d51d 100644
--- a/utilities/persistent_cache/persistent_cache_test.cc
+++ b/utilities/persistent_cache/persistent_cache_test.cc
@@ -433,7 +433,7 @@ void PersistentCacheDBTest::RunTest(
     options.create_if_missing = true;
     DestroyAndReopen(options);
 
-    pcache->Close();
+    ASSERT_OK(pcache->Close());
   }
 }
 
diff --git a/utilities/persistent_cache/persistent_cache_test.h b/utilities/persistent_cache/persistent_cache_test.h
index 394c649643..f13155ed6e 100644
--- a/utilities/persistent_cache/persistent_cache_test.h
+++ b/utilities/persistent_cache/persistent_cache_test.h
@@ -183,7 +183,7 @@ class PersistentCacheTierTest : public testing::Test {
     ASSERT_EQ(stats_verify_hits_, max_keys);
     ASSERT_EQ(stats_verify_missed_, 0);
 
-    cache_->Close();
+    ASSERT_OK(cache_->Close());
     cache_.reset();
   }
 
@@ -194,7 +194,7 @@ class PersistentCacheTierTest : public testing::Test {
     ASSERT_LT(stats_verify_hits_, max_keys);
     ASSERT_GT(stats_verify_missed_, 0);
 
-    cache_->Close();
+    ASSERT_OK(cache_->Close());
     cache_.reset();
   }
 
@@ -206,7 +206,7 @@ class PersistentCacheTierTest : public testing::Test {
     ASSERT_GT(stats_verify_hits_, 0);
     ASSERT_GT(stats_verify_missed_, 0);
 
-    cache_->Close();
+    ASSERT_OK(cache_->Close());
     cache_.reset();
   }
 
diff --git a/utilities/persistent_cache/volatile_tier_impl.cc b/utilities/persistent_cache/volatile_tier_impl.cc
index ee63f828cb..45d2830aa8 100644
--- a/utilities/persistent_cache/volatile_tier_impl.cc
+++ b/utilities/persistent_cache/volatile_tier_impl.cc
@@ -122,8 +122,10 @@ bool VolatileCacheTier::Evict() {
 
     // push the evicted object to the next level
     if (next_tier()) {
-      next_tier()->Insert(Slice(edata->key), edata->value.c_str(),
-                          edata->value.size());
+      // TODO: Should the insert error be ignored?
+      Status s = next_tier()->Insert(Slice(edata->key), edata->value.c_str(),
+                                     edata->value.size());
+      s.PermitUncheckedError();
     }
 
     // adjust size and destroy data
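
Context for the pattern applied throughout this patch: when RocksDB is built with ASSERT_STATUS_CHECKED (see the include/rocksdb/status.h hunk above), a Status whose value is never examined aborts the process in its destructor. Call sites must therefore either check the result or mark it deliberately ignored with PermitUncheckedError(). The standalone program below is not part of the commit; it is a minimal sketch of the two remedies, and the database path and main() harness are hypothetical. The RocksDB calls themselves (DB::Open, Put, Get, PermitUncheckedError) are real public API.

// Minimal sketch (not from this commit) of the idiom the patch enforces.
// Assumes a RocksDB build with ASSERT_STATUS_CHECKED; "/tmp/asc_demo" is a
// placeholder path.
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

using ROCKSDB_NAMESPACE::DB;
using ROCKSDB_NAMESPACE::Options;
using ROCKSDB_NAMESPACE::ReadOptions;
using ROCKSDB_NAMESPACE::Status;
using ROCKSDB_NAMESPACE::WriteOptions;

int main() {
  Options options;
  options.create_if_missing = true;

  DB* db = nullptr;
  Status s = DB::Open(options, "/tmp/asc_demo", &db);
  if (!s.ok()) {  // calling ok() marks the Status as examined
    return 1;
  }

  // Under ASSERT_STATUS_CHECKED the next line would abort in the Status
  // destructor, because the returned temporary is never inspected:
  //   db->Put(WriteOptions(), "k", "v");

  // Remedy 1: check the result (what most hunks above do via ASSERT_OK).
  s = db->Put(WriteOptions(), "k", "v");
  if (!s.ok()) {
    delete db;
    return 1;
  }

  // Remedy 2: deliberately ignore the result, saying so at the call site.
  db->Put(WriteOptions(), "k2", "v2").PermitUncheckedError();

  std::string value;
  s = db->Get(ReadOptions(), "k", &value);
  if (!s.ok() || value != "v") {
    delete db;
    return 1;
  }

  delete db;
  return 0;
}

The second remedy is the one this patch applies in destructors and other no-fail paths (e.g. ~AutoRollLogger, ~BlockCacheTier), where there is no caller left to report an error to; the first is used everywhere a test or caller can actually react to the failure.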