From 8ff6557e7f096ed45de296a5073374958fbfaa55 Mon Sep 17 00:00:00 2001 From: Adam Retter Date: Wed, 9 Dec 2020 21:19:55 -0800 Subject: [PATCH] Add further tests to ASSERT_STATUS_CHECKED (2) (#7698) Summary: Second batch of adding more tests to ASSERT_STATUS_CHECKED. * external_sst_file_basic_test * checkpoint_test * db_wal_test * db_block_cache_test * db_logical_block_size_cache_test * db_blob_index_test * optimistic_transaction_test * transaction_test * point_lock_manager_test * write_prepared_transaction_test * write_unprepared_transaction_test Pull Request resolved: https://github.com/facebook/rocksdb/pull/7698 Reviewed By: cheng-chang Differential Revision: D25441664 Pulled By: pdillinger fbshipit-source-id: 9e78867f32321db5d4833e95eb96c5734526ef00 --- Makefile | 11 + db/blob/db_blob_index_test.cc | 6 +- db/db_block_cache_test.cc | 6 +- db/db_impl/db_impl.h | 2 +- db/db_impl/db_impl_debug.cc | 5 +- db/db_impl/db_impl_open.cc | 3 + db/db_impl/db_impl_write.cc | 4 +- db/db_iter.cc | 11 +- db/db_logical_block_size_cache_test.cc | 16 +- db/db_wal_test.cc | 106 +-- db/db_write_test.cc | 4 +- db/external_sst_file_basic_test.cc | 85 +-- db/external_sst_file_ingestion_job.cc | 6 + db/write_thread.cc | 1 + test_util/testutil.cc | 15 + test_util/testutil.h | 6 + utilities/checkpoint/checkpoint_impl.cc | 202 +++--- utilities/checkpoint/checkpoint_test.cc | 38 +- utilities/fault_injection_env.cc | 2 +- .../lock/point/point_lock_manager.cc | 18 +- .../transactions/optimistic_transaction.cc | 2 - .../optimistic_transaction_test.cc | 679 +++++++----------- utilities/transactions/transaction_test.cc | 6 +- utilities/transactions/transaction_test.h | 20 +- .../write_prepared_transaction_test.cc | 213 +++--- utilities/transactions/write_prepared_txn.cc | 23 +- .../write_unprepared_transaction_test.cc | 46 +- .../transactions/write_unprepared_txn_db.cc | 5 +- 28 files changed, 760 insertions(+), 781 deletions(-) diff --git a/Makefile b/Makefile index 5ac25b0ceb..6e230a08e3 100644 
--- a/Makefile +++ b/Makefile @@ -589,16 +589,21 @@ ifdef ASSERT_STATUS_CHECKED cassandra_row_merge_test \ cassandra_serialize_test \ cleanable_test \ + checkpoint_test \ coding_test \ crc32c_test \ dbformat_test \ db_basic_test \ db_blob_basic_test \ + db_blob_index_test \ + db_block_cache_test \ db_flush_test \ db_iterator_test \ + db_logical_block_size_cache_test \ db_memtable_test \ db_merge_operand_test \ db_merge_operator_test \ + db_wal_test \ db_with_timestamp_basic_test \ db_with_timestamp_compaction_test \ db_options_test \ @@ -613,6 +618,7 @@ ifdef ASSERT_STATUS_CHECKED env_logger_test \ event_logger_test \ error_handler_fs_test \ + external_sst_file_basic_test \ auto_roll_logger_test \ file_indexer_test \ flush_job_test \ @@ -628,6 +634,7 @@ ifdef ASSERT_STATUS_CHECKED merger_test \ mock_env_test \ object_registry_test \ + optimistic_transaction_test \ prefix_test \ plain_table_db_test \ repair_test \ @@ -635,6 +642,7 @@ ifdef ASSERT_STATUS_CHECKED customizable_test \ options_settable_test \ options_test \ + point_lock_manager_test \ random_test \ range_del_aggregator_test \ sst_file_reader_test \ @@ -648,6 +656,7 @@ ifdef ASSERT_STATUS_CHECKED stats_history_test \ thread_local_test \ trace_analyzer_test \ + transaction_test \ env_timed_test \ filelock_test \ timer_queue_test \ @@ -663,6 +672,8 @@ ifdef ASSERT_STATUS_CHECKED version_edit_test \ work_queue_test \ write_controller_test \ + write_prepared_transaction_test \ + write_unprepared_transaction_test \ compaction_iterator_test \ compaction_job_test \ compaction_job_stats_test \ diff --git a/db/blob/db_blob_index_test.cc b/db/blob/db_blob_index_test.cc index 34cf5e0b70..8e9da81be7 100644 --- a/db/blob/db_blob_index_test.cc +++ b/db/blob/db_blob_index_test.cc @@ -305,6 +305,7 @@ TEST_F(DBBlobIndexTest, Iterate) { std::function extra_check = nullptr) { // Seek auto* iterator = create_iterator(); + ASSERT_OK(iterator->status()); ASSERT_OK(iterator->Refresh()); iterator->Seek(get_key(index)); 
check_iterator(iterator, expected_status, forward_value); @@ -318,6 +319,7 @@ TEST_F(DBBlobIndexTest, Iterate) { ASSERT_OK(iterator->Refresh()); iterator->Seek(get_key(index - 1)); ASSERT_TRUE(iterator->Valid()); + ASSERT_OK(iterator->status()); iterator->Next(); check_iterator(iterator, expected_status, forward_value); if (extra_check) { @@ -327,6 +329,7 @@ TEST_F(DBBlobIndexTest, Iterate) { // SeekForPrev iterator = create_iterator(); + ASSERT_OK(iterator->status()); ASSERT_OK(iterator->Refresh()); iterator->SeekForPrev(get_key(index)); check_iterator(iterator, expected_status, backward_value); @@ -339,6 +342,7 @@ TEST_F(DBBlobIndexTest, Iterate) { iterator = create_iterator(); iterator->Seek(get_key(index + 1)); ASSERT_TRUE(iterator->Valid()); + ASSERT_OK(iterator->status()); iterator->Prev(); check_iterator(iterator, expected_status, backward_value); if (extra_check) { @@ -376,7 +380,7 @@ TEST_F(DBBlobIndexTest, Iterate) { ASSERT_OK(Write(&batch)); break; default: - assert(false); + FAIL(); }; } snapshots.push_back(dbfull()->GetSnapshot()); diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index 57c4d47c0f..cf6f6dfd9b 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -677,7 +677,7 @@ TEST_F(DBBlockCacheTest, ParanoidFileChecks) { // Create a new SST file. This will further trigger a compaction // and generate another file. 
ASSERT_OK(Flush(1)); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(3, /* Totally 3 files created up to now */ TestGetTickerCount(options, BLOCK_CACHE_ADD)); @@ -692,7 +692,7 @@ TEST_F(DBBlockCacheTest, ParanoidFileChecks) { ASSERT_OK(Put(1, "1_key4", "val4")); ASSERT_OK(Put(1, "9_key4", "val4")); ASSERT_OK(Flush(1)); - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(3, /* Totally 3 files created up to now */ TestGetTickerCount(options, BLOCK_CACHE_ADD)); } @@ -860,7 +860,7 @@ TEST_F(DBBlockCacheTest, CacheCompressionDict) { } ASSERT_OK(Flush()); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(0, NumTableFilesAtLevel(0)); ASSERT_EQ(kNumFiles, NumTableFilesAtLevel(1)); diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h index af47cfc4e2..67a7b46789 100644 --- a/db/db_impl/db_impl.h +++ b/db/db_impl/db_impl.h @@ -922,7 +922,7 @@ class DBImpl : public DB { ColumnFamilyHandle* column_family = nullptr, bool disallow_trivial_move = false); - void TEST_SwitchWAL(); + Status TEST_SwitchWAL(); bool TEST_UnableToReleaseOldestLog() { return unable_to_release_oldest_log_; } diff --git a/db/db_impl/db_impl_debug.cc b/db/db_impl/db_impl_debug.cc index 4825fb6ef8..44108d52e7 100644 --- a/db/db_impl/db_impl_debug.cc +++ b/db/db_impl/db_impl_debug.cc @@ -22,12 +22,13 @@ uint64_t DBImpl::TEST_GetLevel0TotalSize() { return default_cf_handle_->cfd()->current()->storage_info()->NumLevelBytes(0); } -void DBImpl::TEST_SwitchWAL() { +Status DBImpl::TEST_SwitchWAL() { WriteContext write_context; InstrumentedMutexLock l(&mutex_); void* writer = TEST_BeginWrite(); - SwitchWAL(&write_context); + auto s = SwitchWAL(&write_context); TEST_EndWrite(writer); + return s; } bool DBImpl::TEST_WALBufferIsEmpty(bool lock) { diff --git a/db/db_impl/db_impl_open.cc b/db/db_impl/db_impl_open.cc index a8e0812902..0ab52a6c28 100644 --- a/db/db_impl/db_impl_open.cc +++ 
b/db/db_impl/db_impl_open.cc @@ -1368,6 +1368,9 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd, cfd->GetName().c_str(), meta.fd.GetNumber(), meta.fd.GetFileSize(), s.ToString().c_str()); mutex_.Lock(); + + io_s.PermitUncheckedError(); // TODO(AR) is this correct, or should we + // return io_s if not ok()? } } ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem); diff --git a/db/db_impl/db_impl_write.cc b/db/db_impl/db_impl_write.cc index 1a459c36a7..7a92fe611d 100644 --- a/db/db_impl/db_impl_write.cc +++ b/db/db_impl/db_impl_write.cc @@ -666,7 +666,6 @@ Status DBImpl::WriteImplWALOnly( const uint64_t log_ref, uint64_t* seq_used, const size_t sub_batch_cnt, PreReleaseCallback* pre_release_callback, const AssignOrder assign_order, const PublishLastSeq publish_last_seq, const bool disable_memtable) { - Status status; PERF_TIMER_GUARD(write_pre_and_post_process_time); WriteThread::Writer w(write_options, my_batch, callback, log_ref, disable_memtable, sub_batch_cnt, pre_release_callback); @@ -688,6 +687,8 @@ Status DBImpl::WriteImplWALOnly( assert(w.state == WriteThread::STATE_GROUP_LEADER); if (publish_last_seq == kDoPublishLastSeq) { + Status status; + // Currently we only use kDoPublishLastSeq in unordered_write assert(immutable_db_options_.unordered_write); WriteContext write_context; @@ -764,6 +765,7 @@ Status DBImpl::WriteImplWALOnly( } seq_inc = total_batch_cnt; } + Status status; IOStatus io_s; if (!write_options.disableWAL) { io_s = ConcurrentWriteToWAL(write_group, log_used, &last_sequence, seq_inc); diff --git a/db/db_iter.cc b/db/db_iter.cc index 0d517ccfe4..00b2303265 100644 --- a/db/db_iter.cc +++ b/db/db_iter.cc @@ -545,7 +545,6 @@ bool DBIter::MergeValuesNewToOld() { TEST_SYNC_POINT("DBIter::MergeValuesNewToOld:PushedFirstOperand"); ParsedInternalKey ikey; - Status s; for (iter_.Next(); iter_.Valid(); iter_.Next()) { TEST_SYNC_POINT("DBIter::MergeValuesNewToOld:SteppedToNextOperand"); if (!ParseKey(&ikey)) { 
@@ -573,7 +572,7 @@ bool DBIter::MergeValuesNewToOld() { // hit a put, merge the put value with operands and store the // final result in saved_value_. We are done! const Slice val = iter_.value(); - s = MergeHelper::TimedFullMerge( + Status s = MergeHelper::TimedFullMerge( merge_operator_, ikey.user_key, &val, merge_context_.GetOperands(), &saved_value_, logger_, statistics_, env_, &pinned_value_, true); if (!s.ok()) { @@ -616,10 +615,10 @@ bool DBIter::MergeValuesNewToOld() { // a deletion marker. // feed null as the existing value to the merge operator, such that // client can differentiate this scenario and do things accordingly. - s = MergeHelper::TimedFullMerge(merge_operator_, saved_key_.GetUserKey(), - nullptr, merge_context_.GetOperands(), - &saved_value_, logger_, statistics_, env_, - &pinned_value_, true); + Status s = MergeHelper::TimedFullMerge( + merge_operator_, saved_key_.GetUserKey(), nullptr, + merge_context_.GetOperands(), &saved_value_, logger_, statistics_, env_, + &pinned_value_, true); if (!s.ok()) { valid_ = false; status_ = s; diff --git a/db/db_logical_block_size_cache_test.cc b/db/db_logical_block_size_cache_test.cc index 20f6abadca..1057871c9f 100644 --- a/db/db_logical_block_size_cache_test.cc +++ b/db/db_logical_block_size_cache_test.cc @@ -401,7 +401,7 @@ TEST_F(DBLogicalBlockSizeCacheTest, MultiDBWithDifferentPaths) { ColumnFamilyOptions cf_options0; cf_options0.cf_paths = {{cf_path_0_, 1024}}; ColumnFamilyHandle* cf0; - db0->CreateColumnFamily(cf_options0, "cf", &cf0); + ASSERT_OK(db0->CreateColumnFamily(cf_options0, "cf", &cf0)); ASSERT_EQ(2, cache_->Size()); ASSERT_TRUE(cache_->Contains(data_path_0_)); ASSERT_EQ(1, cache_->GetRefCount(data_path_0_)); @@ -421,7 +421,7 @@ TEST_F(DBLogicalBlockSizeCacheTest, MultiDBWithDifferentPaths) { ColumnFamilyOptions cf_options1; cf_options1.cf_paths = {{cf_path_1_, 1024}}; ColumnFamilyHandle* cf1; - db1->CreateColumnFamily(cf_options1, "cf", &cf1); + 
ASSERT_OK(db1->CreateColumnFamily(cf_options1, "cf", &cf1)); ASSERT_EQ(4, cache_->Size()); ASSERT_TRUE(cache_->Contains(data_path_0_)); ASSERT_EQ(1, cache_->GetRefCount(data_path_0_)); @@ -432,7 +432,7 @@ TEST_F(DBLogicalBlockSizeCacheTest, MultiDBWithDifferentPaths) { ASSERT_TRUE(cache_->Contains(cf_path_1_)); ASSERT_EQ(1, cache_->GetRefCount(cf_path_1_)); - db0->DestroyColumnFamilyHandle(cf0); + ASSERT_OK(db0->DestroyColumnFamilyHandle(cf0)); delete db0; ASSERT_EQ(2, cache_->Size()); ASSERT_TRUE(cache_->Contains(data_path_1_)); @@ -441,7 +441,7 @@ TEST_F(DBLogicalBlockSizeCacheTest, MultiDBWithDifferentPaths) { ASSERT_EQ(1, cache_->GetRefCount(cf_path_1_)); ASSERT_OK(DestroyDB(data_path_0_, options, {{"cf", cf_options0}})); - db1->DestroyColumnFamilyHandle(cf1); + ASSERT_OK(db1->DestroyColumnFamilyHandle(cf1)); delete db1; ASSERT_EQ(0, cache_->Size()); ASSERT_OK(DestroyDB(data_path_1_, options, {{"cf", cf_options1}})); @@ -466,7 +466,7 @@ TEST_F(DBLogicalBlockSizeCacheTest, MultiDBWithSamePaths) { ASSERT_EQ(1, cache_->GetRefCount(data_path_0_)); ColumnFamilyHandle* cf0; - db0->CreateColumnFamily(cf_options, "cf", &cf0); + ASSERT_OK(db0->CreateColumnFamily(cf_options, "cf", &cf0)); ASSERT_EQ(2, cache_->Size()); ASSERT_TRUE(cache_->Contains(data_path_0_)); ASSERT_EQ(1, cache_->GetRefCount(data_path_0_)); @@ -482,14 +482,14 @@ TEST_F(DBLogicalBlockSizeCacheTest, MultiDBWithSamePaths) { ASSERT_EQ(1, cache_->GetRefCount(cf_path_0_)); ColumnFamilyHandle* cf1; - db1->CreateColumnFamily(cf_options, "cf", &cf1); + ASSERT_OK(db1->CreateColumnFamily(cf_options, "cf", &cf1)); ASSERT_EQ(2, cache_->Size()); ASSERT_TRUE(cache_->Contains(data_path_0_)); ASSERT_EQ(2, cache_->GetRefCount(data_path_0_)); ASSERT_TRUE(cache_->Contains(cf_path_0_)); ASSERT_EQ(2, cache_->GetRefCount(cf_path_0_)); - db0->DestroyColumnFamilyHandle(cf0); + ASSERT_OK(db0->DestroyColumnFamilyHandle(cf0)); delete db0; ASSERT_EQ(2, cache_->Size()); ASSERT_TRUE(cache_->Contains(data_path_0_)); @@ -498,7 +498,7 
@@ TEST_F(DBLogicalBlockSizeCacheTest, MultiDBWithSamePaths) { ASSERT_EQ(1, cache_->GetRefCount(cf_path_0_)); ASSERT_OK(DestroyDB(dbname_ + "/db0", options, {{"cf", cf_options}})); - db1->DestroyColumnFamilyHandle(cf1); + ASSERT_OK(db1->DestroyColumnFamilyHandle(cf1)); delete db1; ASSERT_EQ(0, cache_->Size()); ASSERT_OK(DestroyDB(dbname_ + "/db1", options, {{"cf", cf_options}})); diff --git a/db/db_wal_test.cc b/db/db_wal_test.cc index eb7481ab67..babaf5ad5c 100644 --- a/db/db_wal_test.cc +++ b/db/db_wal_test.cc @@ -358,16 +358,16 @@ TEST_F(DBWALTest, RecoverWithBlob) { // There should be no files just yet since we haven't flushed. { VersionSet* const versions = dbfull()->TEST_GetVersionSet(); - assert(versions); + ASSERT_NE(versions, nullptr); ColumnFamilyData* const cfd = versions->GetColumnFamilySet()->GetDefault(); - assert(cfd); + ASSERT_NE(cfd, nullptr); Version* const current = cfd->current(); - assert(current); + ASSERT_NE(current, nullptr); const VersionStorageInfo* const storage_info = current->storage_info(); - assert(storage_info); + ASSERT_NE(storage_info, nullptr); ASSERT_EQ(storage_info->num_non_empty_levels(), 0); ASSERT_TRUE(storage_info->GetBlobFiles().empty()); @@ -388,28 +388,28 @@ TEST_F(DBWALTest, RecoverWithBlob) { ASSERT_EQ(Get("key2"), long_value); VersionSet* const versions = dbfull()->TEST_GetVersionSet(); - assert(versions); + ASSERT_NE(versions, nullptr); ColumnFamilyData* const cfd = versions->GetColumnFamilySet()->GetDefault(); - assert(cfd); + ASSERT_NE(cfd, nullptr); Version* const current = cfd->current(); - assert(current); + ASSERT_NE(current, nullptr); const VersionStorageInfo* const storage_info = current->storage_info(); - assert(storage_info); + ASSERT_NE(storage_info, nullptr); const auto& l0_files = storage_info->LevelFiles(0); ASSERT_EQ(l0_files.size(), 1); const FileMetaData* const table_file = l0_files[0]; - assert(table_file); + ASSERT_NE(table_file, nullptr); const auto& blob_files = storage_info->GetBlobFiles(); 
ASSERT_EQ(blob_files.size(), 1); const auto& blob_file = blob_files.begin()->second; - assert(blob_file); + ASSERT_NE(blob_file, nullptr); ASSERT_EQ(table_file->smallest.user_key(), "key1"); ASSERT_EQ(table_file->largest.user_key(), "key2"); @@ -422,7 +422,7 @@ TEST_F(DBWALTest, RecoverWithBlob) { #ifndef ROCKSDB_LITE const InternalStats* const internal_stats = cfd->internal_stats(); - assert(internal_stats); + ASSERT_NE(internal_stats, nullptr); const uint64_t expected_bytes = table_file->fd.GetFileSize() + blob_file->GetTotalBlobBytes(); @@ -502,12 +502,12 @@ TEST_F(DBWALTest, IgnoreRecoveredLog) { do { // delete old files in backup_logs directory - env_->CreateDirIfMissing(backup_logs); + ASSERT_OK(env_->CreateDirIfMissing(backup_logs)); std::vector old_files; - env_->GetChildren(backup_logs, &old_files); + ASSERT_OK(env_->GetChildren(backup_logs, &old_files)); for (auto& file : old_files) { if (file != "." && file != "..") { - env_->DeleteFile(backup_logs + "/" + file); + ASSERT_OK(env_->DeleteFile(backup_logs + "/" + file)); } } Options options = CurrentOptions(); @@ -526,7 +526,7 @@ TEST_F(DBWALTest, IgnoreRecoveredLog) { // copy the logs to backup std::vector logs; - env_->GetChildren(options.wal_dir, &logs); + ASSERT_OK(env_->GetChildren(options.wal_dir, &logs)); for (auto& log : logs) { if (log != ".." && log != ".") { CopyFile(options.wal_dir + "/" + log, backup_logs + "/" + log); @@ -557,7 +557,7 @@ TEST_F(DBWALTest, IgnoreRecoveredLog) { Close(); // copy the logs from backup back to wal dir - env_->CreateDirIfMissing(options.wal_dir); + ASSERT_OK(env_->CreateDirIfMissing(options.wal_dir)); for (auto& log : logs) { if (log != ".." && log != ".") { CopyFile(backup_logs + "/" + log, options.wal_dir + "/" + log); @@ -572,16 +572,16 @@ TEST_F(DBWALTest, IgnoreRecoveredLog) { // Recovery will fail if DB directory doesn't exist. 
Destroy(options); // copy the logs from backup back to wal dir - env_->CreateDirIfMissing(options.wal_dir); + ASSERT_OK(env_->CreateDirIfMissing(options.wal_dir)); for (auto& log : logs) { if (log != ".." && log != ".") { CopyFile(backup_logs + "/" + log, options.wal_dir + "/" + log); // we won't be needing this file no more - env_->DeleteFile(backup_logs + "/" + log); + ASSERT_OK(env_->DeleteFile(backup_logs + "/" + log)); } } Status s = TryReopen(options); - ASSERT_TRUE(!s.ok()); + ASSERT_NOK(s); Destroy(options); } while (ChangeWalOptions()); } @@ -619,9 +619,9 @@ TEST_F(DBWALTest, PreallocateBlock) { called.fetch_add(1); }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - Put("", ""); - Flush(); - Put("", ""); + ASSERT_OK(Put("", "")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("", "")); Close(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); ASSERT_EQ(2, called.load()); @@ -638,9 +638,9 @@ TEST_F(DBWALTest, PreallocateBlock) { called.fetch_add(1); }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - Put("", ""); - Flush(); - Put("", ""); + ASSERT_OK(Put("", "")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("", "")); Close(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); ASSERT_EQ(2, called.load()); @@ -658,9 +658,9 @@ TEST_F(DBWALTest, PreallocateBlock) { called.fetch_add(1); }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - Put("", ""); - Flush(); - Put("", ""); + ASSERT_OK(Put("", "")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("", "")); Close(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); ASSERT_EQ(2, called.load()); @@ -679,9 +679,9 @@ TEST_F(DBWALTest, PreallocateBlock) { called.fetch_add(1); }); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); - Put("", ""); - Flush(); - Put("", ""); + ASSERT_OK(Put("", "")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("", "")); Close(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); 
ASSERT_EQ(2, called.load()); @@ -907,7 +907,7 @@ TEST_F(DBWALTest, RecoverCheckFileAmountWithSmallWriteBuffer) { // Make 'dobrynia' to be flushed and new WAL file to be created ASSERT_OK(Put(2, Key(10), DummyString(7500000))); ASSERT_OK(Put(2, Key(1), DummyString(1))); - dbfull()->TEST_WaitForFlushMemTable(handles_[2]); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[2])); { auto tables = ListTableFiles(env_, dbname_); ASSERT_EQ(tables.size(), static_cast(1)); @@ -961,7 +961,7 @@ TEST_F(DBWALTest, RecoverCheckFileAmount) { // Make 'nikitich' memtable to be flushed ASSERT_OK(Put(3, Key(10), DummyString(1002400))); ASSERT_OK(Put(3, Key(1), DummyString(1))); - dbfull()->TEST_WaitForFlushMemTable(handles_[3]); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[3])); // 4 memtable are not flushed, 1 sst file { auto tables = ListTableFiles(env_, dbname_); @@ -981,7 +981,7 @@ TEST_F(DBWALTest, RecoverCheckFileAmount) { ASSERT_OK(Put(3, Key(10), DummyString(1002400))); // make it flush ASSERT_OK(Put(3, Key(1), DummyString(1))); - dbfull()->TEST_WaitForFlushMemTable(handles_[3]); + ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[3])); // There are still 4 memtable not flushed, and 2 sst tables ASSERT_OK(Put(0, Key(1), DummyString(1))); ASSERT_OK(Put(1, Key(1), DummyString(1))); @@ -1029,10 +1029,10 @@ TEST_F(DBWALTest, SyncMultipleLogs) { for (uint64_t b = 0; b < kNumBatches; b++) { batch.Clear(); for (int i = 0; i < kBatchSize; i++) { - batch.Put(Key(i), DummyString(128)); + ASSERT_OK(batch.Put(Key(i), DummyString(128))); } - dbfull()->Write(wo, &batch); + ASSERT_OK(dbfull()->Write(wo, &batch)); } ASSERT_OK(dbfull()->SyncWAL()); @@ -1060,7 +1060,7 @@ TEST_F(DBWALTest, PartOfWritesWithWALDisabled) { ASSERT_OK(Flush(0)); ASSERT_OK(Put(0, "key", "v5", wal_on)); // seq id 5 ASSERT_EQ("v5", Get(0, "key")); - dbfull()->FlushWAL(false); + ASSERT_OK(dbfull()->FlushWAL(false)); // Simulate a crash. 
fault_env->SetFilesystemActive(false); Close(); @@ -1128,12 +1128,13 @@ class RecoveryTestHelper { for (int i = 0; i < kKeysPerWALFile; i++) { std::string key = "key" + ToString((*count)++); std::string value = test->DummyString(kValueSize); - assert(current_log_writer.get() != nullptr); + ASSERT_NE(current_log_writer.get(), nullptr); uint64_t seq = versions->LastSequence() + 1; batch.Clear(); - batch.Put(key, value); + ASSERT_OK(batch.Put(key, value)); WriteBatchInternal::SetSequence(&batch, seq); - current_log_writer->AddRecord(WriteBatchInternal::Contents(&batch)); + ASSERT_OK(current_log_writer->AddRecord( + WriteBatchInternal::Contents(&batch))); versions->SetLastAllocatedSequence(seq); versions->SetLastPublishedSequence(seq); versions->SetLastSequence(seq); @@ -1309,10 +1310,11 @@ TEST_F(DBWALTest, kPointInTimeRecoveryCFConsistency) { ASSERT_OK(Put(1, "key3", "val3")); // Corrupt WAL at location of key3 - test::CorruptFile(env, fname, static_cast(offset_to_corrupt), 4, false); + ASSERT_OK(test::CorruptFile(env, fname, static_cast(offset_to_corrupt), + 4, false)); ASSERT_OK(Put(2, "key4", "val4")); ASSERT_OK(Put(1, "key5", "val5")); - Flush(2); + ASSERT_OK(Flush(2)); // PIT recovery & verify options.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery; @@ -1466,7 +1468,7 @@ TEST_F(DBWALTest, WalCleanupAfterAvoidFlushDuringRecovery) { for (int i = 0; i < 2; ++i) { if (i > 0) { // Flush() triggers deletion of obsolete tracked files - Flush(); + ASSERT_OK(Flush()); } VectorLogPtr log_files; ASSERT_OK(dbfull()->GetSortedWalFiles(log_files)); @@ -1508,7 +1510,7 @@ TEST_F(DBWALTest, RecoverWithoutFlush) { ASSERT_EQ(Get("foo"), "foo_v2"); ASSERT_EQ(Get("bar"), "bar_v2"); // manual flush and insert again - Flush(); + ASSERT_OK(Flush()); ASSERT_EQ(Get("foo"), "foo_v2"); ASSERT_EQ(Get("bar"), "bar_v2"); ASSERT_OK(Put("foo", "foo_v3")); @@ -1529,7 +1531,9 @@ TEST_F(DBWALTest, RecoverWithoutFlushMultipleCF) { auto countWalFiles = [this]() { VectorLogPtr log_files; - 
dbfull()->GetSortedWalFiles(log_files); + if (!dbfull()->GetSortedWalFiles(log_files).ok()) { + return size_t{0}; + } return log_files.size(); }; @@ -1537,11 +1541,11 @@ TEST_F(DBWALTest, RecoverWithoutFlushMultipleCF) { CreateAndReopenWithCF({"one", "two"}, options); ASSERT_OK(Put(0, "key1", kSmallValue)); ASSERT_OK(Put(1, "key2", kLargeValue)); - Flush(1); + ASSERT_OK(Flush(1)); ASSERT_EQ(1, countWalFiles()); ASSERT_OK(Put(0, "key3", kSmallValue)); ASSERT_OK(Put(2, "key4", kLargeValue)); - Flush(2); + ASSERT_OK(Flush(2)); ASSERT_EQ(2, countWalFiles()); // Reopen, insert and flush. @@ -1555,9 +1559,9 @@ TEST_F(DBWALTest, RecoverWithoutFlushMultipleCF) { ASSERT_OK(Put(0, "key5", kLargeValue)); ASSERT_OK(Put(1, "key6", kLargeValue)); ASSERT_EQ(3, countWalFiles()); - Flush(1); + ASSERT_OK(Flush(1)); ASSERT_OK(Put(2, "key7", kLargeValue)); - dbfull()->FlushWAL(false); + ASSERT_OK(dbfull()->FlushWAL(false)); ASSERT_EQ(4, countWalFiles()); // Reopen twice and validate. @@ -1766,9 +1770,9 @@ TEST_F(DBWALTest, WalTermTest) { wo.disableWAL = false; WriteBatch batch; - batch.Put("foo", "bar"); + ASSERT_OK(batch.Put("foo", "bar")); batch.MarkWalTerminationPoint(); - batch.Put("foo2", "bar2"); + ASSERT_OK(batch.Put("foo2", "bar2")); ASSERT_OK(dbfull()->Write(wo, &batch)); diff --git a/db/db_write_test.cc b/db/db_write_test.cc index 945b08eda8..d51672a89d 100644 --- a/db/db_write_test.cc +++ b/db/db_write_test.cc @@ -320,7 +320,7 @@ TEST_P(DBWriteTest, ManualWalFlushInEffect) { ASSERT_TRUE(dbfull()->FlushWAL(false).ok()); ASSERT_TRUE(dbfull()->TEST_WALBufferIsEmpty()); // try the 2nd wal created during SwitchWAL - dbfull()->TEST_SwitchWAL(); + ASSERT_OK(dbfull()->TEST_SwitchWAL()); ASSERT_TRUE(Put("key" + ToString(0), "value").ok()); ASSERT_TRUE(options.manual_wal_flush != dbfull()->TEST_WALBufferIsEmpty()); ASSERT_TRUE(dbfull()->FlushWAL(false).ok()); @@ -395,7 +395,7 @@ TEST_P(DBWriteTest, LockWalInEffect) { ASSERT_TRUE(dbfull()->TEST_WALBufferIsEmpty(false)); 
ASSERT_OK(dbfull()->UnlockWAL()); // try the 2nd wal created during SwitchWAL - dbfull()->TEST_SwitchWAL(); + ASSERT_OK(dbfull()->TEST_SwitchWAL()); ASSERT_OK(Put("key" + ToString(0), "value")); ASSERT_TRUE(options.manual_wal_flush != dbfull()->TEST_WALBufferIsEmpty()); ASSERT_OK(dbfull()->LockWAL()); diff --git a/db/external_sst_file_basic_test.cc b/db/external_sst_file_basic_test.cc index 9f6d7cc325..f61f78df02 100644 --- a/db/external_sst_file_basic_test.cc +++ b/db/external_sst_file_basic_test.cc @@ -29,8 +29,8 @@ class ExternalSSTFileBasicTest } void DestroyAndRecreateExternalSSTFilesDir() { - DestroyDir(env_, sst_files_dir_); - env_->CreateDir(sst_files_dir_); + ASSERT_OK(DestroyDir(env_, sst_files_dir_)); + ASSERT_OK(env_->CreateDir(sst_files_dir_)); } Status DeprecatedAddFile(const std::vector& files, @@ -162,7 +162,9 @@ class ExternalSSTFileBasicTest write_global_seqno, verify_checksums_before_ingest, true_data); } - ~ExternalSSTFileBasicTest() override { DestroyDir(env_, sst_files_dir_); } + ~ExternalSSTFileBasicTest() override { + DestroyDir(env_, sst_files_dir_).PermitUncheckedError(); + } protected: std::string sst_files_dir_; @@ -186,7 +188,7 @@ TEST_F(ExternalSSTFileBasicTest, Basic) { } ExternalSstFileInfo file1_info; Status s = sst_file_writer.Finish(&file1_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); // Current file size should be non-zero after success write. 
ASSERT_GT(sst_file_writer.FileSize(), 0); @@ -202,14 +204,14 @@ TEST_F(ExternalSSTFileBasicTest, Basic) { ASSERT_EQ(file1_info.file_checksum_func_name, kUnknownFileChecksumFuncName); // sst_file_writer already finished, cannot add this value s = sst_file_writer.Put(Key(100), "bad_val"); - ASSERT_FALSE(s.ok()) << s.ToString(); + ASSERT_NOK(s) << s.ToString(); s = sst_file_writer.DeleteRange(Key(100), Key(200)); - ASSERT_FALSE(s.ok()) << s.ToString(); + ASSERT_NOK(s) << s.ToString(); DestroyAndReopen(options); // Add file using file path s = DeprecatedAddFile({file1}); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U); for (int k = 0; k < 100; k++) { ASSERT_EQ(Get(Key(k)), Key(k) + "_val"); @@ -286,7 +288,7 @@ TEST_F(ExternalSSTFileBasicTest, BasicWithFileChecksumCrc32c) { } ExternalSstFileInfo file1_info; Status s = sst_file_writer.Finish(&file1_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); std::string file_checksum, file_checksum_func_name; ASSERT_OK(checksum_helper.GetSingleFileChecksumAndFuncName( file1, &file_checksum, &file_checksum_func_name)); @@ -305,14 +307,14 @@ TEST_F(ExternalSSTFileBasicTest, BasicWithFileChecksumCrc32c) { ASSERT_EQ(file1_info.file_checksum_func_name, file_checksum_func_name); // sst_file_writer already finished, cannot add this value s = sst_file_writer.Put(Key(100), "bad_val"); - ASSERT_FALSE(s.ok()) << s.ToString(); + ASSERT_NOK(s) << s.ToString(); s = sst_file_writer.DeleteRange(Key(100), Key(200)); - ASSERT_FALSE(s.ok()) << s.ToString(); + ASSERT_NOK(s) << s.ToString(); DestroyAndReopen(options); // Add file using file path s = DeprecatedAddFile({file1}); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U); for (int k = 0; k < 100; k++) { ASSERT_EQ(Get(Key(k)), Key(k) + "_val"); @@ -338,7 +340,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { } 
ExternalSstFileInfo file1_info; Status s = sst_file_writer.Finish(&file1_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file1_info.file_path, file1); ASSERT_EQ(file1_info.num_entries, 100); ASSERT_EQ(file1_info.smallest_key, Key(1000)); @@ -357,7 +359,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { } ExternalSstFileInfo file2_info; s = sst_file_writer.Finish(&file2_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file2_info.file_path, file2); ASSERT_EQ(file2_info.num_entries, 200); ASSERT_EQ(file2_info.smallest_key, Key(1100)); @@ -376,7 +378,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { } ExternalSstFileInfo file3_info; s = sst_file_writer.Finish(&file3_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file3_info.file_path, file3); ASSERT_EQ(file3_info.num_entries, 200); ASSERT_EQ(file3_info.smallest_key, Key(1300)); @@ -395,7 +397,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { } ExternalSstFileInfo file4_info; s = sst_file_writer.Finish(&file4_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file4_info.file_path, file4); ASSERT_EQ(file4_info.num_entries, 300); ASSERT_EQ(file4_info.smallest_key, Key(1500)); @@ -414,7 +416,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { } ExternalSstFileInfo file5_info; s = sst_file_writer.Finish(&file5_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file5_info.file_path, file5); ASSERT_EQ(file5_info.num_entries, 200); ASSERT_EQ(file5_info.smallest_key, Key(1800)); @@ -433,7 +435,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { } ExternalSstFileInfo file6_info; s = sst_file_writer.Finish(&file6_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file6_info.file_path, file6); 
ASSERT_EQ(file6_info.num_entries, 200); ASSERT_EQ(file6_info.smallest_key, Key(2000)); @@ -447,7 +449,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { s = AddFileWithFileChecksum({file1}, {file_checksum1, "xyz"}, {file_checksum1}, true, false, false, false); // does not care the checksum input since db does not enable file checksum - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_OK(env_->FileExists(file1)); std::vector live_files; dbfull()->GetLiveFilesMetaData(&live_files); @@ -465,26 +467,26 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { s = AddFileWithFileChecksum({file2}, {file_checksum2, "xyz"}, {file_checksum_func_name2}, true, false, false, false); - ASSERT_FALSE(s.ok()) << s.ToString(); + ASSERT_NOK(s) << s.ToString(); // Enable verify_file_checksum option // The checksum name does not match, fail the ingestion s = AddFileWithFileChecksum({file2}, {file_checksum2}, {"xyz"}, true, false, false, false); - ASSERT_FALSE(s.ok()) << s.ToString(); + ASSERT_NOK(s) << s.ToString(); // Enable verify_file_checksum option // The checksum itself does not match, fail the ingestion s = AddFileWithFileChecksum({file2}, {"xyz"}, {file_checksum_func_name2}, true, false, false, false); - ASSERT_FALSE(s.ok()) << s.ToString(); + ASSERT_NOK(s) << s.ToString(); // Enable verify_file_checksum option // All matches, ingestion is successful s = AddFileWithFileChecksum({file2}, {file_checksum2}, {file_checksum_func_name2}, true, false, false, false); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); std::vector live_files1; dbfull()->GetLiveFilesMetaData(&live_files1); for (auto f : live_files1) { @@ -501,7 +503,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { std::vector checksum, checksum_func; s = AddFileWithFileChecksum({file3}, checksum, checksum_func, true, false, false, false); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); std::vector 
live_files2; dbfull()->GetLiveFilesMetaData(&live_files2); for (auto f : live_files2) { @@ -511,20 +513,20 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { set1.insert(f.name); } } - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_OK(env_->FileExists(file3)); // Does not enable verify_file_checksum options // The checksum name does not match, fail the ingestion s = AddFileWithFileChecksum({file4}, {file_checksum4}, {"xyz"}, false, false, false, false); - ASSERT_FALSE(s.ok()) << s.ToString(); + ASSERT_NOK(s) << s.ToString(); // Does not enable verify_file_checksum options // Checksum function name matches, store the checksum being ingested. s = AddFileWithFileChecksum({file4}, {"asd"}, {file_checksum_func_name4}, false, false, false, false); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); std::vector live_files3; dbfull()->GetLiveFilesMetaData(&live_files3); for (auto f : live_files3) { @@ -535,7 +537,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { set1.insert(f.name); } } - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_OK(env_->FileExists(file4)); // enable verify_file_checksum options, DB enable checksum, and enable @@ -544,8 +546,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { s = AddFileWithFileChecksum({file5}, {file_checksum5}, {file_checksum_func_name5}, true, false, false, true); - ASSERT_OK(s); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); std::vector live_files4; dbfull()->GetLiveFilesMetaData(&live_files4); for (auto f : live_files4) { @@ -558,7 +559,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { set1.insert(f.name); } } - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_OK(env_->FileExists(file5)); // Does not enable verify_file_checksum options and also the ingested file @@ -567,7 +568,7 @@ TEST_F(ExternalSSTFileBasicTest, 
IngestFileWithFileChecksum) { std::vector files_c6, files_name6; s = AddFileWithFileChecksum({file6}, files_c6, files_name6, false, false, false, false); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); std::vector live_files6; dbfull()->GetLiveFilesMetaData(&live_files6); for (auto f : live_files6) { @@ -577,7 +578,7 @@ TEST_F(ExternalSSTFileBasicTest, IngestFileWithFileChecksum) { set1.insert(f.name); } } - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_OK(env_->FileExists(file6)); } @@ -595,7 +596,7 @@ TEST_F(ExternalSSTFileBasicTest, NoCopy) { } ExternalSstFileInfo file1_info; Status s = sst_file_writer.Finish(&file1_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file1_info.file_path, file1); ASSERT_EQ(file1_info.num_entries, 100); ASSERT_EQ(file1_info.smallest_key, Key(0)); @@ -609,7 +610,7 @@ TEST_F(ExternalSSTFileBasicTest, NoCopy) { } ExternalSstFileInfo file2_info; s = sst_file_writer.Finish(&file2_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file2_info.file_path, file2); ASSERT_EQ(file2_info.num_entries, 200); ASSERT_EQ(file2_info.smallest_key, Key(100)); @@ -623,23 +624,23 @@ TEST_F(ExternalSSTFileBasicTest, NoCopy) { } ExternalSstFileInfo file3_info; s = sst_file_writer.Finish(&file3_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file3_info.file_path, file3); ASSERT_EQ(file3_info.num_entries, 15); ASSERT_EQ(file3_info.smallest_key, Key(110)); ASSERT_EQ(file3_info.largest_key, Key(124)); s = DeprecatedAddFile({file1}, true /* move file */); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(Status::NotFound(), env_->FileExists(file1)); s = DeprecatedAddFile({file2}, false /* copy file */); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_OK(env_->FileExists(file2)); // This file has overlapping values with the existing data 
s = DeprecatedAddFile({file3}, true /* move file */); - ASSERT_FALSE(s.ok()) << s.ToString(); + ASSERT_NOK(s) << s.ToString(); ASSERT_OK(env_->FileExists(file3)); for (int k = 0; k < 300; k++) { @@ -1126,7 +1127,7 @@ TEST_F(ExternalSSTFileBasicTest, SyncFailure) { if (i == 2) { ingest_opt.write_global_seqno = true; } - ASSERT_FALSE(db_->IngestExternalFile({file_name}, ingest_opt).ok()); + ASSERT_NOK(db_->IngestExternalFile({file_name}, ingest_opt)); db_->ReleaseSnapshot(snapshot); SyncPoint::GetInstance()->DisableProcessing(); @@ -1326,7 +1327,7 @@ TEST_F(ExternalSSTFileBasicTest, AdjacentRangeDeletionTombstones) { ASSERT_OK(sst_file_writer.DeleteRange(Key(300), Key(400))); ExternalSstFileInfo file8_info; Status s = sst_file_writer.Finish(&file8_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file8_info.file_path, file8); ASSERT_EQ(file8_info.num_entries, 0); ASSERT_EQ(file8_info.smallest_key, ""); @@ -1341,7 +1342,7 @@ TEST_F(ExternalSSTFileBasicTest, AdjacentRangeDeletionTombstones) { ASSERT_OK(sst_file_writer.DeleteRange(Key(400), Key(500))); ExternalSstFileInfo file9_info; s = sst_file_writer.Finish(&file9_info); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(file9_info.file_path, file9); ASSERT_EQ(file9_info.num_entries, 0); ASSERT_EQ(file9_info.smallest_key, ""); @@ -1353,7 +1354,7 @@ TEST_F(ExternalSSTFileBasicTest, AdjacentRangeDeletionTombstones) { // Range deletion tombstones are exclusive on their end key, so these SSTs // should not be considered as overlapping. 
s = DeprecatedAddFile({file8, file9}); - ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_OK(s) << s.ToString(); ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U); DestroyAndRecreateExternalSSTFilesDir(); } diff --git a/db/external_sst_file_ingestion_job.cc b/db/external_sst_file_ingestion_job.cc index 1913a211ac..a08081b870 100644 --- a/db/external_sst_file_ingestion_job.cc +++ b/db/external_sst_file_ingestion_job.cc @@ -335,6 +335,12 @@ Status ExternalSstFileIngestionJob::Run() { // with the files we are ingesting bool need_flush = false; status = NeedsFlush(&need_flush, super_version); + if (!status.ok()) { + return status; + } + if (need_flush) { + return Status::TryAgain(); + } assert(status.ok() && need_flush == false); #endif diff --git a/db/write_thread.cc b/db/write_thread.cc index d26a694aac..fa414a1efb 100644 --- a/db/write_thread.cc +++ b/db/write_thread.cc @@ -208,6 +208,7 @@ uint8_t WriteThread::AwaitState(Writer* w, uint8_t goal_mask, } void WriteThread::SetState(Writer* w, uint8_t new_state) { + assert(w); auto state = w->state.load(std::memory_order_acquire); if (state == STATE_LOCKED_WAITING || !w->state.compare_exchange_strong(state, new_state)) { diff --git a/test_util/testutil.cc b/test_util/testutil.cc index 86c67a1824..f85ee224a8 100644 --- a/test_util/testutil.cc +++ b/test_util/testutil.cc @@ -546,5 +546,20 @@ Status TruncateFile(Env* env, const std::string& fname, uint64_t new_length) { return s; } +// Try and delete a directory if it exists +Status TryDeleteDir(Env* env, const std::string& dirname) { + bool is_dir = false; + Status s = env->IsDirectory(dirname, &is_dir); + if (s.ok() && is_dir) { + s = env->DeleteDir(dirname); + } + return s; +} + +// Delete a directory if it exists +void DeleteDir(Env* env, const std::string& dirname) { + TryDeleteDir(env, dirname).PermitUncheckedError(); +} + } // namespace test } // namespace ROCKSDB_NAMESPACE diff --git a/test_util/testutil.h b/test_util/testutil.h index b8cfc83525..9098200d02 100644 --- 
a/test_util/testutil.h +++ b/test_util/testutil.h @@ -807,5 +807,11 @@ Status CorruptFile(Env* env, const std::string& fname, int offset, int bytes_to_corrupt, bool verify_checksum = true); Status TruncateFile(Env* env, const std::string& fname, uint64_t length); +// Try and delete a directory if it exists +Status TryDeleteDir(Env* env, const std::string& dirname); + +// Delete a directory if it exists +void DeleteDir(Env* env, const std::string& dirname); + } // namespace test } // namespace ROCKSDB_NAMESPACE diff --git a/utilities/checkpoint/checkpoint_impl.cc b/utilities/checkpoint/checkpoint_impl.cc index 94868e63e1..2caea30372 100644 --- a/utilities/checkpoint/checkpoint_impl.cc +++ b/utilities/checkpoint/checkpoint_impl.cc @@ -51,12 +51,14 @@ void CheckpointImpl::CleanStagingDirectory( } ROCKS_LOG_INFO(info_log, "File exists %s -- %s", full_private_path.c_str(), s.ToString().c_str()); - db_->GetEnv()->GetChildren(full_private_path, &subchildren); - for (auto& subchild : subchildren) { - std::string subchild_path = full_private_path + "/" + subchild; - s = db_->GetEnv()->DeleteFile(subchild_path); - ROCKS_LOG_INFO(info_log, "Delete file %s -- %s", - subchild_path.c_str(), s.ToString().c_str()); + s = db_->GetEnv()->GetChildren(full_private_path, &subchildren); + if (s.ok()) { + for (auto& subchild : subchildren) { + std::string subchild_path = full_private_path + "/" + subchild; + s = db_->GetEnv()->DeleteFile(subchild_path); + ROCKS_LOG_INFO(info_log, "Delete file %s -- %s", subchild_path.c_str(), + s.ToString().c_str()); + } } // finally delete the private dir s = db_->GetEnv()->DeleteDir(full_private_path); @@ -109,33 +111,44 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir, s = db_->GetEnv()->CreateDir(full_private_path); uint64_t sequence_number = 0; if (s.ok()) { - db_->DisableFileDeletions(); - s = CreateCustomCheckpoint( - db_options, - [&](const std::string& src_dirname, const std::string& fname, - FileType) { - 
ROCKS_LOG_INFO(db_options.info_log, "Hard Linking %s", fname.c_str()); - return db_->GetFileSystem()->LinkFile(src_dirname + fname, - full_private_path + fname, - IOOptions(), nullptr); - } /* link_file_cb */, - [&](const std::string& src_dirname, const std::string& fname, - uint64_t size_limit_bytes, FileType, - const std::string& /* checksum_func_name */, - const std::string& /* checksum_val */) { - ROCKS_LOG_INFO(db_options.info_log, "Copying %s", fname.c_str()); - return CopyFile(db_->GetFileSystem(), src_dirname + fname, - full_private_path + fname, size_limit_bytes, - db_options.use_fsync); - } /* copy_file_cb */, - [&](const std::string& fname, const std::string& contents, FileType) { - ROCKS_LOG_INFO(db_options.info_log, "Creating %s", fname.c_str()); - return CreateFile(db_->GetFileSystem(), full_private_path + fname, - contents, db_options.use_fsync); - } /* create_file_cb */, - &sequence_number, log_size_for_flush); - // we copied all the files, enable file deletions - db_->EnableFileDeletions(false); + // disable file deletions + s = db_->DisableFileDeletions(); + const bool disabled_file_deletions = s.ok(); + + if (s.ok() || s.IsNotSupported()) { + s = CreateCustomCheckpoint( + db_options, + [&](const std::string& src_dirname, const std::string& fname, + FileType) { + ROCKS_LOG_INFO(db_options.info_log, "Hard Linking %s", + fname.c_str()); + return db_->GetFileSystem()->LinkFile(src_dirname + fname, + full_private_path + fname, + IOOptions(), nullptr); + } /* link_file_cb */, + [&](const std::string& src_dirname, const std::string& fname, + uint64_t size_limit_bytes, FileType, + const std::string& /* checksum_func_name */, + const std::string& /* checksum_val */) { + ROCKS_LOG_INFO(db_options.info_log, "Copying %s", fname.c_str()); + return CopyFile(db_->GetFileSystem(), src_dirname + fname, + full_private_path + fname, size_limit_bytes, + db_options.use_fsync); + } /* copy_file_cb */, + [&](const std::string& fname, const std::string& contents, 
FileType) { + ROCKS_LOG_INFO(db_options.info_log, "Creating %s", fname.c_str()); + return CreateFile(db_->GetFileSystem(), full_private_path + fname, + contents, db_options.use_fsync); + } /* create_file_cb */, + &sequence_number, log_size_for_flush); + + // we copied all the files, enable file deletions + if (disabled_file_deletions) { + Status ss = db_->EnableFileDeletions(false); + assert(ss.ok()); + ss.PermitUncheckedError(); + } + } } if (s.ok()) { @@ -144,8 +157,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir, } if (s.ok()) { std::unique_ptr checkpoint_directory; - db_->GetEnv()->NewDirectory(checkpoint_dir, &checkpoint_directory); - if (checkpoint_directory != nullptr) { + s = db_->GetEnv()->NewDirectory(checkpoint_dir, &checkpoint_directory); + if (s.ok() && checkpoint_directory != nullptr) { s = checkpoint_directory->Fsync(); } } @@ -191,68 +204,71 @@ Status CheckpointImpl::CreateCustomCheckpoint( VectorLogPtr live_wal_files; bool flush_memtable = true; - if (s.ok()) { - if (!db_options.allow_2pc) { - if (log_size_for_flush == port::kMaxUint64) { + if (!db_options.allow_2pc) { + if (log_size_for_flush == port::kMaxUint64) { + flush_memtable = false; + } else if (log_size_for_flush > 0) { + // If outstanding log files are small, we skip the flush. + s = db_->GetSortedWalFiles(live_wal_files); + + if (!s.ok()) { + return s; + } + + // Don't flush column families if total log size is smaller than + // log_size_for_flush. We copy the log files instead. + // We may be able to cover 2PC case too. + uint64_t total_wal_size = 0; + for (auto& wal : live_wal_files) { + total_wal_size += wal->SizeFileBytes(); + } + if (total_wal_size < log_size_for_flush) { flush_memtable = false; - } else if (log_size_for_flush > 0) { - // If out standing log files are small, we skip the flush.
- s = db_->GetSortedWalFiles(live_wal_files); - - if (!s.ok()) { - return s; - } - - // Don't flush column families if total log size is smaller than - // log_size_for_flush. We copy the log files instead. - // We may be able to cover 2PC case too. - uint64_t total_wal_size = 0; - for (auto& wal : live_wal_files) { - total_wal_size += wal->SizeFileBytes(); - } - if (total_wal_size < log_size_for_flush) { - flush_memtable = false; - } - live_wal_files.clear(); } + live_wal_files.clear(); } - - // this will return live_files prefixed with "/" - s = db_->GetLiveFiles(live_files, &manifest_file_size, flush_memtable); - - if (s.ok() && db_options.allow_2pc) { - // If 2PC is enabled, we need to get minimum log number after the flush. - // Need to refetch the live files to recapture the snapshot. - if (!db_->GetIntProperty(DB::Properties::kMinLogNumberToKeep, - &min_log_num)) { - return Status::InvalidArgument( - "2PC enabled but cannot fine the min log number to keep."); - } - // We need to refetch live files with flush to handle this case: - // A previous 000001.log contains the prepare record of transaction tnx1. - // The current log file is 000002.log, and sequence_number points to this - // file. - // After calling GetLiveFiles(), 000003.log is created. - // Then tnx1 is committed. The commit record is written to 000003.log. - // Now we fetch min_log_num, which will be 3. - // Then only 000002.log and 000003.log will be copied, and 000001.log will - // be skipped. 000003.log contains commit message of tnx1, but we don't - // have respective prepare record for it. - // In order to avoid this situation, we need to force flush to make sure - // all transactions committed before getting min_log_num will be flushed - // to SST files. - // We cannot get min_log_num before calling the GetLiveFiles() for the - // first time, because if we do that, all the logs files will be included, - // far more than needed. 
- s = db_->GetLiveFiles(live_files, &manifest_file_size, flush_memtable); - } - - TEST_SYNC_POINT("CheckpointImpl::CreateCheckpoint:SavedLiveFiles1"); - TEST_SYNC_POINT("CheckpointImpl::CreateCheckpoint:SavedLiveFiles2"); - db_->FlushWAL(false /* sync */); } + + // this will return live_files prefixed with "/" + s = db_->GetLiveFiles(live_files, &manifest_file_size, flush_memtable); + + if (s.ok() && db_options.allow_2pc) { + // If 2PC is enabled, we need to get minimum log number after the flush. + // Need to refetch the live files to recapture the snapshot. + if (!db_->GetIntProperty(DB::Properties::kMinLogNumberToKeep, + &min_log_num)) { + return Status::InvalidArgument( + "2PC enabled but cannot fine the min log number to keep."); + } + // We need to refetch live files with flush to handle this case: + // A previous 000001.log contains the prepare record of transaction tnx1. + // The current log file is 000002.log, and sequence_number points to this + // file. + // After calling GetLiveFiles(), 000003.log is created. + // Then tnx1 is committed. The commit record is written to 000003.log. + // Now we fetch min_log_num, which will be 3. + // Then only 000002.log and 000003.log will be copied, and 000001.log will + // be skipped. 000003.log contains commit message of tnx1, but we don't + // have respective prepare record for it. + // In order to avoid this situation, we need to force flush to make sure + // all transactions committed before getting min_log_num will be flushed + // to SST files. + // We cannot get min_log_num before calling the GetLiveFiles() for the + // first time, because if we do that, all the logs files will be included, + // far more than needed. 
+ s = db_->GetLiveFiles(live_files, &manifest_file_size, flush_memtable); + } + + TEST_SYNC_POINT("CheckpointImpl::CreateCheckpoint:SavedLiveFiles1"); + TEST_SYNC_POINT("CheckpointImpl::CreateCheckpoint:SavedLiveFiles2"); + + if (s.ok()) { + s = db_->FlushWAL(false /* sync */); + } + TEST_SYNC_POINT("CheckpointImpl::CreateCustomCheckpoint:AfterGetLive1"); TEST_SYNC_POINT("CheckpointImpl::CreateCustomCheckpoint:AfterGetLive2"); + // if we have more than one column family, we need to also get WAL files if (s.ok()) { s = db_->GetSortedWalFiles(live_wal_files); @@ -358,8 +374,8 @@ Status CheckpointImpl::CreateCustomCheckpoint( } } if (s.ok() && !current_fname.empty() && !manifest_fname.empty()) { - create_file_cb(current_fname, manifest_fname.substr(1) + "\n", - kCurrentFile); + s = create_file_cb(current_fname, manifest_fname.substr(1) + "\n", + kCurrentFile); } ROCKS_LOG_INFO(db_options.info_log, "Number of log files %" ROCKSDB_PRIszt, live_wal_files.size()); diff --git a/utilities/checkpoint/checkpoint_test.cc b/utilities/checkpoint/checkpoint_test.cc index 11c4931b45..784fb5d46c 100644 --- a/utilities/checkpoint/checkpoint_test.cc +++ b/utilities/checkpoint/checkpoint_test.cc @@ -66,12 +66,12 @@ class CheckpointTest : public testing::Test { snapshot_name_ = test::PerThreadDBPath(env_, "snapshot"); std::string snapshot_tmp_name = snapshot_name_ + ".tmp"; EXPECT_OK(DestroyDB(snapshot_name_, options)); - env_->DeleteDir(snapshot_name_); + test::DeleteDir(env_, snapshot_name_); EXPECT_OK(DestroyDB(snapshot_tmp_name, options)); - env_->DeleteDir(snapshot_tmp_name); + test::DeleteDir(env_, snapshot_tmp_name); Reopen(options); export_path_ = test::PerThreadDBPath("/export"); - DestroyDir(env_, export_path_); + DestroyDir(env_, export_path_).PermitUncheckedError(); cfh_reverse_comp_ = nullptr; metadata_ = nullptr; } @@ -96,7 +96,7 @@ class CheckpointTest : public testing::Test { options.db_paths.emplace_back(dbname_ + "_4", 0); EXPECT_OK(DestroyDB(dbname_, options)); 
EXPECT_OK(DestroyDB(snapshot_name_, options)); - DestroyDir(env_, export_path_); + DestroyDir(env_, export_path_).PermitUncheckedError(); } // Return the current option configuration. @@ -274,7 +274,6 @@ TEST_F(CheckpointTest, GetSnapshotLink) { ASSERT_OK(DestroyDB(dbname_, options)); // Create a database - Status s; options.create_if_missing = true; ASSERT_OK(DB::Open(options, dbname_, &db_)); std::string key = std::string("foo"); @@ -316,7 +315,6 @@ TEST_F(CheckpointTest, GetSnapshotLink) { TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) { // Create a database - Status s; auto options = CurrentOptions(); options.create_if_missing = true; CreateAndReopenWithCF({}, options); @@ -326,7 +324,7 @@ TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) { int num_files_expected) { ASSERT_EQ(metadata.files.size(), num_files_expected); std::vector subchildren; - env_->GetChildren(export_path_, &subchildren); + ASSERT_OK(env_->GetChildren(export_path_, &subchildren)); int num_children = 0; for (const auto& child : subchildren) { if (child != "." 
&& child != "..") { @@ -349,7 +347,7 @@ TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) { export_path_, &metadata_)); verify_files_exported(*metadata_, 1); ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name()); - DestroyDir(env_, export_path_); + ASSERT_OK(DestroyDir(env_, export_path_)); delete metadata_; metadata_ = nullptr; @@ -360,7 +358,7 @@ TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) { export_path_, &metadata_)); verify_files_exported(*metadata_, 2); ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name()); - DestroyDir(env_, export_path_); + ASSERT_OK(DestroyDir(env_, export_path_)); delete metadata_; metadata_ = nullptr; delete checkpoint; @@ -390,7 +388,6 @@ TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) { TEST_F(CheckpointTest, ExportColumnFamilyNegativeTest) { // Create a database - Status s; auto options = CurrentOptions(); options.create_if_missing = true; CreateAndReopenWithCF({}, options); @@ -402,11 +399,11 @@ TEST_F(CheckpointTest, ExportColumnFamilyNegativeTest) { ASSERT_OK(Checkpoint::Create(db_, &checkpoint)); // Export onto existing directory - env_->CreateDirIfMissing(export_path_); + ASSERT_OK(env_->CreateDirIfMissing(export_path_)); ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(), export_path_, &metadata_), Status::InvalidArgument("Specified export_dir exists")); - DestroyDir(env_, export_path_); + ASSERT_OK(DestroyDir(env_, export_path_)); // Export with invalid directory specification export_path_ = ""; @@ -437,7 +434,6 @@ TEST_F(CheckpointTest, CheckpointCF) { std::string result; std::vector cphandles; - Status s; // Take a snapshot ROCKSDB_NAMESPACE::port::Thread t([&]() { Checkpoint* checkpoint; @@ -465,7 +461,7 @@ TEST_F(CheckpointTest, CheckpointCF) { // Open snapshot and verify contents while DB is running options.create_if_missing = false; std::vector cfs; - cfs= {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"}; + cfs = {kDefaultColumnFamilyName, "one", 
"two", "three", "four", "five"}; std::vector column_families; for (size_t i = 0; i < cfs.size(); ++i) { column_families.push_back(ColumnFamilyDescriptor(cfs[i], options)); @@ -493,7 +489,7 @@ TEST_F(CheckpointTest, CheckpointCFNoFlush) { ASSERT_OK(Put(0, "Default", "Default")); ASSERT_OK(Put(1, "one", "one")); - Flush(); + ASSERT_OK(Flush()); ASSERT_OK(Put(2, "two", "two")); DB* snapshotDB; @@ -501,7 +497,6 @@ TEST_F(CheckpointTest, CheckpointCFNoFlush) { std::string result; std::vector cphandles; - Status s; // Take a snapshot ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( "DBImpl::BackgroundCallFlush:start", [&](void* /*arg*/) { @@ -590,7 +585,7 @@ TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) { Close(); const std::string dbname = test::PerThreadDBPath("transaction_testdb"); ASSERT_OK(DestroyDB(dbname, CurrentOptions())); - env_->DeleteDir(dbname); + test::DeleteDir(env_, dbname); Options options = CurrentOptions(); options.allow_2pc = true; @@ -599,7 +594,7 @@ TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) { TransactionDBOptions txn_db_options; TransactionDB* txdb; Status s = TransactionDB::Open(options, txn_db_options, dbname, &txdb); - assert(s.ok()); + ASSERT_OK(s); ColumnFamilyHandle* cfa; ColumnFamilyHandle* cfb; ColumnFamilyOptions cf_options; @@ -620,6 +615,7 @@ TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) { ASSERT_EQ(txdb->GetTransactionByName("xid"), txn); s = txn->Put(Slice("foo"), Slice("bar")); + ASSERT_OK(s); s = txn->Put(cfa, Slice("foocfa"), Slice("barcfa")); ASSERT_OK(s); // Writing prepare into middle of first WAL, then flush WALs many times @@ -631,7 +627,7 @@ TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) { ASSERT_OK(tx->Prepare()); ASSERT_OK(tx->Commit()); if (i % 10000 == 0) { - txdb->Flush(FlushOptions()); + ASSERT_OK(txdb->Flush(FlushOptions())); } if (i == 88888) { ASSERT_OK(txn->Prepare()); @@ -662,7 +658,7 @@ TEST_F(CheckpointTest, 
CurrentFileModifiedWhileCheckpointing2PC) { // No more than two logs files should exist. std::vector files; - env_->GetChildren(snapshot_name_, &files); + ASSERT_OK(env_->GetChildren(snapshot_name_, &files)); int num_log_files = 0; for (auto& file : files) { uint64_t num; @@ -733,7 +729,7 @@ TEST_F(CheckpointTest, CheckpointWithUnsyncedDataDropped) { ASSERT_OK(Checkpoint::Create(db_, &checkpoint)); ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_)); delete checkpoint; - env->DropUnsyncedFileData(); + ASSERT_OK(env->DropUnsyncedFileData()); // make sure it's openable even though whatever data that wasn't synced got // dropped. diff --git a/utilities/fault_injection_env.cc b/utilities/fault_injection_env.cc index 668daa04a3..ab1c5b9a08 100644 --- a/utilities/fault_injection_env.cc +++ b/utilities/fault_injection_env.cc @@ -217,7 +217,7 @@ TestRandomRWFile::TestRandomRWFile(const std::string& /*fname*/, TestRandomRWFile::~TestRandomRWFile() { if (file_opened_) { - Close(); + Close().PermitUncheckedError(); } } diff --git a/utilities/transactions/lock/point/point_lock_manager.cc b/utilities/transactions/lock/point/point_lock_manager.cc index 7244fdbbe8..79954d8f05 100644 --- a/utilities/transactions/lock/point/point_lock_manager.cc +++ b/utilities/transactions/lock/point/point_lock_manager.cc @@ -432,7 +432,14 @@ bool PointLockManager::IncrementWaiters( extracted_info.m_waiting_key}); head = queue_parents[head]; } - env->GetCurrentTime(&deadlock_time); + if (!env->GetCurrentTime(&deadlock_time).ok()) { + /* + TODO(AR) this preserves the current behaviour whilst checking the + status of env->GetCurrentTime to ensure that ASSERT_STATUS_CHECKED + passes. Should we instead raise an error if !ok() ? + */ + deadlock_time = 0; + } std::reverse(path.begin(), path.end()); dlock_buffer_.AddNewPath(DeadlockPath(path, deadlock_time)); deadlock_time = 0; @@ -448,7 +455,14 @@ bool PointLockManager::IncrementWaiters( } // Wait cycle too big, just assume deadlock. 
- env->GetCurrentTime(&deadlock_time); + if (!env->GetCurrentTime(&deadlock_time).ok()) { + /* + TODO(AR) this preserves the current behaviour whilst checking the status + of env->GetCurrentTime to ensure that ASSERT_STATUS_CHECKED passes. + Should we instead raise an error if !ok() ? + */ + deadlock_time = 0; + } dlock_buffer_.AddNewPath(DeadlockPath(deadlock_time, true)); DecrementWaitersImpl(txn, wait_ids); return true; diff --git a/utilities/transactions/optimistic_transaction.cc b/utilities/transactions/optimistic_transaction.cc index fd70927f66..0ee0f28b67 100644 --- a/utilities/transactions/optimistic_transaction.cc +++ b/utilities/transactions/optimistic_transaction.cc @@ -177,8 +177,6 @@ Status OptimisticTransaction::TryLock(ColumnFamilyHandle* column_family, // Should only be called on writer thread in order to avoid any race conditions // in detecting write conflicts. Status OptimisticTransaction::CheckTransactionForConflicts(DB* db) { - Status result; - auto db_impl = static_cast_with_check(db); // Since we are on the write thread and do not want to block other writers, diff --git a/utilities/transactions/optimistic_transaction_test.cc b/utilities/transactions/optimistic_transaction_test.cc index e04aaa8335..ad27bd9643 100644 --- a/utilities/transactions/optimistic_transaction_test.cc +++ b/utilities/transactions/optimistic_transaction_test.cc @@ -68,9 +68,9 @@ private: OptimisticTransactionDB::Open(DBOptions(options), occ_opts, dbname, column_families, &handles, &txn_db); - assert(s.ok()); - assert(txn_db != nullptr); - assert(handles.size() == 1); + ASSERT_OK(s); + ASSERT_NE(txn_db, nullptr); + ASSERT_EQ(handles.size(), 1); delete handles[0]; } }; @@ -79,26 +79,24 @@ TEST_P(OptimisticTransactionTest, SuccessTest) { WriteOptions write_options; ReadOptions read_options; string value; - Status s; - txn_db->Put(write_options, Slice("foo"), Slice("bar")); - txn_db->Put(write_options, Slice("foo2"), Slice("bar")); + ASSERT_OK(txn_db->Put(write_options, 
Slice("foo"), Slice("bar"))); + ASSERT_OK(txn_db->Put(write_options, Slice("foo2"), Slice("bar"))); Transaction* txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); - txn->GetForUpdate(read_options, "foo", &value); + ASSERT_OK(txn->GetForUpdate(read_options, "foo", &value)); ASSERT_EQ(value, "bar"); - txn->Put(Slice("foo"), Slice("bar2")); + ASSERT_OK(txn->Put(Slice("foo"), Slice("bar2"))); - txn->GetForUpdate(read_options, "foo", &value); + ASSERT_OK(txn->GetForUpdate(read_options, "foo", &value)); ASSERT_EQ(value, "bar2"); - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); - txn_db->Get(read_options, "foo", &value); + ASSERT_OK(txn_db->Get(read_options, "foo", &value)); ASSERT_EQ(value, "bar2"); delete txn; @@ -108,31 +106,29 @@ TEST_P(OptimisticTransactionTest, WriteConflictTest) { WriteOptions write_options; ReadOptions read_options; string value; - Status s; - txn_db->Put(write_options, "foo", "bar"); - txn_db->Put(write_options, "foo2", "bar"); + ASSERT_OK(txn_db->Put(write_options, "foo", "bar")); + ASSERT_OK(txn_db->Put(write_options, "foo2", "bar")); Transaction* txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); - txn->Put("foo", "bar2"); + ASSERT_OK(txn->Put("foo", "bar2")); // This Put outside of a transaction will conflict with the previous write - s = txn_db->Put(write_options, "foo", "barz"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "foo", "barz")); - s = txn_db->Get(read_options, "foo", &value); + ASSERT_OK(txn_db->Get(read_options, "foo", &value)); ASSERT_EQ(value, "barz"); ASSERT_EQ(1, txn->GetNumKeys()); - s = txn->Commit(); + Status s = txn->Commit(); ASSERT_TRUE(s.IsBusy()); // Txn should not commit // Verify that transaction did not write anything - txn_db->Get(read_options, "foo", &value); + ASSERT_OK(txn_db->Get(read_options, "foo", &value)); ASSERT_EQ(value, "barz"); - txn_db->Get(read_options, "foo2", &value); + 
ASSERT_OK(txn_db->Get(read_options, "foo2", &value)); ASSERT_EQ(value, "bar"); delete txn; @@ -143,31 +139,30 @@ TEST_P(OptimisticTransactionTest, WriteConflictTest2) { ReadOptions read_options; OptimisticTransactionOptions txn_options; string value; - Status s; - txn_db->Put(write_options, "foo", "bar"); - txn_db->Put(write_options, "foo2", "bar"); + ASSERT_OK(txn_db->Put(write_options, "foo", "bar")); + ASSERT_OK(txn_db->Put(write_options, "foo2", "bar")); txn_options.set_snapshot = true; Transaction* txn = txn_db->BeginTransaction(write_options, txn_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); // This Put outside of a transaction will conflict with a later write - s = txn_db->Put(write_options, "foo", "barz"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "foo", "barz")); - txn->Put("foo", "bar2"); // Conflicts with write done after snapshot taken + ASSERT_OK(txn->Put( + "foo", "bar2")); // Conflicts with write done after snapshot taken - s = txn_db->Get(read_options, "foo", &value); + ASSERT_OK(txn_db->Get(read_options, "foo", &value)); ASSERT_EQ(value, "barz"); - s = txn->Commit(); + Status s = txn->Commit(); ASSERT_TRUE(s.IsBusy()); // Txn should not commit // Verify that transaction did not write anything - txn_db->Get(read_options, "foo", &value); + ASSERT_OK(txn_db->Get(read_options, "foo", &value)); ASSERT_EQ(value, "barz"); - txn_db->Get(read_options, "foo2", &value); + ASSERT_OK(txn_db->Get(read_options, "foo2", &value)); ASSERT_EQ(value, "bar"); delete txn; @@ -178,35 +173,33 @@ TEST_P(OptimisticTransactionTest, ReadConflictTest) { ReadOptions read_options, snapshot_read_options; OptimisticTransactionOptions txn_options; string value; - Status s; - txn_db->Put(write_options, "foo", "bar"); - txn_db->Put(write_options, "foo2", "bar"); + ASSERT_OK(txn_db->Put(write_options, "foo", "bar")); + ASSERT_OK(txn_db->Put(write_options, "foo2", "bar")); txn_options.set_snapshot = true; Transaction* txn = 
txn_db->BeginTransaction(write_options, txn_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); txn->SetSnapshot(); snapshot_read_options.snapshot = txn->GetSnapshot(); - txn->GetForUpdate(snapshot_read_options, "foo", &value); + ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value)); ASSERT_EQ(value, "bar"); // This Put outside of a transaction will conflict with the previous read - s = txn_db->Put(write_options, "foo", "barz"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "foo", "barz")); - s = txn_db->Get(read_options, "foo", &value); + ASSERT_OK(txn_db->Get(read_options, "foo", &value)); ASSERT_EQ(value, "barz"); - s = txn->Commit(); + Status s = txn->Commit(); ASSERT_TRUE(s.IsBusy()); // Txn should not commit // Verify that transaction did not write anything - txn->GetForUpdate(read_options, "foo", &value); + ASSERT_OK(txn->GetForUpdate(read_options, "foo", &value)); ASSERT_EQ(value, "barz"); - txn->GetForUpdate(read_options, "foo2", &value); + ASSERT_OK(txn->GetForUpdate(read_options, "foo2", &value)); ASSERT_EQ(value, "bar"); delete txn; @@ -219,15 +212,13 @@ TEST_P(OptimisticTransactionTest, TxnOnlyTest) { WriteOptions write_options; ReadOptions read_options; string value; - Status s; Transaction* txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); - txn->Put("x", "y"); + ASSERT_OK(txn->Put("x", "y")); - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); delete txn; } @@ -236,37 +227,34 @@ TEST_P(OptimisticTransactionTest, FlushTest) { WriteOptions write_options; ReadOptions read_options, snapshot_read_options; string value; - Status s; - txn_db->Put(write_options, Slice("foo"), Slice("bar")); - txn_db->Put(write_options, Slice("foo2"), Slice("bar")); + ASSERT_OK(txn_db->Put(write_options, Slice("foo"), Slice("bar"))); + ASSERT_OK(txn_db->Put(write_options, Slice("foo2"), Slice("bar"))); Transaction* txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); + 
ASSERT_NE(txn, nullptr); snapshot_read_options.snapshot = txn->GetSnapshot(); - txn->GetForUpdate(snapshot_read_options, "foo", &value); + ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value)); ASSERT_EQ(value, "bar"); - txn->Put(Slice("foo"), Slice("bar2")); + ASSERT_OK(txn->Put(Slice("foo"), Slice("bar2"))); - txn->GetForUpdate(snapshot_read_options, "foo", &value); + ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value)); ASSERT_EQ(value, "bar2"); // Put a random key so we have a memtable to flush - s = txn_db->Put(write_options, "dummy", "dummy"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "dummy", "dummy")); // force a memtable flush FlushOptions flush_ops; - txn_db->Flush(flush_ops); + ASSERT_OK(txn_db->Flush(flush_ops)); - s = txn->Commit(); // txn should commit since the flushed table is still in MemtableList History - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); - txn_db->Get(read_options, "foo", &value); + ASSERT_OK(txn_db->Get(read_options, "foo", &value)); ASSERT_EQ(value, "bar2"); delete txn; @@ -276,52 +264,48 @@ TEST_P(OptimisticTransactionTest, FlushTest2) { WriteOptions write_options; ReadOptions read_options, snapshot_read_options; string value; - Status s; - txn_db->Put(write_options, Slice("foo"), Slice("bar")); - txn_db->Put(write_options, Slice("foo2"), Slice("bar")); + ASSERT_OK(txn_db->Put(write_options, Slice("foo"), Slice("bar"))); + ASSERT_OK(txn_db->Put(write_options, Slice("foo2"), Slice("bar"))); Transaction* txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); snapshot_read_options.snapshot = txn->GetSnapshot(); - txn->GetForUpdate(snapshot_read_options, "foo", &value); + ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value)); ASSERT_EQ(value, "bar"); - txn->Put(Slice("foo"), Slice("bar2")); + ASSERT_OK(txn->Put(Slice("foo"), Slice("bar2"))); - txn->GetForUpdate(snapshot_read_options, "foo", &value); + 
ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "foo", &value)); ASSERT_EQ(value, "bar2"); // Put a random key so we have a MemTable to flush - s = txn_db->Put(write_options, "dummy", "dummy"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "dummy", "dummy")); // force a memtable flush FlushOptions flush_ops; - txn_db->Flush(flush_ops); + ASSERT_OK(txn_db->Flush(flush_ops)); // Put a random key so we have a MemTable to flush - s = txn_db->Put(write_options, "dummy", "dummy2"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "dummy", "dummy2")); // force a memtable flush - txn_db->Flush(flush_ops); + ASSERT_OK(txn_db->Flush(flush_ops)); - s = txn_db->Put(write_options, "dummy", "dummy3"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "dummy", "dummy3")); // force a memtable flush // Since our test db has max_write_buffer_number=2, this flush will cause // the first memtable to get purged from the MemtableList history. - txn_db->Flush(flush_ops); + ASSERT_OK(txn_db->Flush(flush_ops)); - s = txn->Commit(); + Status s = txn->Commit(); // txn should not commit since MemTableList History is not large enough ASSERT_TRUE(s.IsTryAgain()); - txn_db->Get(read_options, "foo", &value); + ASSERT_OK(txn_db->Get(read_options, "foo", &value)); ASSERT_EQ(value, "bar"); delete txn; @@ -342,7 +326,6 @@ TEST_P(OptimisticTransactionTest, CheckKeySkipOldMemtable) { ReadOptions snapshot_read_options; ReadOptions snapshot_read_options2; string value; - Status s; ASSERT_OK(txn_db->Put(write_options, Slice("foo"), Slice("bar"))); ASSERT_OK(txn_db->Put(write_options, Slice("foo2"), Slice("bar"))); @@ -382,9 +365,9 @@ TEST_P(OptimisticTransactionTest, CheckKeySkipOldMemtable) { if (attempt == kAttemptHistoryMemtable) { ASSERT_OK(txn_db->Flush(flush_ops)); } else { - assert(attempt == kAttemptImmMemTable); + ASSERT_EQ(attempt, kAttemptImmMemTable); DBImpl* db_impl = static_cast(txn_db->GetRootDB()); - db_impl->TEST_SwitchMemtable(); + 
ASSERT_OK(db_impl->TEST_SwitchMemtable()); } uint64_t num_imm_mems; ASSERT_TRUE(txn_db->GetIntProperty(DB::Properties::kNumImmutableMemTable, @@ -392,7 +375,7 @@ TEST_P(OptimisticTransactionTest, CheckKeySkipOldMemtable) { if (attempt == kAttemptHistoryMemtable) { ASSERT_EQ(0, num_imm_mems); } else { - assert(attempt == kAttemptImmMemTable); + ASSERT_EQ(attempt, kAttemptImmMemTable); ASSERT_EQ(1, num_imm_mems); } @@ -409,7 +392,7 @@ TEST_P(OptimisticTransactionTest, CheckKeySkipOldMemtable) { SetPerfLevel(PerfLevel::kEnableCount); get_perf_context()->Reset(); - s = txn->Commit(); + Status s = txn->Commit(); // We should have checked two memtables ASSERT_EQ(2, get_perf_context()->get_from_memtable_count); // txn should fail because of conflict, even if the memtable @@ -422,7 +405,7 @@ TEST_P(OptimisticTransactionTest, CheckKeySkipOldMemtable) { ASSERT_EQ(2, get_perf_context()->get_from_memtable_count); ASSERT_TRUE(s.ok()); - txn3->Put(Slice("foo2"), Slice("bar2")); + ASSERT_OK(txn3->Put(Slice("foo2"), Slice("bar2"))); get_perf_context()->Reset(); s = txn3->Commit(); // txn3 is created after the active memtable is created, so that is the only @@ -445,26 +428,24 @@ TEST_P(OptimisticTransactionTest, NoSnapshotTest) { WriteOptions write_options; ReadOptions read_options; string value; - Status s; - txn_db->Put(write_options, "AAA", "bar"); + ASSERT_OK(txn_db->Put(write_options, "AAA", "bar")); Transaction* txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); // Modify key after transaction start - txn_db->Put(write_options, "AAA", "bar1"); + ASSERT_OK(txn_db->Put(write_options, "AAA", "bar1")); // Read and write without a snapshot - txn->GetForUpdate(read_options, "AAA", &value); + ASSERT_OK(txn->GetForUpdate(read_options, "AAA", &value)); ASSERT_EQ(value, "bar1"); - txn->Put("AAA", "bar2"); + ASSERT_OK(txn->Put("AAA", "bar2")); // Should commit since read/write was done after data changed - s = txn->Commit(); - ASSERT_OK(s); + 
ASSERT_OK(txn->Commit()); - txn->GetForUpdate(read_options, "AAA", &value); + ASSERT_OK(txn->GetForUpdate(read_options, "AAA", &value)); ASSERT_EQ(value, "bar2"); delete txn; @@ -474,75 +455,64 @@ TEST_P(OptimisticTransactionTest, MultipleSnapshotTest) { WriteOptions write_options; ReadOptions read_options, snapshot_read_options; string value; - Status s; - txn_db->Put(write_options, "AAA", "bar"); - txn_db->Put(write_options, "BBB", "bar"); - txn_db->Put(write_options, "CCC", "bar"); + ASSERT_OK(txn_db->Put(write_options, "AAA", "bar")); + ASSERT_OK(txn_db->Put(write_options, "BBB", "bar")); + ASSERT_OK(txn_db->Put(write_options, "CCC", "bar")); Transaction* txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); - txn_db->Put(write_options, "AAA", "bar1"); + ASSERT_OK(txn_db->Put(write_options, "AAA", "bar1")); // Read and write without a snapshot - txn->GetForUpdate(read_options, "AAA", &value); + ASSERT_OK(txn->GetForUpdate(read_options, "AAA", &value)); ASSERT_EQ(value, "bar1"); - txn->Put("AAA", "bar2"); + ASSERT_OK(txn->Put("AAA", "bar2")); // Modify BBB before snapshot is taken - txn_db->Put(write_options, "BBB", "bar1"); + ASSERT_OK(txn_db->Put(write_options, "BBB", "bar1")); txn->SetSnapshot(); snapshot_read_options.snapshot = txn->GetSnapshot(); // Read and write with snapshot - txn->GetForUpdate(snapshot_read_options, "BBB", &value); + ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "BBB", &value)); ASSERT_EQ(value, "bar1"); - txn->Put("BBB", "bar2"); + ASSERT_OK(txn->Put("BBB", "bar2")); - txn_db->Put(write_options, "CCC", "bar1"); + ASSERT_OK(txn_db->Put(write_options, "CCC", "bar1")); // Set a new snapshot txn->SetSnapshot(); snapshot_read_options.snapshot = txn->GetSnapshot(); // Read and write with snapshot - txn->GetForUpdate(snapshot_read_options, "CCC", &value); + ASSERT_OK(txn->GetForUpdate(snapshot_read_options, "CCC", &value)); ASSERT_EQ(value, "bar1"); - txn->Put("CCC", "bar2"); + 
ASSERT_OK(txn->Put("CCC", "bar2")); - s = txn->GetForUpdate(read_options, "AAA", &value); - ASSERT_OK(s); + ASSERT_OK(txn->GetForUpdate(read_options, "AAA", &value)); ASSERT_EQ(value, "bar2"); - s = txn->GetForUpdate(read_options, "BBB", &value); - ASSERT_OK(s); + ASSERT_OK(txn->GetForUpdate(read_options, "BBB", &value)); ASSERT_EQ(value, "bar2"); - s = txn->GetForUpdate(read_options, "CCC", &value); - ASSERT_OK(s); + ASSERT_OK(txn->GetForUpdate(read_options, "CCC", &value)); ASSERT_EQ(value, "bar2"); - s = txn_db->Get(read_options, "AAA", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "AAA", &value)); ASSERT_EQ(value, "bar1"); - s = txn_db->Get(read_options, "BBB", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "BBB", &value)); ASSERT_EQ(value, "bar1"); - s = txn_db->Get(read_options, "CCC", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "CCC", &value)); ASSERT_EQ(value, "bar1"); - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); - s = txn_db->Get(read_options, "AAA", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "AAA", &value)); ASSERT_EQ(value, "bar2"); - s = txn_db->Get(read_options, "BBB", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "BBB", &value)); ASSERT_EQ(value, "bar2"); - s = txn_db->Get(read_options, "CCC", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "CCC", &value)); ASSERT_EQ(value, "bar2"); // verify that we track multiple writes to the same key at different snapshots @@ -550,8 +520,8 @@ TEST_P(OptimisticTransactionTest, MultipleSnapshotTest) { txn = txn_db->BeginTransaction(write_options); // Potentially conflicting writes - txn_db->Put(write_options, "ZZZ", "zzz"); - txn_db->Put(write_options, "XXX", "xxx"); + ASSERT_OK(txn_db->Put(write_options, "ZZZ", "zzz")); + ASSERT_OK(txn_db->Put(write_options, "XXX", "xxx")); txn->SetSnapshot(); @@ -562,16 +532,15 @@ TEST_P(OptimisticTransactionTest, MultipleSnapshotTest) { // This should not 
conflict in txn since the snapshot is later than the // previous write (spoiler alert: it will later conflict with txn2). - txn->Put("ZZZ", "zzzz"); - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Put("ZZZ", "zzzz")); + ASSERT_OK(txn->Commit()); delete txn; // This will conflict since the snapshot is earlier than another write to ZZZ - txn2->Put("ZZZ", "xxxxx"); + ASSERT_OK(txn2->Put("ZZZ", "xxxxx")); - s = txn2->Commit(); + Status s = txn2->Commit(); ASSERT_TRUE(s.IsBusy()); delete txn2; @@ -582,16 +551,13 @@ TEST_P(OptimisticTransactionTest, ColumnFamiliesTest) { ReadOptions read_options, snapshot_read_options; OptimisticTransactionOptions txn_options; string value; - Status s; ColumnFamilyHandle *cfa, *cfb; ColumnFamilyOptions cf_options; // Create 2 new column families - s = txn_db->CreateColumnFamily(cf_options, "CFA", &cfa); - ASSERT_OK(s); - s = txn_db->CreateColumnFamily(cf_options, "CFB", &cfb); - ASSERT_OK(s); + ASSERT_OK(txn_db->CreateColumnFamily(cf_options, "CFA", &cfa)); + ASSERT_OK(txn_db->CreateColumnFamily(cf_options, "CFB", &cfb)); delete cfa; delete cfb; @@ -609,13 +575,13 @@ TEST_P(OptimisticTransactionTest, ColumnFamiliesTest) { column_families.push_back( ColumnFamilyDescriptor("CFB", ColumnFamilyOptions())); std::vector handles; - s = OptimisticTransactionDB::Open(options, dbname, column_families, &handles, - &txn_db); - ASSERT_OK(s); + ASSERT_OK(OptimisticTransactionDB::Open(options, dbname, column_families, + &handles, &txn_db)); assert(txn_db != nullptr); + ASSERT_NE(txn_db, nullptr); Transaction* txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); txn->SetSnapshot(); snapshot_read_options.snapshot = txn->GetSnapshot(); @@ -626,26 +592,27 @@ TEST_P(OptimisticTransactionTest, ColumnFamiliesTest) { // Write some data to the db WriteBatch batch; - batch.Put("foo", "foo"); - batch.Put(handles[1], "AAA", "bar"); - batch.Put(handles[1], "AAAZZZ", "bar"); - s = txn_db->Write(write_options, &batch); - 
ASSERT_OK(s); - txn_db->Delete(write_options, handles[1], "AAAZZZ"); + ASSERT_OK(batch.Put("foo", "foo")); + ASSERT_OK(batch.Put(handles[1], "AAA", "bar")); + ASSERT_OK(batch.Put(handles[1], "AAAZZZ", "bar")); + ASSERT_OK(txn_db->Write(write_options, &batch)); + ASSERT_OK(txn_db->Delete(write_options, handles[1], "AAAZZZ")); // These keys do no conflict with existing writes since they're in // different column families - txn->Delete("AAA"); - txn->GetForUpdate(snapshot_read_options, handles[1], "foo", &value); + ASSERT_OK(txn->Delete("AAA")); + Status s = + txn->GetForUpdate(snapshot_read_options, handles[1], "foo", &value); + ASSERT_TRUE(s.IsNotFound()); Slice key_slice("AAAZZZ"); Slice value_slices[2] = {Slice("bar"), Slice("bar")}; - txn->Put(handles[2], SliceParts(&key_slice, 1), SliceParts(value_slices, 2)); + ASSERT_OK(txn->Put(handles[2], SliceParts(&key_slice, 1), + SliceParts(value_slices, 2))); ASSERT_EQ(3, txn->GetNumKeys()); // Txn should commit - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); s = txn_db->Get(read_options, "AAA", &value); ASSERT_TRUE(s.IsNotFound()); s = txn_db->Get(read_options, handles[2], "AAAZZZ", &value); @@ -654,10 +621,11 @@ TEST_P(OptimisticTransactionTest, ColumnFamiliesTest) { Slice key_slices[3] = {Slice("AAA"), Slice("ZZ"), Slice("Z")}; Slice value_slice("barbarbar"); // This write will cause a conflict with the earlier batch write - txn2->Put(handles[1], SliceParts(key_slices, 3), SliceParts(&value_slice, 1)); + ASSERT_OK(txn2->Put(handles[1], SliceParts(key_slices, 3), + SliceParts(&value_slice, 1))); - txn2->Delete(handles[2], "XXX"); - txn2->Delete(handles[1], "XXX"); + ASSERT_OK(txn2->Delete(handles[2], "XXX")); + ASSERT_OK(txn2->Delete(handles[1], "XXX")); s = txn2->GetForUpdate(snapshot_read_options, handles[1], "AAA", &value); ASSERT_TRUE(s.IsNotFound()); @@ -665,6 +633,7 @@ TEST_P(OptimisticTransactionTest, ColumnFamiliesTest) { s = txn2->Commit(); ASSERT_TRUE(s.IsBusy()); s = 
txn_db->Get(read_options, handles[1], "AAAZZZ", &value); + ASSERT_TRUE(s.IsNotFound()); ASSERT_EQ(value, "barbar"); delete txn; @@ -674,7 +643,7 @@ TEST_P(OptimisticTransactionTest, ColumnFamiliesTest) { snapshot_read_options.snapshot = txn->GetSnapshot(); txn2 = txn_db->BeginTransaction(write_options, txn_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); std::vector multiget_cfh = {handles[1], handles[2], handles[0], handles[2]}; @@ -691,22 +660,21 @@ TEST_P(OptimisticTransactionTest, ColumnFamiliesTest) { ASSERT_EQ(values[1], "barbar"); ASSERT_EQ(values[2], "foo"); - txn->Delete(handles[2], "ZZZ"); - txn->Put(handles[2], "ZZZ", "YYY"); - txn->Put(handles[2], "ZZZ", "YYYY"); - txn->Delete(handles[2], "ZZZ"); - txn->Put(handles[2], "AAAZZZ", "barbarbar"); + ASSERT_OK(txn->Delete(handles[2], "ZZZ")); + ASSERT_OK(txn->Put(handles[2], "ZZZ", "YYY")); + ASSERT_OK(txn->Put(handles[2], "ZZZ", "YYYY")); + ASSERT_OK(txn->Delete(handles[2], "ZZZ")); + ASSERT_OK(txn->Put(handles[2], "AAAZZZ", "barbarbar")); ASSERT_EQ(5, txn->GetNumKeys()); // Txn should commit - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); s = txn_db->Get(read_options, handles[2], "ZZZ", &value); ASSERT_TRUE(s.IsNotFound()); // Put a key which will conflict with the next txn using the previous snapshot - txn_db->Put(write_options, handles[2], "foo", "000"); + ASSERT_OK(txn_db->Put(write_options, handles[2], "foo", "000")); results = txn2->MultiGetForUpdate(snapshot_read_options, multiget_cfh, multiget_keys, &values); @@ -739,35 +707,31 @@ TEST_P(OptimisticTransactionTest, EmptyTest) { WriteOptions write_options; ReadOptions read_options; string value; - Status s; - s = txn_db->Put(write_options, "aaa", "aaa"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "aaa", "aaa")); Transaction* txn = txn_db->BeginTransaction(write_options); - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); delete txn; txn = txn_db->BeginTransaction(write_options); - txn->Rollback(); + 
ASSERT_OK(txn->Rollback()); delete txn; txn = txn_db->BeginTransaction(write_options); - s = txn->GetForUpdate(read_options, "aaa", &value); + ASSERT_OK(txn->GetForUpdate(read_options, "aaa", &value)); ASSERT_EQ(value, "aaa"); - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); delete txn; txn = txn_db->BeginTransaction(write_options); txn->SetSnapshot(); - s = txn->GetForUpdate(read_options, "aaa", &value); + ASSERT_OK(txn->GetForUpdate(read_options, "aaa", &value)); ASSERT_EQ(value, "aaa"); - s = txn_db->Put(write_options, "aaa", "xxx"); - s = txn->Commit(); + ASSERT_OK(txn_db->Put(write_options, "aaa", "xxx")); + Status s = txn->Commit(); ASSERT_TRUE(s.IsBusy()); delete txn; } @@ -777,7 +741,6 @@ TEST_P(OptimisticTransactionTest, PredicateManyPreceders) { ReadOptions read_options1, read_options2; OptimisticTransactionOptions txn_options; string value; - Status s; txn_options.set_snapshot = true; Transaction* txn1 = txn_db->BeginTransaction(write_options, txn_options); @@ -792,20 +755,23 @@ TEST_P(OptimisticTransactionTest, PredicateManyPreceders) { std::vector results = txn1->MultiGetForUpdate(read_options1, multiget_keys, &multiget_values); + ASSERT_TRUE(results[0].IsNotFound()); ASSERT_TRUE(results[1].IsNotFound()); + ASSERT_TRUE(results[2].IsNotFound()); - txn2->Put("2", "x"); + ASSERT_OK(txn2->Put("2", "x")); - s = txn2->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn2->Commit()); multiget_values.clear(); results = txn1->MultiGetForUpdate(read_options1, multiget_keys, &multiget_values); + ASSERT_TRUE(results[0].IsNotFound()); ASSERT_TRUE(results[1].IsNotFound()); + ASSERT_TRUE(results[2].IsNotFound()); // should not commit since txn2 wrote a key txn has read - s = txn1->Commit(); + Status s = txn1->Commit(); ASSERT_TRUE(s.IsBusy()); delete txn1; @@ -817,13 +783,12 @@ TEST_P(OptimisticTransactionTest, PredicateManyPreceders) { txn2 = txn_db->BeginTransaction(write_options, txn_options); read_options2.snapshot = txn2->GetSnapshot(); - txn1->Put("4", 
"x"); + ASSERT_OK(txn1->Put("4", "x")); - txn2->Delete("4"); + ASSERT_OK(txn2->Delete("4")); // txn1 can commit since txn2's delete hasn't happened yet (it's just batched) - s = txn1->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn1->Commit()); s = txn2->GetForUpdate(read_options2, "4", &value); ASSERT_TRUE(s.IsNotFound()); @@ -841,7 +806,6 @@ TEST_P(OptimisticTransactionTest, LostUpdate) { ReadOptions read_options, read_options1, read_options2; OptimisticTransactionOptions txn_options; string value; - Status s; // Test 2 transactions writing to the same key in multiple orders and // with/without snapshots @@ -849,13 +813,12 @@ TEST_P(OptimisticTransactionTest, LostUpdate) { Transaction* txn1 = txn_db->BeginTransaction(write_options); Transaction* txn2 = txn_db->BeginTransaction(write_options); - txn1->Put("1", "1"); - txn2->Put("1", "2"); + ASSERT_OK(txn1->Put("1", "1")); + ASSERT_OK(txn2->Put("1", "2")); - s = txn1->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn1->Commit()); - s = txn2->Commit(); + Status s = txn2->Commit(); ASSERT_TRUE(s.IsBusy()); delete txn1; @@ -868,11 +831,10 @@ TEST_P(OptimisticTransactionTest, LostUpdate) { txn2 = txn_db->BeginTransaction(write_options, txn_options); read_options2.snapshot = txn2->GetSnapshot(); - txn1->Put("1", "3"); - txn2->Put("1", "4"); + ASSERT_OK(txn1->Put("1", "3")); + ASSERT_OK(txn2->Put("1", "4")); - s = txn1->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn1->Commit()); s = txn2->Commit(); ASSERT_TRUE(s.IsBusy()); @@ -886,11 +848,10 @@ TEST_P(OptimisticTransactionTest, LostUpdate) { txn2 = txn_db->BeginTransaction(write_options, txn_options); read_options2.snapshot = txn2->GetSnapshot(); - txn1->Put("1", "5"); - s = txn1->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn1->Put("1", "5")); + ASSERT_OK(txn1->Commit()); - txn2->Put("1", "6"); + ASSERT_OK(txn2->Put("1", "6")); s = txn2->Commit(); ASSERT_TRUE(s.IsBusy()); @@ -903,14 +864,12 @@ TEST_P(OptimisticTransactionTest, LostUpdate) { txn2 = txn_db->BeginTransaction(write_options, 
txn_options); read_options2.snapshot = txn2->GetSnapshot(); - txn1->Put("1", "5"); - s = txn1->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn1->Put("1", "5")); + ASSERT_OK(txn1->Commit()); txn2->SetSnapshot(); - txn2->Put("1", "6"); - s = txn2->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn2->Put("1", "6")); + ASSERT_OK(txn2->Commit()); delete txn1; delete txn2; @@ -918,19 +877,16 @@ TEST_P(OptimisticTransactionTest, LostUpdate) { txn1 = txn_db->BeginTransaction(write_options); txn2 = txn_db->BeginTransaction(write_options); - txn1->Put("1", "7"); - s = txn1->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn1->Put("1", "7")); + ASSERT_OK(txn1->Commit()); - txn2->Put("1", "8"); - s = txn2->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn2->Put("1", "8")); + ASSERT_OK(txn2->Commit()); delete txn1; delete txn2; - s = txn_db->Get(read_options, "1", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "1", &value)); ASSERT_EQ(value, "8"); } @@ -942,26 +898,24 @@ TEST_P(OptimisticTransactionTest, UntrackedWrites) { // Verify transaction rollback works for untracked keys. Transaction* txn = txn_db->BeginTransaction(write_options); - txn->PutUntracked("untracked", "0"); - txn->Rollback(); + ASSERT_OK(txn->PutUntracked("untracked", "0")); + ASSERT_OK(txn->Rollback()); s = txn_db->Get(read_options, "untracked", &value); ASSERT_TRUE(s.IsNotFound()); delete txn; txn = txn_db->BeginTransaction(write_options); - txn->Put("tracked", "1"); - txn->PutUntracked("untracked", "1"); - txn->MergeUntracked("untracked", "2"); - txn->DeleteUntracked("untracked"); + ASSERT_OK(txn->Put("tracked", "1")); + ASSERT_OK(txn->PutUntracked("untracked", "1")); + ASSERT_OK(txn->MergeUntracked("untracked", "2")); + ASSERT_OK(txn->DeleteUntracked("untracked")); // Write to the untracked key outside of the transaction and verify // it doesn't prevent the transaction from committing. 
- s = txn_db->Put(write_options, "untracked", "x"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "untracked", "x")); - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); s = txn_db->Get(read_options, "untracked", &value); ASSERT_TRUE(s.IsNotFound()); @@ -969,12 +923,12 @@ TEST_P(OptimisticTransactionTest, UntrackedWrites) { delete txn; txn = txn_db->BeginTransaction(write_options); - txn->Put("tracked", "10"); - txn->PutUntracked("untracked", "A"); + ASSERT_OK(txn->Put("tracked", "10")); + ASSERT_OK(txn->PutUntracked("untracked", "A")); // Write to tracked key outside of the transaction and verify that the // untracked keys are not written when the commit fails. - s = txn_db->Delete(write_options, "tracked"); + ASSERT_OK(txn_db->Delete(write_options, "tracked")); s = txn->Commit(); ASSERT_TRUE(s.IsBusy()); @@ -990,49 +944,29 @@ TEST_P(OptimisticTransactionTest, IteratorTest) { ReadOptions read_options, snapshot_read_options; OptimisticTransactionOptions txn_options; string value; - Status s; // Write some keys to the db - s = txn_db->Put(write_options, "A", "a"); - ASSERT_OK(s); - - s = txn_db->Put(write_options, "G", "g"); - ASSERT_OK(s); - - s = txn_db->Put(write_options, "F", "f"); - ASSERT_OK(s); - - s = txn_db->Put(write_options, "C", "c"); - ASSERT_OK(s); - - s = txn_db->Put(write_options, "D", "d"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "A", "a")); + ASSERT_OK(txn_db->Put(write_options, "G", "g")); + ASSERT_OK(txn_db->Put(write_options, "F", "f")); + ASSERT_OK(txn_db->Put(write_options, "C", "c")); + ASSERT_OK(txn_db->Put(write_options, "D", "d")); Transaction* txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); // Write some keys in a txn - s = txn->Put("B", "b"); - ASSERT_OK(s); - - s = txn->Put("H", "h"); - ASSERT_OK(s); - - s = txn->Delete("D"); - ASSERT_OK(s); - - s = txn->Put("E", "e"); - ASSERT_OK(s); + ASSERT_OK(txn->Put("B", "b")); + ASSERT_OK(txn->Put("H", "h")); + 
ASSERT_OK(txn->Delete("D")); + ASSERT_OK(txn->Put("E", "e")); txn->SetSnapshot(); const Snapshot* snapshot = txn->GetSnapshot(); // Write some keys to the db after the snapshot - s = txn_db->Put(write_options, "BB", "xx"); - ASSERT_OK(s); - - s = txn_db->Put(write_options, "C", "xx"); - ASSERT_OK(s); + ASSERT_OK(txn_db->Put(write_options, "BB", "xx")); + ASSERT_OK(txn_db->Put(write_options, "C", "xx")); read_options.snapshot = snapshot; Iterator* iter = txn->GetIterator(read_options); @@ -1046,8 +980,7 @@ TEST_P(OptimisticTransactionTest, IteratorTest) { ASSERT_TRUE(iter->Valid()); ASSERT_EQ(results[i], iter->value().ToString()); - s = txn->GetForUpdate(read_options, iter->key(), nullptr); - ASSERT_OK(s); + ASSERT_OK(txn->GetForUpdate(read_options, iter->key(), nullptr)); iter->Next(); } @@ -1093,7 +1026,7 @@ TEST_P(OptimisticTransactionTest, IteratorTest) { ASSERT_EQ("h", iter->value().ToString()); // key "C" was modified in the db after txn's snapshot. txn will not commit. - s = txn->Commit(); + Status s = txn->Commit(); ASSERT_TRUE(s.IsBusy()); delete iter; @@ -1105,12 +1038,11 @@ TEST_P(OptimisticTransactionTest, SavepointTest) { ReadOptions read_options, snapshot_read_options; OptimisticTransactionOptions txn_options; string value; - Status s; Transaction* txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); + ASSERT_NE(txn, nullptr); - s = txn->RollbackToSavePoint(); + Status s = txn->RollbackToSavePoint(); ASSERT_TRUE(s.IsNotFound()); txn->SetSavePoint(); // 1 @@ -1119,104 +1051,68 @@ TEST_P(OptimisticTransactionTest, SavepointTest) { s = txn->RollbackToSavePoint(); ASSERT_TRUE(s.IsNotFound()); - s = txn->Put("B", "b"); - ASSERT_OK(s); + ASSERT_OK(txn->Put("B", "b")); - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); - s = txn_db->Get(read_options, "B", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "B", &value)); ASSERT_EQ("b", value); delete txn; txn = txn_db->BeginTransaction(write_options); - ASSERT_TRUE(txn); 
+ ASSERT_NE(txn, nullptr); - s = txn->Put("A", "a"); - ASSERT_OK(s); - - s = txn->Put("B", "bb"); - ASSERT_OK(s); - - s = txn->Put("C", "c"); - ASSERT_OK(s); + ASSERT_OK(txn->Put("A", "a")); + ASSERT_OK(txn->Put("B", "bb")); + ASSERT_OK(txn->Put("C", "c")); txn->SetSavePoint(); // 2 - s = txn->Delete("B"); - ASSERT_OK(s); - - s = txn->Put("C", "cc"); - ASSERT_OK(s); - - s = txn->Put("D", "d"); - ASSERT_OK(s); + ASSERT_OK(txn->Delete("B")); + ASSERT_OK(txn->Put("C", "cc")); + ASSERT_OK(txn->Put("D", "d")); ASSERT_OK(txn->RollbackToSavePoint()); // Rollback to 2 - s = txn->Get(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn->Get(read_options, "A", &value)); ASSERT_EQ("a", value); - - s = txn->Get(read_options, "B", &value); - ASSERT_OK(s); + ASSERT_OK(txn->Get(read_options, "B", &value)); ASSERT_EQ("bb", value); - - s = txn->Get(read_options, "C", &value); - ASSERT_OK(s); + ASSERT_OK(txn->Get(read_options, "C", &value)); ASSERT_EQ("c", value); - s = txn->Get(read_options, "D", &value); ASSERT_TRUE(s.IsNotFound()); - s = txn->Put("A", "a"); - ASSERT_OK(s); - - s = txn->Put("E", "e"); - ASSERT_OK(s); + ASSERT_OK(txn->Put("A", "a")); + ASSERT_OK(txn->Put("E", "e")); // Rollback to beginning of txn s = txn->RollbackToSavePoint(); ASSERT_TRUE(s.IsNotFound()); - txn->Rollback(); + ASSERT_OK(txn->Rollback()); s = txn->Get(read_options, "A", &value); ASSERT_TRUE(s.IsNotFound()); - - s = txn->Get(read_options, "B", &value); - ASSERT_OK(s); + ASSERT_OK(txn->Get(read_options, "B", &value)); ASSERT_EQ("b", value); - s = txn->Get(read_options, "D", &value); ASSERT_TRUE(s.IsNotFound()); - s = txn->Get(read_options, "D", &value); ASSERT_TRUE(s.IsNotFound()); - s = txn->Get(read_options, "E", &value); ASSERT_TRUE(s.IsNotFound()); - s = txn->Put("A", "aa"); - ASSERT_OK(s); - - s = txn->Put("F", "f"); - ASSERT_OK(s); + ASSERT_OK(txn->Put("A", "aa")); + ASSERT_OK(txn->Put("F", "f")); txn->SetSavePoint(); // 3 txn->SetSavePoint(); // 4 - s = txn->Put("G", "g"); - 
ASSERT_OK(s); + ASSERT_OK(txn->Put("G", "g")); + ASSERT_OK(txn->Delete("F")); + ASSERT_OK(txn->Delete("B")); - s = txn->Delete("F"); - ASSERT_OK(s); - - s = txn->Delete("B"); - ASSERT_OK(s); - - s = txn->Get(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn->Get(read_options, "A", &value)); ASSERT_EQ("aa", value); s = txn->Get(read_options, "F", &value); @@ -1227,29 +1123,24 @@ TEST_P(OptimisticTransactionTest, SavepointTest) { ASSERT_OK(txn->RollbackToSavePoint()); // Rollback to 3 - s = txn->Get(read_options, "F", &value); - ASSERT_OK(s); + ASSERT_OK(txn->Get(read_options, "F", &value)); ASSERT_EQ("f", value); s = txn->Get(read_options, "G", &value); ASSERT_TRUE(s.IsNotFound()); - s = txn->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn->Commit()); - s = txn_db->Get(read_options, "F", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "F", &value)); ASSERT_EQ("f", value); s = txn_db->Get(read_options, "G", &value); ASSERT_TRUE(s.IsNotFound()); - s = txn_db->Get(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "A", &value)); ASSERT_EQ("aa", value); - s = txn_db->Get(read_options, "B", &value); - ASSERT_OK(s); + ASSERT_OK(txn_db->Get(read_options, "B", &value)); ASSERT_EQ("b", value); s = txn_db->Get(read_options, "C", &value); @@ -1269,41 +1160,52 @@ TEST_P(OptimisticTransactionTest, UndoGetForUpdateTest) { ReadOptions read_options, snapshot_read_options; OptimisticTransactionOptions txn_options; string value; - Status s; - txn_db->Put(write_options, "A", ""); + ASSERT_OK(txn_db->Put(write_options, "A", "")); Transaction* txn1 = txn_db->BeginTransaction(write_options); ASSERT_TRUE(txn1); - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value)); txn1->UndoGetForUpdate("A"); Transaction* txn2 = txn_db->BeginTransaction(write_options); txn2->Put("A", "x"); - s = txn2->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn2->Commit()); delete txn2; // Verify that 
txn1 can commit since A isn't conflict checked - s = txn1->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn1->Commit()); delete txn1; txn1 = txn_db->BeginTransaction(write_options); - txn1->Put("A", "a"); + ASSERT_OK(txn1->Put("A", "a")); - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value)); txn1->UndoGetForUpdate("A"); txn2 = txn_db->BeginTransaction(write_options); - txn2->Put("A", "x"); - s = txn2->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn2->Put("A", "x")); + ASSERT_OK(txn2->Commit()); + delete txn2; + + // Verify that txn1 cannot commit since A will still be conflict checked + Status s = txn1->Commit(); + ASSERT_TRUE(s.IsBusy()); + delete txn1; + + txn1 = txn_db->BeginTransaction(write_options); + + ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value)); + ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value)); + + txn1->UndoGetForUpdate("A"); + + txn2 = txn_db->BeginTransaction(write_options); + ASSERT_OK(txn2->Put("A", "x")); + ASSERT_OK(txn2->Commit()); delete txn2; // Verify that txn1 cannot commit since A will still be conflict checked @@ -1313,57 +1215,31 @@ TEST_P(OptimisticTransactionTest, UndoGetForUpdateTest) { txn1 = txn_db->BeginTransaction(write_options); - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); - - txn1->UndoGetForUpdate("A"); - - txn2 = txn_db->BeginTransaction(write_options); - txn2->Put("A", "x"); - s = txn2->Commit(); - ASSERT_OK(s); - delete txn2; - - // Verify that txn1 cannot commit since A will still be conflict checked - s = txn1->Commit(); - ASSERT_TRUE(s.IsBusy()); - delete txn1; - - txn1 = txn_db->BeginTransaction(write_options); - - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value)); + ASSERT_OK(txn1->GetForUpdate(read_options, 
"A", &value)); txn1->UndoGetForUpdate("A"); txn1->UndoGetForUpdate("A"); txn2 = txn_db->BeginTransaction(write_options); - txn2->Put("A", "x"); - s = txn2->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn2->Put("A", "x")); + ASSERT_OK(txn2->Commit()); delete txn2; // Verify that txn1 can commit since A isn't conflict checked - s = txn1->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn1->Commit()); delete txn1; txn1 = txn_db->BeginTransaction(write_options); - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value)); txn1->SetSavePoint(); txn1->UndoGetForUpdate("A"); txn2 = txn_db->BeginTransaction(write_options); - txn2->Put("A", "x"); - s = txn2->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn2->Put("A", "x")); + ASSERT_OK(txn2->Commit()); delete txn2; // Verify that txn1 cannot commit since A will still be conflict checked @@ -1373,18 +1249,15 @@ TEST_P(OptimisticTransactionTest, UndoGetForUpdateTest) { txn1 = txn_db->BeginTransaction(write_options); - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value)); txn1->SetSavePoint(); - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value)); txn1->UndoGetForUpdate("A"); txn2 = txn_db->BeginTransaction(write_options); - txn2->Put("A", "x"); - s = txn2->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn2->Put("A", "x")); + ASSERT_OK(txn2->Commit()); delete txn2; // Verify that txn1 cannot commit since A will still be conflict checked @@ -1394,26 +1267,22 @@ TEST_P(OptimisticTransactionTest, UndoGetForUpdateTest) { txn1 = txn_db->BeginTransaction(write_options); - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value)); txn1->SetSavePoint(); - s = txn1->GetForUpdate(read_options, "A", &value); - ASSERT_OK(s); + ASSERT_OK(txn1->GetForUpdate(read_options, "A", &value)); 
txn1->UndoGetForUpdate("A"); - txn1->RollbackToSavePoint(); + ASSERT_OK(txn1->RollbackToSavePoint()); txn1->UndoGetForUpdate("A"); txn2 = txn_db->BeginTransaction(write_options); - txn2->Put("A", "x"); - s = txn2->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn2->Put("A", "x")); + ASSERT_OK(txn2->Commit()); delete txn2; // Verify that txn1 can commit since A isn't conflict checked - s = txn1->Commit(); - ASSERT_OK(s); + ASSERT_OK(txn1->Commit()); delete txn1; } @@ -1441,6 +1310,8 @@ Status OptimisticTransactionStressTestInserter(OptimisticTransactionDB* db, } } + inserter.GetLastStatus().PermitUncheckedError(); + // Make sure at least some of the transactions succeeded. It's ok if // some failed due to write-conflicts. if (inserter.GetFailureCount() > num_transactions / 2) { diff --git a/utilities/transactions/transaction_test.cc b/utilities/transactions/transaction_test.cc index 72068cb85d..1dc2ef362c 100644 --- a/utilities/transactions/transaction_test.cc +++ b/utilities/transactions/transaction_test.cc @@ -102,7 +102,7 @@ TEST_P(TransactionTest, DoubleEmptyWrite) { // Also test that it works during recovery txn0 = db->BeginTransaction(write_options, txn_options); ASSERT_OK(txn0->SetName("xid2")); - txn0->Put(Slice("foo0"), Slice("bar0a")); + ASSERT_OK(txn0->Put(Slice("foo0"), Slice("bar0a"))); ASSERT_OK(txn0->Prepare()); delete txn0; reinterpret_cast(db)->TEST_Crash(); @@ -1936,7 +1936,7 @@ TEST_P(TransactionTest, TwoPhaseLogRollingTest2) { // request a flush for all column families such that the earliest // alive log file can be killed - db_impl->TEST_SwitchWAL(); + ASSERT_OK(db_impl->TEST_SwitchWAL()); // log cannot be flushed because txn2 has not been commited ASSERT_TRUE(!db_impl->TEST_IsLogGettingFlushed()); ASSERT_TRUE(db_impl->TEST_UnableToReleaseOldestLog()); @@ -1962,7 +1962,7 @@ TEST_P(TransactionTest, TwoPhaseLogRollingTest2) { s = txn2->Commit(); ASSERT_OK(s); - db_impl->TEST_SwitchWAL(); + ASSERT_OK(db_impl->TEST_SwitchWAL()); 
ASSERT_TRUE(!db_impl->TEST_UnableToReleaseOldestLog()); // we should see that cfb now has a flush requested diff --git a/utilities/transactions/transaction_test.h b/utilities/transactions/transaction_test.h index eabc4676b2..492137cb81 100644 --- a/utilities/transactions/transaction_test.h +++ b/utilities/transactions/transaction_test.h @@ -68,7 +68,7 @@ class TransactionTestBase : public ::testing::Test { options.two_write_queues = two_write_queue; dbname = test::PerThreadDBPath("transaction_testdb"); - DestroyDB(dbname, options); + EXPECT_OK(DestroyDB(dbname, options)); txn_db_options.transaction_lock_timeout = 0; txn_db_options.default_lock_timeout = 0; txn_db_options.write_policy = write_policy; @@ -85,7 +85,7 @@ class TransactionTestBase : public ::testing::Test { } else { s = OpenWithStackableDB(); } - assert(s.ok()); + EXPECT_OK(s); } ~TransactionTestBase() { @@ -96,7 +96,7 @@ class TransactionTestBase : public ::testing::Test { // unlink-ed files. By using the default fs we simply ignore errors resulted // from attempting to delete such files in DestroyDB. 
options.env = Env::Default(); - DestroyDB(dbname, options); + EXPECT_OK(DestroyDB(dbname, options)); delete env; } @@ -391,7 +391,7 @@ class TransactionTestBase : public ::testing::Test { if (txn_db_options.write_policy == WRITE_COMMITTED) { options.unordered_write = false; } - ReOpen(); + ASSERT_OK(ReOpen()); for (int i = 0; i < 1024; i++) { auto istr = std::to_string(index); @@ -410,9 +410,9 @@ class TransactionTestBase : public ::testing::Test { case 1: { WriteBatch wb; committed_kvs[k] = v; - wb.Put(k, v); + ASSERT_OK(wb.Put(k, v)); committed_kvs[k] = v2; - wb.Put(k, v2); + ASSERT_OK(wb.Put(k, v2)); ASSERT_OK(db->Write(write_options, &wb)); } break; @@ -432,7 +432,7 @@ class TransactionTestBase : public ::testing::Test { delete txn; break; default: - assert(0); + FAIL(); } index++; @@ -445,9 +445,9 @@ class TransactionTestBase : public ::testing::Test { auto db_impl = static_cast_with_check(db->GetRootDB()); // Before upgrade/downgrade the WAL must be emptied if (empty_wal) { - db_impl->TEST_FlushMemTable(); + ASSERT_OK(db_impl->TEST_FlushMemTable()); } else { - db_impl->FlushWAL(true); + ASSERT_OK(db_impl->FlushWAL(true)); } auto s = ReOpenNoDelete(); if (empty_wal) { @@ -461,7 +461,7 @@ class TransactionTestBase : public ::testing::Test { db_impl = static_cast_with_check(db->GetRootDB()); // Check that WAL is empty VectorLogPtr log_files; - db_impl->GetSortedWalFiles(log_files); + ASSERT_OK(db_impl->GetSortedWalFiles(log_files)); ASSERT_EQ(0, log_files.size()); for (auto& kv : committed_kvs) { diff --git a/utilities/transactions/write_prepared_transaction_test.cc b/utilities/transactions/write_prepared_transaction_test.cc index 686d985c25..a63374f61f 100644 --- a/utilities/transactions/write_prepared_transaction_test.cc +++ b/utilities/transactions/write_prepared_transaction_test.cc @@ -201,7 +201,7 @@ TEST(WriteBatchWithIndex, SubBatchCnt) { Options options; options.create_if_missing = true; const std::string dbname = 
test::PerThreadDBPath("transaction_testdb"); - DestroyDB(dbname, options); + EXPECT_OK(DestroyDB(dbname, options)); ASSERT_OK(DB::Open(options, dbname, &db)); ColumnFamilyHandle* cf_handle = nullptr; ASSERT_OK(db->CreateColumnFamily(cf_options, cf_name, &cf_handle)); @@ -215,18 +215,18 @@ TEST(WriteBatchWithIndex, SubBatchCnt) { batch_cnt_at.push_back(batch_cnt); batch.SetSavePoint(); save_points++; - batch.Put(Slice("key"), Slice("value")); + ASSERT_OK(batch.Put(Slice("key"), Slice("value"))); ASSERT_EQ(batch_cnt, batch.SubBatchCnt()); batch_cnt_at.push_back(batch_cnt); batch.SetSavePoint(); save_points++; - batch.Put(Slice("key2"), Slice("value2")); + ASSERT_OK(batch.Put(Slice("key2"), Slice("value2"))); ASSERT_EQ(batch_cnt, batch.SubBatchCnt()); // duplicate the keys batch_cnt_at.push_back(batch_cnt); batch.SetSavePoint(); save_points++; - batch.Put(Slice("key"), Slice("value3")); + ASSERT_OK(batch.Put(Slice("key"), Slice("value3"))); batch_cnt++; ASSERT_EQ(batch_cnt, batch.SubBatchCnt()); // duplicate the 2nd key. It should not be counted duplicate since a @@ -234,14 +234,14 @@ TEST(WriteBatchWithIndex, SubBatchCnt) { batch_cnt_at.push_back(batch_cnt); batch.SetSavePoint(); save_points++; - batch.Put(Slice("key2"), Slice("value4")); + ASSERT_OK(batch.Put(Slice("key2"), Slice("value4"))); ASSERT_EQ(batch_cnt, batch.SubBatchCnt()); // duplicate the keys but in a different cf. 
It should not be counted as // duplicate keys batch_cnt_at.push_back(batch_cnt); batch.SetSavePoint(); save_points++; - batch.Put(cf_handle, Slice("key"), Slice("value5")); + ASSERT_OK(batch.Put(cf_handle, Slice("key"), Slice("value5"))); ASSERT_EQ(batch_cnt, batch.SubBatchCnt()); // Test that the number of sub-batches matches what we count with @@ -256,7 +256,7 @@ TEST(WriteBatchWithIndex, SubBatchCnt) { // Test that RollbackToSavePoint will properly resets the number of // sub-batches for (size_t i = save_points; i > 0; i--) { - batch.RollbackToSavePoint(); + ASSERT_OK(batch.RollbackToSavePoint()); ASSERT_EQ(batch_cnt_at[i - 1], batch.SubBatchCnt()); } @@ -277,7 +277,7 @@ TEST(WriteBatchWithIndex, SubBatchCnt) { Slice key = Slice(keys[ki]); std::string tmp = rnd.RandomString(16); Slice value = Slice(tmp); - rndbatch.Put(key, value); + ASSERT_OK(rndbatch.Put(key, value)); } SubBatchCounter batch_counter(comparators); ASSERT_OK(rndbatch.GetWriteBatch()->Iterate(&batch_counter)); @@ -526,7 +526,7 @@ class WritePreparedTransactionTestBase : public TransactionTestBase { ASSERT_EQ(expected_versions[i].value, versions[i].value); } // Range delete not supported. 
- assert(expected_versions[i].type != kTypeRangeDeletion); + ASSERT_NE(expected_versions[i].type, kTypeRangeDeletion); } } }; @@ -702,8 +702,8 @@ INSTANTIATE_TEST_CASE_P( TEST_P(WritePreparedTransactionTest, CommitMap) { WritePreparedTxnDB* wp_db = dynamic_cast(db); - assert(wp_db); - assert(wp_db->db_impl_); + ASSERT_NE(wp_db, nullptr); + ASSERT_NE(wp_db->db_impl_, nullptr); size_t size = wp_db->COMMIT_CACHE_SIZE; CommitEntry c = {5, 12}, e; bool evicted = wp_db->AddCommitEntry(c.prep_seq % size, c, &e); @@ -797,14 +797,13 @@ TEST_P(WritePreparedTransactionTest, CheckKeySkipOldMemtable) { for (int attempt = kAttemptHistoryMemtable; attempt <= kAttemptImmMemTable; attempt++) { options.max_write_buffer_number_to_maintain = 3; - ReOpen(); + ASSERT_OK(ReOpen()); WriteOptions write_options; ReadOptions read_options; TransactionOptions txn_options; txn_options.set_snapshot = true; string value; - Status s; ASSERT_OK(db->Put(write_options, Slice("foo"), Slice("bar"))); ASSERT_OK(db->Put(write_options, Slice("foo2"), Slice("bar"))); @@ -841,9 +840,9 @@ TEST_P(WritePreparedTransactionTest, CheckKeySkipOldMemtable) { if (attempt == kAttemptHistoryMemtable) { ASSERT_OK(db->Flush(flush_ops)); } else { - assert(attempt == kAttemptImmMemTable); + ASSERT_EQ(attempt, kAttemptImmMemTable); DBImpl* db_impl = static_cast(db->GetRootDB()); - db_impl->TEST_SwitchMemtable(); + ASSERT_OK(db_impl->TEST_SwitchMemtable()); } uint64_t num_imm_mems; ASSERT_TRUE(db->GetIntProperty(DB::Properties::kNumImmutableMemTable, @@ -851,7 +850,7 @@ TEST_P(WritePreparedTransactionTest, CheckKeySkipOldMemtable) { if (attempt == kAttemptHistoryMemtable) { ASSERT_EQ(0, num_imm_mems); } else { - assert(attempt == kAttemptImmMemTable); + ASSERT_EQ(attempt, kAttemptImmMemTable); ASSERT_EQ(1, num_imm_mems); } @@ -893,7 +892,7 @@ TEST_P(WritePreparedTransactionTest, CheckKeySkipOldMemtable) { if (attempt == kAttemptHistoryMemtable) { ASSERT_EQ(3, get_perf_context()->get_from_memtable_count); } else { - 
assert(attempt == kAttemptImmMemTable); + ASSERT_EQ(attempt, kAttemptImmMemTable); ASSERT_EQ(4, get_perf_context()->get_from_memtable_count); } @@ -910,7 +909,7 @@ TEST_P(WritePreparedTransactionTest, CheckKeySkipOldMemtable) { // Only active memtable will be checked in snapshot validation but // both of active and immutable snapshot will be queried when // getting the value. - assert(attempt == kAttemptImmMemTable); + ASSERT_EQ(attempt, kAttemptImmMemTable); ASSERT_EQ(3, get_perf_context()->get_from_memtable_count); } @@ -1091,7 +1090,7 @@ TEST_P(WritePreparedTransactionTest, CheckAgainstSnapshots) { const uint64_t cache_size = 1ul << snapshot_cache_bits; // Safety check to express the intended size in the test. Can be adjusted if // the snapshots lists changed. - assert((1ul << snapshot_cache_bits) * 2 + 1 == snapshots.size()); + ASSERT_EQ((1ul << snapshot_cache_bits) * 2 + 1, snapshots.size()); DBImpl* mock_db = new DBImpl(options, dbname); UpdateTransactionDBOptions(snapshot_cache_bits); std::unique_ptr wp_db( @@ -1106,7 +1105,7 @@ TEST_P(WritePreparedTransactionTest, CheckAgainstSnapshots) { std::vector seqs = {50l, 55l, 150l, 155l, 250l, 255l, 350l, 355l, 450l, 455l, 550l, 555l, 650l, 655l, 750l, 755l, 850l, 855l, 950l, 955l}; - assert(seqs.size() > 1); + ASSERT_GT(seqs.size(), 1); for (size_t i = 0; i + 1 < seqs.size(); i++) { wp_db->old_commit_map_empty_ = true; // reset CommitEntry commit_entry = {seqs[i], seqs[i + 1]}; @@ -1184,7 +1183,7 @@ TEST_P(SnapshotConcurrentAccessTest, SnapshotConcurrentAccess) { const size_t snapshot_cache_bits = 2; // Safety check to express the intended size in the test. Can be adjusted if // the snapshots lists changed. - assert((1ul << snapshot_cache_bits) * 2 + 2 == snapshots.size()); + ASSERT_EQ((1ul << snapshot_cache_bits) * 2 + 2, snapshots.size()); SequenceNumber version = 1000l; // Choose the cache size so that the new snapshot list could replace all the // existing items in the cache and also have some overflow. 
@@ -1365,7 +1364,7 @@ TEST_P(WritePreparedTransactionTest, MaxCatchupWithNewSnapshot) { const size_t snapshot_cache_bits = 7; // same as default const size_t commit_cache_bits = 0; // only 1 entry => frequent eviction UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); WriteOptions woptions; WritePreparedTxnDB* wp_db = dynamic_cast(db); @@ -1378,9 +1377,9 @@ TEST_P(WritePreparedTransactionTest, MaxCatchupWithNewSnapshot) { // is not published yet, thus causing max evicted seq go higher than last // published. for (int b = 0; b < batch_cnt; b++) { - batch.Put("foo", "foo"); + ASSERT_OK(batch.Put("foo", "foo")); } - db->Write(woptions, &batch); + ASSERT_OK(db->Write(woptions, &batch)); } }); @@ -1415,7 +1414,7 @@ TEST_P(WritePreparedTransactionTest, MaxCatchupWithUnbackedSnapshot) { const size_t snapshot_cache_bits = 7; // same as default const size_t commit_cache_bits = 0; // only 1 entry => frequent eviction UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); WriteOptions woptions; WritePreparedTxnDB* wp_db = dynamic_cast(db); @@ -1423,8 +1422,8 @@ TEST_P(WritePreparedTransactionTest, MaxCatchupWithUnbackedSnapshot) { ROCKSDB_NAMESPACE::port::Thread t1([&]() { for (int i = 0; i < writes; i++) { WriteBatch batch; - batch.Put("key", "foo"); - db->Write(woptions, &batch); + ASSERT_OK(batch.Put("key", "foo")); + ASSERT_OK(db->Write(woptions, &batch)); } }); @@ -1474,7 +1473,7 @@ TEST_P(WritePreparedTransactionTest, CleanupSnapshotEqualToMax) { const size_t snapshot_cache_bits = 7; // same as default const size_t commit_cache_bits = 0; // only 1 entry => frequent eviction UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); WriteOptions woptions; WritePreparedTxnDB* wp_db = dynamic_cast(db); // Insert something to increase seq @@ -1534,8 +1533,8 @@ TEST_P(WritePreparedTransactionTest, TxnInitialize) { // udpated 
ASSERT_GT(snap_impl->min_uncommitted_, kMinUnCommittedSeq); - txn0->Rollback(); - txn1->Rollback(); + ASSERT_OK(txn0->Rollback()); + ASSERT_OK(txn1->Rollback()); delete txn0; delete txn1; } @@ -1548,7 +1547,7 @@ TEST_P(WritePreparedTransactionTest, AdvanceMaxEvictedSeqWithDuplicates) { const size_t snapshot_cache_bits = 7; // same as default const size_t commit_cache_bits = 1; // disable commit cache UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); ReadOptions ropt; PinnableSlice pinnable_val; @@ -1569,10 +1568,10 @@ TEST_P(WritePreparedTransactionTest, AdvanceMaxEvictedSeqWithDuplicates) { delete txn0; WritePreparedTxnDB* wp_db = dynamic_cast(db); - wp_db->db_impl_->FlushWAL(true); + ASSERT_OK(wp_db->db_impl_->FlushWAL(true)); wp_db->TEST_Crash(); - ReOpenNoDelete(); - assert(db != nullptr); + ASSERT_OK(ReOpenNoDelete()); + ASSERT_NE(db, nullptr); s = db->Get(ropt, db->DefaultColumnFamily(), "key", &pinnable_val); ASSERT_TRUE(s.IsNotFound()); @@ -1589,7 +1588,7 @@ TEST_P(WritePreparedTransactionTest, SmallestUnCommittedSeq) { const size_t snapshot_cache_bits = 7; // same as default const size_t commit_cache_bits = 1; // disable commit cache UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); WritePreparedTxnDB* wp_db = dynamic_cast(db); ReadOptions ropt; PinnableSlice pinnable_val; @@ -1622,7 +1621,7 @@ TEST_P(WritePreparedTransactionTest, SmallestUnCommittedSeq) { // Since commit cache is practically disabled, commit results in immediate // advance in max_evicted_seq_ and subsequently moving some prepared txns // to delayed_prepared_. - txn->Commit(); + ASSERT_OK(txn->Commit()); committed_txns.push_back(txn); } }); @@ -1651,7 +1650,7 @@ TEST_P(SeqAdvanceConcurrentTest, SeqAdvanceConcurrent) { // almost infeasible. 
txn_db_options.transaction_lock_timeout = 1000; txn_db_options.default_lock_timeout = 1000; - ReOpen(); + ASSERT_OK(ReOpen()); FlushOptions fopt; // Number of different txn types we use in this test @@ -1671,7 +1670,11 @@ TEST_P(SeqAdvanceConcurrentTest, SeqAdvanceConcurrent) { } const size_t max_n = static_cast(std::pow(type_cnt, txn_cnt)); printf("Number of cases being tested is %" ROCKSDB_PRIszt "\n", max_n); - for (size_t n = 0; n < max_n; n++, ReOpen()) { + for (size_t n = 0; n < max_n; n++) { + if (n > 0) { + ASSERT_OK(ReOpen()); + } + if (n % split_cnt_ != split_id_) continue; if (n % 1000 == 0) { printf("Tested %" ROCKSDB_PRIszt " cases so far\n", n); @@ -1731,7 +1734,7 @@ TEST_P(SeqAdvanceConcurrentTest, SeqAdvanceConcurrent) { threads.emplace_back(txn_t3, bi); break; default: - assert(false); + FAIL(); } // wait to be linked while (linked.load() <= bi) { @@ -1765,22 +1768,22 @@ TEST_P(SeqAdvanceConcurrentTest, SeqAdvanceConcurrent) { ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks(); // Check if recovery preserves the last sequence number - db_impl->FlushWAL(true); - ReOpenNoDelete(); - assert(db != nullptr); + ASSERT_OK(db_impl->FlushWAL(true)); + ASSERT_OK(ReOpenNoDelete()); + ASSERT_NE(db, nullptr); db_impl = static_cast_with_check(db->GetRootDB()); seq = db_impl->TEST_GetLastVisibleSequence(); ASSERT_LE(exp_seq, seq + with_empty_commits); // Check if flush preserves the last sequence number - db_impl->Flush(fopt); + ASSERT_OK(db_impl->Flush(fopt)); seq = db_impl->GetLatestSequenceNumber(); ASSERT_LE(exp_seq, seq + with_empty_commits); // Check if recovery after flush preserves the last sequence number - db_impl->FlushWAL(true); - ReOpenNoDelete(); - assert(db != nullptr); + ASSERT_OK(db_impl->FlushWAL(true)); + ASSERT_OK(ReOpenNoDelete()); + ASSERT_NE(db, nullptr); db_impl = static_cast_with_check(db->GetRootDB()); seq = db_impl->GetLatestSequenceNumber(); ASSERT_LE(exp_seq, seq + with_empty_commits); @@ -1792,7 +1795,7 @@ 
TEST_P(SeqAdvanceConcurrentTest, SeqAdvanceConcurrent) { // properly. TEST_P(WritePreparedTransactionTest, BasicRecovery) { options.disable_auto_compactions = true; - ReOpen(); + ASSERT_OK(ReOpen()); WritePreparedTxnDB* wp_db = dynamic_cast(db); txn_t0(0); @@ -1807,6 +1810,7 @@ TEST_P(WritePreparedTransactionTest, BasicRecovery) { s = txn0->Put(Slice("foo0" + istr0), Slice("bar0" + istr0)); ASSERT_OK(s); s = txn0->Prepare(); + ASSERT_OK(s); auto prep_seq_0 = txn0->GetId(); txn_t1(0); @@ -1819,6 +1823,7 @@ TEST_P(WritePreparedTransactionTest, BasicRecovery) { s = txn1->Put(Slice("foo1" + istr1), Slice("bar")); ASSERT_OK(s); s = txn1->Prepare(); + ASSERT_OK(s); auto prep_seq_1 = txn1->GetId(); txn_t2(0); @@ -1832,10 +1837,10 @@ TEST_P(WritePreparedTransactionTest, BasicRecovery) { delete txn0; delete txn1; - wp_db->db_impl_->FlushWAL(true); + ASSERT_OK(wp_db->db_impl_->FlushWAL(true)); wp_db->TEST_Crash(); - ReOpenNoDelete(); - assert(db != nullptr); + ASSERT_OK(ReOpenNoDelete()); + ASSERT_NE(db, nullptr); wp_db = dynamic_cast(db); // After recovery, all the uncommitted txns (0 and 1) should be inserted into // delayed_prepared_ @@ -1863,7 +1868,7 @@ TEST_P(WritePreparedTransactionTest, BasicRecovery) { // recovery txn1 = db->GetTransactionByName("xid" + istr1); ASSERT_NE(txn1, nullptr); - txn1->Commit(); + ASSERT_OK(txn1->Commit()); delete txn1; index++; @@ -1874,13 +1879,14 @@ TEST_P(WritePreparedTransactionTest, BasicRecovery) { s = txn2->Put(Slice("foo2" + istr2), Slice("bar")); ASSERT_OK(s); s = txn2->Prepare(); + ASSERT_OK(s); auto prep_seq_2 = txn2->GetId(); delete txn2; - wp_db->db_impl_->FlushWAL(true); + ASSERT_OK(wp_db->db_impl_->FlushWAL(true)); wp_db->TEST_Crash(); - ReOpenNoDelete(); - assert(db != nullptr); + ASSERT_OK(ReOpenNoDelete()); + ASSERT_NE(db, nullptr); wp_db = dynamic_cast(db); ASSERT_TRUE(wp_db->prepared_txns_.empty()); ASSERT_FALSE(wp_db->delayed_prepared_empty_); @@ -1900,10 +1906,10 @@ TEST_P(WritePreparedTransactionTest, BasicRecovery) 
{ // Commit all the remaining txns txn0 = db->GetTransactionByName("xid" + istr0); ASSERT_NE(txn0, nullptr); - txn0->Commit(); + ASSERT_OK(txn0->Commit()); txn2 = db->GetTransactionByName("xid" + istr2); ASSERT_NE(txn2, nullptr); - txn2->Commit(); + ASSERT_OK(txn2->Commit()); // Check the value is committed after commit s = db->Get(ropt, db->DefaultColumnFamily(), "foo0" + istr0, &pinnable_val); @@ -1913,9 +1919,9 @@ TEST_P(WritePreparedTransactionTest, BasicRecovery) { delete txn0; delete txn2; - wp_db->db_impl_->FlushWAL(true); - ReOpenNoDelete(); - assert(db != nullptr); + ASSERT_OK(wp_db->db_impl_->FlushWAL(true)); + ASSERT_OK(ReOpenNoDelete()); + ASSERT_NE(db, nullptr); wp_db = dynamic_cast(db); ASSERT_TRUE(wp_db->prepared_txns_.empty()); ASSERT_TRUE(wp_db->delayed_prepared_empty_); @@ -1932,7 +1938,7 @@ TEST_P(WritePreparedTransactionTest, BasicRecovery) { // committed data before the restart is visible to all snapshots. TEST_P(WritePreparedTransactionTest, IsInSnapshotEmptyMap) { for (bool end_with_prepare : {false, true}) { - ReOpen(); + ASSERT_OK(ReOpen()); WriteOptions woptions; ASSERT_OK(db->Put(woptions, "key", "value")); ASSERT_OK(db->Put(woptions, "key", "value")); @@ -1948,10 +1954,10 @@ TEST_P(WritePreparedTransactionTest, IsInSnapshotEmptyMap) { } dynamic_cast(db)->TEST_Crash(); auto db_impl = static_cast_with_check(db->GetRootDB()); - db_impl->FlushWAL(true); - ReOpenNoDelete(); + ASSERT_OK(db_impl->FlushWAL(true)); + ASSERT_OK(ReOpenNoDelete()); WritePreparedTxnDB* wp_db = dynamic_cast(db); - assert(wp_db != nullptr); + ASSERT_NE(wp_db, nullptr); ASSERT_GT(wp_db->max_evicted_seq_, 0); // max after recovery // Take a snapshot right after recovery const Snapshot* snap = db->GetSnapshot(); @@ -2190,7 +2196,7 @@ void ASSERT_SAME(ReadOptions roptions, TransactionDB* db, Status exp_s, Status s; PinnableSlice v; s = db->Get(roptions, db->DefaultColumnFamily(), key, &v); - ASSERT_TRUE(exp_s == s); + ASSERT_EQ(exp_s, s); ASSERT_TRUE(s.ok() || 
s.IsNotFound()); if (s.ok()) { ASSERT_TRUE(exp_v == v); @@ -2203,7 +2209,7 @@ void ASSERT_SAME(ReadOptions roptions, TransactionDB* db, Status exp_s, ASSERT_EQ(1, values.size()); ASSERT_EQ(1, s_vec.size()); s = s_vec[0]; - ASSERT_TRUE(exp_s == s); + ASSERT_EQ(exp_s, s); ASSERT_TRUE(s.ok() || s.IsNotFound()); if (s.ok()) { ASSERT_TRUE(exp_v == values[0]); @@ -2224,7 +2230,7 @@ TEST_P(WritePreparedTransactionTest, Rollback) { for (size_t ikey = 1; ikey <= num_keys; ikey++) { for (size_t ivalue = 0; ivalue < num_values; ivalue++) { for (bool crash : {false, true}) { - ReOpen(); + ASSERT_OK(ReOpen()); WritePreparedTxnDB* wp_db = dynamic_cast(db); std::string key_str = "key" + ToString(ikey); switch (ivalue) { @@ -2243,7 +2249,7 @@ TEST_P(WritePreparedTransactionTest, Rollback) { ASSERT_OK(db->SingleDelete(woptions, key_str)); break; default: - assert(0); + FAIL(); } PinnableSlice v1; @@ -2286,10 +2292,10 @@ TEST_P(WritePreparedTransactionTest, Rollback) { if (crash) { delete txn; auto db_impl = static_cast_with_check(db->GetRootDB()); - db_impl->FlushWAL(true); + ASSERT_OK(db_impl->FlushWAL(true)); dynamic_cast(db)->TEST_Crash(); - ReOpenNoDelete(); - assert(db != nullptr); + ASSERT_OK(ReOpenNoDelete()); + ASSERT_NE(db, nullptr); wp_db = dynamic_cast(db); txn = db->GetTransactionByName("xid0"); ASSERT_FALSE(wp_db->delayed_prepared_empty_); @@ -2328,7 +2334,7 @@ TEST_P(WritePreparedTransactionTest, Rollback) { TEST_P(WritePreparedTransactionTest, DisableGCDuringRecovery) { // Use large buffer to avoid memtable flush after 1024 insertions options.write_buffer_size = 1024 * 1024; - ReOpen(); + ASSERT_OK(ReOpen()); std::vector versions; uint64_t seq = 0; for (uint64_t i = 1; i <= 1024; i++) { @@ -2345,10 +2351,10 @@ TEST_P(WritePreparedTransactionTest, DisableGCDuringRecovery) { std::reverse(std::begin(versions), std::end(versions)); VerifyInternalKeys(versions); DBImpl* db_impl = static_cast_with_check(db->GetRootDB()); - db_impl->FlushWAL(true); + 
ASSERT_OK(db_impl->FlushWAL(true)); // Use small buffer to ensure memtable flush during recovery options.write_buffer_size = 1024; - ReOpenNoDelete(); + ASSERT_OK(ReOpenNoDelete()); VerifyInternalKeys(versions); } @@ -2375,7 +2381,7 @@ TEST_P(WritePreparedTransactionTest, SequenceNumberZero) { // proceed with older versions of the key as-if the new version doesn't exist. TEST_P(WritePreparedTransactionTest, CompactionShouldKeepUncommittedKeys) { options.disable_auto_compactions = true; - ReOpen(); + ASSERT_OK(ReOpen()); DBImpl* db_impl = static_cast_with_check(db->GetRootDB()); // Snapshots to avoid keys get evicted. std::vector snapshots; @@ -2466,7 +2472,7 @@ TEST_P(WritePreparedTransactionTest, CompactionShouldKeepUncommittedKeys) { // not just prepare sequence. TEST_P(WritePreparedTransactionTest, CompactionShouldKeepSnapshotVisibleKeys) { options.disable_auto_compactions = true; - ReOpen(); + ASSERT_OK(ReOpen()); // Keep track of expected sequence number. SequenceNumber expected_seq = 0; auto* txn1 = db->BeginTransaction(WriteOptions()); @@ -2532,7 +2538,7 @@ TEST_P(WritePreparedTransactionTest, SmallestUncommittedOptimization) { const size_t commit_cache_bits = 0; // disable commit cache for (bool has_recent_prepare : {true, false}) { UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); ASSERT_OK(db->Put(WriteOptions(), "key1", "value1")); auto* transaction = @@ -2581,7 +2587,7 @@ TEST_P(WritePreparedTransactionTest, ReleaseSnapshotDuringCompaction) { const size_t snapshot_cache_bits = 7; // same as default const size_t commit_cache_bits = 0; // minimum commit cache UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); ASSERT_OK(db->Put(WriteOptions(), "key1", "value1_1")); auto* transaction = @@ -2630,7 +2636,7 @@ TEST_P(WritePreparedTransactionTest, ReleaseSnapshotDuringCompaction2) { const size_t snapshot_cache_bits = 7; // same as default const size_t 
commit_cache_bits = 0; // minimum commit cache UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); ASSERT_OK(db->Put(WriteOptions(), "key1", "value1")); ASSERT_OK(db->Put(WriteOptions(), "key1", "value2")); @@ -2680,7 +2686,7 @@ TEST_P(WritePreparedTransactionTest, ReleaseSnapshotDuringCompaction3) { const size_t snapshot_cache_bits = 7; // same as default const size_t commit_cache_bits = 1; // commit cache size = 2 UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); // Add a dummy key to evict v2 commit cache, but keep v1 commit cache. // It also advance max_evicted_seq and can trigger old_commit_map cleanup. @@ -2731,7 +2737,7 @@ TEST_P(WritePreparedTransactionTest, ReleaseEarliestSnapshotDuringCompaction) { const size_t snapshot_cache_bits = 7; // same as default const size_t commit_cache_bits = 0; // minimum commit cache UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); ASSERT_OK(db->Put(WriteOptions(), "key1", "value1")); auto* transaction = @@ -2795,7 +2801,7 @@ TEST_P(WritePreparedTransactionTest, Random rnd(1103); options.disable_auto_compactions = true; - ReOpen(); + ASSERT_OK(ReOpen()); for (size_t i = 0; i < kNumTransactions; i++) { std::string key = "key" + ToString(i); @@ -2836,7 +2842,7 @@ TEST_P(WritePreparedTransactionTest, snapshots.push_back(db->GetSnapshot()); snapshot_data.push_back(current_data); - assert(snapshots.size() == snapshot_data.size()); + ASSERT_EQ(snapshots.size(), snapshot_data.size()); for (size_t i = 0; i < snapshots.size(); i++) { VerifyKeys(snapshot_data[i], snapshots[i]); } @@ -2871,7 +2877,7 @@ TEST_P(WritePreparedTransactionTest, TEST_P(WritePreparedTransactionTest, CompactionShouldKeepSequenceForUncommittedKeys) { options.disable_auto_compactions = true; - ReOpen(); + ASSERT_OK(ReOpen()); // Keep track of expected sequence number. 
SequenceNumber expected_seq = 0; auto* transaction = db->BeginTransaction(WriteOptions()); @@ -2913,7 +2919,7 @@ TEST_P(WritePreparedTransactionTest, TEST_P(WritePreparedTransactionTest, CommitAndSnapshotDuringCompaction) { options.disable_auto_compactions = true; - ReOpen(); + ASSERT_OK(ReOpen()); const Snapshot* snapshot = nullptr; ASSERT_OK(db->Put(WriteOptions(), "key1", "value1")); @@ -2996,6 +3002,7 @@ TEST_P(WritePreparedTransactionTest, Iterate) { TEST_P(WritePreparedTransactionTest, IteratorRefreshNotSupported) { Iterator* iter = db->NewIterator(ReadOptions()); + ASSERT_OK(iter->status()); ASSERT_TRUE(iter->Refresh().IsNotSupported()); delete iter; } @@ -3017,13 +3024,13 @@ TEST_P(WritePreparedTransactionTest, NonAtomicCommitOfDelayedPrepared) { } for (auto split_before_mutex : split_options) { UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); WritePreparedTxnDB* wp_db = dynamic_cast(db); DBImpl* db_impl = static_cast_with_check(db->GetRootDB()); // Fill up the commit cache std::string init_value("value1"); for (int i = 0; i < 10; i++) { - db->Put(WriteOptions(), Slice("key1"), Slice(init_value)); + ASSERT_OK(db->Put(WriteOptions(), Slice("key1"), Slice(init_value))); } // Prepare a transaction but do not commit it Transaction* txn = @@ -3034,7 +3041,7 @@ TEST_P(WritePreparedTransactionTest, NonAtomicCommitOfDelayedPrepared) { // Commit a bunch of entries to advance max evicted seq and make the // prepared a delayed prepared for (int i = 0; i < 10; i++) { - db->Put(WriteOptions(), Slice("key3"), Slice("value3")); + ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3"))); } // The snapshot should not see the delayed prepared entry auto snap = db->GetSnapshot(); @@ -3075,7 +3082,7 @@ TEST_P(WritePreparedTransactionTest, NonAtomicCommitOfDelayedPrepared) { auto seq = db_impl->TEST_GetLastVisibleSequence(); size_t tries = 0; while (wp_db->max_evicted_seq_ < seq && tries < 50) { - 
db->Put(WriteOptions(), Slice("key3"), Slice("value3")); + ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3"))); tries++; }; ASSERT_LT(tries, 50); @@ -3115,12 +3122,12 @@ TEST_P(WritePreparedTransactionTest, NonAtomicUpdateOfDelayedPrepared) { const size_t snapshot_cache_bits = 7; // same as default const size_t commit_cache_bits = 3; // 8 entries UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); WritePreparedTxnDB* wp_db = dynamic_cast(db); // Fill up the commit cache std::string init_value("value1"); for (int i = 0; i < 10; i++) { - db->Put(WriteOptions(), Slice("key1"), Slice(init_value)); + ASSERT_OK(db->Put(WriteOptions(), Slice("key1"), Slice(init_value))); } // Prepare a transaction but do not commit it Transaction* txn = db->BeginTransaction(WriteOptions(), TransactionOptions()); @@ -3128,8 +3135,8 @@ TEST_P(WritePreparedTransactionTest, NonAtomicUpdateOfDelayedPrepared) { ASSERT_OK(txn->Put(Slice("key1"), Slice("value2"))); ASSERT_OK(txn->Prepare()); // Create a gap between prepare seq and snapshot seq - db->Put(WriteOptions(), Slice("key3"), Slice("value3")); - db->Put(WriteOptions(), Slice("key3"), Slice("value3")); + ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3"))); + ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3"))); // The snapshot should not see the delayed prepared entry auto snap = db->GetSnapshot(); ASSERT_LT(txn->GetId(), snap->GetSequenceNumber()); @@ -3148,7 +3155,7 @@ TEST_P(WritePreparedTransactionTest, NonAtomicUpdateOfDelayedPrepared) { // prepared a delayed prepared size_t tries = 0; while (wp_db->max_evicted_seq_ < txn->GetId() && tries < 50) { - db->Put(WriteOptions(), Slice("key3"), Slice("value3")); + ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3"))); tries++; }; ASSERT_LT(tries, 50); @@ -3185,13 +3192,13 @@ TEST_P(WritePreparedTransactionTest, NonAtomicUpdateOfMaxEvictedSeq) { const size_t snapshot_cache_bits = 7; // 
same as default const size_t commit_cache_bits = 3; // 8 entries UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); WritePreparedTxnDB* wp_db = dynamic_cast(db); // Fill up the commit cache std::string init_value("value1"); std::string last_value("value_final"); for (int i = 0; i < 10; i++) { - db->Put(WriteOptions(), Slice("key1"), Slice(init_value)); + ASSERT_OK(db->Put(WriteOptions(), Slice("key1"), Slice(init_value))); } // Do an uncommitted write to prevent min_uncommitted optimization Transaction* txn1 = @@ -3206,8 +3213,8 @@ TEST_P(WritePreparedTransactionTest, NonAtomicUpdateOfMaxEvictedSeq) { ASSERT_OK(txn->Prepare()); ASSERT_OK(txn->Commit()); // Create a gap between commit entry and snapshot seq - db->Put(WriteOptions(), Slice("key3"), Slice("value3")); - db->Put(WriteOptions(), Slice("key3"), Slice("value3")); + ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3"))); + ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3"))); // The snapshot should see the last commit auto snap = db->GetSnapshot(); ASSERT_LE(txn->GetId(), snap->GetSequenceNumber()); @@ -3225,7 +3232,7 @@ TEST_P(WritePreparedTransactionTest, NonAtomicUpdateOfMaxEvictedSeq) { // Commit a bunch of entries to advance max evicted seq beyond txn->GetId() size_t tries = 0; while (wp_db->max_evicted_seq_ < txn->GetId() && tries < 50) { - db->Put(WriteOptions(), Slice("key3"), Slice("value3")); + ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3"))); tries++; }; ASSERT_LT(tries, 50); @@ -3248,7 +3255,7 @@ TEST_P(WritePreparedTransactionTest, NonAtomicUpdateOfMaxEvictedSeq) { read_thread.join(); commit_thread.join(); delete txn; - txn1->Commit(); + ASSERT_OK(txn1->Commit()); delete txn1; ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing(); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks(); @@ -3266,7 +3273,7 @@ TEST_P(WritePreparedTransactionTest, AddPreparedBeforeMax) { // 1 entry 
to advance max after the 2nd commit const size_t commit_cache_bits = 0; UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); WritePreparedTxnDB* wp_db = dynamic_cast(db); std::string some_value("value_some"); std::string uncommitted_value("value_uncommitted"); @@ -3347,7 +3354,7 @@ TEST_P(WritePreparedTransactionTest, CommitOfDelayedPrepared) { for (const size_t commit_cache_bits : {0, 2, 3}) { for (const size_t sub_batch_cnt : {1, 2, 3}) { UpdateTransactionDBOptions(snapshot_cache_bits, commit_cache_bits); - ReOpen(); + ASSERT_OK(ReOpen()); std::atomic snap = {nullptr}; std::atomic exp_prepare = {0}; ROCKSDB_NAMESPACE::port::Thread callback_thread; @@ -3385,7 +3392,7 @@ TEST_P(WritePreparedTransactionTest, CommitOfDelayedPrepared) { // Too many txns might cause commit_seq - prepare_seq in another thread // to go beyond DELTA_UPPERBOUND for (int i = 0; i < 25 * (1 << commit_cache_bits); i++) { - db->Put(WriteOptions(), Slice("key1"), Slice("value1")); + ASSERT_OK(db->Put(WriteOptions(), Slice("key1"), Slice("value1"))); } }); ROCKSDB_NAMESPACE::port::Thread write_thread([&]() { @@ -3448,7 +3455,7 @@ TEST_P(WritePreparedTransactionTest, AtomicCommit) { ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing(); ROCKSDB_NAMESPACE::port::Thread write_thread([&]() { if (skip_prepare) { - db->Put(WriteOptions(), Slice("key"), Slice("value")); + ASSERT_OK(db->Put(WriteOptions(), Slice("key"), Slice("value"))); } else { Transaction* txn = db->BeginTransaction(WriteOptions(), TransactionOptions()); diff --git a/utilities/transactions/write_prepared_txn.cc b/utilities/transactions/write_prepared_txn.cc index e80f10239a..5f666b2801 100644 --- a/utilities/transactions/write_prepared_txn.cc +++ b/utilities/transactions/write_prepared_txn.cc @@ -70,16 +70,21 @@ Status WritePreparedTxn::Get(const ReadOptions& options, wpt_db_->AssignMinMaxSeqs(options.snapshot, &min_uncommitted, &snap_seq); WritePreparedTxnReadCallback 
callback(wpt_db_, snap_seq, min_uncommitted, backed_by_snapshot); - auto res = write_batch_.GetFromBatchAndDB(db_, options, column_family, key, - pinnable_val, &callback); - if (LIKELY(callback.valid() && - wpt_db_->ValidateSnapshot(callback.max_visible_seq(), - backed_by_snapshot))) { - return res; - } else { - wpt_db_->WPRecordTick(TXN_GET_TRY_AGAIN); - return Status::TryAgain(); + Status res = write_batch_.GetFromBatchAndDB(db_, options, column_family, key, + pinnable_val, &callback); + const bool callback_valid = + callback.valid(); // NOTE: validity of callback must always be checked + // before it is destructed + if (res.ok()) { + if (!LIKELY(callback_valid && + wpt_db_->ValidateSnapshot(callback.max_visible_seq(), + backed_by_snapshot))) { + wpt_db_->WPRecordTick(TXN_GET_TRY_AGAIN); + res = Status::TryAgain(); + } } + + return res; } Iterator* WritePreparedTxn::GetIterator(const ReadOptions& options) { diff --git a/utilities/transactions/write_unprepared_transaction_test.cc b/utilities/transactions/write_unprepared_transaction_test.cc index 7152ad9cdb..0c1d32e359 100644 --- a/utilities/transactions/write_unprepared_transaction_test.cc +++ b/utilities/transactions/write_unprepared_transaction_test.cc @@ -73,7 +73,7 @@ TEST_P(WriteUnpreparedTransactionTest, ReadYourOwnWrite) { for (uint64_t max_skip : {0, std::numeric_limits::max()}) { options.max_sequential_skip_in_iterations = max_skip; options.disable_auto_compactions = true; - ReOpen(); + ASSERT_OK(ReOpen()); TransactionOptions txn_options; WriteOptions woptions; @@ -90,7 +90,7 @@ TEST_P(WriteUnpreparedTransactionTest, ReadYourOwnWrite) { std::string stored_value = "v" + ToString(i); ASSERT_OK(txn->Put("a", stored_value)); ASSERT_OK(txn->Put("b", stored_value)); - wup_txn->FlushWriteBatchToDB(false); + ASSERT_OK(wup_txn->FlushWriteBatchToDB(false)); // Test Get() std::string value; @@ -155,7 +155,7 @@ TEST_P(WriteUnpreparedStressTest, ReadYourOwnWriteStress) { WriteOptions write_options; 
txn_db_options.transaction_lock_timeout = -1; options.disable_auto_compactions = true; - ReOpen(); + ASSERT_OK(ReOpen()); std::vector keys; for (uint32_t k = 0; k < kNumKeys * kNumThreads; k++) { @@ -188,7 +188,7 @@ TEST_P(WriteUnpreparedStressTest, ReadYourOwnWriteStress) { } txn = db->BeginTransaction(write_options, txn_options); - txn->SetName(ToString(id)); + ASSERT_OK(txn->SetName(ToString(id))); txn->SetSnapshot(); if (a >= RO_SNAPSHOT) { read_options.snapshot = txn->GetSnapshot(); @@ -273,23 +273,27 @@ TEST_P(WriteUnpreparedStressTest, ReadYourOwnWriteStress) { case 1: // Validate Next() { Iterator* iter = txn->GetIterator(read_options); + ASSERT_OK(iter->status()); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { verify_key(iter->key().ToString(), iter->value().ToString()); } + ASSERT_OK(iter->status()); delete iter; break; } case 2: // Validate Prev() { Iterator* iter = txn->GetIterator(read_options); + ASSERT_OK(iter->status()); for (iter->SeekToLast(); iter->Valid(); iter->Prev()) { verify_key(iter->key().ToString(), iter->value().ToString()); } + ASSERT_OK(iter->status()); delete iter; break; } default: - ASSERT_TRUE(false); + FAIL(); } if (rnd.OneIn(2)) { @@ -334,7 +338,7 @@ TEST_P(WriteUnpreparedTransactionTest, RecoveryTest) { for (int num_batches = 1; num_batches < 10; num_batches++) { // Reset database. prepared_trans.clear(); - ReOpen(); + ASSERT_OK(ReOpen()); wup_db = dynamic_cast(db); if (!empty) { for (int i = 0; i < num_batches; i++) { @@ -346,7 +350,7 @@ TEST_P(WriteUnpreparedTransactionTest, RecoveryTest) { // Write num_batches unprepared batches. 
Transaction* txn = db->BeginTransaction(write_options, txn_options); WriteUnpreparedTxn* wup_txn = dynamic_cast(txn); - txn->SetName("xid"); + ASSERT_OK(txn->SetName("xid")); for (int i = 0; i < num_batches; i++) { ASSERT_OK(txn->Put("k" + ToString(i), "value" + ToString(i))); if (txn_options.write_batch_flush_threshold == 1) { @@ -365,14 +369,14 @@ TEST_P(WriteUnpreparedTransactionTest, RecoveryTest) { // test that recovery does the rollback. wup_txn->unprep_seqs_.clear(); } else { - txn->Prepare(); + ASSERT_OK(txn->Prepare()); } delete txn; // Crash and run recovery code paths. - wup_db->db_impl_->FlushWAL(true); + ASSERT_OK(wup_db->db_impl_->FlushWAL(true)); wup_db->TEST_Crash(); - ReOpenNoDelete(); + ASSERT_OK(ReOpenNoDelete()); assert(db != nullptr); db->GetAllPreparedTransactions(&prepared_trans); @@ -386,6 +390,7 @@ TEST_P(WriteUnpreparedTransactionTest, RecoveryTest) { } Iterator* iter = db->NewIterator(ReadOptions()); + ASSERT_OK(iter->status()); iter->SeekToFirst(); // Check that DB has before values. if (!empty || a == COMMIT) { @@ -402,6 +407,7 @@ TEST_P(WriteUnpreparedTransactionTest, RecoveryTest) { } } ASSERT_FALSE(iter->Valid()); + ASSERT_OK(iter->status()); delete iter; } } @@ -422,13 +428,13 @@ TEST_P(WriteUnpreparedTransactionTest, UnpreparedBatch) { txn_options.write_batch_flush_threshold = batch_size; for (bool prepare : {false, true}) { for (bool commit : {false, true}) { - ReOpen(); + ASSERT_OK(ReOpen()); Transaction* txn = db->BeginTransaction(write_options, txn_options); WriteUnpreparedTxn* wup_txn = dynamic_cast(txn); - txn->SetName("xid"); + ASSERT_OK(txn->SetName("xid")); for (int i = 0; i < kNumKeys; i++) { - txn->Put("k" + ToString(i), "v" + ToString(i)); + ASSERT_OK(txn->Put("k" + ToString(i), "v" + ToString(i))); if (txn_options.write_batch_flush_threshold == 1) { // WriteUnprepared will check write_batch_flush_threshold and // possibly flush before appending to the write batch. 
No flush will @@ -445,9 +451,11 @@ TEST_P(WriteUnpreparedTransactionTest, UnpreparedBatch) { } Iterator* iter = db->NewIterator(ReadOptions()); + ASSERT_OK(iter->status()); iter->SeekToFirst(); assert(!iter->Valid()); ASSERT_FALSE(iter->Valid()); + ASSERT_OK(iter->status()); delete iter; if (commit) { @@ -458,6 +466,7 @@ TEST_P(WriteUnpreparedTransactionTest, UnpreparedBatch) { delete txn; iter = db->NewIterator(ReadOptions()); + ASSERT_OK(iter->status()); iter->SeekToFirst(); for (int i = 0; i < (commit ? kNumKeys : 0); i++) { @@ -467,6 +476,7 @@ TEST_P(WriteUnpreparedTransactionTest, UnpreparedBatch) { iter->Next(); } ASSERT_FALSE(iter->Valid()); + ASSERT_OK(iter->status()); delete iter; } } @@ -490,7 +500,7 @@ TEST_P(WriteUnpreparedTransactionTest, MarkLogWithPrepSection) { for (bool prepare : {false, true}) { for (bool commit : {false, true}) { - ReOpen(); + ASSERT_OK(ReOpen()); auto wup_db = dynamic_cast(db); auto db_impl = wup_db->db_impl_; @@ -508,7 +518,7 @@ TEST_P(WriteUnpreparedTransactionTest, MarkLogWithPrepSection) { } if (i > 0) { - db_impl->TEST_SwitchWAL(); + ASSERT_OK(db_impl->TEST_SwitchWAL()); } } @@ -568,12 +578,14 @@ TEST_P(WriteUnpreparedTransactionTest, NoSnapshotWrite) { // snapshot, if iterator snapshot is fresh enough. 
ReadOptions roptions; auto iter = txn->GetIterator(roptions); + ASSERT_OK(iter->status()); int keys = 0; for (iter->SeekToLast(); iter->Valid(); iter->Prev(), keys++) { ASSERT_OK(iter->status()); ASSERT_EQ(iter->key().ToString(), iter->value().ToString()); } ASSERT_EQ(keys, 3); + ASSERT_OK(iter->status()); delete iter; delete txn; @@ -598,6 +610,7 @@ TEST_P(WriteUnpreparedTransactionTest, IterateAndWrite) { ReadOptions roptions; auto iter = txn->GetIterator(roptions); + ASSERT_OK(iter->status()); for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { ASSERT_OK(iter->status()); if (iter->key() == "9") { @@ -612,11 +625,13 @@ TEST_P(WriteUnpreparedTransactionTest, IterateAndWrite) { ASSERT_OK(txn->Put(iter->key(), "b")); } } + ASSERT_OK(iter->status()); delete iter; ASSERT_OK(txn->Commit()); iter = db->NewIterator(roptions); + ASSERT_OK(iter->status()); if (a == DO_DELETE) { // Check that db is empty. iter->SeekToFirst(); @@ -630,6 +645,7 @@ TEST_P(WriteUnpreparedTransactionTest, IterateAndWrite) { } ASSERT_EQ(keys, 100); } + ASSERT_OK(iter->status()); delete iter; delete txn; diff --git a/utilities/transactions/write_unprepared_txn_db.cc b/utilities/transactions/write_unprepared_txn_db.cc index 9de447d92d..0ef96d0a45 100644 --- a/utilities/transactions/write_unprepared_txn_db.cc +++ b/utilities/transactions/write_unprepared_txn_db.cc @@ -167,7 +167,10 @@ Status WriteUnpreparedTxnDB::RollbackRecoveredTransaction( } // The Rollback marker will be used as a batch separator - WriteBatchInternal::MarkRollback(&rollback_batch, rtxn->name_); + s = WriteBatchInternal::MarkRollback(&rollback_batch, rtxn->name_); + if (!s.ok()) { + return s; + } const uint64_t kNoLogRef = 0; const bool kDisableMemtable = true;