Add a few unit test cases in ASSERT_STATUS_CHECKED (#7500)
Summary:
Add status enforcement for the following tests:
1. import_column_family_test
2. memory_test
3. table_test

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7500
Reviewed By: zhichao-cao
Differential Revision: D24095887
Pulled By: akankshamahajan15
fbshipit-source-id: db8e1ec595852df143fad78a0c07bfdd27dc3c84
parent 810ab34ede
commit 24498ab1ec
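For context on what the diff below is enforcing: in the ASSERT_STATUS_CHECKED build, a rocksdb::Status that is destroyed without ever being examined fails an assertion, so every test added to the passing list must check each Status it receives or explicitly opt out. The following is a minimal standalone C++ sketch of that contract; ToyStatus and DoWork() are invented stand-ins for illustration, not RocksDB's actual implementation.

#include <cassert>

// Minimal sketch of a "must be checked before destruction" status type,
// loosely modeling the behavior ASSERT_STATUS_CHECKED enforces for
// rocksdb::Status. ToyStatus is a hypothetical class, not the real one.
class ToyStatus {
 public:
  static ToyStatus OK() { return ToyStatus(0); }

  ToyStatus(ToyStatus&& other) noexcept
      : code_(other.code_), checked_(other.checked_) {
    other.checked_ = true;  // a moved-from status no longer needs checking
  }

  ~ToyStatus() {
    // Destroying a status nobody looked at is the failure mode the tests in
    // this commit are being fixed to avoid.
    assert(checked_ && "Status was never checked");
  }

  bool ok() const {
    checked_ = true;
    return code_ == 0;
  }

  // Mark the status as intentionally ignored (analogous to the real
  // Status::PermitUncheckedError() used in the tracer changes below).
  void PermitUncheckedError() const { checked_ = true; }

 private:
  explicit ToyStatus(int code) : code_(code) {}
  int code_;
  mutable bool checked_ = false;
};

ToyStatus DoWork() { return ToyStatus::OK(); }

int main() {
  ToyStatus s = DoWork();
  assert(s.ok());                   // what ASSERT_OK()/EXPECT_OK() boil down to
  DoWork().PermitUncheckedError();  // explicit opt-out when ignoring is intended
  return 0;
}

In the changes below, the tests satisfy this contract by wrapping calls in ASSERT_OK()/EXPECT_OK(), while block cache tracer writes whose results are deliberately ignored call PermitUncheckedError() instead.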
Makefile (3 changed lines)
@@ -664,6 +664,9 @@ ifdef ASSERT_STATUS_CHECKED
   file_reader_writer_test \
   corruption_test \
   db_universal_compaction_test \
+  import_column_family_test \
+  memory_test \
+  table_test \
 
 ifeq ($(USE_FOLLY_DISTRIBUTED_MUTEX),1)
   TESTS_PASSING_ASC += folly_synchronization_distributed_mutex_test
@@ -4710,8 +4710,14 @@ Status DBImpl::CreateColumnFamilyWithImport(

   import_job.Cleanup(status);
   if (!status.ok()) {
-    DropColumnFamily(*handle);
-    DestroyColumnFamilyHandle(*handle);
+    Status temp_s = DropColumnFamily(*handle);
+    if (!temp_s.ok()) {
+      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                      "DropColumnFamily failed with error %s",
+                      temp_s.ToString().c_str());
+    }
+    // Always returns Status::OK()
+    assert(DestroyColumnFamilyHandle(*handle).ok());
     *handle = nullptr;
   }
   return status;
@@ -25,27 +25,27 @@ class ImportColumnFamilyTest : public DBTestBase {

   ~ImportColumnFamilyTest() {
     if (import_cfh_) {
-      db_->DropColumnFamily(import_cfh_);
-      db_->DestroyColumnFamilyHandle(import_cfh_);
+      EXPECT_OK(db_->DropColumnFamily(import_cfh_));
+      EXPECT_OK(db_->DestroyColumnFamilyHandle(import_cfh_));
       import_cfh_ = nullptr;
     }
     if (import_cfh2_) {
-      db_->DropColumnFamily(import_cfh2_);
-      db_->DestroyColumnFamilyHandle(import_cfh2_);
+      EXPECT_OK(db_->DropColumnFamily(import_cfh2_));
+      EXPECT_OK(db_->DestroyColumnFamilyHandle(import_cfh2_));
       import_cfh2_ = nullptr;
     }
     if (metadata_ptr_) {
       delete metadata_ptr_;
       metadata_ptr_ = nullptr;
     }
-    DestroyDir(env_, sst_files_dir_);
-    DestroyDir(env_, export_files_dir_);
+    EXPECT_OK(DestroyDir(env_, sst_files_dir_));
+    EXPECT_OK(DestroyDir(env_, export_files_dir_));
   }

   void DestroyAndRecreateExternalSSTFilesDir() {
-    DestroyDir(env_, sst_files_dir_);
-    env_->CreateDir(sst_files_dir_);
-    DestroyDir(env_, export_files_dir_);
+    EXPECT_OK(DestroyDir(env_, sst_files_dir_));
+    EXPECT_OK(env_->CreateDir(sst_files_dir_));
+    EXPECT_OK(DestroyDir(env_, export_files_dir_));
   }

   LiveFileMetaData LiveFileMetaDataInit(std::string name, std::string path,
@@ -143,7 +143,7 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFilesWithOverlap) {
   const std::string file3_sst = sst_files_dir_ + file3_sst_name;
   ASSERT_OK(sfw_cf1.Open(file3_sst));
   for (int i = 0; i < 100; ++i) {
-    sfw_cf1.Put(Key(i), Key(i) + "_val");
+    ASSERT_OK(sfw_cf1.Put(Key(i), Key(i) + "_val"));
   }
   ASSERT_OK(sfw_cf1.Finish());

@@ -152,7 +152,7 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFilesWithOverlap) {
   const std::string file2_sst = sst_files_dir_ + file2_sst_name;
   ASSERT_OK(sfw_cf1.Open(file2_sst));
   for (int i = 0; i < 100; i += 2) {
-    sfw_cf1.Put(Key(i), Key(i) + "_overwrite1");
+    ASSERT_OK(sfw_cf1.Put(Key(i), Key(i) + "_overwrite1"));
   }
   ASSERT_OK(sfw_cf1.Finish());

@@ -161,7 +161,7 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFilesWithOverlap) {
   const std::string file1a_sst = sst_files_dir_ + file1a_sst_name;
   ASSERT_OK(sfw_cf1.Open(file1a_sst));
   for (int i = 0; i < 52; i += 4) {
-    sfw_cf1.Put(Key(i), Key(i) + "_overwrite2");
+    ASSERT_OK(sfw_cf1.Put(Key(i), Key(i) + "_overwrite2"));
   }
   ASSERT_OK(sfw_cf1.Finish());

@@ -170,7 +170,7 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFilesWithOverlap) {
   const std::string file1b_sst = sst_files_dir_ + file1b_sst_name;
   ASSERT_OK(sfw_cf1.Open(file1b_sst));
   for (int i = 52; i < 100; i += 4) {
-    sfw_cf1.Put(Key(i), Key(i) + "_overwrite2");
+    ASSERT_OK(sfw_cf1.Put(Key(i), Key(i) + "_overwrite2"));
   }
   ASSERT_OK(sfw_cf1.Finish());

@@ -179,7 +179,7 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFilesWithOverlap) {
   const std::string file0a_sst = sst_files_dir_ + file0a_sst_name;
   ASSERT_OK(sfw_cf1.Open(file0a_sst));
   for (int i = 0; i < 100; i += 16) {
-    sfw_cf1.Put(Key(i), Key(i) + "_overwrite3");
+    ASSERT_OK(sfw_cf1.Put(Key(i), Key(i) + "_overwrite3"));
   }
   ASSERT_OK(sfw_cf1.Finish());

@@ -188,7 +188,7 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFilesWithOverlap) {
   const std::string file0b_sst = sst_files_dir_ + file0b_sst_name;
   ASSERT_OK(sfw_cf1.Open(file0b_sst));
   for (int i = 0; i < 100; i += 16) {
-    sfw_cf1.Put(Key(i), Key(i) + "_overwrite4");
+    ASSERT_OK(sfw_cf1.Put(Key(i), Key(i) + "_overwrite4"));
   }
   ASSERT_OK(sfw_cf1.Finish());

@@ -274,7 +274,7 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherCF) {
   CreateAndReopenWithCF({"koko"}, options);

   for (int i = 0; i < 100; ++i) {
-    Put(1, Key(i), Key(i) + "_val");
+    ASSERT_OK(Put(1, Key(i), Key(i) + "_val"));
   }
   ASSERT_OK(Flush(1));

@@ -283,13 +283,13 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherCF) {

   // Overwrite the value in the same set of keys.
   for (int i = 0; i < 100; ++i) {
-    Put(1, Key(i), Key(i) + "_overwrite");
+    ASSERT_OK(Put(1, Key(i), Key(i) + "_overwrite"));
   }

   // Flush to create L0 file.
   ASSERT_OK(Flush(1));
   for (int i = 0; i < 100; ++i) {
-    Put(1, Key(i), Key(i) + "_overwrite2");
+    ASSERT_OK(Put(1, Key(i), Key(i) + "_overwrite2"));
   }

   // Flush again to create another L0 file. It should have higher sequencer.
@@ -382,7 +382,7 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherDB) {
   CreateAndReopenWithCF({"koko"}, options);

   for (int i = 0; i < 100; ++i) {
-    Put(1, Key(i), Key(i) + "_val");
+    ASSERT_OK(Put(1, Key(i), Key(i) + "_val"));
   }
   ASSERT_OK(Flush(1));

@@ -392,14 +392,14 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherDB) {

   // Overwrite the value in the same set of keys.
   for (int i = 0; i < 50; ++i) {
-    Put(1, Key(i), Key(i) + "_overwrite");
+    ASSERT_OK(Put(1, Key(i), Key(i) + "_overwrite"));
   }

   // Flush to create L0 file.
   ASSERT_OK(Flush(1));

   for (int i = 0; i < 25; ++i) {
-    Put(1, Key(i), Key(i) + "_overwrite2");
+    ASSERT_OK(Put(1, Key(i), Key(i) + "_overwrite2"));
   }

   // Flush again to create another L0 file. It should have higher sequencer.
@@ -414,7 +414,7 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherDB) {

   // Create a new db and import the files.
   DB* db_copy;
-  DestroyDir(env_, dbname_ + "/db_copy");
+  ASSERT_OK(DestroyDir(env_, dbname_ + "/db_copy"));
   ASSERT_OK(DB::Open(options, dbname_ + "/db_copy", &db_copy));
   ColumnFamilyHandle* cfh = nullptr;
   ASSERT_OK(db_copy->CreateColumnFamilyWithImport(ColumnFamilyOptions(), "yoyo",
@@ -427,10 +427,10 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherDB) {
     db_copy->Get(ReadOptions(), cfh, Key(i), &value);
     ASSERT_EQ(Get(1, Key(i)), value);
   }
-  db_copy->DropColumnFamily(cfh);
-  db_copy->DestroyColumnFamilyHandle(cfh);
+  ASSERT_OK(db_copy->DropColumnFamily(cfh));
+  ASSERT_OK(db_copy->DestroyColumnFamilyHandle(cfh));
   delete db_copy;
-  DestroyDir(env_, dbname_ + "/db_copy");
+  ASSERT_OK(DestroyDir(env_, dbname_ + "/db_copy"));
 }

 TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) {
@@ -474,7 +474,7 @@ TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) {

   // Create a new db and import the files.
   DB* db_copy;
-  DestroyDir(env_, dbname_ + "/db_copy");
+  ASSERT_OK(DestroyDir(env_, dbname_ + "/db_copy"));
   ASSERT_OK(DB::Open(options, dbname_ + "/db_copy", &db_copy));
   ColumnFamilyHandle* cfh = nullptr;
   ASSERT_OK(db_copy->CreateColumnFamilyWithImport(ColumnFamilyOptions(), "yoyo",
@@ -486,10 +486,10 @@ TEST_F(ImportColumnFamilyTest, LevelFilesOverlappingAtEndpoints) {
     std::string value;
     ASSERT_OK(db_copy->Get(ReadOptions(), cfh, "key", &value));
   }
-  db_copy->DropColumnFamily(cfh);
-  db_copy->DestroyColumnFamilyHandle(cfh);
+  ASSERT_OK(db_copy->DropColumnFamily(cfh));
+  ASSERT_OK(db_copy->DestroyColumnFamilyHandle(cfh));
   delete db_copy;
-  DestroyDir(env_, dbname_ + "/db_copy");
+  ASSERT_OK(DestroyDir(env_, dbname_ + "/db_copy"));
   for (const Snapshot* snapshot : snapshots) {
     db_->ReleaseSnapshot(snapshot);
   }
@@ -1568,9 +1568,11 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
           no_insert, lookup_context->get_id,
           lookup_context->get_from_user_specified_snapshot,
           /*referenced_key=*/"");
-      block_cache_tracer_->WriteBlockAccess(access_record, key,
-                                            rep_->cf_name_for_tracing(),
-                                            lookup_context->referenced_key);
+      // TODO: Should handle this error?
+      block_cache_tracer_
+          ->WriteBlockAccess(access_record, key, rep_->cf_name_for_tracing(),
+                             lookup_context->referenced_key)
+          .PermitUncheckedError();
     }
   }

@@ -2392,9 +2394,12 @@ Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
             /*referenced_key=*/"", referenced_data_size,
             lookup_data_block_context.num_keys_in_block,
             does_referenced_key_exist);
-        block_cache_tracer_->WriteBlockAccess(
-            access_record, lookup_data_block_context.block_key,
-            rep_->cf_name_for_tracing(), referenced_key);
+        // TODO: Should handle status here?
+        block_cache_tracer_
+            ->WriteBlockAccess(access_record,
+                               lookup_data_block_context.block_key,
+                               rep_->cf_name_for_tracing(), referenced_key)
+            .PermitUncheckedError();
       }

       if (done) {
@@ -2723,9 +2728,12 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
               /*referenced_key=*/"", referenced_data_size,
               lookup_data_block_context.num_keys_in_block,
               does_referenced_key_exist);
-          block_cache_tracer_->WriteBlockAccess(
-              access_record, lookup_data_block_context.block_key,
-              rep_->cf_name_for_tracing(), referenced_key);
+          // TODO: Should handle status here?
+          block_cache_tracer_
+              ->WriteBlockAccess(access_record,
+                                 lookup_data_block_context.block_key,
+                                 rep_->cf_name_for_tracing(), referenced_key)
+              .PermitUncheckedError();
         }
         s = biter->status();
         if (done) {
@@ -564,7 +564,7 @@ class DBConstructor: public Constructor {
     NewDB();
     for (const auto& kv : kv_map) {
       WriteBatch batch;
-      batch.Put(kv.first, kv.second);
+      EXPECT_OK(batch.Put(kv.first, kv.second));
       EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
     }
     return Status::OK();
@@ -872,12 +872,15 @@ class HarnessTest : public testing::Test {
     InternalIterator* iter = constructor_->NewIterator();
     ASSERT_TRUE(!iter->Valid());
     iter->SeekToFirst();
+    ASSERT_OK(iter->status());
     for (stl_wrappers::KVMap::const_iterator model_iter = data.begin();
          model_iter != data.end(); ++model_iter) {
       ASSERT_EQ(ToString(data, model_iter), ToString(iter));
       iter->Next();
+      ASSERT_OK(iter->status());
     }
     ASSERT_TRUE(!iter->Valid());
+    ASSERT_OK(iter->status());
     if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
       iter->~InternalIterator();
     } else {
@@ -890,12 +893,15 @@ class HarnessTest : public testing::Test {
     InternalIterator* iter = constructor_->NewIterator();
     ASSERT_TRUE(!iter->Valid());
     iter->SeekToLast();
+    ASSERT_OK(iter->status());
     for (stl_wrappers::KVMap::const_reverse_iterator model_iter = data.rbegin();
          model_iter != data.rend(); ++model_iter) {
       ASSERT_EQ(ToString(data, model_iter), ToString(iter));
       iter->Prev();
+      ASSERT_OK(iter->status());
     }
     ASSERT_TRUE(!iter->Valid());
+    ASSERT_OK(iter->status());
     if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
       iter->~InternalIterator();
     } else {
@@ -917,6 +923,7 @@ class HarnessTest : public testing::Test {
         if (iter->Valid()) {
           if (kVerbose) fprintf(stderr, "Next\n");
           iter->Next();
+          ASSERT_OK(iter->status());
           ++model_iter;
           ASSERT_EQ(ToString(data, model_iter), ToString(iter));
         }
@@ -926,6 +933,7 @@ class HarnessTest : public testing::Test {
       case 1: {
         if (kVerbose) fprintf(stderr, "SeekToFirst\n");
         iter->SeekToFirst();
+        ASSERT_OK(iter->status());
         model_iter = data.begin();
         ASSERT_EQ(ToString(data, model_iter), ToString(iter));
         break;
@@ -937,6 +945,7 @@ class HarnessTest : public testing::Test {
         if (kVerbose) fprintf(stderr, "Seek '%s'\n",
                               EscapeString(key).c_str());
         iter->Seek(Slice(key));
+        ASSERT_OK(iter->status());
         ASSERT_EQ(ToString(data, model_iter), ToString(iter));
         break;
       }
@@ -945,6 +954,7 @@ class HarnessTest : public testing::Test {
         if (iter->Valid()) {
           if (kVerbose) fprintf(stderr, "Prev\n");
           iter->Prev();
+          ASSERT_OK(iter->status());
           if (model_iter == data.begin()) {
             model_iter = data.end();  // Wrap around to invalid value
           } else {
@@ -958,6 +968,7 @@ class HarnessTest : public testing::Test {
       case 4: {
         if (kVerbose) fprintf(stderr, "SeekToLast\n");
         iter->SeekToLast();
+        ASSERT_OK(iter->status());
         if (keys.empty()) {
           model_iter = data.end();
         } else {
@@ -1117,7 +1128,10 @@ class BlockBasedTableTest
     std::unique_ptr<TraceWriter> trace_writer;
     EXPECT_OK(NewFileTraceWriter(env_, EnvOptions(), trace_file_path_,
                                  &trace_writer));
-    c->block_cache_tracer_.StartTrace(env_, trace_opt, std::move(trace_writer));
+    // Always return Status::OK().
+    assert(c->block_cache_tracer_
+               .StartTrace(env_, trace_opt, std::move(trace_writer))
+               .ok());
     {
       std::string user_key = "k01";
       InternalKey internal_key(user_key, 0, kTypeValue);
@@ -1888,7 +1902,7 @@ TEST_P(BlockBasedTableTest, SkipPrefixBloomFilter) {
   options.prefix_extractor.reset(NewFixedPrefixTransform(9));
   const ImmutableCFOptions new_ioptions(options);
   const MutableCFOptions new_moptions(options);
-  c.Reopen(new_ioptions, new_moptions);
+  ASSERT_OK(c.Reopen(new_ioptions, new_moptions));
   auto reader = c.GetTableReader();
   ReadOptions read_options;
   std::unique_ptr<InternalIterator> db_iter(reader->NewIterator(
@@ -2826,8 +2840,8 @@ TEST_P(BlockBasedTableTest, BlockCacheDisabledTest) {
                          GetContext::kNotFound, Slice(), nullptr, nullptr,
                          nullptr, true, nullptr, nullptr);
   // a hack that just to trigger BlockBasedTable::GetFilter.
-  reader->Get(ReadOptions(), "non-exist-key", &get_context,
-              moptions.prefix_extractor.get());
+  ASSERT_OK(reader->Get(ReadOptions(), "non-exist-key", &get_context,
+                        moptions.prefix_extractor.get()));
   BlockCachePropertiesSnapshot props(options.statistics.get());
   props.AssertIndexBlockStat(0, 0);
   props.AssertFilterBlockStat(0, 0);
@@ -2901,6 +2915,7 @@ TEST_P(BlockBasedTableTest, FilterBlockInBlockCache) {
   // Only data block will be accessed
   {
     iter->SeekToFirst();
+    ASSERT_OK(iter->status());
     BlockCachePropertiesSnapshot props(options.statistics.get());
     props.AssertEqual(1, 1, 0 + 1,  // data block miss
                       0);
@@ -2915,6 +2930,7 @@ TEST_P(BlockBasedTableTest, FilterBlockInBlockCache) {
   {
     iter.reset(c.NewIterator(moptions.prefix_extractor.get()));
     iter->SeekToFirst();
+    ASSERT_OK(iter->status());
     BlockCachePropertiesSnapshot props(options.statistics.get());
     props.AssertEqual(1, 1 + 1, /* index block hit */
                       1, 0 + 1 /* data block hit */);
@@ -2936,7 +2952,7 @@ TEST_P(BlockBasedTableTest, FilterBlockInBlockCache) {
   options.table_factory.reset(new BlockBasedTableFactory(table_options));
   const ImmutableCFOptions ioptions2(options);
   const MutableCFOptions moptions2(options);
-  c.Reopen(ioptions2, moptions2);
+  ASSERT_OK(c.Reopen(ioptions2, moptions2));
   {
     BlockCachePropertiesSnapshot props(options.statistics.get());
     props.AssertEqual(1,  // index block miss
@@ -2962,6 +2978,7 @@ TEST_P(BlockBasedTableTest, FilterBlockInBlockCache) {
     // SeekToFirst() accesses data block. With similar reason, we expect data
     // block's cache miss.
     iter->SeekToFirst();
+    ASSERT_OK(iter->status());
     BlockCachePropertiesSnapshot props(options.statistics.get());
     props.AssertEqual(2, 0, 0 + 1,  // data block miss
                       0);
@@ -3298,7 +3315,7 @@ TEST_P(BlockBasedTableTest, NoFileChecksum) {
                           column_family_name, level),
       TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
       f.GetFileWriter()));
-  f.ResetTableBuilder(std::move(builder));
+  ASSERT_OK(f.ResetTableBuilder(std::move(builder)));
   f.AddKVtoKVMap(1000);
   f.WriteKVAndFlushTable();
   ASSERT_STREQ(f.GetFileChecksumFuncName(), kUnknownFileChecksumFuncName);
@@ -3337,7 +3354,7 @@ TEST_P(BlockBasedTableTest, Crc32cFileChecksum) {
                           column_family_name, level),
       TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
       f.GetFileWriter()));
-  f.ResetTableBuilder(std::move(builder));
+  ASSERT_OK(f.ResetTableBuilder(std::move(builder)));
   f.AddKVtoKVMap(1000);
   f.WriteKVAndFlushTable();
   ASSERT_STREQ(f.GetFileChecksumFuncName(), "FileChecksumCrc32c");
@@ -3443,7 +3460,7 @@ TEST_F(PlainTableTest, NoFileChecksum) {
           false /* skip_filters */, column_family_name, unknown_level),
       TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
       f.GetFileWriter()));
-  f.ResetTableBuilder(std::move(builder));
+  ASSERT_OK(f.ResetTableBuilder(std::move(builder)));
   f.AddKVtoKVMap(1000);
   f.WriteKVAndFlushTable();
   ASSERT_STREQ(f.GetFileChecksumFuncName(), kUnknownFileChecksumFuncName);
@@ -3485,7 +3502,7 @@ TEST_F(PlainTableTest, Crc32cFileChecksum) {
           false /* skip_filters */, column_family_name, unknown_level),
       TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
       f.GetFileWriter()));
-  f.ResetTableBuilder(std::move(builder));
+  ASSERT_OK(f.ResetTableBuilder(std::move(builder)));
   f.AddKVtoKVMap(1000);
   f.WriteKVAndFlushTable();
   ASSERT_STREQ(f.GetFileChecksumFuncName(), "FileChecksumCrc32c");
@@ -3659,12 +3676,12 @@ TEST_F(MemTableTest, Simple) {
   memtable->Ref();
   WriteBatch batch;
   WriteBatchInternal::SetSequence(&batch, 100);
-  batch.Put(std::string("k1"), std::string("v1"));
-  batch.Put(std::string("k2"), std::string("v2"));
-  batch.Put(std::string("k3"), std::string("v3"));
-  batch.Put(std::string("largekey"), std::string("vlarge"));
-  batch.DeleteRange(std::string("chi"), std::string("xigua"));
-  batch.DeleteRange(std::string("begin"), std::string("end"));
+  ASSERT_OK(batch.Put(std::string("k1"), std::string("v1")));
+  ASSERT_OK(batch.Put(std::string("k2"), std::string("v2")));
+  ASSERT_OK(batch.Put(std::string("k3"), std::string("v3")));
+  ASSERT_OK(batch.Put(std::string("largekey"), std::string("vlarge")));
+  ASSERT_OK(batch.DeleteRange(std::string("chi"), std::string("xigua")));
+  ASSERT_OK(batch.DeleteRange(std::string("begin"), std::string("end")));
   ColumnFamilyMemTablesDefault cf_mems_default(memtable);
   ASSERT_TRUE(
       WriteBatchInternal::InsertInto(&batch, &cf_mems_default, nullptr, nullptr)
@@ -3735,7 +3752,7 @@ TEST(TableTest, FooterTests) {
     footer.EncodeTo(&encoded);
     Footer decoded_footer;
     Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
+    ASSERT_OK(decoded_footer.DecodeFrom(&encoded_slice));
     ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
     ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
     ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
@@ -3755,7 +3772,7 @@ TEST(TableTest, FooterTests) {
     footer.EncodeTo(&encoded);
     Footer decoded_footer;
     Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
+    ASSERT_OK(decoded_footer.DecodeFrom(&encoded_slice));
     ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
     ASSERT_EQ(decoded_footer.checksum(), kxxHash);
     ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
@@ -3775,7 +3792,7 @@ TEST(TableTest, FooterTests) {
     footer.EncodeTo(&encoded);
     Footer decoded_footer;
     Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
+    ASSERT_OK(decoded_footer.DecodeFrom(&encoded_slice));
     ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
     ASSERT_EQ(decoded_footer.checksum(), kxxHash64);
     ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
@@ -3796,7 +3813,7 @@ TEST(TableTest, FooterTests) {
     footer.EncodeTo(&encoded);
     Footer decoded_footer;
     Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
+    ASSERT_OK(decoded_footer.DecodeFrom(&encoded_slice));
     ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber);
     ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
     ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
@@ -3816,7 +3833,7 @@ TEST(TableTest, FooterTests) {
     footer.EncodeTo(&encoded);
     Footer decoded_footer;
     Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
+    ASSERT_OK(decoded_footer.DecodeFrom(&encoded_slice));
     ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber);
     ASSERT_EQ(decoded_footer.checksum(), kxxHash);
     ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
@@ -3836,7 +3853,7 @@ TEST(TableTest, FooterTests) {
     footer.EncodeTo(&encoded);
     Footer decoded_footer;
     Slice encoded_slice(encoded);
-    decoded_footer.DecodeFrom(&encoded_slice);
+    ASSERT_OK(decoded_footer.DecodeFrom(&encoded_slice));
     ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
     ASSERT_EQ(decoded_footer.checksum(), kCRC32c);
     ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
@@ -3995,12 +4012,12 @@ TEST_F(PrefixTest, PrefixAndWholeKeyTest) {
     std::string prefix = "[" + std::to_string(i) + "]";
     for (int j = 0; j < 10; j++) {
       std::string key = prefix + std::to_string(j);
-      db->Put(ROCKSDB_NAMESPACE::WriteOptions(), key, "1");
+      ASSERT_OK(db->Put(ROCKSDB_NAMESPACE::WriteOptions(), key, "1"));
     }
   }

   // Trigger compaction.
-  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   delete db;
   // In the second round, turn whole_key_filtering off and expect
   // rocksdb still works.
@@ -4653,12 +4670,14 @@ TEST_P(BlockBasedTableTest, OutOfBoundOnSeek) {
       /*skip_filters=*/false, TableReaderCaller::kUncategorized)));
   iter->SeekToFirst();
   ASSERT_FALSE(iter->Valid());
+  ASSERT_OK(iter->status());
   ASSERT_TRUE(iter->UpperBoundCheckResult() == IterBoundCheck::kOutOfBound);
   iter.reset(new KeyConvertingIterator(reader->NewIterator(
       read_opt, /*prefix_extractor=*/nullptr, /*arena=*/nullptr,
       /*skip_filters=*/false, TableReaderCaller::kUncategorized)));
   iter->Seek("foo");
   ASSERT_FALSE(iter->Valid());
+  ASSERT_OK(iter->status());
   ASSERT_TRUE(iter->UpperBoundCheckResult() == IterBoundCheck::kOutOfBound);
 }

@@ -116,8 +116,8 @@ TEST_F(MemoryTest, SharedBlockCacheTotal) {
   for (int i = 0; i < kNumDBs; ++i) {
     for (int j = 0; j < 100; ++j) {
       keys_by_db[i].emplace_back(rnd_.RandomString(kKeySize));
-      dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
-                  rnd_.RandomString(kValueSize));
+      ASSERT_OK(dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
+                            rnd_.RandomString(kValueSize)));
     }
     dbs[i]->Flush(FlushOptions());
   }
@@ -174,8 +174,9 @@ TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
   for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
     for (int i = 0; i < kNumDBs; ++i) {
       for (auto* handle : vec_handles[i]) {
-        dbs[i]->Put(WriteOptions(), handle, rnd_.RandomString(kKeySize),
-                    rnd_.RandomString(kValueSize));
+        ASSERT_OK(dbs[i]->Put(WriteOptions(), handle,
+                              rnd_.RandomString(kKeySize),
+                              rnd_.RandomString(kValueSize)));
         UpdateUsagesHistory(dbs);
       }
     }
@@ -226,6 +227,8 @@ TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
   }
   usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
   for (int i = 0; i < kNumDBs; ++i) {
+    // iterator is not used.
+    ASSERT_OK(iters[i]->status());
     delete iters[i];
     UpdateUsagesHistory(dbs);
   }