mirror of https://github.com/facebook/rocksdb.git
upgrade gtest 1.7.0 => 1.8.1 for json result writing
Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/5332
Differential Revision: D17242232
fbshipit-source-id: c0d4646556a1335e51ac7382b986ca7f6ced7b64
parent adbc25a4c8
commit fbab9913e2
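The change matters because gtest 1.8.1 can write machine-readable JSON test reports, which 1.7.0 cannot. A minimal sketch of how a test binary built against the bundled gtest 1.8.1 could request such a report follows; it is only an illustration, not code from this commit, and the output path "results.json" is an arbitrary example.

// Illustrative only: relies on gtest 1.8.1's JSON report support; not part of this diff.
#include "gtest/gtest.h"

TEST(JsonReportExample, Addition) { EXPECT_EQ(2 + 2, 4); }

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Equivalent to running the binary with --gtest_output=json:results.json;
  // the report is written when RUN_ALL_TESTS() finishes.
  ::testing::GTEST_FLAG(output) = "json:results.json";
  return RUN_ALL_TESTS();
}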
@ -468,7 +468,7 @@ endif()
|
||||||
|
|
||||||
include_directories(${PROJECT_SOURCE_DIR})
|
include_directories(${PROJECT_SOURCE_DIR})
|
||||||
include_directories(${PROJECT_SOURCE_DIR}/include)
|
include_directories(${PROJECT_SOURCE_DIR}/include)
|
||||||
include_directories(SYSTEM ${PROJECT_SOURCE_DIR}/third-party/gtest-1.7.0/fused-src)
|
include_directories(SYSTEM ${PROJECT_SOURCE_DIR}/third-party/gtest-1.8.1/fused-src)
|
||||||
if(WITH_FOLLY_DISTRIBUTED_MUTEX)
|
if(WITH_FOLLY_DISTRIBUTED_MUTEX)
|
||||||
include_directories(${PROJECT_SOURCE_DIR}/third-party/folly)
|
include_directories(${PROJECT_SOURCE_DIR}/third-party/folly)
|
||||||
endif()
|
endif()
|
||||||
|
@ -879,7 +879,7 @@ endif()
|
||||||
|
|
||||||
option(WITH_TESTS "build with tests" ON)
|
option(WITH_TESTS "build with tests" ON)
|
||||||
if(WITH_TESTS)
|
if(WITH_TESTS)
|
||||||
add_subdirectory(third-party/gtest-1.7.0/fused-src/gtest)
|
add_subdirectory(third-party/gtest-1.8.1/fused-src/gtest)
|
||||||
set(TESTS
|
set(TESTS
|
||||||
cache/cache_test.cc
|
cache/cache_test.cc
|
||||||
cache/lru_cache_test.cc
|
cache/lru_cache_test.cc
|
||||||
|
|
2
Makefile
2
Makefile
|
@ -310,7 +310,7 @@ endif
|
||||||
|
|
||||||
export GTEST_THROW_ON_FAILURE=1
|
export GTEST_THROW_ON_FAILURE=1
|
||||||
export GTEST_HAS_EXCEPTIONS=1
|
export GTEST_HAS_EXCEPTIONS=1
|
||||||
GTEST_DIR = ./third-party/gtest-1.7.0/fused-src
|
GTEST_DIR = ./third-party/gtest-1.8.1/fused-src
|
||||||
# AIX: pre-defined system headers are surrounded by an extern "C" block
|
# AIX: pre-defined system headers are surrounded by an extern "C" block
|
||||||
ifeq ($(PLATFORM), OS_AIX)
|
ifeq ($(PLATFORM), OS_AIX)
|
||||||
PLATFORM_CCFLAGS += -I$(GTEST_DIR)
|
PLATFORM_CCFLAGS += -I$(GTEST_DIR)
|
||||||
|
|
|
@ -1504,7 +1504,7 @@ TEST_F(CompactionPickerTest, IntraL0MaxCompactionBytesNotHit) {
|
||||||
ASSERT_EQ(5U, compaction->num_input_files(0));
|
ASSERT_EQ(5U, compaction->num_input_files(0));
|
||||||
ASSERT_EQ(CompactionReason::kLevelL0FilesNum,
|
ASSERT_EQ(CompactionReason::kLevelL0FilesNum,
|
||||||
compaction->compaction_reason());
|
compaction->compaction_reason());
|
||||||
ASSERT_EQ(0U, compaction->output_level());
|
ASSERT_EQ(0, compaction->output_level());
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(CompactionPickerTest, IntraL0MaxCompactionBytesHit) {
|
TEST_F(CompactionPickerTest, IntraL0MaxCompactionBytesHit) {
|
||||||
|
@ -1534,7 +1534,7 @@ TEST_F(CompactionPickerTest, IntraL0MaxCompactionBytesHit) {
|
||||||
ASSERT_EQ(4U, compaction->num_input_files(0));
|
ASSERT_EQ(4U, compaction->num_input_files(0));
|
||||||
ASSERT_EQ(CompactionReason::kLevelL0FilesNum,
|
ASSERT_EQ(CompactionReason::kLevelL0FilesNum,
|
||||||
compaction->compaction_reason());
|
compaction->compaction_reason());
|
||||||
ASSERT_EQ(0U, compaction->output_level());
|
ASSERT_EQ(0, compaction->output_level());
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace rocksdb
|
} // namespace rocksdb
|
||||||
|
|
|
@ -484,11 +484,11 @@ TEST_F(DBBlockCacheTest, IndexAndFilterBlocksCachePriority) {
|
||||||
TestGetTickerCount(options, BLOCK_CACHE_ADD));
|
TestGetTickerCount(options, BLOCK_CACHE_ADD));
|
||||||
ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
|
ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
|
||||||
if (priority == Cache::Priority::LOW) {
|
if (priority == Cache::Priority::LOW) {
|
||||||
ASSERT_EQ(0, MockCache::high_pri_insert_count);
|
ASSERT_EQ(0u, MockCache::high_pri_insert_count);
|
||||||
ASSERT_EQ(2, MockCache::low_pri_insert_count);
|
ASSERT_EQ(2u, MockCache::low_pri_insert_count);
|
||||||
} else {
|
} else {
|
||||||
ASSERT_EQ(2, MockCache::high_pri_insert_count);
|
ASSERT_EQ(2u, MockCache::high_pri_insert_count);
|
||||||
ASSERT_EQ(0, MockCache::low_pri_insert_count);
|
ASSERT_EQ(0u, MockCache::low_pri_insert_count);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Access data block.
|
// Access data block.
|
||||||
|
@ -502,11 +502,11 @@ TEST_F(DBBlockCacheTest, IndexAndFilterBlocksCachePriority) {
|
||||||
|
|
||||||
// Data block should be inserted with low priority.
|
// Data block should be inserted with low priority.
|
||||||
if (priority == Cache::Priority::LOW) {
|
if (priority == Cache::Priority::LOW) {
|
||||||
ASSERT_EQ(0, MockCache::high_pri_insert_count);
|
ASSERT_EQ(0u, MockCache::high_pri_insert_count);
|
||||||
ASSERT_EQ(3, MockCache::low_pri_insert_count);
|
ASSERT_EQ(3u, MockCache::low_pri_insert_count);
|
||||||
} else {
|
} else {
|
||||||
ASSERT_EQ(2, MockCache::high_pri_insert_count);
|
ASSERT_EQ(2u, MockCache::high_pri_insert_count);
|
||||||
ASSERT_EQ(1, MockCache::low_pri_insert_count);
|
ASSERT_EQ(1u, MockCache::low_pri_insert_count);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -720,7 +720,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
|
||||||
cfilter_count = 0;
|
cfilter_count = 0;
|
||||||
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
||||||
// The filter should delete 40 records.
|
// The filter should delete 40 records.
|
||||||
ASSERT_EQ(40U, cfilter_count);
|
ASSERT_EQ(40, cfilter_count);
|
||||||
|
|
||||||
{
|
{
|
||||||
// Scan the entire database as of the snapshot to ensure
|
// Scan the entire database as of the snapshot to ensure
|
||||||
|
|
|
@ -732,7 +732,7 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) {
|
||||||
|
|
||||||
// Now all column families qualify compaction but only one should be
|
// Now all column families qualify compaction but only one should be
|
||||||
// scheduled, because no column family hits speed up condition.
|
// scheduled, because no column family hits speed up condition.
|
||||||
ASSERT_EQ(1, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
|
ASSERT_EQ(1u, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
|
||||||
|
|
||||||
// Create two more files for one column family, which triggers speed up
|
// Create two more files for one column family, which triggers speed up
|
||||||
// condition, three compactions will be scheduled.
|
// condition, three compactions will be scheduled.
|
||||||
|
@ -746,7 +746,7 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) {
|
||||||
ASSERT_EQ(options.level0_file_num_compaction_trigger + num + 1,
|
ASSERT_EQ(options.level0_file_num_compaction_trigger + num + 1,
|
||||||
NumTableFilesAtLevel(0, 2));
|
NumTableFilesAtLevel(0, 2));
|
||||||
}
|
}
|
||||||
ASSERT_EQ(3, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
|
ASSERT_EQ(3U, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
|
||||||
|
|
||||||
// Unblock all threads to unblock all compactions.
|
// Unblock all threads to unblock all compactions.
|
||||||
for (size_t i = 0; i < kTotalTasks; i++) {
|
for (size_t i = 0; i < kTotalTasks; i++) {
|
||||||
|
@ -777,7 +777,7 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) {
|
||||||
|
|
||||||
// Now all column families qualify compaction but only one should be
|
// Now all column families qualify compaction but only one should be
|
||||||
// scheduled, because no column family hits speed up condition.
|
// scheduled, because no column family hits speed up condition.
|
||||||
ASSERT_EQ(1, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
|
ASSERT_EQ(1U, env_->GetThreadPoolQueueLen(Env::Priority::LOW));
|
||||||
|
|
||||||
for (size_t i = 0; i < kTotalTasks; i++) {
|
for (size_t i = 0; i < kTotalTasks; i++) {
|
||||||
sleeping_tasks[i].WakeUp();
|
sleeping_tasks[i].WakeUp();
|
||||||
|
@ -4165,7 +4165,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
|
||||||
|
|
||||||
const char* cf_names[] = {"default", "0", "1", "2", "3", "4", "5",
|
const char* cf_names[] = {"default", "0", "1", "2", "3", "4", "5",
|
||||||
"6", "7", "8", "9", "a", "b", "c", "d", "e", "f" };
|
"6", "7", "8", "9", "a", "b", "c", "d", "e", "f" };
|
||||||
const int cf_count = sizeof cf_names / sizeof cf_names[0];
|
const unsigned int cf_count = sizeof cf_names / sizeof cf_names[0];
|
||||||
|
|
||||||
std::unordered_map<std::string, CompactionLimiter*> cf_to_limiter;
|
std::unordered_map<std::string, CompactionLimiter*> cf_to_limiter;
|
||||||
|
|
||||||
|
@ -4184,7 +4184,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
|
||||||
std::vector<Options> option_vector;
|
std::vector<Options> option_vector;
|
||||||
option_vector.reserve(cf_count);
|
option_vector.reserve(cf_count);
|
||||||
|
|
||||||
for (int cf = 0; cf < cf_count; cf++) {
|
for (unsigned int cf = 0; cf < cf_count; cf++) {
|
||||||
ColumnFamilyOptions cf_opt(options);
|
ColumnFamilyOptions cf_opt(options);
|
||||||
if (cf == 0) {
|
if (cf == 0) {
|
||||||
// "Default" CF does't use compaction limiter
|
// "Default" CF does't use compaction limiter
|
||||||
|
@ -4202,7 +4202,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
|
||||||
option_vector.emplace_back(DBOptions(options), cf_opt);
|
option_vector.emplace_back(DBOptions(options), cf_opt);
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int cf = 1; cf < cf_count; cf++) {
|
for (unsigned int cf = 1; cf < cf_count; cf++) {
|
||||||
CreateColumnFamilies({cf_names[cf]}, option_vector[cf]);
|
CreateColumnFamilies({cf_names[cf]}, option_vector[cf]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4254,7 +4254,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
|
||||||
int keyIndex = 0;
|
int keyIndex = 0;
|
||||||
|
|
||||||
for (int n = 0; n < options.level0_file_num_compaction_trigger; n++) {
|
for (int n = 0; n < options.level0_file_num_compaction_trigger; n++) {
|
||||||
for (int cf = 0; cf < cf_count; cf++) {
|
for (unsigned int cf = 0; cf < cf_count; cf++) {
|
||||||
for (int i = 0; i < kNumKeysPerFile; i++) {
|
for (int i = 0; i < kNumKeysPerFile; i++) {
|
||||||
ASSERT_OK(Put(cf, Key(keyIndex++), ""));
|
ASSERT_OK(Put(cf, Key(keyIndex++), ""));
|
||||||
}
|
}
|
||||||
|
@ -4262,13 +4262,13 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
|
||||||
ASSERT_OK(Put(cf, "", ""));
|
ASSERT_OK(Put(cf, "", ""));
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int cf = 0; cf < cf_count; cf++) {
|
for (unsigned int cf = 0; cf < cf_count; cf++) {
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[cf]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[cf]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Enough L0 files to trigger compaction
|
// Enough L0 files to trigger compaction
|
||||||
for (int cf = 0; cf < cf_count; cf++) {
|
for (unsigned int cf = 0; cf < cf_count; cf++) {
|
||||||
ASSERT_EQ(NumTableFilesAtLevel(0, cf),
|
ASSERT_EQ(NumTableFilesAtLevel(0, cf),
|
||||||
options.level0_file_num_compaction_trigger);
|
options.level0_file_num_compaction_trigger);
|
||||||
}
|
}
|
||||||
|
@ -4295,7 +4295,7 @@ TEST_F(DBCompactionTest, CompactionLimiter) {
|
||||||
sleeping_compact_tasks[i].WaitUntilDone();
|
sleeping_compact_tasks[i].WaitUntilDone();
|
||||||
}
|
}
|
||||||
|
|
||||||
for (int cf = 0; cf < cf_count; cf++) {
|
for (unsigned int cf = 0; cf < cf_count; cf++) {
|
||||||
dbfull()->TEST_WaitForFlushMemTable(handles_[cf]);
|
dbfull()->TEST_WaitForFlushMemTable(handles_[cf]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -322,7 +322,7 @@ TEST_F(DBMemTableTest, ColumnFamilyId) {
|
||||||
DestroyAndReopen(options);
|
DestroyAndReopen(options);
|
||||||
CreateAndReopenWithCF({"pikachu"}, options);
|
CreateAndReopenWithCF({"pikachu"}, options);
|
||||||
|
|
||||||
for (int cf = 0; cf < 2; ++cf) {
|
for (uint32_t cf = 0; cf < 2; ++cf) {
|
||||||
ASSERT_OK(Put(cf, "key", "val"));
|
ASSERT_OK(Put(cf, "key", "val"));
|
||||||
ASSERT_OK(Flush(cf));
|
ASSERT_OK(Flush(cf));
|
||||||
ASSERT_EQ(
|
ASSERT_EQ(
|
||||||
|
|
|
@ -505,10 +505,10 @@ TEST_F(DBOptionsTest, SetStatsDumpPeriodSec) {
|
||||||
options.stats_dump_period_sec = 5;
|
options.stats_dump_period_sec = 5;
|
||||||
options.env = env_;
|
options.env = env_;
|
||||||
Reopen(options);
|
Reopen(options);
|
||||||
ASSERT_EQ(5, dbfull()->GetDBOptions().stats_dump_period_sec);
|
ASSERT_EQ(5u, dbfull()->GetDBOptions().stats_dump_period_sec);
|
||||||
|
|
||||||
for (int i = 0; i < 20; i++) {
|
for (int i = 0; i < 20; i++) {
|
||||||
int num = rand() % 5000 + 1;
|
unsigned int num = rand() % 5000 + 1;
|
||||||
ASSERT_OK(
|
ASSERT_OK(
|
||||||
dbfull()->SetDBOptions({{"stats_dump_period_sec", ToString(num)}}));
|
dbfull()->SetDBOptions({{"stats_dump_period_sec", ToString(num)}}));
|
||||||
ASSERT_EQ(num, dbfull()->GetDBOptions().stats_dump_period_sec);
|
ASSERT_EQ(num, dbfull()->GetDBOptions().stats_dump_period_sec);
|
||||||
|
@ -522,12 +522,12 @@ TEST_F(DBOptionsTest, SetOptionsStatsPersistPeriodSec) {
|
||||||
options.stats_persist_period_sec = 5;
|
options.stats_persist_period_sec = 5;
|
||||||
options.env = env_;
|
options.env = env_;
|
||||||
Reopen(options);
|
Reopen(options);
|
||||||
ASSERT_EQ(5, dbfull()->GetDBOptions().stats_persist_period_sec);
|
ASSERT_EQ(5u, dbfull()->GetDBOptions().stats_persist_period_sec);
|
||||||
|
|
||||||
ASSERT_OK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "12345"}}));
|
ASSERT_OK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "12345"}}));
|
||||||
ASSERT_EQ(12345, dbfull()->GetDBOptions().stats_persist_period_sec);
|
ASSERT_EQ(12345u, dbfull()->GetDBOptions().stats_persist_period_sec);
|
||||||
ASSERT_NOK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "abcde"}}));
|
ASSERT_NOK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "abcde"}}));
|
||||||
ASSERT_EQ(12345, dbfull()->GetDBOptions().stats_persist_period_sec);
|
ASSERT_EQ(12345u, dbfull()->GetDBOptions().stats_persist_period_sec);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void assert_candidate_files_empty(DBImpl* dbfull, const bool empty) {
|
static void assert_candidate_files_empty(DBImpl* dbfull, const bool empty) {
|
||||||
|
|
|
@ -230,7 +230,7 @@ TEST_F(DBTablePropertiesTest, GetColumnFamilyNameProperty) {
|
||||||
|
|
||||||
// Create one table per CF, then verify it was created with the column family
|
// Create one table per CF, then verify it was created with the column family
|
||||||
// name property.
|
// name property.
|
||||||
for (int cf = 0; cf < 2; ++cf) {
|
for (uint32_t cf = 0; cf < 2; ++cf) {
|
||||||
Put(cf, "key", "val");
|
Put(cf, "key", "val");
|
||||||
Flush(cf);
|
Flush(cf);
|
||||||
|
|
||||||
|
|
|
@ -4790,15 +4790,15 @@ TEST_F(DBTest, DynamicUniversalCompactionOptions) {
|
||||||
DestroyAndReopen(options);
|
DestroyAndReopen(options);
|
||||||
|
|
||||||
// Initial defaults
|
// Initial defaults
|
||||||
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 1);
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 1U);
|
||||||
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
|
||||||
2);
|
2u);
|
||||||
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
|
||||||
UINT_MAX);
|
UINT_MAX);
|
||||||
ASSERT_EQ(dbfull()
|
ASSERT_EQ(dbfull()
|
||||||
->GetOptions()
|
->GetOptions()
|
||||||
.compaction_options_universal.max_size_amplification_percent,
|
.compaction_options_universal.max_size_amplification_percent,
|
||||||
200);
|
200u);
|
||||||
ASSERT_EQ(dbfull()
|
ASSERT_EQ(dbfull()
|
||||||
->GetOptions()
|
->GetOptions()
|
||||||
.compaction_options_universal.compression_size_percent,
|
.compaction_options_universal.compression_size_percent,
|
||||||
|
@ -4811,15 +4811,15 @@ TEST_F(DBTest, DynamicUniversalCompactionOptions) {
|
||||||
|
|
||||||
ASSERT_OK(dbfull()->SetOptions(
|
ASSERT_OK(dbfull()->SetOptions(
|
||||||
{{"compaction_options_universal", "{size_ratio=7;}"}}));
|
{{"compaction_options_universal", "{size_ratio=7;}"}}));
|
||||||
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 7);
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 7u);
|
||||||
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
|
||||||
2);
|
2u);
|
||||||
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
|
||||||
UINT_MAX);
|
UINT_MAX);
|
||||||
ASSERT_EQ(dbfull()
|
ASSERT_EQ(dbfull()
|
||||||
->GetOptions()
|
->GetOptions()
|
||||||
.compaction_options_universal.max_size_amplification_percent,
|
.compaction_options_universal.max_size_amplification_percent,
|
||||||
200);
|
200u);
|
||||||
ASSERT_EQ(dbfull()
|
ASSERT_EQ(dbfull()
|
||||||
->GetOptions()
|
->GetOptions()
|
||||||
.compaction_options_universal.compression_size_percent,
|
.compaction_options_universal.compression_size_percent,
|
||||||
|
@ -4832,15 +4832,15 @@ TEST_F(DBTest, DynamicUniversalCompactionOptions) {
|
||||||
|
|
||||||
ASSERT_OK(dbfull()->SetOptions(
|
ASSERT_OK(dbfull()->SetOptions(
|
||||||
{{"compaction_options_universal", "{min_merge_width=11;}"}}));
|
{{"compaction_options_universal", "{min_merge_width=11;}"}}));
|
||||||
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 7);
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.size_ratio, 7u);
|
||||||
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.min_merge_width,
|
||||||
11);
|
11u);
|
||||||
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
|
ASSERT_EQ(dbfull()->GetOptions().compaction_options_universal.max_merge_width,
|
||||||
UINT_MAX);
|
UINT_MAX);
|
||||||
ASSERT_EQ(dbfull()
|
ASSERT_EQ(dbfull()
|
||||||
->GetOptions()
|
->GetOptions()
|
||||||
.compaction_options_universal.max_size_amplification_percent,
|
.compaction_options_universal.max_size_amplification_percent,
|
||||||
200);
|
200u);
|
||||||
ASSERT_EQ(dbfull()
|
ASSERT_EQ(dbfull()
|
||||||
->GetOptions()
|
->GetOptions()
|
||||||
.compaction_options_universal.compression_size_percent,
|
.compaction_options_universal.compression_size_percent,
|
||||||
|
|
|
@ -441,17 +441,17 @@ TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionSizeAmplification) {
|
||||||
ASSERT_EQ(dbfull()
|
ASSERT_EQ(dbfull()
|
||||||
->GetOptions(handles_[1])
|
->GetOptions(handles_[1])
|
||||||
.compaction_options_universal.max_size_amplification_percent,
|
.compaction_options_universal.max_size_amplification_percent,
|
||||||
200);
|
200U);
|
||||||
ASSERT_OK(dbfull()->SetOptions(handles_[1],
|
ASSERT_OK(dbfull()->SetOptions(handles_[1],
|
||||||
{{"compaction_options_universal",
|
{{"compaction_options_universal",
|
||||||
"{max_size_amplification_percent=110;}"}}));
|
"{max_size_amplification_percent=110;}"}}));
|
||||||
ASSERT_EQ(dbfull()
|
ASSERT_EQ(dbfull()
|
||||||
->GetOptions(handles_[1])
|
->GetOptions(handles_[1])
|
||||||
.compaction_options_universal.max_size_amplification_percent,
|
.compaction_options_universal.max_size_amplification_percent,
|
||||||
110);
|
110u);
|
||||||
ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
|
ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
|
||||||
&mutable_cf_options));
|
&mutable_cf_options));
|
||||||
ASSERT_EQ(110, mutable_cf_options.compaction_options_universal
|
ASSERT_EQ(110u, mutable_cf_options.compaction_options_universal
|
||||||
.max_size_amplification_percent);
|
.max_size_amplification_percent);
|
||||||
|
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
|
@ -522,20 +522,20 @@ TEST_P(DBTestUniversalCompaction, DynamicUniversalCompactionReadAmplification) {
|
||||||
ASSERT_EQ(dbfull()
|
ASSERT_EQ(dbfull()
|
||||||
->GetOptions(handles_[1])
|
->GetOptions(handles_[1])
|
||||||
.compaction_options_universal.min_merge_width,
|
.compaction_options_universal.min_merge_width,
|
||||||
2);
|
2u);
|
||||||
ASSERT_EQ(dbfull()
|
ASSERT_EQ(dbfull()
|
||||||
->GetOptions(handles_[1])
|
->GetOptions(handles_[1])
|
||||||
.compaction_options_universal.max_merge_width,
|
.compaction_options_universal.max_merge_width,
|
||||||
2);
|
2u);
|
||||||
ASSERT_EQ(
|
ASSERT_EQ(
|
||||||
dbfull()->GetOptions(handles_[1]).compaction_options_universal.size_ratio,
|
dbfull()->GetOptions(handles_[1]).compaction_options_universal.size_ratio,
|
||||||
100);
|
100u);
|
||||||
|
|
||||||
ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
|
ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1],
|
||||||
&mutable_cf_options));
|
&mutable_cf_options));
|
||||||
ASSERT_EQ(mutable_cf_options.compaction_options_universal.size_ratio, 100);
|
ASSERT_EQ(mutable_cf_options.compaction_options_universal.size_ratio, 100u);
|
||||||
ASSERT_EQ(mutable_cf_options.compaction_options_universal.min_merge_width, 2);
|
ASSERT_EQ(mutable_cf_options.compaction_options_universal.min_merge_width, 2u);
|
||||||
ASSERT_EQ(mutable_cf_options.compaction_options_universal.max_merge_width, 2);
|
ASSERT_EQ(mutable_cf_options.compaction_options_universal.max_merge_width, 2u);
|
||||||
|
|
||||||
dbfull()->TEST_WaitForCompact();
|
dbfull()->TEST_WaitForCompact();
|
||||||
|
|
||||||
|
|
|
@ -78,9 +78,9 @@ TEST_F(VersionEditTest, EncodeDecodeNewFile4) {
|
||||||
ASSERT_TRUE(new_files[0].second.marked_for_compaction);
|
ASSERT_TRUE(new_files[0].second.marked_for_compaction);
|
||||||
ASSERT_TRUE(!new_files[1].second.marked_for_compaction);
|
ASSERT_TRUE(!new_files[1].second.marked_for_compaction);
|
||||||
ASSERT_TRUE(new_files[2].second.marked_for_compaction);
|
ASSERT_TRUE(new_files[2].second.marked_for_compaction);
|
||||||
ASSERT_EQ(3, new_files[0].second.fd.GetPathId());
|
ASSERT_EQ(3u, new_files[0].second.fd.GetPathId());
|
||||||
ASSERT_EQ(3, new_files[1].second.fd.GetPathId());
|
ASSERT_EQ(3u, new_files[1].second.fd.GetPathId());
|
||||||
ASSERT_EQ(0, new_files[2].second.fd.GetPathId());
|
ASSERT_EQ(0u, new_files[2].second.fd.GetPathId());
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(VersionEditTest, ForwardCompatibleNewFile4) {
|
TEST_F(VersionEditTest, ForwardCompatibleNewFile4) {
|
||||||
|
@ -127,8 +127,8 @@ TEST_F(VersionEditTest, ForwardCompatibleNewFile4) {
|
||||||
auto& new_files = parsed.GetNewFiles();
|
auto& new_files = parsed.GetNewFiles();
|
||||||
ASSERT_TRUE(new_files[0].second.marked_for_compaction);
|
ASSERT_TRUE(new_files[0].second.marked_for_compaction);
|
||||||
ASSERT_TRUE(!new_files[1].second.marked_for_compaction);
|
ASSERT_TRUE(!new_files[1].second.marked_for_compaction);
|
||||||
ASSERT_EQ(3, new_files[0].second.fd.GetPathId());
|
ASSERT_EQ(3u, new_files[0].second.fd.GetPathId());
|
||||||
ASSERT_EQ(3, new_files[1].second.fd.GetPathId());
|
ASSERT_EQ(3u, new_files[1].second.fd.GetPathId());
|
||||||
ASSERT_EQ(1u, parsed.GetDeletedFiles().size());
|
ASSERT_EQ(1u, parsed.GetDeletedFiles().size());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -759,7 +759,7 @@ TEST_F(VersionSetTest, SameColumnFamilyGroupCommit) {
|
||||||
SyncPoint::GetInstance()->SetCallBack(
|
SyncPoint::GetInstance()->SetCallBack(
|
||||||
"VersionSet::ProcessManifestWrites:SameColumnFamily", [&](void* arg) {
|
"VersionSet::ProcessManifestWrites:SameColumnFamily", [&](void* arg) {
|
||||||
uint32_t* cf_id = reinterpret_cast<uint32_t*>(arg);
|
uint32_t* cf_id = reinterpret_cast<uint32_t*>(arg);
|
||||||
EXPECT_EQ(0, *cf_id);
|
EXPECT_EQ(0u, *cf_id);
|
||||||
++count;
|
++count;
|
||||||
});
|
});
|
||||||
SyncPoint::GetInstance()->EnableProcessing();
|
SyncPoint::GetInstance()->EnableProcessing();
|
||||||
|
|
|
@ -333,7 +333,7 @@ void WriteBatch::Clear() {
|
||||||
wal_term_point_.clear();
|
wal_term_point_.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
int WriteBatch::Count() const {
|
uint32_t WriteBatch::Count() const {
|
||||||
return WriteBatchInternal::Count(this);
|
return WriteBatchInternal::Count(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -538,7 +538,7 @@ Status WriteBatchInternal::Iterate(const WriteBatch* wb,
|
||||||
// batches. We do that by checking whether the accumulated batch is empty
|
// batches. We do that by checking whether the accumulated batch is empty
|
||||||
// before seeing the next Noop.
|
// before seeing the next Noop.
|
||||||
bool empty_batch = true;
|
bool empty_batch = true;
|
||||||
int found = 0;
|
uint32_t found = 0;
|
||||||
Status s;
|
Status s;
|
||||||
char tag = 0;
|
char tag = 0;
|
||||||
uint32_t column_family = 0; // default
|
uint32_t column_family = 0; // default
|
||||||
|
@ -733,11 +733,11 @@ void WriteBatchInternal::SetAsLastestPersistentState(WriteBatch* b) {
|
||||||
b->is_latest_persistent_state_ = true;
|
b->is_latest_persistent_state_ = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
int WriteBatchInternal::Count(const WriteBatch* b) {
|
uint32_t WriteBatchInternal::Count(const WriteBatch* b) {
|
||||||
return DecodeFixed32(b->rep_.data() + 8);
|
return DecodeFixed32(b->rep_.data() + 8);
|
||||||
}
|
}
|
||||||
|
|
||||||
void WriteBatchInternal::SetCount(WriteBatch* b, int n) {
|
void WriteBatchInternal::SetCount(WriteBatch* b, uint32_t n) {
|
||||||
EncodeFixed32(&b->rep_[8], n);
|
EncodeFixed32(&b->rep_[8], n);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1149,7 +1149,7 @@ Status WriteBatch::RollbackToSavePoint() {
|
||||||
save_points_->stack.pop();
|
save_points_->stack.pop();
|
||||||
|
|
||||||
assert(savepoint.size <= rep_.size());
|
assert(savepoint.size <= rep_.size());
|
||||||
assert(savepoint.count <= Count());
|
assert(static_cast<uint32_t>(savepoint.count) <= Count());
|
||||||
|
|
||||||
if (savepoint.size == rep_.size()) {
|
if (savepoint.size == rep_.size()) {
|
||||||
// No changes to rollback
|
// No changes to rollback
|
||||||
|
|
|
@ -115,10 +115,10 @@ class WriteBatchInternal {
|
||||||
static Status InsertNoop(WriteBatch* batch);
|
static Status InsertNoop(WriteBatch* batch);
|
||||||
|
|
||||||
// Return the number of entries in the batch.
|
// Return the number of entries in the batch.
|
||||||
static int Count(const WriteBatch* batch);
|
static uint32_t Count(const WriteBatch* batch);
|
||||||
|
|
||||||
// Set the count for the number of entries in the batch.
|
// Set the count for the number of entries in the batch.
|
||||||
static void SetCount(WriteBatch* batch, int n);
|
static void SetCount(WriteBatch* batch, uint32_t n);
|
||||||
|
|
||||||
// Return the sequence number for the start of this batch.
|
// Return the sequence number for the start of this batch.
|
||||||
static SequenceNumber Sequence(const WriteBatch* batch);
|
static SequenceNumber Sequence(const WriteBatch* batch);
|
||||||
|
|
|
@ -37,7 +37,7 @@ static std::string PrintContents(WriteBatch* b) {
|
||||||
ColumnFamilyMemTablesDefault cf_mems_default(mem);
|
ColumnFamilyMemTablesDefault cf_mems_default(mem);
|
||||||
Status s =
|
Status s =
|
||||||
WriteBatchInternal::InsertInto(b, &cf_mems_default, nullptr, nullptr);
|
WriteBatchInternal::InsertInto(b, &cf_mems_default, nullptr, nullptr);
|
||||||
int count = 0;
|
uint32_t count = 0;
|
||||||
int put_count = 0;
|
int put_count = 0;
|
||||||
int delete_count = 0;
|
int delete_count = 0;
|
||||||
int single_delete_count = 0;
|
int single_delete_count = 0;
|
||||||
|
@ -132,8 +132,8 @@ class WriteBatchTest : public testing::Test {};
|
||||||
TEST_F(WriteBatchTest, Empty) {
|
TEST_F(WriteBatchTest, Empty) {
|
||||||
WriteBatch batch;
|
WriteBatch batch;
|
||||||
ASSERT_EQ("", PrintContents(&batch));
|
ASSERT_EQ("", PrintContents(&batch));
|
||||||
ASSERT_EQ(0, WriteBatchInternal::Count(&batch));
|
ASSERT_EQ(0u, WriteBatchInternal::Count(&batch));
|
||||||
ASSERT_EQ(0, batch.Count());
|
ASSERT_EQ(0u, batch.Count());
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(WriteBatchTest, Multiple) {
|
TEST_F(WriteBatchTest, Multiple) {
|
||||||
|
@ -144,14 +144,14 @@ TEST_F(WriteBatchTest, Multiple) {
|
||||||
batch.Put(Slice("baz"), Slice("boo"));
|
batch.Put(Slice("baz"), Slice("boo"));
|
||||||
WriteBatchInternal::SetSequence(&batch, 100);
|
WriteBatchInternal::SetSequence(&batch, 100);
|
||||||
ASSERT_EQ(100U, WriteBatchInternal::Sequence(&batch));
|
ASSERT_EQ(100U, WriteBatchInternal::Sequence(&batch));
|
||||||
ASSERT_EQ(4, WriteBatchInternal::Count(&batch));
|
ASSERT_EQ(4u, WriteBatchInternal::Count(&batch));
|
||||||
ASSERT_EQ(
|
ASSERT_EQ(
|
||||||
"Put(baz, boo)@103"
|
"Put(baz, boo)@103"
|
||||||
"Delete(box)@101"
|
"Delete(box)@101"
|
||||||
"Put(foo, bar)@100"
|
"Put(foo, bar)@100"
|
||||||
"DeleteRange(bar, foo)@102",
|
"DeleteRange(bar, foo)@102",
|
||||||
PrintContents(&batch));
|
PrintContents(&batch));
|
||||||
ASSERT_EQ(4, batch.Count());
|
ASSERT_EQ(4u, batch.Count());
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(WriteBatchTest, Corruption) {
|
TEST_F(WriteBatchTest, Corruption) {
|
||||||
|
@ -174,19 +174,19 @@ TEST_F(WriteBatchTest, Append) {
|
||||||
WriteBatchInternal::Append(&b1, &b2);
|
WriteBatchInternal::Append(&b1, &b2);
|
||||||
ASSERT_EQ("",
|
ASSERT_EQ("",
|
||||||
PrintContents(&b1));
|
PrintContents(&b1));
|
||||||
ASSERT_EQ(0, b1.Count());
|
ASSERT_EQ(0u, b1.Count());
|
||||||
b2.Put("a", "va");
|
b2.Put("a", "va");
|
||||||
WriteBatchInternal::Append(&b1, &b2);
|
WriteBatchInternal::Append(&b1, &b2);
|
||||||
ASSERT_EQ("Put(a, va)@200",
|
ASSERT_EQ("Put(a, va)@200",
|
||||||
PrintContents(&b1));
|
PrintContents(&b1));
|
||||||
ASSERT_EQ(1, b1.Count());
|
ASSERT_EQ(1u, b1.Count());
|
||||||
b2.Clear();
|
b2.Clear();
|
||||||
b2.Put("b", "vb");
|
b2.Put("b", "vb");
|
||||||
WriteBatchInternal::Append(&b1, &b2);
|
WriteBatchInternal::Append(&b1, &b2);
|
||||||
ASSERT_EQ("Put(a, va)@200"
|
ASSERT_EQ("Put(a, va)@200"
|
||||||
"Put(b, vb)@201",
|
"Put(b, vb)@201",
|
||||||
PrintContents(&b1));
|
PrintContents(&b1));
|
||||||
ASSERT_EQ(2, b1.Count());
|
ASSERT_EQ(2u, b1.Count());
|
||||||
b2.Delete("foo");
|
b2.Delete("foo");
|
||||||
WriteBatchInternal::Append(&b1, &b2);
|
WriteBatchInternal::Append(&b1, &b2);
|
||||||
ASSERT_EQ("Put(a, va)@200"
|
ASSERT_EQ("Put(a, va)@200"
|
||||||
|
@ -194,7 +194,7 @@ TEST_F(WriteBatchTest, Append) {
|
||||||
"Put(b, vb)@201"
|
"Put(b, vb)@201"
|
||||||
"Delete(foo)@203",
|
"Delete(foo)@203",
|
||||||
PrintContents(&b1));
|
PrintContents(&b1));
|
||||||
ASSERT_EQ(4, b1.Count());
|
ASSERT_EQ(4u, b1.Count());
|
||||||
b2.Clear();
|
b2.Clear();
|
||||||
b2.Put("c", "cc");
|
b2.Put("c", "cc");
|
||||||
b2.Put("d", "dd");
|
b2.Put("d", "dd");
|
||||||
|
@ -209,29 +209,29 @@ TEST_F(WriteBatchTest, Append) {
|
||||||
"Put(d, dd)@205"
|
"Put(d, dd)@205"
|
||||||
"Delete(foo)@203",
|
"Delete(foo)@203",
|
||||||
PrintContents(&b1));
|
PrintContents(&b1));
|
||||||
ASSERT_EQ(6, b1.Count());
|
ASSERT_EQ(6u, b1.Count());
|
||||||
ASSERT_EQ(
|
ASSERT_EQ(
|
||||||
"Put(c, cc)@0"
|
"Put(c, cc)@0"
|
||||||
"Put(d, dd)@1"
|
"Put(d, dd)@1"
|
||||||
"Put(e, ee)@2",
|
"Put(e, ee)@2",
|
||||||
PrintContents(&b2));
|
PrintContents(&b2));
|
||||||
ASSERT_EQ(3, b2.Count());
|
ASSERT_EQ(3u, b2.Count());
|
||||||
}
|
}
|
||||||
|
|
||||||
TEST_F(WriteBatchTest, SingleDeletion) {
|
TEST_F(WriteBatchTest, SingleDeletion) {
|
||||||
WriteBatch batch;
|
WriteBatch batch;
|
||||||
WriteBatchInternal::SetSequence(&batch, 100);
|
WriteBatchInternal::SetSequence(&batch, 100);
|
||||||
ASSERT_EQ("", PrintContents(&batch));
|
ASSERT_EQ("", PrintContents(&batch));
|
||||||
ASSERT_EQ(0, batch.Count());
|
ASSERT_EQ(0u, batch.Count());
|
||||||
batch.Put("a", "va");
|
batch.Put("a", "va");
|
||||||
ASSERT_EQ("Put(a, va)@100", PrintContents(&batch));
|
ASSERT_EQ("Put(a, va)@100", PrintContents(&batch));
|
||||||
ASSERT_EQ(1, batch.Count());
|
ASSERT_EQ(1u, batch.Count());
|
||||||
batch.SingleDelete("a");
|
batch.SingleDelete("a");
|
||||||
ASSERT_EQ(
|
ASSERT_EQ(
|
||||||
"SingleDelete(a)@101"
|
"SingleDelete(a)@101"
|
||||||
"Put(a, va)@100",
|
"Put(a, va)@100",
|
||||||
PrintContents(&batch));
|
PrintContents(&batch));
|
||||||
ASSERT_EQ(2, batch.Count());
|
ASSERT_EQ(2u, batch.Count());
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
|
@ -317,7 +317,7 @@ namespace {
|
||||||
TEST_F(WriteBatchTest, PutNotImplemented) {
|
TEST_F(WriteBatchTest, PutNotImplemented) {
|
||||||
WriteBatch batch;
|
WriteBatch batch;
|
||||||
batch.Put(Slice("k1"), Slice("v1"));
|
batch.Put(Slice("k1"), Slice("v1"));
|
||||||
ASSERT_EQ(1, batch.Count());
|
ASSERT_EQ(1u, batch.Count());
|
||||||
ASSERT_EQ("Put(k1, v1)@0", PrintContents(&batch));
|
ASSERT_EQ("Put(k1, v1)@0", PrintContents(&batch));
|
||||||
|
|
||||||
WriteBatch::Handler handler;
|
WriteBatch::Handler handler;
|
||||||
|
@ -327,7 +327,7 @@ TEST_F(WriteBatchTest, PutNotImplemented) {
|
||||||
TEST_F(WriteBatchTest, DeleteNotImplemented) {
|
TEST_F(WriteBatchTest, DeleteNotImplemented) {
|
||||||
WriteBatch batch;
|
WriteBatch batch;
|
||||||
batch.Delete(Slice("k2"));
|
batch.Delete(Slice("k2"));
|
||||||
ASSERT_EQ(1, batch.Count());
|
ASSERT_EQ(1u, batch.Count());
|
||||||
ASSERT_EQ("Delete(k2)@0", PrintContents(&batch));
|
ASSERT_EQ("Delete(k2)@0", PrintContents(&batch));
|
||||||
|
|
||||||
WriteBatch::Handler handler;
|
WriteBatch::Handler handler;
|
||||||
|
@ -337,7 +337,7 @@ TEST_F(WriteBatchTest, DeleteNotImplemented) {
|
||||||
TEST_F(WriteBatchTest, SingleDeleteNotImplemented) {
|
TEST_F(WriteBatchTest, SingleDeleteNotImplemented) {
|
||||||
WriteBatch batch;
|
WriteBatch batch;
|
||||||
batch.SingleDelete(Slice("k2"));
|
batch.SingleDelete(Slice("k2"));
|
||||||
ASSERT_EQ(1, batch.Count());
|
ASSERT_EQ(1u, batch.Count());
|
||||||
ASSERT_EQ("SingleDelete(k2)@0", PrintContents(&batch));
|
ASSERT_EQ("SingleDelete(k2)@0", PrintContents(&batch));
|
||||||
|
|
||||||
WriteBatch::Handler handler;
|
WriteBatch::Handler handler;
|
||||||
|
@ -347,7 +347,7 @@ TEST_F(WriteBatchTest, SingleDeleteNotImplemented) {
|
||||||
TEST_F(WriteBatchTest, MergeNotImplemented) {
|
TEST_F(WriteBatchTest, MergeNotImplemented) {
|
||||||
WriteBatch batch;
|
WriteBatch batch;
|
||||||
batch.Merge(Slice("foo"), Slice("bar"));
|
batch.Merge(Slice("foo"), Slice("bar"));
|
||||||
ASSERT_EQ(1, batch.Count());
|
ASSERT_EQ(1u, batch.Count());
|
||||||
ASSERT_EQ("Merge(foo, bar)@0", PrintContents(&batch));
|
ASSERT_EQ("Merge(foo, bar)@0", PrintContents(&batch));
|
||||||
|
|
||||||
WriteBatch::Handler handler;
|
WriteBatch::Handler handler;
|
||||||
|
@ -364,7 +364,7 @@ TEST_F(WriteBatchTest, Blob) {
|
||||||
batch.SingleDelete(Slice("k3"));
|
batch.SingleDelete(Slice("k3"));
|
||||||
batch.PutLogData(Slice("blob2"));
|
batch.PutLogData(Slice("blob2"));
|
||||||
batch.Merge(Slice("foo"), Slice("bar"));
|
batch.Merge(Slice("foo"), Slice("bar"));
|
||||||
ASSERT_EQ(6, batch.Count());
|
ASSERT_EQ(6u, batch.Count());
|
||||||
ASSERT_EQ(
|
ASSERT_EQ(
|
||||||
"Merge(foo, bar)@5"
|
"Merge(foo, bar)@5"
|
||||||
"Put(k1, v1)@0"
|
"Put(k1, v1)@0"
|
||||||
|
@ -399,7 +399,7 @@ TEST_F(WriteBatchTest, PrepareCommit) {
|
||||||
ASSERT_EQ(s, Status::NotFound());
|
ASSERT_EQ(s, Status::NotFound());
|
||||||
WriteBatchInternal::MarkCommit(&batch, Slice("xid1"));
|
WriteBatchInternal::MarkCommit(&batch, Slice("xid1"));
|
||||||
WriteBatchInternal::MarkRollback(&batch, Slice("xid1"));
|
WriteBatchInternal::MarkRollback(&batch, Slice("xid1"));
|
||||||
ASSERT_EQ(2, batch.Count());
|
ASSERT_EQ(2u, batch.Count());
|
||||||
|
|
||||||
TestHandler handler;
|
TestHandler handler;
|
||||||
batch.Iterate(&handler);
|
batch.Iterate(&handler);
|
||||||
|
@ -489,7 +489,7 @@ TEST_F(WriteBatchTest, DISABLED_LargeKeyValue) {
|
||||||
batch.Put(raw, raw);
|
batch.Put(raw, raw);
|
||||||
}
|
}
|
||||||
|
|
||||||
ASSERT_EQ(2, batch.Count());
|
ASSERT_EQ(2u, batch.Count());
|
||||||
|
|
||||||
struct NoopHandler : public WriteBatch::Handler {
|
struct NoopHandler : public WriteBatch::Handler {
|
||||||
int num_seen = 0;
|
int num_seen = 0;
|
||||||
|
@ -600,7 +600,7 @@ TEST_F(WriteBatchTest, PutGatherSlices) {
|
||||||
"Put(foo, bar)@100"
|
"Put(foo, bar)@100"
|
||||||
"Put(keypart2part3, value)@102",
|
"Put(keypart2part3, value)@102",
|
||||||
PrintContents(&batch));
|
PrintContents(&batch));
|
||||||
ASSERT_EQ(3, batch.Count());
|
ASSERT_EQ(3u, batch.Count());
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
|
|
|
@ -284,7 +284,7 @@ class WriteBatch : public WriteBatchBase {
|
||||||
size_t GetDataSize() const { return rep_.size(); }
|
size_t GetDataSize() const { return rep_.size(); }
|
||||||
|
|
||||||
// Returns the number of updates in the batch
|
// Returns the number of updates in the batch
|
||||||
int Count() const;
|
uint32_t Count() const;
|
||||||
|
|
||||||
// Returns true if PutCF will be called during Iterate
|
// Returns true if PutCF will be called during Iterate
|
||||||
bool HasPut() const;
|
bool HasPut() const;
|
||||||
|
|
|
@ -54,7 +54,7 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(JNIEnv* env,
|
||||||
rocksdb::ColumnFamilyMemTablesDefault cf_mems_default(mem);
|
rocksdb::ColumnFamilyMemTablesDefault cf_mems_default(mem);
|
||||||
rocksdb::Status s = rocksdb::WriteBatchInternal::InsertInto(
|
rocksdb::Status s = rocksdb::WriteBatchInternal::InsertInto(
|
||||||
b, &cf_mems_default, nullptr, nullptr);
|
b, &cf_mems_default, nullptr, nullptr);
|
||||||
int count = 0;
|
unsigned int count = 0;
|
||||||
rocksdb::Arena arena;
|
rocksdb::Arena arena;
|
||||||
rocksdb::ScopedArenaIterator iter(
|
rocksdb::ScopedArenaIterator iter(
|
||||||
mem->NewIterator(rocksdb::ReadOptions(), &arena));
|
mem->NewIterator(rocksdb::ReadOptions(), &arena));
|
||||||
|
|
|
@ -56,7 +56,7 @@ TEST_F(StatsHistoryTest, RunStatsDumpPeriodSec) {
|
||||||
"DBImpl::DumpStats:1", [&](void* /*arg*/) { counter++; });
|
"DBImpl::DumpStats:1", [&](void* /*arg*/) { counter++; });
|
||||||
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
|
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
|
||||||
Reopen(options);
|
Reopen(options);
|
||||||
ASSERT_EQ(5, dbfull()->GetDBOptions().stats_dump_period_sec);
|
ASSERT_EQ(5u, dbfull()->GetDBOptions().stats_dump_period_sec);
|
||||||
dbfull()->TEST_WaitForDumpStatsRun([&] { mock_env->set_current_time(5); });
|
dbfull()->TEST_WaitForDumpStatsRun([&] { mock_env->set_current_time(5); });
|
||||||
ASSERT_GE(counter, 1);
|
ASSERT_GE(counter, 1);
|
||||||
|
|
||||||
|
@ -95,7 +95,7 @@ TEST_F(StatsHistoryTest, StatsPersistScheduling) {
|
||||||
"DBImpl::PersistStats:Entry", [&](void* /*arg*/) { counter++; });
|
"DBImpl::PersistStats:Entry", [&](void* /*arg*/) { counter++; });
|
||||||
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
|
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
|
||||||
Reopen(options);
|
Reopen(options);
|
||||||
ASSERT_EQ(5, dbfull()->GetDBOptions().stats_persist_period_sec);
|
ASSERT_EQ(5u, dbfull()->GetDBOptions().stats_persist_period_sec);
|
||||||
dbfull()->TEST_WaitForPersistStatsRun([&] { mock_env->set_current_time(5); });
|
dbfull()->TEST_WaitForPersistStatsRun([&] { mock_env->set_current_time(5); });
|
||||||
ASSERT_GE(counter, 1);
|
ASSERT_GE(counter, 1);
|
||||||
|
|
||||||
|
@ -132,7 +132,7 @@ TEST_F(StatsHistoryTest, PersistentStatsFreshInstall) {
|
||||||
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
|
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
|
||||||
Reopen(options);
|
Reopen(options);
|
||||||
ASSERT_OK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "5"}}));
|
ASSERT_OK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "5"}}));
|
||||||
ASSERT_EQ(5, dbfull()->GetDBOptions().stats_persist_period_sec);
|
ASSERT_EQ(5u, dbfull()->GetDBOptions().stats_persist_period_sec);
|
||||||
dbfull()->TEST_WaitForPersistStatsRun([&] { mock_env->set_current_time(5); });
|
dbfull()->TEST_WaitForPersistStatsRun([&] { mock_env->set_current_time(5); });
|
||||||
ASSERT_GE(counter, 1);
|
ASSERT_GE(counter, 1);
|
||||||
Close();
|
Close();
|
||||||
|
|
|
@ -166,15 +166,15 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
|
||||||
ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
|
ASSERT_EQ(new_cf_opt.compression_opts.window_bits, 4);
|
||||||
ASSERT_EQ(new_cf_opt.compression_opts.level, 5);
|
ASSERT_EQ(new_cf_opt.compression_opts.level, 5);
|
||||||
ASSERT_EQ(new_cf_opt.compression_opts.strategy, 6);
|
ASSERT_EQ(new_cf_opt.compression_opts.strategy, 6);
|
||||||
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 7);
|
ASSERT_EQ(new_cf_opt.compression_opts.max_dict_bytes, 7u);
|
||||||
ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes, 8);
|
ASSERT_EQ(new_cf_opt.compression_opts.zstd_max_train_bytes, 8u);
|
||||||
ASSERT_EQ(new_cf_opt.compression_opts.enabled, true);
|
ASSERT_EQ(new_cf_opt.compression_opts.enabled, true);
|
||||||
ASSERT_EQ(new_cf_opt.bottommost_compression, kLZ4Compression);
|
ASSERT_EQ(new_cf_opt.bottommost_compression, kLZ4Compression);
|
||||||
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits, 5);
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.window_bits, 5);
|
||||||
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 6);
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.level, 6);
|
||||||
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 7);
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.strategy, 7);
|
||||||
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes, 8);
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.max_dict_bytes, 8u);
|
||||||
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 9);
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.zstd_max_train_bytes, 9u);
|
||||||
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, true);
|
ASSERT_EQ(new_cf_opt.bottommost_compression_opts.enabled, true);
|
||||||
ASSERT_EQ(new_cf_opt.num_levels, 8);
|
ASSERT_EQ(new_cf_opt.num_levels, 8);
|
||||||
ASSERT_EQ(new_cf_opt.level0_file_num_compaction_trigger, 8);
|
ASSERT_EQ(new_cf_opt.level0_file_num_compaction_trigger, 8);
|
||||||
|
@ -383,10 +383,10 @@ TEST_F(OptionsTest, GetColumnFamilyOptionsFromStringTest) {
|
||||||
"write_buffer_size=13; =100;", &new_cf_opt));
|
"write_buffer_size=13; =100;", &new_cf_opt));
|
||||||
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
|
ASSERT_OK(RocksDBOptionsParser::VerifyCFOptions(base_cf_opt, new_cf_opt));
|
||||||
|
|
||||||
const int64_t kilo = 1024UL;
|
const uint64_t kilo = 1024UL;
|
||||||
const int64_t mega = 1024 * kilo;
|
const uint64_t mega = 1024 * kilo;
|
||||||
const int64_t giga = 1024 * mega;
|
const uint64_t giga = 1024 * mega;
|
||||||
const int64_t tera = 1024 * giga;
|
const uint64_t tera = 1024 * giga;
|
||||||
|
|
||||||
// Units (k)
|
// Units (k)
|
||||||
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
||||||
|
@ -397,7 +397,7 @@ TEST_F(OptionsTest, GetColumnFamilyOptionsFromStringTest) {
|
||||||
"max_write_buffer_number=16m;inplace_update_num_locks=17M",
|
"max_write_buffer_number=16m;inplace_update_num_locks=17M",
|
||||||
&new_cf_opt));
|
&new_cf_opt));
|
||||||
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 16 * mega);
|
ASSERT_EQ(new_cf_opt.max_write_buffer_number, 16 * mega);
|
||||||
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 17 * mega);
|
ASSERT_EQ(new_cf_opt.inplace_update_num_locks, 17u * mega);
|
||||||
// Units (g)
|
// Units (g)
|
||||||
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
ASSERT_OK(GetColumnFamilyOptionsFromString(
|
||||||
base_cf_opt,
|
base_cf_opt,
|
||||||
|
@ -543,7 +543,7 @@ TEST_F(OptionsTest, GetBlockBasedTableOptionsFromString) {
|
||||||
"cache_index_and_filter_blocks=1;index_type=kBinarySearch;"
|
"cache_index_and_filter_blocks=1;index_type=kBinarySearch;"
|
||||||
"bad_option=1",
|
"bad_option=1",
|
||||||
&new_opt));
|
&new_opt));
|
||||||
ASSERT_EQ(table_opt.cache_index_and_filter_blocks,
|
ASSERT_EQ(static_cast<bool>(table_opt.cache_index_and_filter_blocks),
|
||||||
new_opt.cache_index_and_filter_blocks);
|
new_opt.cache_index_and_filter_blocks);
|
||||||
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
ASSERT_EQ(table_opt.index_type, new_opt.index_type);
|
||||||
|
|
||||||
|
@ -692,7 +692,7 @@ TEST_F(OptionsTest, GetPlainTableOptionsFromString) {
|
||||||
"index_sparseness=8;huge_page_tlb_size=4;encoding_type=kPrefix;"
|
"index_sparseness=8;huge_page_tlb_size=4;encoding_type=kPrefix;"
|
||||||
"full_scan_mode=true;store_index_in_file=true",
|
"full_scan_mode=true;store_index_in_file=true",
|
||||||
&new_opt));
|
&new_opt));
|
||||||
ASSERT_EQ(new_opt.user_key_len, 66);
|
ASSERT_EQ(new_opt.user_key_len, 66u);
|
||||||
ASSERT_EQ(new_opt.bloom_bits_per_key, 20);
|
ASSERT_EQ(new_opt.bloom_bits_per_key, 20);
|
||||||
ASSERT_EQ(new_opt.hash_table_ratio, 0.5);
|
ASSERT_EQ(new_opt.hash_table_ratio, 0.5);
|
||||||
ASSERT_EQ(new_opt.index_sparseness, 8);
|
ASSERT_EQ(new_opt.index_sparseness, 8);
|
||||||
|
@ -792,15 +792,15 @@ TEST_F(OptionsTest, GetOptionsFromStringTest) {
|
||||||
ASSERT_EQ(new_options.compression_opts.window_bits, 4);
|
ASSERT_EQ(new_options.compression_opts.window_bits, 4);
|
||||||
ASSERT_EQ(new_options.compression_opts.level, 5);
|
ASSERT_EQ(new_options.compression_opts.level, 5);
|
||||||
ASSERT_EQ(new_options.compression_opts.strategy, 6);
|
ASSERT_EQ(new_options.compression_opts.strategy, 6);
|
||||||
ASSERT_EQ(new_options.compression_opts.max_dict_bytes, 0);
|
ASSERT_EQ(new_options.compression_opts.max_dict_bytes, 0u);
|
||||||
ASSERT_EQ(new_options.compression_opts.zstd_max_train_bytes, 0);
|
ASSERT_EQ(new_options.compression_opts.zstd_max_train_bytes, 0u);
|
||||||
ASSERT_EQ(new_options.compression_opts.enabled, false);
|
ASSERT_EQ(new_options.compression_opts.enabled, false);
|
||||||
ASSERT_EQ(new_options.bottommost_compression, kDisableCompressionOption);
|
ASSERT_EQ(new_options.bottommost_compression, kDisableCompressionOption);
|
||||||
ASSERT_EQ(new_options.bottommost_compression_opts.window_bits, 5);
|
ASSERT_EQ(new_options.bottommost_compression_opts.window_bits, 5);
|
||||||
ASSERT_EQ(new_options.bottommost_compression_opts.level, 6);
|
ASSERT_EQ(new_options.bottommost_compression_opts.level, 6);
|
||||||
ASSERT_EQ(new_options.bottommost_compression_opts.strategy, 7);
|
ASSERT_EQ(new_options.bottommost_compression_opts.strategy, 7);
|
||||||
ASSERT_EQ(new_options.bottommost_compression_opts.max_dict_bytes, 0);
|
ASSERT_EQ(new_options.bottommost_compression_opts.max_dict_bytes, 0u);
|
||||||
ASSERT_EQ(new_options.bottommost_compression_opts.zstd_max_train_bytes, 0);
|
ASSERT_EQ(new_options.bottommost_compression_opts.zstd_max_train_bytes, 0u);
|
||||||
ASSERT_EQ(new_options.bottommost_compression_opts.enabled, false);
|
ASSERT_EQ(new_options.bottommost_compression_opts.enabled, false);
|
||||||
ASSERT_EQ(new_options.write_buffer_size, 10U);
|
ASSERT_EQ(new_options.write_buffer_size, 10U);
|
||||||
ASSERT_EQ(new_options.max_write_buffer_number, 16);
|
ASSERT_EQ(new_options.max_write_buffer_number, 16);
|
||||||
|
@ -1874,9 +1874,9 @@ TEST_F(OptionsParserTest, IntegerParsing) {
|
||||||
ASSERT_EQ(ParseUint64("18446744073709551615"), 18446744073709551615U);
|
ASSERT_EQ(ParseUint64("18446744073709551615"), 18446744073709551615U);
|
||||||
ASSERT_EQ(ParseUint32("4294967295"), 4294967295U);
|
ASSERT_EQ(ParseUint32("4294967295"), 4294967295U);
|
||||||
ASSERT_EQ(ParseSizeT("18446744073709551615"), 18446744073709551615U);
|
ASSERT_EQ(ParseSizeT("18446744073709551615"), 18446744073709551615U);
|
||||||
ASSERT_EQ(ParseInt64("9223372036854775807"), 9223372036854775807U);
|
ASSERT_EQ(ParseInt64("9223372036854775807"), 9223372036854775807);
|
||||||
ASSERT_EQ(ParseInt64("-9223372036854775808"), port::kMinInt64);
|
ASSERT_EQ(ParseInt64("-9223372036854775808"), port::kMinInt64);
|
||||||
ASSERT_EQ(ParseInt32("2147483647"), 2147483647U);
|
ASSERT_EQ(ParseInt32("2147483647"), 2147483647);
|
||||||
ASSERT_EQ(ParseInt32("-2147483648"), port::kMinInt32);
|
ASSERT_EQ(ParseInt32("-2147483648"), port::kMinInt32);
|
||||||
ASSERT_EQ(ParseInt("-32767"), -32767);
|
ASSERT_EQ(ParseInt("-32767"), -32767);
|
||||||
ASSERT_EQ(ParseDouble("-1.234567"), -1.234567);
|
ASSERT_EQ(ParseDouble("-1.234567"), -1.234567);
|
||||||
|
|
|
@ -478,19 +478,19 @@ TEST_F(BlockTest, BlockWithReadAmpBitmap) {
|
||||||
|
|
||||||
TEST_F(BlockTest, ReadAmpBitmapPow2) {
|
TEST_F(BlockTest, ReadAmpBitmapPow2) {
|
||||||
std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
|
std::shared_ptr<Statistics> stats = rocksdb::CreateDBStatistics();
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 1, stats.get()).GetBytesPerBit(), 1);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 1, stats.get()).GetBytesPerBit(), 1u);
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 2, stats.get()).GetBytesPerBit(), 2);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 2, stats.get()).GetBytesPerBit(), 2u);
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 4, stats.get()).GetBytesPerBit(), 4);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 4, stats.get()).GetBytesPerBit(), 4u);
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 8, stats.get()).GetBytesPerBit(), 8);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 8, stats.get()).GetBytesPerBit(), 8u);
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 16, stats.get()).GetBytesPerBit(), 16);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 16, stats.get()).GetBytesPerBit(), 16u);
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 32, stats.get()).GetBytesPerBit(), 32);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 32, stats.get()).GetBytesPerBit(), 32u);
|
||||||
|
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 3, stats.get()).GetBytesPerBit(), 2);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 3, stats.get()).GetBytesPerBit(), 2u);
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 7, stats.get()).GetBytesPerBit(), 4);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 7, stats.get()).GetBytesPerBit(), 4u);
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 11, stats.get()).GetBytesPerBit(), 8);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 11, stats.get()).GetBytesPerBit(), 8u);
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 17, stats.get()).GetBytesPerBit(), 16);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 17, stats.get()).GetBytesPerBit(), 16u);
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 33, stats.get()).GetBytesPerBit(), 32);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 33, stats.get()).GetBytesPerBit(), 32u);
|
||||||
ASSERT_EQ(BlockReadAmpBitmap(100, 35, stats.get()).GetBytesPerBit(), 32);
|
ASSERT_EQ(BlockReadAmpBitmap(100, 35, stats.get()).GetBytesPerBit(), 32u);
|
||||||
}
|
}
|
||||||
|
|
||||||
class IndexBlockTest
|
class IndexBlockTest
|
||||||
|
|
|
@ -2629,7 +2629,7 @@ TEST_P(BlockBasedTableTest, FilterBlockInBlockCache) {
|
||||||
0, 0, 0);
|
0, 0, 0);
|
||||||
ASSERT_EQ(props.GetCacheBytesRead(), 0);
|
ASSERT_EQ(props.GetCacheBytesRead(), 0);
|
||||||
ASSERT_EQ(props.GetCacheBytesWrite(),
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
||||||
table_options.block_cache->GetUsage());
|
static_cast<int64_t>(table_options.block_cache->GetUsage()));
|
||||||
last_cache_bytes_read = props.GetCacheBytesRead();
|
last_cache_bytes_read = props.GetCacheBytesRead();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2645,7 +2645,7 @@ TEST_P(BlockBasedTableTest, FilterBlockInBlockCache) {
|
||||||
// Cache hit, bytes read from cache should increase
|
// Cache hit, bytes read from cache should increase
|
||||||
ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
|
ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
|
||||||
ASSERT_EQ(props.GetCacheBytesWrite(),
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
||||||
table_options.block_cache->GetUsage());
|
static_cast<int64_t>(table_options.block_cache->GetUsage()));
|
||||||
last_cache_bytes_read = props.GetCacheBytesRead();
|
last_cache_bytes_read = props.GetCacheBytesRead();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2658,7 +2658,7 @@ TEST_P(BlockBasedTableTest, FilterBlockInBlockCache) {
|
||||||
// Cache miss, Bytes read from cache should not change
|
// Cache miss, Bytes read from cache should not change
|
||||||
ASSERT_EQ(props.GetCacheBytesRead(), last_cache_bytes_read);
|
ASSERT_EQ(props.GetCacheBytesRead(), last_cache_bytes_read);
|
||||||
ASSERT_EQ(props.GetCacheBytesWrite(),
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
||||||
table_options.block_cache->GetUsage());
|
static_cast<int64_t>(table_options.block_cache->GetUsage()));
|
||||||
last_cache_bytes_read = props.GetCacheBytesRead();
|
last_cache_bytes_read = props.GetCacheBytesRead();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2672,7 +2672,7 @@ TEST_P(BlockBasedTableTest, FilterBlockInBlockCache) {
|
||||||
// Cache hit, bytes read from cache should increase
|
// Cache hit, bytes read from cache should increase
|
||||||
ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
|
ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read);
|
||||||
ASSERT_EQ(props.GetCacheBytesWrite(),
|
ASSERT_EQ(props.GetCacheBytesWrite(),
|
||||||
table_options.block_cache->GetUsage());
|
static_cast<int64_t>(table_options.block_cache->GetUsage()));
|
||||||
}
|
}
|
||||||
// release the iterator so that the block cache can reset correctly.
|
// release the iterator so that the block cache can reset correctly.
|
||||||
iter.reset();
|
iter.reset();
|
||||||
|
@ -3745,8 +3745,8 @@ TEST_P(BlockBasedTableTest, DISABLED_TableWithGlobalSeqno) {
|
||||||
};
|
};
|
||||||
|
|
||||||
GetVersionAndGlobalSeqno();
|
GetVersionAndGlobalSeqno();
|
||||||
ASSERT_EQ(2, version);
|
ASSERT_EQ(2u, version);
|
||||||
ASSERT_EQ(0, global_seqno);
|
ASSERT_EQ(0u, global_seqno);
|
||||||
|
|
||||||
InternalIterator* iter = GetTableInternalIter();
|
InternalIterator* iter = GetTableInternalIter();
|
||||||
char current_c = 'a';
|
char current_c = 'a';
|
||||||
|
@ -3766,8 +3766,8 @@ TEST_P(BlockBasedTableTest, DISABLED_TableWithGlobalSeqno) {
|
||||||
// Update global sequence number to 10
|
// Update global sequence number to 10
|
||||||
SetGlobalSeqno(10);
|
SetGlobalSeqno(10);
|
||||||
GetVersionAndGlobalSeqno();
|
GetVersionAndGlobalSeqno();
|
||||||
ASSERT_EQ(2, version);
|
ASSERT_EQ(2u, version);
|
||||||
ASSERT_EQ(10, global_seqno);
|
ASSERT_EQ(10u, global_seqno);
|
||||||
|
|
||||||
iter = GetTableInternalIter();
|
iter = GetTableInternalIter();
|
||||||
current_c = 'a';
|
current_c = 'a';
|
||||||
|
@ -3803,8 +3803,8 @@ TEST_P(BlockBasedTableTest, DISABLED_TableWithGlobalSeqno) {
|
||||||
// Update global sequence number to 3
|
// Update global sequence number to 3
|
||||||
SetGlobalSeqno(3);
|
SetGlobalSeqno(3);
|
||||||
GetVersionAndGlobalSeqno();
|
GetVersionAndGlobalSeqno();
|
||||||
ASSERT_EQ(2, version);
|
ASSERT_EQ(2u, version);
|
||||||
ASSERT_EQ(3, global_seqno);
|
ASSERT_EQ(3u, global_seqno);
|
||||||
|
|
||||||
iter = GetTableInternalIter();
|
iter = GetTableInternalIter();
|
||||||
current_c = 'a';
|
current_c = 'a';
|
||||||
|
@ -4023,7 +4023,7 @@ TEST_P(BlockBasedTableTest, PropertiesBlockRestartPointTest) {
|
||||||
Block properties_block(std::move(properties_contents),
|
Block properties_block(std::move(properties_contents),
|
||||||
kDisableGlobalSequenceNumber);
|
kDisableGlobalSequenceNumber);
|
||||||
|
|
||||||
ASSERT_EQ(properties_block.NumRestarts(), 1);
|
ASSERT_EQ(properties_block.NumRestarts(), 1u);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@@ -0,0 +1,37 @@
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+#include "gtest/gtest.h"
+
+GTEST_API_ int main(int argc, char **argv) {
+  printf("Running main() from %s\n", __FILE__);
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
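Side note on the fused gtest_main.cc added above: it is the stock Google Test runner and does not select a report format itself. As an assumption about how such a binary might be driven (not something stated in this diff), gtest 1.8.1 accepts a machine-readable report destination either on the command line or programmatically; the runner below is a hypothetical sketch, and report.json is an illustrative path.

// Hypothetical standalone runner (not RocksDB code): GTEST_FLAG(output)
// mirrors the --gtest_output command-line flag.
#include "gtest/gtest.h"

int main(int argc, char **argv) {
  // Set a default before InitGoogleTest(); a --gtest_output argument passed
  // on the command line would still override it during flag parsing.
  ::testing::GTEST_FLAG(output) = "json:report.json";  // assumed path
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

An unmodified binary linked against the stock main above achieves the same with --gtest_output=json:report.json on the command line.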
@@ -19,51 +19,51 @@ TEST(HashTest, Values) {
   using rocksdb::Hash;
   constexpr uint32_t kSeed = 0xbc9f1d34;  // Same as BloomHash.

-  EXPECT_EQ(Hash("", 0, kSeed), 3164544308);
+  EXPECT_EQ(Hash("", 0, kSeed), 3164544308u);
-  EXPECT_EQ(Hash("\x08", 1, kSeed), 422599524);
+  EXPECT_EQ(Hash("\x08", 1, kSeed), 422599524u);
-  EXPECT_EQ(Hash("\x17", 1, kSeed), 3168152998);
+  EXPECT_EQ(Hash("\x17", 1, kSeed), 3168152998u);
-  EXPECT_EQ(Hash("\x9a", 1, kSeed), 3195034349);
+  EXPECT_EQ(Hash("\x9a", 1, kSeed), 3195034349u);
-  EXPECT_EQ(Hash("\x1c", 1, kSeed), 2651681383);
+  EXPECT_EQ(Hash("\x1c", 1, kSeed), 2651681383u);
-  EXPECT_EQ(Hash("\x4d\x76", 2, kSeed), 2447836956);
+  EXPECT_EQ(Hash("\x4d\x76", 2, kSeed), 2447836956u);
-  EXPECT_EQ(Hash("\x52\xd5", 2, kSeed), 3854228105);
+  EXPECT_EQ(Hash("\x52\xd5", 2, kSeed), 3854228105u);
-  EXPECT_EQ(Hash("\x91\xf7", 2, kSeed), 31066776);
+  EXPECT_EQ(Hash("\x91\xf7", 2, kSeed), 31066776u);
-  EXPECT_EQ(Hash("\xd6\x27", 2, kSeed), 1806091603);
+  EXPECT_EQ(Hash("\xd6\x27", 2, kSeed), 1806091603u);
-  EXPECT_EQ(Hash("\x30\x46\x0b", 3, kSeed), 3808221797);
+  EXPECT_EQ(Hash("\x30\x46\x0b", 3, kSeed), 3808221797u);
-  EXPECT_EQ(Hash("\x56\xdc\xd6", 3, kSeed), 2157698265);
+  EXPECT_EQ(Hash("\x56\xdc\xd6", 3, kSeed), 2157698265u);
-  EXPECT_EQ(Hash("\xd4\x52\x33", 3, kSeed), 1721992661);
+  EXPECT_EQ(Hash("\xd4\x52\x33", 3, kSeed), 1721992661u);
-  EXPECT_EQ(Hash("\x6a\xb5\xf4", 3, kSeed), 2469105222);
+  EXPECT_EQ(Hash("\x6a\xb5\xf4", 3, kSeed), 2469105222u);
-  EXPECT_EQ(Hash("\x67\x53\x81\x1c", 4, kSeed), 118283265);
+  EXPECT_EQ(Hash("\x67\x53\x81\x1c", 4, kSeed), 118283265u);
-  EXPECT_EQ(Hash("\x69\xb8\xc0\x88", 4, kSeed), 3416318611);
+  EXPECT_EQ(Hash("\x69\xb8\xc0\x88", 4, kSeed), 3416318611u);
-  EXPECT_EQ(Hash("\x1e\x84\xaf\x2d", 4, kSeed), 3315003572);
+  EXPECT_EQ(Hash("\x1e\x84\xaf\x2d", 4, kSeed), 3315003572u);
-  EXPECT_EQ(Hash("\x46\xdc\x54\xbe", 4, kSeed), 447346355);
+  EXPECT_EQ(Hash("\x46\xdc\x54\xbe", 4, kSeed), 447346355u);
-  EXPECT_EQ(Hash("\xd0\x7a\x6e\xea\x56", 5, kSeed), 4255445370);
+  EXPECT_EQ(Hash("\xd0\x7a\x6e\xea\x56", 5, kSeed), 4255445370u);
-  EXPECT_EQ(Hash("\x86\x83\xd5\xa4\xd8", 5, kSeed), 2390603402);
+  EXPECT_EQ(Hash("\x86\x83\xd5\xa4\xd8", 5, kSeed), 2390603402u);
-  EXPECT_EQ(Hash("\xb7\x46\xbb\x77\xce", 5, kSeed), 2048907743);
+  EXPECT_EQ(Hash("\xb7\x46\xbb\x77\xce", 5, kSeed), 2048907743u);
-  EXPECT_EQ(Hash("\x6c\xa8\xbc\xe5\x99", 5, kSeed), 2177978500);
+  EXPECT_EQ(Hash("\x6c\xa8\xbc\xe5\x99", 5, kSeed), 2177978500u);
-  EXPECT_EQ(Hash("\x5c\x5e\xe1\xa0\x73\x81", 6, kSeed), 1036846008);
+  EXPECT_EQ(Hash("\x5c\x5e\xe1\xa0\x73\x81", 6, kSeed), 1036846008u);
-  EXPECT_EQ(Hash("\x08\x5d\x73\x1c\xe5\x2e", 6, kSeed), 229980482);
+  EXPECT_EQ(Hash("\x08\x5d\x73\x1c\xe5\x2e", 6, kSeed), 229980482u);
-  EXPECT_EQ(Hash("\x42\xfb\xf2\x52\xb4\x10", 6, kSeed), 3655585422);
+  EXPECT_EQ(Hash("\x42\xfb\xf2\x52\xb4\x10", 6, kSeed), 3655585422u);
-  EXPECT_EQ(Hash("\x73\xe1\xff\x56\x9c\xce", 6, kSeed), 3502708029);
+  EXPECT_EQ(Hash("\x73\xe1\xff\x56\x9c\xce", 6, kSeed), 3502708029u);
-  EXPECT_EQ(Hash("\x5c\xbe\x97\x75\x54\x9a\x52", 7, kSeed), 815120748);
+  EXPECT_EQ(Hash("\x5c\xbe\x97\x75\x54\x9a\x52", 7, kSeed), 815120748u);
-  EXPECT_EQ(Hash("\x16\x82\x39\x49\x88\x2b\x36", 7, kSeed), 3056033698);
+  EXPECT_EQ(Hash("\x16\x82\x39\x49\x88\x2b\x36", 7, kSeed), 3056033698u);
-  EXPECT_EQ(Hash("\x59\x77\xf0\xa7\x24\xf4\x78", 7, kSeed), 587205227);
+  EXPECT_EQ(Hash("\x59\x77\xf0\xa7\x24\xf4\x78", 7, kSeed), 587205227u);
-  EXPECT_EQ(Hash("\xd3\xa5\x7c\x0e\xc0\x02\x07", 7, kSeed), 2030937252);
+  EXPECT_EQ(Hash("\xd3\xa5\x7c\x0e\xc0\x02\x07", 7, kSeed), 2030937252u);
-  EXPECT_EQ(Hash("\x31\x1b\x98\x75\x96\x22\xd3\x9a", 8, kSeed), 469635402);
+  EXPECT_EQ(Hash("\x31\x1b\x98\x75\x96\x22\xd3\x9a", 8, kSeed), 469635402u);
-  EXPECT_EQ(Hash("\x38\xd6\xf7\x28\x20\xb4\x8a\xe9", 8, kSeed), 3530274698);
+  EXPECT_EQ(Hash("\x38\xd6\xf7\x28\x20\xb4\x8a\xe9", 8, kSeed), 3530274698u);
-  EXPECT_EQ(Hash("\xbb\x18\x5d\xf4\x12\x03\xf7\x99", 8, kSeed), 1974545809);
+  EXPECT_EQ(Hash("\xbb\x18\x5d\xf4\x12\x03\xf7\x99", 8, kSeed), 1974545809u);
-  EXPECT_EQ(Hash("\x80\xd4\x3b\x3b\xae\x22\xa2\x78", 8, kSeed), 3563570120);
+  EXPECT_EQ(Hash("\x80\xd4\x3b\x3b\xae\x22\xa2\x78", 8, kSeed), 3563570120u);
-  EXPECT_EQ(Hash("\x1a\xb5\xd0\xfe\xab\xc3\x61\xb2\x99", 9, kSeed), 2706087434);
+  EXPECT_EQ(Hash("\x1a\xb5\xd0\xfe\xab\xc3\x61\xb2\x99", 9, kSeed), 2706087434u);
-  EXPECT_EQ(Hash("\x8e\x4a\xc3\x18\x20\x2f\x06\xe6\x3c", 9, kSeed), 1534654151);
+  EXPECT_EQ(Hash("\x8e\x4a\xc3\x18\x20\x2f\x06\xe6\x3c", 9, kSeed), 1534654151u);
-  EXPECT_EQ(Hash("\xb6\xc0\xdd\x05\x3f\xc4\x86\x4c\xef", 9, kSeed), 2355554696);
+  EXPECT_EQ(Hash("\xb6\xc0\xdd\x05\x3f\xc4\x86\x4c\xef", 9, kSeed), 2355554696u);
-  EXPECT_EQ(Hash("\x9a\x5f\x78\x0d\xaf\x50\xe1\x1f\x55", 9, kSeed), 1400800912);
+  EXPECT_EQ(Hash("\x9a\x5f\x78\x0d\xaf\x50\xe1\x1f\x55", 9, kSeed), 1400800912u);
   EXPECT_EQ(Hash("\x22\x6f\x39\x1f\xf8\xdd\x4f\x52\x17\x94", 10, kSeed),
-            3420325137);
+            3420325137u);
   EXPECT_EQ(Hash("\x32\x89\x2a\x75\x48\x3a\x4a\x02\x69\xdd", 10, kSeed),
-            3427803584);
+            3427803584u);
   EXPECT_EQ(Hash("\x06\x92\x5c\xf4\x88\x0e\x7e\x68\x38\x3e", 10, kSeed),
-            1152407945);
+            1152407945u);
   EXPECT_EQ(Hash("\xbd\x2c\x63\x38\xbf\xe9\x78\xb7\xbf\x15", 10, kSeed),
-            3382479516);
+            3382479516u);
 }

 int main(int argc, char** argv) {
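A note on the literal suffixes added throughout HashTest above (an interpretation, not stated in the diff): rocksdb::Hash returns uint32_t, and comparing it against a plain int literal inside EXPECT_EQ instantiates a signed/unsigned comparison, which the stricter type handling in the upgraded gtest headers presumably surfaces as a -Wsign-compare error under -Werror. The u suffix keeps both operands unsigned. A minimal sketch, with the hypothetical ToyHash standing in for the real hash function:

// Illustrative only; not part of this change.
#include <cstdint>
#include "gtest/gtest.h"

namespace {
// Stand-in for a 32-bit hash such as rocksdb::Hash.
uint32_t ToyHash() { return 31066776u; }
}  // namespace

TEST(SignednessExample, LiteralSuffix) {
  // EXPECT_EQ(ToyHash(), 31066776);  // int literal: may trigger -Wsign-compare
  EXPECT_EQ(ToyHash(), 31066776u);    // unsigned literal: both operands uint32_t
}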
@@ -290,7 +290,7 @@ TEST_P(TransactionTest, WaitingTxn) {
         ASSERT_EQ(key, "foo");
         ASSERT_EQ(wait.size(), 1);
         ASSERT_EQ(wait[0], id1);
-        ASSERT_EQ(cf_id, 0);
+        ASSERT_EQ(cf_id, 0U);
       });

   get_perf_context()->Reset();
@@ -568,7 +568,7 @@ TEST_P(TransactionTest, DeadlockCycleShared) {
   for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); ++it) {
     auto dl_node = *it;
     ASSERT_EQ(dl_node.m_txn_id, offset_root + leaf_id);
-    ASSERT_EQ(dl_node.m_cf_id, 0);
+    ASSERT_EQ(dl_node.m_cf_id, 0U);
     ASSERT_EQ(dl_node.m_waiting_key, ToString(curr_waiting_key));
     ASSERT_EQ(dl_node.m_exclusive, true);

@@ -775,7 +775,7 @@ TEST_P(TransactionStressTest, DeadlockCycle) {
   for (auto it = dlock_entry.rbegin(); it != dlock_entry.rend(); ++it) {
     auto dl_node = *it;
     ASSERT_EQ(dl_node.m_txn_id, len + curr_txn_id - 1);
-    ASSERT_EQ(dl_node.m_cf_id, 0);
+    ASSERT_EQ(dl_node.m_cf_id, 0u);
     ASSERT_EQ(dl_node.m_waiting_key, ToString(curr_waiting_key));
     ASSERT_EQ(dl_node.m_exclusive, true);

@@ -568,7 +568,7 @@ Status WriteBatchWithIndex::Rep::ReBuildIndex() {
   input.remove_prefix(offset);

   // Loop through all entries in Rep and add each one to the index
-  int found = 0;
+  uint32_t found = 0;
   while (s.ok() && !input.empty()) {
     Slice key, value, blob, xid;
     uint32_t column_family_id = 0;  // default
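The counter-type change in ReBuildIndex above likely serves the same purpose, although the comparison site is not shown in this hunk: the number of rebuilt entries is presumably checked against an unsigned batch count, so keeping the counter uint32_t avoids a signed/unsigned mismatch. A self-contained sketch of the pattern, with illustrative names only:

// Illustrative only; names and structure are assumptions, not RocksDB APIs.
#include <cstdint>
#include <vector>

// Counts entries and verifies the count against an unsigned total,
// mirroring the unsigned counter in the hunk above.
static bool CountsMatch(const std::vector<int>& entries, uint32_t expected) {
  uint32_t found = 0;  // unsigned, like the total it is compared with
  for (int entry : entries) {
    (void)entry;
    ++found;
  }
  return found == expected;
}

int main() {
  std::vector<int> entries{1, 2, 3};
  return CountsMatch(entries, 3u) ? 0 : 1;
}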