// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <stdio.h>

#include <algorithm>
#include <string>

#include "db/db_test_util.h"
#include "db/write_stall_stats.h"
#include "options/cf_options.h"
#include "port/stack_trace.h"
#include "rocksdb/listener.h"
#include "rocksdb/options.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/perf_level.h"
#include "rocksdb/table.h"
#include "table/block_based/block.h"
#include "table/format.h"
#include "table/meta_blocks.h"
#include "table/table_builder.h"
#include "test_util/mock_time_env.h"
#include "util/random.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

class DBPropertiesTest : public DBTestBase {
 public:
  DBPropertiesTest()
      : DBTestBase("db_properties_test", /*env_do_fsync=*/false) {}

  void AssertDbStats(const std::map<std::string, std::string>& db_stats,
                     double expected_uptime, int expected_user_bytes_written,
                     int expected_wal_bytes_written,
                     int expected_user_writes_by_self,
                     int expected_user_writes_with_wal) {
    ASSERT_EQ(std::to_string(expected_uptime), db_stats.at("db.uptime"));
    ASSERT_EQ(std::to_string(expected_wal_bytes_written),
              db_stats.at("db.wal_bytes_written"));
    ASSERT_EQ("0", db_stats.at("db.wal_syncs"));
    ASSERT_EQ(std::to_string(expected_user_bytes_written),
              db_stats.at("db.user_bytes_written"));
    ASSERT_EQ("0", db_stats.at("db.user_writes_by_other"));
    ASSERT_EQ(std::to_string(expected_user_writes_by_self),
              db_stats.at("db.user_writes_by_self"));
    ASSERT_EQ(std::to_string(expected_user_writes_with_wal),
              db_stats.at("db.user_writes_with_wal"));
    ASSERT_EQ("0", db_stats.at("db.user_write_stall_micros"));
  }
};

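// The tests below exercise DB::GetProperty() / GetIntProperty(). As a
// minimal illustrative sketch (not part of the original tests), the two
// call forms look roughly like this:
//
//   std::string str_val;
//   uint64_t int_val = 0;
//   db->GetProperty(db->DefaultColumnFamily(),
//                   "rocksdb.num-entries-active-mem-table", &str_val);
//   db->GetIntProperty("rocksdb.estimate-num-keys", &int_val);
//
// Both calls return false if the property name is not recognized or is not
// available in the requested form.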
TEST_F(DBPropertiesTest, Empty) {
  do {
    Options options;
    options.env = env_;
    options.write_buffer_size = 100000;  // Small write buffer
    options.allow_concurrent_memtable_write = false;
    options = CurrentOptions(options);
    CreateAndReopenWithCF({"pikachu"}, options);

    std::string num;
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
    ASSERT_EQ("0", num);

    ASSERT_OK(Put(1, "foo", "v1"));
    ASSERT_EQ("v1", Get(1, "foo"));
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
    ASSERT_EQ("1", num);

    // Block sync calls
    env_->delay_sstable_sync_.store(true, std::memory_order_release);
    ASSERT_OK(Put(1, "k1", std::string(100000, 'x')));  // Fill memtable
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
    ASSERT_EQ("2", num);

    ASSERT_OK(Put(1, "k2", std::string(100000, 'y')));  // Trigger compaction
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
    ASSERT_EQ("1", num);

    ASSERT_EQ("v1", Get(1, "foo"));
    // Release sync calls
    env_->delay_sstable_sync_.store(false, std::memory_order_release);

    ASSERT_OK(db_->DisableFileDeletions());
    ASSERT_TRUE(
        dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
    ASSERT_EQ("0", num);

    ASSERT_OK(db_->DisableFileDeletions());
    ASSERT_TRUE(
        dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
    ASSERT_EQ("0", num);

    ASSERT_OK(db_->DisableFileDeletions());
    ASSERT_TRUE(
        dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
    ASSERT_EQ("0", num);

    ASSERT_OK(db_->EnableFileDeletions(false));
    ASSERT_TRUE(
        dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
    ASSERT_EQ("0", num);

    ASSERT_OK(db_->EnableFileDeletions());
    ASSERT_TRUE(
        dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num));
    ASSERT_EQ("1", num);
  } while (ChangeOptions());
}

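// A new SuperVersion is installed when the in-memory or on-disk state of a
// column family changes (for example, on flush); a plain Put() into the
// existing memtable does not install one. That is what the expectations
// below (v1 == v2 after a Put, v3 > v2 after a Flush) verify.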
TEST_F(DBPropertiesTest, CurrentVersionNumber) {
  uint64_t v1, v2, v3;
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v1));
  ASSERT_OK(Put("12345678", ""));
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v2));
  ASSERT_OK(Flush());
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v3));

  ASSERT_EQ(v1, v2);
  ASSERT_GT(v3, v2);
}

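// GetAggregatedIntProperty() sums an integer-valued property across all
// column families. The test below computes the same sum manually with
// per-handle GetIntProperty() calls and expects both results to match; it
// also checks that a string-only property such as kDBStats cannot be
// aggregated.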
TEST_F(DBPropertiesTest, GetAggregatedIntPropertyTest) {
  const int kKeySize = 100;
  const int kValueSize = 500;
  const int kKeyNum = 100;

  Options options;
  options.env = env_;
  options.create_if_missing = true;
  options.write_buffer_size = (kKeySize + kValueSize) * kKeyNum / 10;
  // Make them never flush
  options.min_write_buffer_number_to_merge = 1000;
  options.max_write_buffer_number = 1000;
  options = CurrentOptions(options);
  CreateAndReopenWithCF({"one", "two", "three", "four"}, options);

  Random rnd(301);
  for (auto* handle : handles_) {
    for (int i = 0; i < kKeyNum; ++i) {
      ASSERT_OK(db_->Put(WriteOptions(), handle, rnd.RandomString(kKeySize),
                         rnd.RandomString(kValueSize)));
    }
  }

  uint64_t manual_sum = 0;
  uint64_t api_sum = 0;
  uint64_t value = 0;
  for (auto* handle : handles_) {
    ASSERT_TRUE(
        db_->GetIntProperty(handle, DB::Properties::kSizeAllMemTables, &value));
    manual_sum += value;
  }
  ASSERT_TRUE(db_->GetAggregatedIntProperty(DB::Properties::kSizeAllMemTables,
                                            &api_sum));
  ASSERT_GT(manual_sum, 0);
  ASSERT_EQ(manual_sum, api_sum);

  ASSERT_FALSE(db_->GetAggregatedIntProperty(DB::Properties::kDBStats, &value));

  uint64_t before_flush_trm;
  uint64_t after_flush_trm;
  for (auto* handle : handles_) {
    ASSERT_TRUE(db_->GetAggregatedIntProperty(
        DB::Properties::kEstimateTableReadersMem, &before_flush_trm));

    // Issue flush and expect larger memory usage of table readers.
    ASSERT_OK(db_->Flush(FlushOptions(), handle));

    ASSERT_TRUE(db_->GetAggregatedIntProperty(
        DB::Properties::kEstimateTableReadersMem, &after_flush_trm));
    ASSERT_GT(after_flush_trm, before_flush_trm);
  }
}

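// Helpers for the aggregated table-property tests below. VerifySimilar()
// asserts that two counters agree within a relative tolerance of
// |a - b| / (a + b) < bias; for example, 100 vs. 110 passes with
// bias = 0.1 because 10 / 210 is roughly 0.048.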
namespace {
void VerifySimilar(uint64_t a, uint64_t b, double bias) {
  ASSERT_EQ(a == 0U, b == 0U);
  if (a == 0) {
    return;
  }
  double dbl_a = static_cast<double>(a);
  double dbl_b = static_cast<double>(b);
  if (dbl_a > dbl_b) {
    ASSERT_LT(static_cast<double>(dbl_a - dbl_b) / (dbl_a + dbl_b), bias);
  } else {
    ASSERT_LT(static_cast<double>(dbl_b - dbl_a) / (dbl_a + dbl_b), bias);
  }
}

void VerifyTableProperties(
    const TableProperties& base_tp, const TableProperties& new_tp,
    double filter_size_bias = CACHE_LINE_SIZE >= 256 ? 0.18 : 0.1,
    double index_size_bias = 0.1, double data_size_bias = 0.1,
    double num_data_blocks_bias = 0.05) {
  VerifySimilar(base_tp.data_size, new_tp.data_size, data_size_bias);
  VerifySimilar(base_tp.index_size, new_tp.index_size, index_size_bias);
  VerifySimilar(base_tp.filter_size, new_tp.filter_size, filter_size_bias);
  VerifySimilar(base_tp.num_data_blocks, new_tp.num_data_blocks,
                num_data_blocks_bias);

  ASSERT_EQ(base_tp.raw_key_size, new_tp.raw_key_size);
  ASSERT_EQ(base_tp.raw_value_size, new_tp.raw_value_size);
  ASSERT_EQ(base_tp.num_entries, new_tp.num_entries);
  ASSERT_EQ(base_tp.num_deletions, new_tp.num_deletions);
  ASSERT_EQ(base_tp.num_range_deletions, new_tp.num_range_deletions);

  // Merge operands may become Puts, so we only have an upper bound on the
  // exact number of merge operands.
  ASSERT_GE(base_tp.num_merge_operands, new_tp.num_merge_operands);
}

void GetExpectedTableProperties(
    TableProperties* expected_tp, const int kKeySize, const int kValueSize,
    const int kPutsPerTable, const int kDeletionsPerTable,
    const int kMergeOperandsPerTable, const int kRangeDeletionsPerTable,
    const int kTableCount, const int kBloomBitsPerKey, const size_t kBlockSize,
    const bool index_key_is_user_key, const bool value_delta_encoding) {
  const int kKeysPerTable =
      kPutsPerTable + kDeletionsPerTable + kMergeOperandsPerTable;
  const int kPutCount = kTableCount * kPutsPerTable;
  const int kDeletionCount = kTableCount * kDeletionsPerTable;
  const int kMergeCount = kTableCount * kMergeOperandsPerTable;
  const int kRangeDeletionCount = kTableCount * kRangeDeletionsPerTable;
  const int kKeyCount =
      kPutCount + kDeletionCount + kMergeCount + kRangeDeletionCount;
  const int kAvgSuccessorSize = kKeySize / 5;
  const int kEncodingSavePerKey = kKeySize / 4;
  expected_tp->raw_key_size = kKeyCount * (kKeySize + 8);
  expected_tp->raw_value_size =
      (kPutCount + kMergeCount + kRangeDeletionCount) * kValueSize;
  expected_tp->num_entries = kKeyCount;
  expected_tp->num_deletions = kDeletionCount + kRangeDeletionCount;
  expected_tp->num_merge_operands = kMergeCount;
  expected_tp->num_range_deletions = kRangeDeletionCount;
  expected_tp->num_data_blocks =
      kTableCount *
      (kKeysPerTable * (kKeySize - kEncodingSavePerKey + kValueSize)) /
      kBlockSize;
  expected_tp->data_size =
      kTableCount * (kKeysPerTable * (kKeySize + 8 + kValueSize));
  expected_tp->index_size =
      expected_tp->num_data_blocks *
      (kAvgSuccessorSize + (index_key_is_user_key ? 0 : 8) -
       // discount 1 byte as value size is not encoded in value delta encoding
       (value_delta_encoding ? 1 : 0));
  expected_tp->filter_size =
      kTableCount * ((kKeysPerTable * kBloomBitsPerKey + 7) / 8 +
                     /*average-ish overhead*/ CACHE_LINE_SIZE / 2);
}
}  // anonymous namespace

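// Each user-facing property string maps to exactly one handler in
// InternalStats::ppt_name_to_info: a string handler, an int handler, or a
// DBImpl-level string handler. The test below also checks that no map key
// ends in a digit, since properties that take a numeric suffix (such as a
// level number) are matched by prefix.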
TEST_F(DBPropertiesTest, ValidatePropertyInfo) {
  for (const auto& ppt_name_and_info : InternalStats::ppt_name_to_info) {
    // If C++ gets a std::string_literal, this would be better to check at
    // compile-time using static_assert.
    ASSERT_TRUE(ppt_name_and_info.first.empty() ||
                !isdigit(ppt_name_and_info.first.back()));

    int count = 0;
    count += (ppt_name_and_info.second.handle_string == nullptr) ? 0 : 1;
    count += (ppt_name_and_info.second.handle_int == nullptr) ? 0 : 1;
    count += (ppt_name_and_info.second.handle_string_dbimpl == nullptr) ? 0 : 1;
    ASSERT_TRUE(count == 1);
  }
}

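// Worked expectation for the assertions below: the first pass writes 20
// files with 2 keys each (40 keys) and the second pass writes 10 files with
// 1 key each (10 keys), i.e. 50 keys in 30 files. With the default
// max_open_files, the estimate samples the 20 newest files (30 keys in 20
// files, 1.5 keys per file) and scales to 30 files, giving 45; with
// max_open_files = -1 every file is read and the exact 50 is reported.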
TEST_F(DBPropertiesTest, ValidateSampleNumber) {
  // When "max_open_files" is -1, we read all the files for
  // "rocksdb.estimate-num-keys" computation, which is the ground truth.
  // Otherwise, we sample the 20 newest files to make an estimation.
  // Formula: latest_20_files_active_key_ratio * total_files
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.level0_stop_writes_trigger = 1000;
  DestroyAndReopen(options);
  int key = 0;
  for (int files = 20; files >= 10; files -= 10) {
    for (int i = 0; i < files; i++) {
      int rows = files / 10;
      for (int j = 0; j < rows; j++) {
        ASSERT_OK(db_->Put(WriteOptions(), std::to_string(++key), "foo"));
      }
      ASSERT_OK(db_->Flush(FlushOptions()));
    }
  }
  std::string num;
  Reopen(options);
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
  ASSERT_EQ("45", num);
  options.max_open_files = -1;
  Reopen(options);
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
  ASSERT_EQ("50", num);
}

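// For the expected properties computed by GetExpectedTableProperties()
// below: with kPutsPerTable = 100, kMergeOperandsPerTable = 15,
// kRangeDeletionsPerTable = 5 and kDeletionsPerTable = 0, each table holds
// 120 entries, so for example raw_key_size is expected to be
// kTableCount * 120 * (kKeySize + 8) bytes, where the extra 8 bytes per key
// are the packed sequence number and value type of the internal key.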
TEST_F(DBPropertiesTest, AggregatedTableProperties) {
  for (int kTableCount = 40; kTableCount <= 100; kTableCount += 30) {
    const int kDeletionsPerTable = 0;
    const int kMergeOperandsPerTable = 15;
    const int kRangeDeletionsPerTable = 5;
    const int kPutsPerTable = 100;
    const int kKeySize = 80;
    const int kValueSize = 200;
    const int kBloomBitsPerKey = 20;

    Options options = CurrentOptions();
    options.level0_file_num_compaction_trigger = 8;
    options.compression = kNoCompression;
    options.create_if_missing = true;
    options.merge_operator.reset(new TestPutOperator());

    BlockBasedTableOptions table_options;
    table_options.filter_policy.reset(
        NewBloomFilterPolicy(kBloomBitsPerKey, false));
    table_options.block_size = 1024;
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));

    DestroyAndReopen(options);

    // Hold open a snapshot to prevent range tombstones from being compacted
    // away.
    ManagedSnapshot snapshot(db_);

    Random rnd(5632);
    for (int table = 1; table <= kTableCount; ++table) {
      for (int i = 0; i < kPutsPerTable; ++i) {
        ASSERT_OK(db_->Put(WriteOptions(), rnd.RandomString(kKeySize),
                           rnd.RandomString(kValueSize)));
      }
      for (int i = 0; i < kDeletionsPerTable; i++) {
        ASSERT_OK(db_->Delete(WriteOptions(), rnd.RandomString(kKeySize)));
      }
      for (int i = 0; i < kMergeOperandsPerTable; i++) {
        ASSERT_OK(db_->Merge(WriteOptions(), rnd.RandomString(kKeySize),
                             rnd.RandomString(kValueSize)));
      }
      for (int i = 0; i < kRangeDeletionsPerTable; i++) {
        std::string start = rnd.RandomString(kKeySize);
        std::string end = start;
        end.resize(kValueSize);
        ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
                                   start, end));
      }
      ASSERT_OK(db_->Flush(FlushOptions()));
    }
    std::string property;
    db_->GetProperty(DB::Properties::kAggregatedTableProperties, &property);
    TableProperties output_tp;
    ParseTablePropertiesString(property, &output_tp);
    bool index_key_is_user_key = output_tp.index_key_is_user_key > 0;
    bool value_is_delta_encoded = output_tp.index_value_is_delta_encoded > 0;

    TableProperties expected_tp;
    GetExpectedTableProperties(
        &expected_tp, kKeySize, kValueSize, kPutsPerTable, kDeletionsPerTable,
        kMergeOperandsPerTable, kRangeDeletionsPerTable, kTableCount,
        kBloomBitsPerKey, table_options.block_size, index_key_is_user_key,
        value_is_delta_encoded);

    VerifyTableProperties(expected_tp, output_tp);
  }
}

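// Reads that reach the SST files populate per-level read latency histograms,
// which are reported through the "rocksdb.cfstats" and
// "rocksdb.cf-file-histogram" properties; ResetStats() clears them. That is
// what the test below inspects across reopen, Get(), and iteration.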
TEST_F(DBPropertiesTest, ReadLatencyHistogramByLevel) {
  Options options = CurrentOptions();
  options.write_buffer_size = 110 << 10;
  options.level0_file_num_compaction_trigger = 6;
  options.num_levels = 4;
  options.compression = kNoCompression;
  options.max_bytes_for_level_base = 4500 << 10;
  options.target_file_size_base = 98 << 10;
  options.max_write_buffer_number = 2;
  options.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
  options.max_open_files = 11;  // Make sure no preloading of table readers

  // RocksDB sanitizes max_open_files to at least 20. Modify it back.
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
      "SanitizeOptions::AfterChangeMaxOpenFiles", [&](void* arg) {
        int* max_open_files = static_cast<int*>(arg);
        *max_open_files = 11;
      });
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();

  BlockBasedTableOptions table_options;
  table_options.no_block_cache = true;

  CreateAndReopenWithCF({"pikachu"}, options);
  int key_index = 0;
  Random rnd(301);
  for (int num = 0; num < 8; num++) {
    ASSERT_OK(Put("foo", "bar"));
    GenerateNewFile(&rnd, &key_index);
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  std::string prop;
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));

  // Get() after flushes; see the latency histogram tracked.
  for (int key = 0; key < key_index; key++) {
    Get(Key(key));
  }
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // Reopen and issue Get(). See the latency tracked.
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  for (int key = 0; key < key_index; key++) {
    Get(Key(key));
  }

  // Test for getting immutable_db_options_.statistics
  ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
                                    "rocksdb.options-statistics", &prop));
  ASSERT_NE(std::string::npos, prop.find("rocksdb.block.cache.miss"));
  ASSERT_EQ(std::string::npos, prop.find("rocksdb.db.f.micros"));

  ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
                                    "rocksdb.cf-file-histogram", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // Reopen and issue iteration. See the latency tracked.
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop));
  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
  {
    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
    for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
    }
    ASSERT_OK(iter->status());
  }
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cf-file-histogram", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // CF 1 should show no histogram.
  ASSERT_TRUE(
      dbfull()->GetProperty(handles_[1], "rocksdb.cf-file-histogram", &prop));
  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
  // Put something and read it back; CF 1 should show a histogram.
  ASSERT_OK(Put(1, "foo", "bar"));
  ASSERT_OK(Flush(1));
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ("bar", Get(1, "foo"));

  ASSERT_TRUE(
      dbfull()->GetProperty(handles_[1], "rocksdb.cf-file-histogram", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // options.max_open_files preloads table readers.
  options.max_open_files = -1;
  ReopenWithColumnFamilies({"default", "pikachu"}, options);
  ASSERT_TRUE(dbfull()->GetProperty(dbfull()->DefaultColumnFamily(),
                                    "rocksdb.cf-file-histogram", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
  for (int key = 0; key < key_index; key++) {
    Get(Key(key));
  }
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // Clear internal stats
  ASSERT_OK(dbfull()->ResetStats());
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cfstats", &prop));
  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
}

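// Per-level aggregated table properties are fetched by appending the level
// number to the property prefix, as done in the loop below. Roughly:
//
//   std::string value;
//   db_->GetProperty(
//       DB::Properties::kAggregatedTablePropertiesAtLevel + std::to_string(2),
//       &value);
//
// The test sums these per-level results and expects them to match the
// whole-DB kAggregatedTableProperties.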
TEST_F(DBPropertiesTest, AggregatedTablePropertiesAtLevel) {
  const int kTableCount = 100;
  const int kDeletionsPerTable = 0;
  const int kMergeOperandsPerTable = 2;
  const int kRangeDeletionsPerTable = 2;
  const int kPutsPerTable = 10;
  const int kKeySize = 50;
  const int kValueSize = 400;
  const int kMaxLevel = 7;
  const int kBloomBitsPerKey = 20;
  Random rnd(301);
  Options options = CurrentOptions();
  options.level0_file_num_compaction_trigger = 8;
  options.compression = kNoCompression;
  options.create_if_missing = true;
  options.level0_file_num_compaction_trigger = 2;
  options.target_file_size_base = 8192;
  options.max_bytes_for_level_base = 10000;
  options.max_bytes_for_level_multiplier = 2;
  // This ensures that no compaction is happening when we call GetProperty().
  options.disable_auto_compactions = true;
  options.merge_operator.reset(new TestPutOperator());

  BlockBasedTableOptions table_options;
  table_options.filter_policy.reset(
      NewBloomFilterPolicy(kBloomBitsPerKey, false));
  table_options.block_size = 1024;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));

  DestroyAndReopen(options);

  // Hold open a snapshot to prevent range tombstones from being compacted
  // away.
  ManagedSnapshot snapshot(db_);

  std::string level_tp_strings[kMaxLevel];
  std::string tp_string;
  TableProperties level_tps[kMaxLevel];
  TableProperties tp, sum_tp, expected_tp;
  for (int table = 1; table <= kTableCount; ++table) {
    for (int i = 0; i < kPutsPerTable; ++i) {
      ASSERT_OK(db_->Put(WriteOptions(), rnd.RandomString(kKeySize),
                         rnd.RandomString(kValueSize)));
    }
    for (int i = 0; i < kDeletionsPerTable; i++) {
      ASSERT_OK(db_->Delete(WriteOptions(), rnd.RandomString(kKeySize)));
    }
    for (int i = 0; i < kMergeOperandsPerTable; i++) {
      ASSERT_OK(db_->Merge(WriteOptions(), rnd.RandomString(kKeySize),
                           rnd.RandomString(kValueSize)));
    }
    for (int i = 0; i < kRangeDeletionsPerTable; i++) {
      std::string start = rnd.RandomString(kKeySize);
      std::string end = start;
      end.resize(kValueSize);
      ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
                                 start, end));
    }
    ASSERT_OK(db_->Flush(FlushOptions()));
    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
    ResetTableProperties(&sum_tp);
    for (int level = 0; level < kMaxLevel; ++level) {
      db_->GetProperty(DB::Properties::kAggregatedTablePropertiesAtLevel +
                           std::to_string(level),
                       &level_tp_strings[level]);
      ParseTablePropertiesString(level_tp_strings[level], &level_tps[level]);
      sum_tp.data_size += level_tps[level].data_size;
      sum_tp.index_size += level_tps[level].index_size;
      sum_tp.filter_size += level_tps[level].filter_size;
      sum_tp.raw_key_size += level_tps[level].raw_key_size;
      sum_tp.raw_value_size += level_tps[level].raw_value_size;
      sum_tp.num_data_blocks += level_tps[level].num_data_blocks;
      sum_tp.num_entries += level_tps[level].num_entries;
      sum_tp.num_deletions += level_tps[level].num_deletions;
      sum_tp.num_merge_operands += level_tps[level].num_merge_operands;
      sum_tp.num_range_deletions += level_tps[level].num_range_deletions;
    }
    db_->GetProperty(DB::Properties::kAggregatedTableProperties, &tp_string);
    ParseTablePropertiesString(tp_string, &tp);
    bool index_key_is_user_key = tp.index_key_is_user_key > 0;
    bool value_is_delta_encoded = tp.index_value_is_delta_encoded > 0;
    ASSERT_EQ(sum_tp.data_size, tp.data_size);
    ASSERT_EQ(sum_tp.index_size, tp.index_size);
    ASSERT_EQ(sum_tp.filter_size, tp.filter_size);
    ASSERT_EQ(sum_tp.raw_key_size, tp.raw_key_size);
    ASSERT_EQ(sum_tp.raw_value_size, tp.raw_value_size);
    ASSERT_EQ(sum_tp.num_data_blocks, tp.num_data_blocks);
    ASSERT_EQ(sum_tp.num_entries, tp.num_entries);
    ASSERT_EQ(sum_tp.num_deletions, tp.num_deletions);
    ASSERT_EQ(sum_tp.num_merge_operands, tp.num_merge_operands);
    ASSERT_EQ(sum_tp.num_range_deletions, tp.num_range_deletions);
    if (table > 3) {
      GetExpectedTableProperties(
          &expected_tp, kKeySize, kValueSize, kPutsPerTable, kDeletionsPerTable,
          kMergeOperandsPerTable, kRangeDeletionsPerTable, table,
          kBloomBitsPerKey, table_options.block_size, index_key_is_user_key,
          value_is_delta_encoded);
      // Gives larger bias here as index block size, filter block size,
      // and data block size become much harder to estimate in this test.
      VerifyTableProperties(expected_tp, tp, CACHE_LINE_SIZE >= 256 ? 0.6 : 0.5,
                            0.5, 0.5, 0.25);
    }
  }
}

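// The write_buffer_size below is 1MB while each value written is ~2MB, so
// every Put() fills the active memtable and the next write switches it to
// the immutable list, which is what advances
// "rocksdb.num-immutable-mem-table"; Flush() then writes the immutable
// memtables out and advances kNumImmutableMemTableFlushed instead.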
TEST_F(DBPropertiesTest, NumImmutableMemTable) {
  do {
    Options options = CurrentOptions();
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    options.max_write_buffer_number = 4;
    options.min_write_buffer_number_to_merge = 3;
    options.write_buffer_size = 1000000;
    options.max_write_buffer_size_to_maintain =
        5 * static_cast<int64_t>(options.write_buffer_size);
    CreateAndReopenWithCF({"pikachu"}, options);

    std::string big_value(1000000 * 2, 'x');
    std::string num;
    uint64_t value;
    SetPerfLevel(kEnableTime);
    ASSERT_TRUE(GetPerfLevel() == kEnableTime);

    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k1", big_value));
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "0");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num));
    ASSERT_EQ(num, "0");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
    ASSERT_EQ(num, "1");
    get_perf_context()->Reset();
    Get(1, "k1");
    ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));

    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "1");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
    ASSERT_EQ(num, "1");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-imm-mem-tables", &num));
    ASSERT_EQ(num, "1");

    get_perf_context()->Reset();
    Get(1, "k1");
    ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count));
    get_perf_context()->Reset();
    Get(1, "k2");
    ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));

    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", big_value));
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.cur-size-active-mem-table", &num));
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "2");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
    ASSERT_EQ(num, "1");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-entries-imm-mem-tables", &num));
    ASSERT_EQ(num, "2");
    get_perf_context()->Reset();
    Get(1, "k2");
    ASSERT_EQ(2, static_cast<int>(get_perf_context()->get_from_memtable_count));
    get_perf_context()->Reset();
    Get(1, "k3");
    ASSERT_EQ(1, static_cast<int>(get_perf_context()->get_from_memtable_count));
    get_perf_context()->Reset();
    Get(1, "k1");
    ASSERT_EQ(3, static_cast<int>(get_perf_context()->get_from_memtable_count));

    ASSERT_OK(Flush(1));
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], "rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "0");
    ASSERT_TRUE(dbfull()->GetProperty(
        handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num));
    ASSERT_EQ(num, "3");
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.cur-size-active-mem-table", &value));
    // "192" is the size of the metadata of two empty skiplists; this would
    // break if we change the default skiplist implementation.
    ASSERT_GE(value, 192);

    uint64_t int_num;
    uint64_t base_total_size;
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.estimate-num-keys", &base_total_size));

    ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k2"));
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", ""));
    ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k3"));
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.num-deletes-active-mem-table", &int_num));
    ASSERT_EQ(int_num, 2U);
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.num-entries-active-mem-table", &int_num));
    ASSERT_EQ(int_num, 3U);

    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.num-entries-imm-mem-tables", &int_num));
    ASSERT_EQ(int_num, 4U);
    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.num-deletes-imm-mem-tables", &int_num));
    ASSERT_EQ(int_num, 2U);

    ASSERT_TRUE(dbfull()->GetIntProperty(
        handles_[1], "rocksdb.estimate-num-keys", &int_num));
    ASSERT_EQ(int_num, base_total_size + 1);

    SetPerfLevel(kDisable);
    ASSERT_TRUE(GetPerfLevel() == kDisable);
  } while (ChangeCompactOptions());
}

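// The test below pins both background thread pools (sized to one thread
// each) with SleepingBackgroundTask so that no flush or compaction can run
// until the tasks are woken up; this makes properties such as
// "rocksdb.mem-table-flush-pending" and "rocksdb.compaction-pending"
// observable at deterministic points.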
// TODO(techdept) : Disabled flaky test #12863555
TEST_F(DBPropertiesTest, DISABLED_GetProperty) {
  // Set the size of both background thread pools to 1 and block them.
  env_->SetBackgroundThreads(1, Env::HIGH);
  env_->SetBackgroundThreads(1, Env::LOW);
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  test::SleepingBackgroundTask sleeping_task_high;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
                 &sleeping_task_high, Env::Priority::HIGH);

  Options options = CurrentOptions();
  WriteOptions writeOpt = WriteOptions();
  writeOpt.disableWAL = true;
  options.compaction_style = kCompactionStyleUniversal;
  options.level0_file_num_compaction_trigger = 1;
  options.compaction_options_universal.size_ratio = 50;
  options.max_background_compactions = 1;
  options.max_background_flushes = 1;
  options.max_write_buffer_number = 10;
  options.min_write_buffer_number_to_merge = 1;
  options.max_write_buffer_size_to_maintain = 0;
  options.write_buffer_size = 1000000;
  Reopen(options);

  std::string big_value(1000000 * 2, 'x');
  std::string num;
  uint64_t int_num;
  SetPerfLevel(kEnableTime);

  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  ASSERT_EQ(int_num, 0U);
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.estimate-live-data-size", &int_num));
  ASSERT_EQ(int_num, 0U);

  ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
  ASSERT_EQ(num, "0");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
  ASSERT_EQ(num, "0");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
  ASSERT_EQ(num, "0");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
  ASSERT_EQ(num, "1");
  get_perf_context()->Reset();

  ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
  ASSERT_EQ(num, "1");
  ASSERT_OK(dbfull()->Delete(writeOpt, "k-non-existing"));
  ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
  ASSERT_EQ(num, "2");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
  ASSERT_EQ(num, "1");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
  ASSERT_EQ(num, "0");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
  ASSERT_EQ(num, "2");
  // Verify the same set of properties through GetIntProperty
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.num-immutable-mem-table", &int_num));
  ASSERT_EQ(int_num, 2U);
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.mem-table-flush-pending", &int_num));
  ASSERT_EQ(int_num, 1U);
  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.compaction-pending", &int_num));
  ASSERT_EQ(int_num, 0U);
  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num));
  ASSERT_EQ(int_num, 2U);

  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  ASSERT_EQ(int_num, 0U);

  sleeping_task_high.WakeUp();
  sleeping_task_high.WaitUntilDone();
  dbfull()->TEST_WaitForFlushMemTable();

  ASSERT_OK(dbfull()->Put(writeOpt, "k4", big_value));
  ASSERT_OK(dbfull()->Put(writeOpt, "k5", big_value));
  dbfull()->TEST_WaitForFlushMemTable();
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
  ASSERT_EQ(num, "0");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
  ASSERT_EQ(num, "1");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
  ASSERT_EQ(num, "4");

  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  ASSERT_GT(int_num, 0U);

  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();

  // Wait for compaction to be done. This is important because otherwise
  // RocksDB might schedule a compaction when reopening the database, failing
  // assertion (A) as a result.
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  options.max_open_files = 10;
  Reopen(options);
  // After reopening, no table reader is loaded, so no memory for table
  // readers.
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  ASSERT_EQ(int_num, 0U);  // (A)
  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num));
  ASSERT_GT(int_num, 0U);

  // After reading a key, at least one table reader is loaded.
  Get("k5");
  ASSERT_TRUE(
      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
  ASSERT_GT(int_num, 0U);

  // Test rocksdb.num-live-versions
  {
    options.level0_file_num_compaction_trigger = 20;
    Reopen(options);
    ASSERT_TRUE(
        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
    ASSERT_EQ(int_num, 1U);

    // Use an iterator to hold current version
    std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));

    ASSERT_OK(dbfull()->Put(writeOpt, "k6", big_value));
    ASSERT_OK(Flush());
    ASSERT_TRUE(
        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
    ASSERT_EQ(int_num, 2U);

    // Use an iterator to hold current version
    std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));

    ASSERT_OK(dbfull()->Put(writeOpt, "k7", big_value));
    ASSERT_OK(Flush());
    ASSERT_TRUE(
        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
    ASSERT_EQ(int_num, 3U);

    iter2.reset();
    ASSERT_TRUE(
        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
    ASSERT_EQ(int_num, 2U);

    iter1.reset();
    ASSERT_TRUE(
        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
    ASSERT_EQ(int_num, 1U);
  }
}

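// The three memtable-size properties below are related as follows:
// "cur-size-active-mem-table" covers only the active memtable,
// "cur-size-all-mem-tables" adds the unflushed immutable memtables, and
// "size-all-mem-tables" additionally counts flushed memtables that are
// still pinned in memory (for example, by outstanding iterators), which is
// why the phases of the test only diverge once iterators are held open.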
TEST_F(DBPropertiesTest, ApproximateMemoryUsage) {
  const int kNumRounds = 10;
  // TODO(noetzli) kFlushesPerRound does not really correlate with how many
  // flushes happen.
  const int kFlushesPerRound = 10;
  const int kWritesPerFlush = 10;
  const int kKeySize = 100;
  const int kValueSize = 1000;
  Options options;
  options.write_buffer_size = 1000;  // small write buffer
  options.min_write_buffer_number_to_merge = 4;
  options.compression = kNoCompression;
  options.create_if_missing = true;
  options = CurrentOptions(options);
  DestroyAndReopen(options);

  Random rnd(301);

  std::vector<Iterator*> iters;

  uint64_t active_mem;
  uint64_t unflushed_mem;
  uint64_t all_mem;
  uint64_t prev_all_mem;

  // Phase 0. Verify that the initial values of all these properties are the
  // same, as we have no mem-tables.
  dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
  dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
  dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
  ASSERT_EQ(all_mem, active_mem);
  ASSERT_EQ(all_mem, unflushed_mem);

  // Phase 1. Simply issue Put() and expect "cur-size-all-mem-tables" to equal
  // "size-all-mem-tables".
  for (int r = 0; r < kNumRounds; ++r) {
    for (int f = 0; f < kFlushesPerRound; ++f) {
      for (int w = 0; w < kWritesPerFlush; ++w) {
        ASSERT_OK(
            Put(rnd.RandomString(kKeySize), rnd.RandomString(kValueSize)));
      }
    }
    // Make sure that there is no flush between getting the two properties.
    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
    dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
    // In the no-iterator case, these two numbers should be the same.
    ASSERT_EQ(unflushed_mem, all_mem);
  }
  prev_all_mem = all_mem;

  // Phase 2. Keep issuing Put() but also create new iterators. This time we
  // expect "size-all-mem-tables" > "cur-size-all-mem-tables".
  for (int r = 0; r < kNumRounds; ++r) {
    iters.push_back(db_->NewIterator(ReadOptions()));
    for (int f = 0; f < kFlushesPerRound; ++f) {
      for (int w = 0; w < kWritesPerFlush; ++w) {
        ASSERT_OK(
            Put(rnd.RandomString(kKeySize), rnd.RandomString(kValueSize)));
      }
    }
    // Force flush to prevent flush from happening between getting the
    // properties or after getting the properties and before the new round.
    ASSERT_OK(Flush());

    // In the second round, add iterators.
    dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
    dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
    dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
    ASSERT_GT(all_mem, active_mem);
    ASSERT_GT(all_mem, unflushed_mem);
    ASSERT_GT(all_mem, prev_all_mem);
    prev_all_mem = all_mem;
  }

  // Phase 3. Delete iterators and expect "size-all-mem-tables" to shrink
  // whenever we release an iterator.
  for (auto* iter : iters) {
    ASSERT_OK(iter->status());
    delete iter;
    dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
    // Expect the size to shrink.
    ASSERT_LT(all_mem, prev_all_mem);
    prev_all_mem = all_mem;
  }

  // Phase 4. With all iterators released, expect all three counters to be the
  // same.
  dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
  dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
  dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
  ASSERT_EQ(active_mem, unflushed_mem);
  ASSERT_EQ(unflushed_mem, all_mem);

  // Phase 5. Reopen, and expect all three counters to be the same again.
  Reopen(options);
  dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
  dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
  dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
  ASSERT_EQ(active_mem, unflushed_mem);
  ASSERT_EQ(unflushed_mem, all_mem);
}

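// Verifies "rocksdb.estimate-pending-compaction-bytes": it should be zero
// with a single L0 file, become positive once enough L0 files accumulate to
// trigger a compaction (which is blocked here by a sleeping background task),
// and drop back to zero after the compaction runs.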
TEST_F(DBPropertiesTest, EstimatePendingCompBytes) {
  // Set the size of both background thread pools to 1 and block them.
  env_->SetBackgroundThreads(1, Env::HIGH);
  env_->SetBackgroundThreads(1, Env::LOW);
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);

  Options options = CurrentOptions();
  WriteOptions writeOpt = WriteOptions();
  writeOpt.disableWAL = true;
  options.compaction_style = kCompactionStyleLevel;
  options.level0_file_num_compaction_trigger = 2;
  options.max_background_compactions = 1;
  options.max_background_flushes = 1;
  options.max_write_buffer_number = 10;
  options.min_write_buffer_number_to_merge = 1;
  options.max_write_buffer_size_to_maintain = 0;
  options.write_buffer_size = 1000000;
  Reopen(options);

  std::string big_value(1000000 * 2, 'x');
  std::string num;
  uint64_t int_num;

  ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
  ASSERT_OK(Flush());
  ASSERT_TRUE(dbfull()->GetIntProperty(
      "rocksdb.estimate-pending-compaction-bytes", &int_num));
  ASSERT_EQ(int_num, 0U);

  ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
  ASSERT_OK(Flush());
  ASSERT_TRUE(dbfull()->GetIntProperty(
      "rocksdb.estimate-pending-compaction-bytes", &int_num));
  ASSERT_GT(int_num, 0U);

  ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
  ASSERT_OK(Flush());
  ASSERT_TRUE(dbfull()->GetIntProperty(
      "rocksdb.estimate-pending-compaction-bytes", &int_num));
  ASSERT_GT(int_num, 0U);

  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();

  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_TRUE(dbfull()->GetIntProperty(
      "rocksdb.estimate-pending-compaction-bytes", &int_num));
  ASSERT_EQ(int_num, 0U);
}

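// Verifies CompressionRatioAtLevel(): -1.0 for an empty level, below 1.0 for
// uncompressed L0 data, and well above 1.0 after compacting highly redundant
// values into Snappy-compressed L1 files.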
TEST_F(DBPropertiesTest, EstimateCompressionRatio) {
  if (!Snappy_Supported()) {
    return;
  }
  const int kNumL0Files = 3;
  const int kNumEntriesPerFile = 1000;

  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.num_levels = 3;
  Reopen(options);

  ASSERT_OK(db_->SetOptions(
      {{"compression_per_level", "kNoCompression:kSnappyCompression"}}));
  auto opts = db_->GetOptions();
  ASSERT_EQ(opts.compression_per_level.size(), 2);
  ASSERT_EQ(opts.compression_per_level[0], kNoCompression);
  ASSERT_EQ(opts.compression_per_level[1], kSnappyCompression);

  // The compression ratio is -1.0 when the level has no open files.
  ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);

  const std::string kVal(100, 'a');
  for (int i = 0; i < kNumL0Files; ++i) {
    for (int j = 0; j < kNumEntriesPerFile; ++j) {
      // Put common data ("key") at the end to prevent delta encoding from
      // compressing the key effectively.
      std::string key = std::to_string(i) + std::to_string(j) + "key";
      ASSERT_OK(dbfull()->Put(WriteOptions(), key, kVal));
    }
    ASSERT_OK(Flush());
  }

  // No compression at L0, so the ratio is less than one.
  ASSERT_LT(CompressionRatioAtLevel(0), 1.0);
  ASSERT_GT(CompressionRatioAtLevel(0), 0.0);
  ASSERT_EQ(CompressionRatioAtLevel(1), -1.0);

  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));

  ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
  // Data at L1 should be highly compressed thanks to Snappy and redundant data
  // in values (ratio is 12.846 as of 4/19/2016).
  ASSERT_GT(CompressionRatioAtLevel(1), 10.0);
}

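// A user-defined table properties collector that counts the keys added to
// each SST file and publishes the count (varint-encoded) plus a fixed message
// in the file's user-collected properties.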
class CountingUserTblPropCollector : public TablePropertiesCollector {
 public:
  const char* Name() const override { return "CountingUserTblPropCollector"; }

  Status Finish(UserCollectedProperties* properties) override {
    std::string encoded;
    PutVarint32(&encoded, count_);
    *properties = UserCollectedProperties{
        {"CountingUserTblPropCollector", message_},
        {"Count", encoded},
    };
    return Status::OK();
  }

  Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
                    EntryType /*type*/, SequenceNumber /*seq*/,
                    uint64_t /*file_size*/) override {
    ++count_;
    return Status::OK();
  }

  UserCollectedProperties GetReadableProperties() const override {
    return UserCollectedProperties{};
  }

 private:
  std::string message_ = "Rocksdb";
  uint32_t count_ = 0;
};

class CountingUserTblPropCollectorFactory
    : public TablePropertiesCollectorFactory {
 public:
  explicit CountingUserTblPropCollectorFactory(
      uint32_t expected_column_family_id)
      : expected_column_family_id_(expected_column_family_id),
        num_created_(0) {}
  TablePropertiesCollector* CreateTablePropertiesCollector(
      TablePropertiesCollectorFactory::Context context) override {
    EXPECT_EQ(expected_column_family_id_, context.column_family_id);
    num_created_++;
    return new CountingUserTblPropCollector();
  }
  const char* Name() const override {
    return "CountingUserTblPropCollectorFactory";
  }
  void set_expected_column_family_id(uint32_t v) {
    expected_column_family_id_ = v;
  }
  uint32_t expected_column_family_id_;
  uint32_t num_created_;
};

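// Counts deletion entries in each SST file and reports the file as needing
// compaction (via NeedCompact()) once it contains more than 10 deletes.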
class CountingDeleteTabPropCollector : public TablePropertiesCollector {
 public:
  const char* Name() const override { return "CountingDeleteTabPropCollector"; }

  Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
                    EntryType type, SequenceNumber /*seq*/,
                    uint64_t /*file_size*/) override {
    if (type == kEntryDelete) {
      num_deletes_++;
    }
    return Status::OK();
  }

  bool NeedCompact() const override { return num_deletes_ > 10; }

  UserCollectedProperties GetReadableProperties() const override {
    return UserCollectedProperties{};
  }

  Status Finish(UserCollectedProperties* properties) override {
    *properties =
        UserCollectedProperties{{"num_delete", std::to_string(num_deletes_)}};
    return Status::OK();
  }

 private:
  uint32_t num_deletes_ = 0;
};

class CountingDeleteTabPropCollectorFactory
    : public TablePropertiesCollectorFactory {
 public:
  TablePropertiesCollector* CreateTablePropertiesCollector(
      TablePropertiesCollectorFactory::Context /*context*/) override {
    return new CountingDeleteTabPropCollector();
  }
  const char* Name() const override {
    return "CountingDeleteTabPropCollectorFactory";
  }
};

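// Counts how many data blocks were sampled for compression (i.e. BlockAdd()
// reported a non-zero compressed size estimate) and exposes the count as the
// "NumSampledBlocks" user property.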
class BlockCountingTablePropertiesCollector : public TablePropertiesCollector {
 public:
  static const std::string kNumSampledBlocksPropertyName;

  const char* Name() const override {
    return "BlockCountingTablePropertiesCollector";
  }

  Status Finish(UserCollectedProperties* properties) override {
    (*properties)[kNumSampledBlocksPropertyName] =
        std::to_string(num_sampled_blocks_);
    return Status::OK();
  }

  Status AddUserKey(const Slice& /*user_key*/, const Slice& /*value*/,
                    EntryType /*type*/, SequenceNumber /*seq*/,
                    uint64_t /*file_size*/) override {
    return Status::OK();
  }

  void BlockAdd(uint64_t /* block_uncomp_bytes */,
                uint64_t block_compressed_bytes_fast,
                uint64_t block_compressed_bytes_slow) override {
    if (block_compressed_bytes_fast > 0 || block_compressed_bytes_slow > 0) {
      num_sampled_blocks_++;
    }
  }

  UserCollectedProperties GetReadableProperties() const override {
    return UserCollectedProperties{
        {kNumSampledBlocksPropertyName, std::to_string(num_sampled_blocks_)},
    };
  }

 private:
  uint32_t num_sampled_blocks_ = 0;
};

const std::string
    BlockCountingTablePropertiesCollector::kNumSampledBlocksPropertyName =
        "NumSampledBlocks";

class BlockCountingTablePropertiesCollectorFactory
    : public TablePropertiesCollectorFactory {
 public:
  const char* Name() const override {
    return "BlockCountingTablePropertiesCollectorFactory";
  }

  TablePropertiesCollector* CreateTablePropertiesCollector(
      TablePropertiesCollectorFactory::Context /* context */) override {
    return new BlockCountingTablePropertiesCollector();
  }
};

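// Verifies that properties written by a user-defined collector are returned
// by GetPropertiesOfAllTables() and that the collector is invoked again for
// files produced by compaction.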
TEST_F(DBPropertiesTest, GetUserDefinedTableProperties) {
  Options options = CurrentOptions();
  options.level0_file_num_compaction_trigger = (1 << 30);
  options.table_properties_collector_factories.resize(1);
  std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
      std::make_shared<CountingUserTblPropCollectorFactory>(0);
  options.table_properties_collector_factories[0] = collector_factory;
  Reopen(options);
  // Create 4 tables
  for (int table = 0; table < 4; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
      ASSERT_OK(
          db_->Put(WriteOptions(), std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(db_->Flush(FlushOptions()));
  }

  TablePropertiesCollection props;
  ASSERT_OK(db_->GetPropertiesOfAllTables(&props));
  ASSERT_EQ(4U, props.size());
  uint32_t sum = 0;
  for (const auto& item : props) {
    auto& user_collected = item.second->user_collected_properties;
    ASSERT_TRUE(user_collected.find("CountingUserTblPropCollector") !=
                user_collected.end());
    ASSERT_EQ(user_collected.at("CountingUserTblPropCollector"), "Rocksdb");
    ASSERT_TRUE(user_collected.find("Count") != user_collected.end());
    Slice key(user_collected.at("Count"));
    uint32_t count;
    ASSERT_TRUE(GetVarint32(&key, &count));
    sum += count;
  }
  ASSERT_EQ(10u + 11u + 12u + 13u, sum);

  ASSERT_GT(collector_factory->num_created_, 0U);
  collector_factory->num_created_ = 0;
  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
  ASSERT_GT(collector_factory->num_created_, 0U);
}

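// Verifies that the column family id passed to the collector factory's
// Context matches the column family that is being flushed or compacted.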
TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
  Options options = CurrentOptions();
  options.level0_file_num_compaction_trigger = 3;
  options.table_properties_collector_factories.resize(1);
  std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
      std::make_shared<CountingUserTblPropCollectorFactory>(1);
  options.table_properties_collector_factories[0] = collector_factory;
  CreateAndReopenWithCF({"pikachu"}, options);
  // Create 2 files
  for (int table = 0; table < 2; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
      ASSERT_OK(Put(1, std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(Flush(1));
  }
  ASSERT_GT(collector_factory->num_created_, 0U);

  collector_factory->num_created_ = 0;
  // Trigger automatic compactions.
  for (int table = 0; table < 3; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
      ASSERT_OK(Put(1, std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(Flush(1));
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_GT(collector_factory->num_created_, 0U);

  collector_factory->num_created_ = 0;
  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
  ASSERT_GT(collector_factory->num_created_, 0U);

  // Come back to write to the default column family.
  collector_factory->num_created_ = 0;
  collector_factory->set_expected_column_family_id(0);  // default CF
  // Create 2 tables in the default column family.
  for (int table = 0; table < 2; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
      ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(Flush());
  }
  ASSERT_GT(collector_factory->num_created_, 0U);

  collector_factory->num_created_ = 0;
  // Trigger automatic compactions.
  for (int table = 0; table < 3; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
      ASSERT_OK(Put(std::to_string(table * 100 + i), "val"));
    }
    ASSERT_OK(Flush());
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_GT(collector_factory->num_created_, 0U);

  collector_factory->num_created_ = 0;
  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));
  ASSERT_GT(collector_factory->num_created_, 0U);
}

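// Verifies that a file marked by NeedCompact() gets compacted away, so that
// iterating over the deleted range skips only a small number of tombstones.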
TEST_F(DBPropertiesTest, TablePropertiesNeedCompactTest) {
  Random rnd(301);

  Options options;
  options.create_if_missing = true;
  options.write_buffer_size = 4096;
  options.max_write_buffer_number = 8;
  options.level0_file_num_compaction_trigger = 2;
  options.level0_slowdown_writes_trigger = 2;
  options.level0_stop_writes_trigger = 4;
  options.target_file_size_base = 2048;
  options.max_bytes_for_level_base = 10240;
  options.max_bytes_for_level_multiplier = 4;
  options.soft_pending_compaction_bytes_limit = 1024 * 1024;
  options.num_levels = 8;
  options.env = env_;

  std::shared_ptr<TablePropertiesCollectorFactory> collector_factory =
      std::make_shared<CountingDeleteTabPropCollectorFactory>();
  options.table_properties_collector_factories.resize(1);
  options.table_properties_collector_factories[0] = collector_factory;

  DestroyAndReopen(options);

  const int kMaxKey = 1000;
  for (int i = 0; i < kMaxKey; i++) {
    ASSERT_OK(Put(Key(i), rnd.RandomString(102)));
    ASSERT_OK(Put(Key(kMaxKey + i), rnd.RandomString(102)));
  }
  ASSERT_OK(Flush());
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  if (NumTableFilesAtLevel(0) == 1) {
    // Clear Level 0 so that when we later flush a file with deletions,
    // we don't trigger an organic compaction.
    ASSERT_OK(Put(Key(0), ""));
    ASSERT_OK(Put(Key(kMaxKey * 2), ""));
    ASSERT_OK(Flush());
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);

  {
    int c = 0;
    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
    iter->Seek(Key(kMaxKey - 100));
    while (iter->Valid() && iter->key().compare(Key(kMaxKey + 100)) < 0) {
      iter->Next();
      ++c;
    }
    ASSERT_OK(iter->status());
    ASSERT_EQ(c, 200);
  }

  ASSERT_OK(Delete(Key(0)));
  for (int i = kMaxKey - 100; i < kMaxKey + 100; i++) {
    ASSERT_OK(Delete(Key(i)));
  }
  ASSERT_OK(Delete(Key(kMaxKey * 2)));

  ASSERT_OK(Flush());
  ASSERT_OK(dbfull()->TEST_WaitForCompact());

  {
    SetPerfLevel(kEnableCount);
    get_perf_context()->Reset();
    int c = 0;
    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
    iter->Seek(Key(kMaxKey - 100));
    while (iter->Valid() && iter->key().compare(Key(kMaxKey + 100)) < 0) {
      iter->Next();
    }
    ASSERT_OK(iter->status());
    ASSERT_EQ(c, 0);
    ASSERT_LT(get_perf_context()->internal_delete_skipped_count, 30u);
    ASSERT_LT(get_perf_context()->internal_key_skipped_count, 30u);
    SetPerfLevel(kDisable);
  }
}

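// Verifies that the need-compaction hint stored in the table properties is
// honored across a DB restart: compaction is triggered on reopen even though
// the L0 file count is below the compaction trigger.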
TEST_F(DBPropertiesTest, NeedCompactHintPersistentTest) {
  Random rnd(301);

  Options options;
  options.create_if_missing = true;
  options.max_write_buffer_number = 8;
  options.level0_file_num_compaction_trigger = 10;
  options.level0_slowdown_writes_trigger = 10;
  options.level0_stop_writes_trigger = 10;
  options.disable_auto_compactions = true;
  options.env = env_;

  std::shared_ptr<TablePropertiesCollectorFactory> collector_factory =
      std::make_shared<CountingDeleteTabPropCollectorFactory>();
  options.table_properties_collector_factories.resize(1);
  options.table_properties_collector_factories[0] = collector_factory;

  DestroyAndReopen(options);

  const int kMaxKey = 100;
  for (int i = 0; i < kMaxKey; i++) {
    ASSERT_OK(Put(Key(i), ""));
  }
  ASSERT_OK(Flush());
  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());

  for (int i = 1; i < kMaxKey - 1; i++) {
    ASSERT_OK(Delete(Key(i)));
  }
  ASSERT_OK(Flush());
  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);

  // Restart the DB. Although the number of files didn't reach
  // options.level0_file_num_compaction_trigger, compaction should
  // still be triggered because of the need-compaction hint.
  options.disable_auto_compactions = false;
  Reopen(options);
  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  {
    SetPerfLevel(kEnableCount);
    get_perf_context()->Reset();
    int c = 0;
    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
    for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
      c++;
    }
    ASSERT_OK(iter->status());
    ASSERT_EQ(c, 2);
    ASSERT_EQ(get_perf_context()->internal_delete_skipped_count, 0);
    // We iterate every key twice. Is it a bug?
    ASSERT_LE(get_perf_context()->internal_key_skipped_count, 2);
    SetPerfLevel(kDisable);
  }
}

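// Verifies that BlockAdd() reports compression-sampling results to the table
// properties collector only when sample_for_compression is enabled.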
// Excluded from RocksDB lite tests due to `GetPropertiesOfAllTables()` usage.
TEST_F(DBPropertiesTest, BlockAddForCompressionSampling) {
  // Sampled compression requires at least one of the following four types.
  if (!Snappy_Supported() && !Zlib_Supported() && !LZ4_Supported() &&
      !ZSTD_Supported()) {
    return;
  }

  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.table_properties_collector_factories.emplace_back(
      std::make_shared<BlockCountingTablePropertiesCollectorFactory>());

  for (bool sample_for_compression : {false, true}) {
    // For simplicity/determinism, sample 100% when enabled, or 0% when disabled
    options.sample_for_compression = sample_for_compression ? 1 : 0;

    DestroyAndReopen(options);

    // Setup the following LSM:
    //
    // L0_0 ["a", "b"]
    // L1_0 ["a", "b"]
    //
    // L0_0 was created by flush. L1_0 was created by compaction. Each file
    // contains one data block.
    for (int i = 0; i < 3; ++i) {
      ASSERT_OK(Put("a", "val"));
      ASSERT_OK(Put("b", "val"));
      ASSERT_OK(Flush());
      if (i == 1) {
        ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
      }
    }

    // A `BlockAdd()` should have been seen for files generated by flush or
    // compaction when `sample_for_compression` is enabled.
    TablePropertiesCollection file_to_props;
    ASSERT_OK(db_->GetPropertiesOfAllTables(&file_to_props));
    ASSERT_EQ(2, file_to_props.size());
    for (const auto& file_and_props : file_to_props) {
      auto& user_props = file_and_props.second->user_collected_properties;
      ASSERT_TRUE(user_props.find(BlockCountingTablePropertiesCollector::
                                      kNumSampledBlocksPropertyName) !=
                  user_props.end());
      ASSERT_EQ(user_props.at(BlockCountingTablePropertiesCollector::
                                  kNumSampledBlocksPropertyName),
                std::to_string(sample_for_compression ? 1 : 0));
    }
  }
}

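// Parameterized on whether the "fast" (LZ4/Snappy) or "slow" (ZSTD/Zlib)
// compression size estimate is being checked.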
class CompressionSamplingDBPropertiesTest
    : public DBPropertiesTest,
      public ::testing::WithParamInterface<bool> {
 public:
  CompressionSamplingDBPropertiesTest() : fast_(GetParam()) {}

 protected:
  const bool fast_;
};

INSTANTIATE_TEST_CASE_P(CompressionSamplingDBPropertiesTest,
                        CompressionSamplingDBPropertiesTest, ::testing::Bool());

// Excluded from RocksDB lite tests due to `GetPropertiesOfAllTables()` usage.
TEST_P(CompressionSamplingDBPropertiesTest,
       EstimateDataSizeWithCompressionSampling) {
  Options options = CurrentOptions();
  if (fast_) {
    // One of the following light compression libraries must be present.
    if (LZ4_Supported()) {
      options.compression = kLZ4Compression;
    } else if (Snappy_Supported()) {
      options.compression = kSnappyCompression;
    } else {
      return;
    }
  } else {
    // One of the following heavy compression libraries must be present.
    if (ZSTD_Supported()) {
      options.compression = kZSTD;
    } else if (Zlib_Supported()) {
      options.compression = kZlibCompression;
    } else {
      return;
    }
  }
  options.disable_auto_compactions = true;
  // For simplicity/determinism, sample 100%.
  options.sample_for_compression = 1;
  Reopen(options);

  // Setup the following LSM:
  //
  // L0_0 ["a", "b"]
  // L1_0 ["a", "b"]
  //
  // L0_0 was created by flush. L1_0 was created by compaction. Each file
  // contains one data block. The value consists of compressible data so the
  // data block should be stored compressed.
  std::string val(1024, 'a');
  for (int i = 0; i < 3; ++i) {
    ASSERT_OK(Put("a", val));
    ASSERT_OK(Put("b", val));
    ASSERT_OK(Flush());
    if (i == 1) {
      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
    }
  }

  TablePropertiesCollection file_to_props;
  ASSERT_OK(db_->GetPropertiesOfAllTables(&file_to_props));
  ASSERT_EQ(2, file_to_props.size());
  for (const auto& file_and_props : file_to_props) {
    ASSERT_GT(file_and_props.second->data_size, 0);
    if (fast_) {
      ASSERT_EQ(file_and_props.second->data_size,
                file_and_props.second->fast_compression_estimated_data_size);
    } else {
      ASSERT_EQ(file_and_props.second->data_size,
                file_and_props.second->slow_compression_estimated_data_size);
    }
  }
}

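// More deletions than insertions should not make "rocksdb.estimate-num-keys"
// underflow below zero.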
TEST_F(DBPropertiesTest, EstimateNumKeysUnderflow) {
  Options options = CurrentOptions();
  Reopen(options);
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_OK(Delete("foo"));
  ASSERT_OK(Delete("foo"));
  uint64_t num_keys = 0;
  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &num_keys));
  ASSERT_EQ(0, num_keys);
}

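// Verifies "rocksdb.estimate-oldest-key-time", which is only supported for
// FIFO compaction; with mock time, the estimate reflects when the oldest
// surviving key was written.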
TEST_F(DBPropertiesTest, EstimateOldestKeyTime) {
  uint64_t oldest_key_time = 0;
  Options options = CurrentOptions();
  SetTimeElapseOnlySleepOnReopen(&options);

  // "rocksdb.estimate-oldest-key-time" is only available for FIFO compaction.
  for (auto compaction : {kCompactionStyleLevel, kCompactionStyleUniversal,
                          kCompactionStyleNone}) {
    options.compaction_style = compaction;
    options.create_if_missing = true;
    DestroyAndReopen(options);
    ASSERT_OK(Put("foo", "bar"));
    ASSERT_FALSE(dbfull()->GetIntProperty(
        DB::Properties::kEstimateOldestKeyTime, &oldest_key_time));
  }

  int64_t mock_start_time;
  ASSERT_OK(env_->GetCurrentTime(&mock_start_time));

  options.compaction_style = kCompactionStyleFIFO;
  options.ttl = 300;
  options.max_open_files = -1;
  options.compaction_options_fifo.allow_compaction = false;
  DestroyAndReopen(options);

  env_->MockSleepForSeconds(100);
  ASSERT_OK(Put("k1", "v1"));
  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
                                       &oldest_key_time));
  ASSERT_EQ(100, oldest_key_time - mock_start_time);
  ASSERT_OK(Flush());
  ASSERT_EQ("1", FilesPerLevel());
  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
                                       &oldest_key_time));
  ASSERT_EQ(100, oldest_key_time - mock_start_time);

  env_->MockSleepForSeconds(100);  // -> 200
  ASSERT_OK(Put("k2", "v2"));
  ASSERT_OK(Flush());
  ASSERT_EQ("2", FilesPerLevel());
  ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
                                       &oldest_key_time));
2020-08-11 19:39:49 +00:00
|
|
|
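// Flushing newer data (k2) does not change the estimate; it still reflects the oldest key.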
ASSERT_EQ(100, oldest_key_time - mock_start_time);
|
2017-10-23 22:22:05 +00:00
|
|
|
|
2020-08-11 19:39:49 +00:00
|
|
|
env_->MockSleepForSeconds(100); // -> 300
|
2017-10-23 22:22:05 +00:00
|
|
|
ASSERT_OK(Put("k3", "v3"));
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
ASSERT_EQ("3", FilesPerLevel());
|
|
|
|
ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
|
|
|
|
&oldest_key_time));
|
2020-08-11 19:39:49 +00:00
|
|
|
ASSERT_EQ(100, oldest_key_time - mock_start_time);
|
2017-10-23 22:22:05 +00:00
|
|
|
|
2020-08-11 19:39:49 +00:00
|
|
|
env_->MockSleepForSeconds(150); // -> 450
|
2017-10-23 22:22:05 +00:00
|
|
|
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
|
|
ASSERT_EQ("2", FilesPerLevel());
|
|
|
|
ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
|
|
|
|
&oldest_key_time));
|
2020-08-11 19:39:49 +00:00
|
|
|
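// After compaction the estimate advances: the oldest surviving data is now 200 seconds past the mock start time.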
ASSERT_EQ(200, oldest_key_time - mock_start_time);
|
2017-10-23 22:22:05 +00:00
|
|
|
|
2020-08-11 19:39:49 +00:00
|
|
|
env_->MockSleepForSeconds(100); // -> 550
|
2017-10-23 22:22:05 +00:00
|
|
|
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
|
|
ASSERT_EQ("1", FilesPerLevel());
|
|
|
|
ASSERT_TRUE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
|
|
|
|
&oldest_key_time));
|
2020-08-11 19:39:49 +00:00
|
|
|
ASSERT_EQ(300, oldest_key_time - mock_start_time);
|
2017-10-23 22:22:05 +00:00
|
|
|
|
2020-08-11 19:39:49 +00:00
|
|
|
env_->MockSleepForSeconds(100); // -> 650
|
2017-10-23 22:22:05 +00:00
|
|
|
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
|
|
ASSERT_EQ("", FilesPerLevel());
|
|
|
|
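// With all files compacted away there is no data left, so the estimate is no longer available.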
ASSERT_FALSE(dbfull()->GetIntProperty(DB::Properties::kEstimateOldestKeyTime,
|
|
|
|
&oldest_key_time));
|
|
|
|
}
|
|
|
|
|
2018-03-02 01:50:54 +00:00
|
|
|
TEST_F(DBPropertiesTest, SstFilesSize) {
|
|
|
|
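// Listener that validates kTotalSstFilesSize and kLiveSstFilesSize from within the compaction-completed callback.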
struct TestListener : public EventListener {
|
|
|
|
void OnCompactionCompleted(DB* db,
|
|
|
|
const CompactionJobInfo& /*info*/) override {
|
|
|
|
assert(callback_triggered == false);
|
|
|
|
assert(size_before_compaction > 0);
|
|
|
|
callback_triggered = true;
|
|
|
|
uint64_t total_sst_size = 0;
|
|
|
|
uint64_t live_sst_size = 0;
|
|
|
|
bool ok = db->GetIntProperty(DB::Properties::kTotalSstFilesSize,
|
|
|
|
&total_sst_size);
|
|
|
|
ASSERT_TRUE(ok);
|
|
|
|
// total_sst_size includes files both before and after compaction.
|
|
|
|
ASSERT_GT(total_sst_size, size_before_compaction);
|
|
|
|
ok =
|
|
|
|
db->GetIntProperty(DB::Properties::kLiveSstFilesSize, &live_sst_size);
|
|
|
|
ASSERT_TRUE(ok);
|
|
|
|
// live_sst_size only includes files remaining after compaction.
|
|
|
|
ASSERT_GT(live_sst_size, 0);
|
|
|
|
ASSERT_LT(live_sst_size, size_before_compaction);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t size_before_compaction = 0;
|
|
|
|
bool callback_triggered = false;
|
|
|
|
};
|
|
|
|
std::shared_ptr<TestListener> listener = std::make_shared<TestListener>();
|
|
|
|
|
|
|
|
Options options;
|
Fix many tests to run with MEM_ENV and ENCRYPTED_ENV; Introduce a MemoryFileSystem class (#7566)
Summary:
This PR does a few things:
1. The MockFileSystem class was split out from the MockEnv. This change would theoretically allow a MockFileSystem to be used by other Environments as well (if we created a means of constructing one). The MockFileSystem implements a FileSystem in its entirety and does not rely on any Wrapper implementation.
2. Make the RocksDB test suite work when MOCK_ENV=1 and ENCRYPTED_ENV=1 are set. To accomplish this, a few things were needed:
- The tests that tried to use the "wrong" environment (Env::Default() instead of env_) were updated
- The MockFileSystem was changed to support the features it was missing or mishandled (such as recursively deleting files in a directory or supporting renaming of a directory).
3. Updated the test framework to have a ROCKSDB_GTEST_SKIP macro. This can be used to flag tests that are skipped. Currently, this defaults to doing nothing (marks the test as SUCCESS) but will mark the tests as SKIPPED when RocksDB is upgraded to a version of gtest that supports this (gtest-1.10).
I have run a full "make check" with MEM_ENV, ENCRYPTED_ENV, both, and neither under both MacOS and RedHat. A few tests were disabled/skipped for the MEM/ENCRYPTED cases. The error_handler_fs_test fails/hangs for MEM_ENV (presumably a timing problem) and I will introduce another PR/issue to track that problem. (I will also push a change to disable those tests soon). There is one more test in DBTest2 that also fails which I need to investigate or skip before this PR is merged.
Theoretically, this PR should also allow the test suite to run against an Env loaded from the registry, though I do not have one to try it with currently.
Finally, once this is accepted, it would be nice if there was a CircleCI job to run these tests on a checkin so this effort does not become stale. I do not know how to do that, so if someone could write that job, it would be appreciated :)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7566
Reviewed By: zhichao-cao
Differential Revision: D24408980
Pulled By: jay-zhuang
fbshipit-source-id: 911b1554a4d0da06fd51feca0c090a4abdcb4a5f
2020-10-27 17:31:34 +00:00
|
|
|
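// Use the test's Env (which may be a mock or encrypted Env) rather than Env::Default().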
options.env = CurrentOptions().env;
|
2018-03-02 01:50:54 +00:00
|
|
|
options.disable_auto_compactions = true;
|
|
|
|
options.listeners.push_back(listener);
|
|
|
|
Reopen(options);
|
|
|
|
|
|
|
|
for (int i = 0; i < 10; i++) {
|
2022-05-06 20:03:58 +00:00
|
|
|
ASSERT_OK(Put("key" + std::to_string(i), std::string(1000, 'v')));
|
2018-03-02 01:50:54 +00:00
|
|
|
}
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
for (int i = 0; i < 5; i++) {
|
2022-05-06 20:03:58 +00:00
|
|
|
ASSERT_OK(Delete("key" + std::to_string(i)));
|
2018-03-02 01:50:54 +00:00
|
|
|
}
|
|
|
|
ASSERT_OK(Flush());
|
2023-06-13 22:52:45 +00:00
|
|
|
|
2018-03-02 01:50:54 +00:00
|
|
|
uint64_t sst_size;
|
2023-06-13 22:52:45 +00:00
|
|
|
ASSERT_TRUE(
|
|
|
|
db_->GetIntProperty(DB::Properties::kTotalSstFilesSize, &sst_size));
|
2018-03-02 01:50:54 +00:00
|
|
|
ASSERT_GT(sst_size, 0);
|
|
|
|
listener->size_before_compaction = sst_size;
|
2023-06-13 22:52:45 +00:00
|
|
|
|
|
|
|
uint64_t obsolete_sst_size;
|
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kObsoleteSstFilesSize,
|
|
|
|
&obsolete_sst_size));
|
|
|
|
ASSERT_EQ(obsolete_sst_size, 0);
|
|
|
|
|
|
|
|
// Hold files from being deleted so we can test the property for the size of obsolete
|
|
|
|
// SST files.
|
|
|
|
ASSERT_OK(db_->DisableFileDeletions());
|
|
|
|
|
2018-03-02 01:50:54 +00:00
|
|
|
// Compact to clean all keys and trigger listener.
|
|
|
|
ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
|
|
|
|
ASSERT_TRUE(listener->callback_triggered);
|
2023-06-13 22:52:45 +00:00
|
|
|
|
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kObsoleteSstFilesSize,
|
|
|
|
&obsolete_sst_size));
|
|
|
|
ASSERT_EQ(obsolete_sst_size, sst_size);
|
|
|
|
|
|
|
|
// Let the obsolete files be deleted.
|
|
|
|
ASSERT_OK(db_->EnableFileDeletions());
|
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kObsoleteSstFilesSize,
|
|
|
|
&obsolete_sst_size));
|
|
|
|
ASSERT_EQ(obsolete_sst_size, 0);
|
2018-03-02 01:50:54 +00:00
|
|
|
}
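// A minimal sketch (not part of the original test) of how an application might
// poll these size properties on an open DB; PrintSstSizes is a hypothetical
// helper name.
//
// void PrintSstSizes(DB* db) {
//   uint64_t total = 0, live = 0, obsolete = 0;
//   db->GetIntProperty(DB::Properties::kTotalSstFilesSize, &total);
//   db->GetIntProperty(DB::Properties::kLiveSstFilesSize, &live);
//   db->GetIntProperty(DB::Properties::kObsoleteSstFilesSize, &obsolete);
//   fprintf(stderr, "total=%llu live=%llu obsolete=%llu\n",
//           static_cast<unsigned long long>(total),
//           static_cast<unsigned long long>(live),
//           static_cast<unsigned long long>(obsolete));
// }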
|
|
|
|
|
2018-11-06 03:28:21 +00:00
|
|
|
TEST_F(DBPropertiesTest, MinObsoleteSstNumberToKeep) {
|
|
|
|
class TestListener : public EventListener {
|
|
|
|
public:
|
|
|
|
void OnTableFileCreated(const TableFileCreationInfo& info) override {
|
|
|
|
if (info.reason == TableFileCreationReason::kCompaction) {
|
|
|
|
// Verify the property indicates that SSTs created by a running
|
|
|
|
// compaction cannot be deleted.
|
|
|
|
uint64_t created_file_num;
|
|
|
|
FileType created_file_type;
|
|
|
|
std::string filename =
|
|
|
|
info.file_path.substr(info.file_path.rfind('/') + 1);
|
|
|
|
ASSERT_TRUE(
|
|
|
|
ParseFileName(filename, &created_file_num, &created_file_type));
|
|
|
|
ASSERT_EQ(kTableFile, created_file_type);
|
|
|
|
|
|
|
|
uint64_t keep_sst_lower_bound;
|
|
|
|
ASSERT_TRUE(
|
|
|
|
db_->GetIntProperty(DB::Properties::kMinObsoleteSstNumberToKeep,
|
|
|
|
&keep_sst_lower_bound));
|
|
|
|
|
|
|
|
ASSERT_LE(keep_sst_lower_bound, created_file_num);
|
|
|
|
validated_ = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void SetDB(DB* db) { db_ = db; }
|
|
|
|
|
|
|
|
int GetNumCompactions() { return num_compactions_; }
|
|
|
|
|
|
|
|
// True if we've verified the property for at least one output file
|
|
|
|
bool Validated() { return validated_; }
|
|
|
|
|
|
|
|
private:
|
|
|
|
int num_compactions_ = 0;
|
|
|
|
bool validated_ = false;
|
|
|
|
DB* db_ = nullptr;
|
|
|
|
};
|
|
|
|
|
|
|
|
const int kNumL0Files = 4;
|
|
|
|
|
|
|
|
std::shared_ptr<TestListener> listener = std::make_shared<TestListener>();
|
|
|
|
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.listeners.push_back(listener);
|
|
|
|
options.level0_file_num_compaction_trigger = kNumL0Files;
|
|
|
|
DestroyAndReopen(options);
|
|
|
|
listener->SetDB(db_);
|
|
|
|
|
|
|
|
for (int i = 0; i < kNumL0Files; ++i) {
|
|
|
|
// Make sure they overlap in keyspace to prevent trivial move
|
2020-10-02 00:45:52 +00:00
|
|
|
ASSERT_OK(Put("key1", "val"));
|
|
|
|
ASSERT_OK(Put("key2", "val"));
|
|
|
|
ASSERT_OK(Flush());
|
2018-11-06 03:28:21 +00:00
|
|
|
}
|
2020-10-02 00:45:52 +00:00
|
|
|
ASSERT_OK(dbfull()->TEST_WaitForCompact());
|
2018-11-06 03:28:21 +00:00
|
|
|
ASSERT_TRUE(listener->Validated());
|
|
|
|
}
|
|
|
|
|
2022-06-28 20:52:35 +00:00
|
|
|
TEST_F(DBPropertiesTest, BlobCacheProperties) {
|
|
|
|
Options options;
|
|
|
|
uint64_t value;
|
|
|
|
|
|
|
|
options.env = CurrentOptions().env;
|
|
|
|
|
|
|
|
// Test with empty blob cache.
|
|
|
|
constexpr size_t kCapacity = 100;
|
|
|
|
LRUCacheOptions co;
|
|
|
|
co.capacity = kCapacity;
|
|
|
|
co.num_shard_bits = 0;
|
|
|
|
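// Disable metadata charging so cache usage matches the inserted charges exactly.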
co.metadata_charge_policy = kDontChargeCacheMetadata;
|
|
|
|
auto blob_cache = NewLRUCache(co);
|
|
|
|
options.blob_cache = blob_cache;
|
|
|
|
|
|
|
|
Reopen(options);
|
|
|
|
|
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
|
|
|
|
ASSERT_EQ(kCapacity, value);
|
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
|
|
|
|
ASSERT_EQ(0, value);
|
|
|
|
ASSERT_TRUE(
|
|
|
|
db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
|
|
|
|
ASSERT_EQ(0, value);
|
|
|
|
|
|
|
|
// Insert an unpinned blob into the cache and check the size.
|
|
|
|
constexpr size_t kSize1 = 70;
|
Major Cache refactoring, CPU efficiency improvement (#10975)
Summary:
This is several refactorings bundled into one to avoid having to incrementally re-modify uses of Cache several times. Overall, there are breaking changes to the Cache class, and it becomes more of a low-level interface for implementing caches, especially block cache. New internal APIs make using Cache cleaner than before, and more insulated from block cache evolution. Hopefully, this is the last really big block cache refactoring, because it rather effectively decouples the implementations from the uses. This change also removes the EXPERIMENTAL designation on the SecondaryCache support in Cache. It seems reasonably mature at this point but still subject to change/evolution (as I warn in the API docs for Cache).
The high-level motivation for this refactoring is to minimize code duplication / compounding complexity in adding SecondaryCache support to HyperClockCache (in a later PR). Other benefits listed below.
* static_cast lines of code +29 -35 (net removed 6)
* reinterpret_cast lines of code +6 -32 (net removed 26)
## cache.h and secondary_cache.h
* Always use CacheItemHelper with entries instead of just a Deleter. There are several motivations / justifications:
* Simpler for implementations to deal with just one Insert and one Lookup.
* Simpler and more efficient implementation because we don't have to track which entries are using helpers and which are using deleters
* Gets rid of hack to classify cache entries by their deleter. Instead, the CacheItemHelper includes a CacheEntryRole. This simplifies a lot of code (cache_entry_roles.h almost eliminated). Fixes https://github.com/facebook/rocksdb/issues/9428.
* Makes it trivial to adjust SecondaryCache behavior based on kind of block (e.g. don't re-compress filter blocks).
* It is arguably less convenient for many direct users of Cache, but direct users of Cache are now rare with introduction of typed_cache.h (below).
* I considered and rejected an alternative approach in which we reduce customizability by assuming each secondary cache compatible value starts with a Slice referencing the uncompressed block contents (already true or mostly true), but we apparently intend to stack secondary caches. Saving an entry from a compressed secondary to a lower tier requires custom handling offered by SaveToCallback, etc.
* Make CreateCallback part of the helper and introduce CreateContext to work with it (alternative to https://github.com/facebook/rocksdb/issues/10562). This cleans up the interface while still allowing context to be provided for loading/parsing values into primary cache. This model works for async lookup in BlockBasedTable reader (reader owns a CreateContext) under the assumption that it always waits on secondary cache operations to finish. (Otherwise, the CreateContext could be destroyed while async operation depending on it continues.) This likely contributes most to the observed performance improvement because it saves an std::function backed by a heap allocation.
* Use char* for serialized data, e.g. in SaveToCallback, where void* was confusingly used. (We use `char*` for serialized byte data all over RocksDB, with many advantages over `void*`. `memcpy` etc. are legacy APIs that should not be mimicked.)
* Add a type alias Cache::ObjectPtr = void*, so that we can better indicate the intent of the void* when it is to be the object associated with a Cache entry. Related: started (but did not complete) a refactoring to move away from "value" of a cache entry toward "object" or "obj". (It is confusing to call Cache a key-value store (like DB) when it is really storing arbitrary in-memory objects, not byte strings.)
* Remove unnecessary key param from DeleterFn. This is good for efficiency in HyperClockCache, which does not directly store the cache key in memory. (Alternative to https://github.com/facebook/rocksdb/issues/10774)
* Add allocator to Cache DeleterFn. This is a kind of future-proofing change in case we get more serious about using the Cache allocator for memory tracked by the Cache. Right now, only the uncompressed block contents are allocated using the allocator, and a pointer to that allocator is saved as part of the cached object so that the deleter can use it. (See CacheAllocationPtr.) If in the future we are able to "flatten out" our Cache objects some more, it would be good not to have to track the allocator as part of each object.
* Removes legacy `ApplyToAllCacheEntries` and changes `ApplyToAllEntries` signature for Deleter->CacheItemHelper change.
## typed_cache.h
Adds various "typed" interfaces to the Cache as internal APIs, so that most uses of Cache can use simple type safe code without casting and without explicit deleters, etc. Almost all of the non-test, non-glue code uses of Cache have been migrated. (Follow-up work: CompressedSecondaryCache deserves deeper attention to migrate.) This change expands RocksDB's internal usage of metaprogramming and SFINAE (https://en.cppreference.com/w/cpp/language/sfinae).
The existing usages of Cache are divided up at a high level into these new interfaces. See updated existing uses of Cache for examples of how these are used.
* PlaceholderCacheInterface - Used for making cache reservations, with entries that have a charge but no value.
* BasicTypedCacheInterface<TValue> - Used for primary cache storage of objects of type TValue, which can be cleaned up with std::default_delete<TValue>. The role is provided by TValue::kCacheEntryRole or given in an optional template parameter.
* FullTypedCacheInterface<TValue, TCreateContext> - Used for secondary cache compatible storage of objects of type TValue. In addition to BasicTypedCacheInterface constraints, we require TValue::ContentSlice() to return persistable data. This simplifies usage for the normal case of simple secondary cache compatibility (can give you a Slice to the data already in memory). In addition to TCreateContext performing the role of Cache::CreateContext, it is also expected to provide a factory function for creating TValue.
* For each of these, there's a "Shared" version (e.g. FullTypedSharedCacheInterface) that holds a shared_ptr to the Cache, rather than assuming external ownership by holding only a raw `Cache*`.
These interfaces introduce specific handle types for each interface instantiation, so that it's easy to see what kind of object is controlled by a handle. (Ultimately, this might not be worth the extra complexity, but it seems OK so far.)
Note: I attempted to make the cache 'charge' automatically inferred from the cache object type, such as by expecting an ApproximateMemoryUsage() function, but this is not so clean because there are cases where we need to compute the charge ahead of time and don't want to re-compute it.
## block_cache.h
This header is essentially the replacement for the old block_like_traits.h. It includes various things to support block cache access with typed_cache.h for block-based table.
## block_based_table_reader.cc
Before this change, accessing the block cache here was an awkward mix of static polymorphism (template TBlocklike) and switch-case on a dynamic BlockType value. This change mostly unifies on static polymorphism, relying on minor hacks in block_cache.h to distinguish variants of Block. We still check BlockType in some places (especially for stats, which could be improved in follow-up work) but at least the BlockType is a static constant from the template parameter. (No more awkward partial redundancy between static and dynamic info.) This likely contributes to the overall performance improvement, but hasn't been tested in isolation.
The other key source of simplification here is a more unified system of creating block cache objects: for directly populating from primary cache and for promotion from secondary cache. Both use BlockCreateContext, for context and for factory functions.
## block_based_table_builder.cc, cache_dump_load_impl.cc
Before this change, warming caches was super ugly code. Both of these source files had switch statements to basically transition from the dynamic BlockType world to the static TBlocklike world. None of that mess is needed anymore as there's a new, untyped WarmInCache function that handles all the details just as promotion from SecondaryCache would. (Fixes `TODO akanksha: Dedup below code` in block_based_table_builder.cc.)
## Everything else
Mostly just updating Cache users to use new typed APIs when reasonably possible, or changed Cache APIs when not.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10975
Test Plan:
tests updated
Performance test setup similar to https://github.com/facebook/rocksdb/issues/10626 (by cache size, LRUCache when not "hyper" for HyperClockCache):
34MB 1thread base.hyper -> kops/s: 0.745 io_bytes/op: 2.52504e+06 miss_ratio: 0.140906 max_rss_mb: 76.4844
34MB 1thread new.hyper -> kops/s: 0.751 io_bytes/op: 2.5123e+06 miss_ratio: 0.140161 max_rss_mb: 79.3594
34MB 1thread base -> kops/s: 0.254 io_bytes/op: 1.36073e+07 miss_ratio: 0.918818 max_rss_mb: 45.9297
34MB 1thread new -> kops/s: 0.252 io_bytes/op: 1.36157e+07 miss_ratio: 0.918999 max_rss_mb: 44.1523
34MB 32thread base.hyper -> kops/s: 7.272 io_bytes/op: 2.88323e+06 miss_ratio: 0.162532 max_rss_mb: 516.602
34MB 32thread new.hyper -> kops/s: 7.214 io_bytes/op: 2.99046e+06 miss_ratio: 0.168818 max_rss_mb: 518.293
34MB 32thread base -> kops/s: 3.528 io_bytes/op: 1.35722e+07 miss_ratio: 0.914691 max_rss_mb: 264.926
34MB 32thread new -> kops/s: 3.604 io_bytes/op: 1.35744e+07 miss_ratio: 0.915054 max_rss_mb: 264.488
233MB 1thread base.hyper -> kops/s: 53.909 io_bytes/op: 2552.35 miss_ratio: 0.0440566 max_rss_mb: 241.984
233MB 1thread new.hyper -> kops/s: 62.792 io_bytes/op: 2549.79 miss_ratio: 0.044043 max_rss_mb: 241.922
233MB 1thread base -> kops/s: 1.197 io_bytes/op: 2.75173e+06 miss_ratio: 0.103093 max_rss_mb: 241.559
233MB 1thread new -> kops/s: 1.199 io_bytes/op: 2.73723e+06 miss_ratio: 0.10305 max_rss_mb: 240.93
233MB 32thread base.hyper -> kops/s: 1298.69 io_bytes/op: 2539.12 miss_ratio: 0.0440307 max_rss_mb: 371.418
233MB 32thread new.hyper -> kops/s: 1421.35 io_bytes/op: 2538.75 miss_ratio: 0.0440307 max_rss_mb: 347.273
233MB 32thread base -> kops/s: 9.693 io_bytes/op: 2.77304e+06 miss_ratio: 0.103745 max_rss_mb: 569.691
233MB 32thread new -> kops/s: 9.75 io_bytes/op: 2.77559e+06 miss_ratio: 0.103798 max_rss_mb: 552.82
1597MB 1thread base.hyper -> kops/s: 58.607 io_bytes/op: 1449.14 miss_ratio: 0.0249324 max_rss_mb: 1583.55
1597MB 1thread new.hyper -> kops/s: 69.6 io_bytes/op: 1434.89 miss_ratio: 0.0247167 max_rss_mb: 1584.02
1597MB 1thread base -> kops/s: 60.478 io_bytes/op: 1421.28 miss_ratio: 0.024452 max_rss_mb: 1589.45
1597MB 1thread new -> kops/s: 63.973 io_bytes/op: 1416.07 miss_ratio: 0.0243766 max_rss_mb: 1589.24
1597MB 32thread base.hyper -> kops/s: 1436.2 io_bytes/op: 1357.93 miss_ratio: 0.0235353 max_rss_mb: 1692.92
1597MB 32thread new.hyper -> kops/s: 1605.03 io_bytes/op: 1358.04 miss_ratio: 0.023538 max_rss_mb: 1702.78
1597MB 32thread base -> kops/s: 280.059 io_bytes/op: 1350.34 miss_ratio: 0.023289 max_rss_mb: 1675.36
1597MB 32thread new -> kops/s: 283.125 io_bytes/op: 1351.05 miss_ratio: 0.0232797 max_rss_mb: 1703.83
Almost uniformly improving over base revision, especially for hot paths with HyperClockCache, up to 12% higher throughput seen (1597MB, 32thread, hyper). The improvement for that is likely coming from much simplified code for providing context for secondary cache promotion (CreateCallback/CreateContext), and possibly from less branching in block_based_table_reader. And likely a small improvement from not reconstituting key for DeleterFn.
Reviewed By: anand1976
Differential Revision: D42417818
Pulled By: pdillinger
fbshipit-source-id: f86bfdd584dce27c028b151ba56818ad14f7a432
2023-01-11 22:20:40 +00:00
|
|
|
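// The post-refactoring Cache::Insert takes a CacheItemHelper; the no-op helper suffices for these dummy (nullptr-value) entries.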
ASSERT_OK(blob_cache->Insert("blob1", nullptr /*value*/,
|
|
|
|
&kNoopCacheItemHelper, kSize1));
|
2022-06-28 20:52:35 +00:00
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
|
|
|
|
ASSERT_EQ(kCapacity, value);
|
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
|
|
|
|
ASSERT_EQ(kSize1, value);
|
|
|
|
ASSERT_TRUE(
|
|
|
|
db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
|
|
|
|
ASSERT_EQ(0, value);
|
|
|
|
|
|
|
|
// Insert a pinned blob into the cache and check the size.
|
|
|
|
constexpr size_t kSize2 = 60;
|
|
|
|
Cache::Handle* blob2 = nullptr;
|
Major Cache refactoring, CPU efficiency improvement (#10975)
Summary:
This is several refactorings bundled into one to avoid having to incrementally re-modify uses of Cache several times. Overall, there are breaking changes to Cache class, and it becomes more of low-level interface for implementing caches, especially block cache. New internal APIs make using Cache cleaner than before, and more insulated from block cache evolution. Hopefully, this is the last really big block cache refactoring, because of rather effectively decoupling the implementations from the uses. This change also removes the EXPERIMENTAL designation on the SecondaryCache support in Cache. It seems reasonably mature at this point but still subject to change/evolution (as I warn in the API docs for Cache).
The high-level motivation for this refactoring is to minimize code duplication / compounding complexity in adding SecondaryCache support to HyperClockCache (in a later PR). Other benefits listed below.
* static_cast lines of code +29 -35 (net removed 6)
* reinterpret_cast lines of code +6 -32 (net removed 26)
## cache.h and secondary_cache.h
* Always use CacheItemHelper with entries instead of just a Deleter. There are several motivations / justifications:
* Simpler for implementations to deal with just one Insert and one Lookup.
* Simpler and more efficient implementation because we don't have to track which entries are using helpers and which are using deleters
* Gets rid of hack to classify cache entries by their deleter. Instead, the CacheItemHelper includes a CacheEntryRole. This simplifies a lot of code (cache_entry_roles.h almost eliminated). Fixes https://github.com/facebook/rocksdb/issues/9428.
* Makes it trivial to adjust SecondaryCache behavior based on kind of block (e.g. don't re-compress filter blocks).
* It is arguably less convenient for many direct users of Cache, but direct users of Cache are now rare with introduction of typed_cache.h (below).
* I considered and rejected an alternative approach in which we reduce customizability by assuming each secondary cache compatible value starts with a Slice referencing the uncompressed block contents (already true or mostly true), but we apparently intend to stack secondary caches. Saving an entry from a compressed secondary to a lower tier requires custom handling offered by SaveToCallback, etc.
* Make CreateCallback part of the helper and introduce CreateContext to work with it (alternative to https://github.com/facebook/rocksdb/issues/10562). This cleans up the interface while still allowing context to be provided for loading/parsing values into primary cache. This model works for async lookup in BlockBasedTable reader (reader owns a CreateContext) under the assumption that it always waits on secondary cache operations to finish. (Otherwise, the CreateContext could be destroyed while async operation depending on it continues.) This likely contributes most to the observed performance improvement because it saves an std::function backed by a heap allocation.
* Use char* for serialized data, e.g. in SaveToCallback, where void* was confusingly used. (We use `char*` for serialized byte data all over RocksDB, with many advantages over `void*`. `memcpy` etc. are legacy APIs that should not be mimicked.)
* Add a type alias Cache::ObjectPtr = void*, so that we can better indicate the intent of the void* when it is to be the object associated with a Cache entry. Related: started (but did not complete) a refactoring to move away from "value" of a cache entry toward "object" or "obj". (It is confusing to call Cache a key-value store (like DB) when it is really storing arbitrary in-memory objects, not byte strings.)
* Remove unnecessary key param from DeleterFn. This is good for efficiency in HyperClockCache, which does not directly store the cache key in memory. (Alternative to https://github.com/facebook/rocksdb/issues/10774)
* Add allocator to Cache DeleterFn. This is a kind of future-proofing change in case we get more serious about using the Cache allocator for memory tracked by the Cache. Right now, only the uncompressed block contents are allocated using the allocator, and a pointer to that allocator is saved as part of the cached object so that the deleter can use it. (See CacheAllocationPtr.) If in the future we are able to "flatten out" our Cache objects some more, it would be good not to have to track the allocator as part of each object.
* Removes legacy `ApplyToAllCacheEntries` and changes `ApplyToAllEntries` signature for Deleter->CacheItemHelper change.
## typed_cache.h
Adds various "typed" interfaces to the Cache as internal APIs, so that most uses of Cache can use simple type safe code without casting and without explicit deleters, etc. Almost all of the non-test, non-glue code uses of Cache have been migrated. (Follow-up work: CompressedSecondaryCache deserves deeper attention to migrate.) This change expands RocksDB's internal usage of metaprogramming and SFINAE (https://en.cppreference.com/w/cpp/language/sfinae).
The existing usages of Cache are divided up at a high level into these new interfaces. See updated existing uses of Cache for examples of how these are used.
* PlaceholderCacheInterface - Used for making cache reservations, with entries that have a charge but no value.
* BasicTypedCacheInterface<TValue> - Used for primary cache storage of objects of type TValue, which can be cleaned up with std::default_delete<TValue>. The role is provided by TValue::kCacheEntryRole or given in an optional template parameter.
* FullTypedCacheInterface<TValue, TCreateContext> - Used for secondary cache compatible storage of objects of type TValue. In addition to BasicTypedCacheInterface constraints, we require TValue::ContentSlice() to return persistable data. This simplifies usage for the normal case of simple secondary cache compatibility (can give you a Slice to the data already in memory). In addition to TCreateContext performing the role of Cache::CreateContext, it is also expected to provide a factory function for creating TValue.
* For each of these, there's a "Shared" version (e.g. FullTypedSharedCacheInterface) that holds a shared_ptr to the Cache, rather than assuming external ownership by holding only a raw `Cache*`.
These interfaces introduce specific handle types for each interface instantiation, so that it's easy to see what kind of object is controlled by a handle. (Ultimately, this might not be worth the extra complexity, but it seems OK so far.)
Note: I attempted to make the cache 'charge' automatically inferred from the cache object type, such as by expecting an ApproximateMemoryUsage() function, but this is not so clean because there are cases where we need to compute the charge ahead of time and don't want to re-compute it.
## block_cache.h
This header is essentially the replacement for the old block_like_traits.h. It includes various things to support block cache access with typed_cache.h for block-based table.
## block_based_table_reader.cc
Before this change, accessing the block cache here was an awkward mix of static polymorphism (template TBlocklike) and switch-case on a dynamic BlockType value. This change mostly unifies on static polymorphism, relying on minor hacks in block_cache.h to distinguish variants of Block. We still check BlockType in some places (especially for stats, which could be improved in follow-up work) but at least the BlockType is a static constant from the template parameter. (No more awkward partial redundancy between static and dynamic info.) This likely contributes to the overall performance improvement, but hasn't been tested in isolation.
The other key source of simplification here is a more unified system of creating block cache objects: for directly populating from primary cache and for promotion from secondary cache. Both use BlockCreateContext, for context and for factory functions.
## block_based_table_builder.cc, cache_dump_load_impl.cc
Before this change, warming caches was super ugly code. Both of these source files had switch statements to basically transition from the dynamic BlockType world to the static TBlocklike world. None of that mess is needed anymore as there's a new, untyped WarmInCache function that handles all the details just as promotion from SecondaryCache would. (Fixes `TODO akanksha: Dedup below code` in block_based_table_builder.cc.)
## Everything else
Mostly just updating Cache users to use new typed APIs when reasonably possible, or changed Cache APIs when not.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10975
Test Plan:
tests updated
Performance test setup similar to https://github.com/facebook/rocksdb/issues/10626 (by cache size, LRUCache when not "hyper" for HyperClockCache):
34MB 1thread base.hyper -> kops/s: 0.745 io_bytes/op: 2.52504e+06 miss_ratio: 0.140906 max_rss_mb: 76.4844
34MB 1thread new.hyper -> kops/s: 0.751 io_bytes/op: 2.5123e+06 miss_ratio: 0.140161 max_rss_mb: 79.3594
34MB 1thread base -> kops/s: 0.254 io_bytes/op: 1.36073e+07 miss_ratio: 0.918818 max_rss_mb: 45.9297
34MB 1thread new -> kops/s: 0.252 io_bytes/op: 1.36157e+07 miss_ratio: 0.918999 max_rss_mb: 44.1523
34MB 32thread base.hyper -> kops/s: 7.272 io_bytes/op: 2.88323e+06 miss_ratio: 0.162532 max_rss_mb: 516.602
34MB 32thread new.hyper -> kops/s: 7.214 io_bytes/op: 2.99046e+06 miss_ratio: 0.168818 max_rss_mb: 518.293
34MB 32thread base -> kops/s: 3.528 io_bytes/op: 1.35722e+07 miss_ratio: 0.914691 max_rss_mb: 264.926
34MB 32thread new -> kops/s: 3.604 io_bytes/op: 1.35744e+07 miss_ratio: 0.915054 max_rss_mb: 264.488
233MB 1thread base.hyper -> kops/s: 53.909 io_bytes/op: 2552.35 miss_ratio: 0.0440566 max_rss_mb: 241.984
233MB 1thread new.hyper -> kops/s: 62.792 io_bytes/op: 2549.79 miss_ratio: 0.044043 max_rss_mb: 241.922
233MB 1thread base -> kops/s: 1.197 io_bytes/op: 2.75173e+06 miss_ratio: 0.103093 max_rss_mb: 241.559
233MB 1thread new -> kops/s: 1.199 io_bytes/op: 2.73723e+06 miss_ratio: 0.10305 max_rss_mb: 240.93
233MB 32thread base.hyper -> kops/s: 1298.69 io_bytes/op: 2539.12 miss_ratio: 0.0440307 max_rss_mb: 371.418
233MB 32thread new.hyper -> kops/s: 1421.35 io_bytes/op: 2538.75 miss_ratio: 0.0440307 max_rss_mb: 347.273
233MB 32thread base -> kops/s: 9.693 io_bytes/op: 2.77304e+06 miss_ratio: 0.103745 max_rss_mb: 569.691
233MB 32thread new -> kops/s: 9.75 io_bytes/op: 2.77559e+06 miss_ratio: 0.103798 max_rss_mb: 552.82
1597MB 1thread base.hyper -> kops/s: 58.607 io_bytes/op: 1449.14 miss_ratio: 0.0249324 max_rss_mb: 1583.55
1597MB 1thread new.hyper -> kops/s: 69.6 io_bytes/op: 1434.89 miss_ratio: 0.0247167 max_rss_mb: 1584.02
1597MB 1thread base -> kops/s: 60.478 io_bytes/op: 1421.28 miss_ratio: 0.024452 max_rss_mb: 1589.45
1597MB 1thread new -> kops/s: 63.973 io_bytes/op: 1416.07 miss_ratio: 0.0243766 max_rss_mb: 1589.24
1597MB 32thread base.hyper -> kops/s: 1436.2 io_bytes/op: 1357.93 miss_ratio: 0.0235353 max_rss_mb: 1692.92
1597MB 32thread new.hyper -> kops/s: 1605.03 io_bytes/op: 1358.04 miss_ratio: 0.023538 max_rss_mb: 1702.78
1597MB 32thread base -> kops/s: 280.059 io_bytes/op: 1350.34 miss_ratio: 0.023289 max_rss_mb: 1675.36
1597MB 32thread new -> kops/s: 283.125 io_bytes/op: 1351.05 miss_ratio: 0.0232797 max_rss_mb: 1703.83
Results improve almost uniformly over the base revision, especially for hot paths with HyperClockCache, with up to 12% higher throughput seen (1597MB, 32thread, hyper). That improvement likely comes from the much simplified code for providing context for secondary cache promotion (CreateCallback/CreateContext), possibly from less branching in block_based_table_reader, and likely a small amount from not reconstituting the key for DeleterFn.
Reviewed By: anand1976
Differential Revision: D42417818
Pulled By: pdillinger
fbshipit-source-id: f86bfdd584dce27c028b151ba56818ad14f7a432
2023-01-11 22:20:40 +00:00
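For a concrete sense of the "changed Cache APIs" mentioned above, the updated tests in this file insert entries by passing a CacheItemHelper rather than a bare deleter. The following is only a minimal sketch mirroring those calls; `cache` is a placeholder for any Cache instance, and a real block cache entry would pass an object pointer and a helper carrying its CacheEntryRole instead of nullptr / kNoopCacheItemHelper.

  // Sketch only: shape of the post-refactor Insert call used by the tests below.
  Cache::Handle* handle = nullptr;
  ASSERT_OK(cache->Insert("key", nullptr /*object*/, &kNoopCacheItemHelper,
                          /*charge=*/50, &handle));
  // ... the entry stays pinned until the handle is released ...
  cache->Release(handle);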
  ASSERT_OK(blob_cache->Insert("blob2", nullptr /*value*/,
                               &kNoopCacheItemHelper, kSize2, &blob2));
  ASSERT_NE(nullptr, blob2);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  // blob1 is evicted.
  ASSERT_EQ(kSize2, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(kSize2, value);

  // Insert another pinned blob to make the cache over-sized.
  constexpr size_t kSize3 = 80;
  Cache::Handle* blob3 = nullptr;
  ASSERT_OK(blob_cache->Insert("blob3", nullptr /*value*/,
                               &kNoopCacheItemHelper, kSize3, &blob3));
  ASSERT_NE(nullptr, blob3);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  ASSERT_EQ(kSize2 + kSize3, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(kSize2 + kSize3, value);

  // Check size after release.
  blob_cache->Release(blob2);
  blob_cache->Release(blob3);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlobCacheUsage, &value));
  // blob2 will be evicted, while blob3 remains in cache after release.
  ASSERT_EQ(kSize3, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlobCachePinnedUsage, &value));
  ASSERT_EQ(0, value);
}

TEST_F(DBPropertiesTest, BlockCacheProperties) {
  Options options;
  uint64_t value;

Fix many tests to run with MEM_ENV and ENCRYPTED_ENV; Introduce a MemoryFileSystem class (#7566)
Summary:
This PR does a few things:
1. The MockFileSystem class was split out from the MockEnv. This change would theoretically allow a MockFileSystem to be used by other Environments as well (if we created a means of constructing one). The MockFileSystem implements a FileSystem in its entirety and does not rely on any Wrapper implementation.
2. Make the RocksDB test suite work when MOCK_ENV=1 and ENCRYPTED_ENV=1 are set. To accomplish this, a few things were needed:
- The tests that tried to use the "wrong" environment (Env::Default() instead of env_) were updated
- The MockFileSystem was changed to support the features it was missing or mishandled (such as recursively deleting files in a directory or supporting renaming of a directory).
3. Updated the test framework to have a ROCKSDB_GTEST_SKIP macro. This can be used to flag tests that are skipped. Currently, it defaults to doing nothing (the test is marked as SUCCESS), but it will mark tests as SKIPPED once RocksDB is upgraded to a version of gtest that supports this (gtest-1.10). See the illustrative comment after this summary.
I have run a full "make check" with MEM_ENV, ENCRYPTED_ENV, both, and neither under both MacOS and RedHat. A few tests were disabled/skipped for the MEM/ENCRYPTED cases. The error_handler_fs_test fails/hangs for MEM_ENV (presumably a timing problem), and I will introduce another PR/issue to track that problem. (I will also push a change to disable those tests soon.) There is one more test in DBTest2 that also fails, which I need to investigate or skip before this PR is merged.
Theoretically, this PR should also allow the test suite to run against an Env loaded from the registry, though I do not currently have one to try it with.
Finally, once this is accepted, it would be nice if there were a CircleCI job to run these tests on each check-in so this effort does not become stale. I do not know how to do that, so if someone could write that job, it would be appreciated :)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7566
Reviewed By: zhichao-cao
Differential Revision: D24408980
Pulled By: jay-zhuang
fbshipit-source-id: 911b1554a4d0da06fd51feca0c090a4abdcb4a5f
2020-10-27 17:31:34 +00:00
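  // Illustrative sketch (not part of the original test): the ROCKSDB_GTEST_SKIP
  // macro introduced by the commit above is meant to be used roughly like
  //
  //   if (!EnvSupportsThisTest(env_)) {  // hypothetical predicate
  //     ROCKSDB_GTEST_SKIP("unsupported environment for this test");
  //     return;
  //   }
  //
  // where, per the summary above, the macro currently records the test as
  // SUCCESS and will record it as SKIPPED once a gtest version that supports
  // skipping (gtest-1.10) is in use.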
  options.env = CurrentOptions().env;

  // Block cache properties are not available for tables other than
  // block-based table.
  options.table_factory.reset(NewPlainTableFactory());
  Reopen(options);
  ASSERT_FALSE(
      db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  ASSERT_FALSE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  ASSERT_FALSE(
      db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));

  options.table_factory.reset(NewCuckooTableFactory());
  Reopen(options);
  ASSERT_FALSE(
      db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  ASSERT_FALSE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  ASSERT_FALSE(
      db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));

  // Block cache properties are not available if block cache is not used.
  BlockBasedTableOptions table_options;
  table_options.no_block_cache = true;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_FALSE(
      db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  ASSERT_FALSE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  ASSERT_FALSE(
      db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));

  // Test with empty block cache.
  constexpr size_t kCapacity = 100;
  LRUCacheOptions co;
  co.capacity = kCapacity;
  co.num_shard_bits = 0;
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  auto block_cache = NewLRUCache(co);
  table_options.block_cache = block_cache;
  table_options.no_block_cache = false;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  ASSERT_EQ(0, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  ASSERT_EQ(0, value);

  // Insert unpinned item to the cache and check size.
  constexpr size_t kSize1 = 50;
  ASSERT_OK(block_cache->Insert("item1", nullptr /*value*/,
                                &kNoopCacheItemHelper, kSize1));
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  ASSERT_EQ(kSize1, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  ASSERT_EQ(0, value);

  // Insert pinned item to the cache and check size.
  constexpr size_t kSize2 = 30;
  Cache::Handle* item2 = nullptr;
  ASSERT_OK(block_cache->Insert("item2", nullptr /*value*/,
                                &kNoopCacheItemHelper, kSize2, &item2));
  ASSERT_NE(nullptr, item2);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
  ASSERT_EQ(kCapacity, value);
  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
  ASSERT_EQ(kSize1 + kSize2, value);
  ASSERT_TRUE(
      db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
  ASSERT_EQ(kSize2, value);

  // Insert another pinned item to make the cache over-sized.
  constexpr size_t kSize3 = 80;
  Cache::Handle* item3 = nullptr;
ASSERT_OK(block_cache->Insert("item3", nullptr /*value*/,
|
|
|
|
&kNoopCacheItemHelper, kSize3, &item3));
|
2018-04-19 04:35:12 +00:00
|
|
|
ASSERT_NE(nullptr, item2);
|
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
|
|
|
|
ASSERT_EQ(kCapacity, value);
|
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
|
|
|
|
// Item 1 is evicted.
|
|
|
|
ASSERT_EQ(kSize2 + kSize3, value);
|
|
|
|
ASSERT_TRUE(
|
|
|
|
db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
|
|
|
|
ASSERT_EQ(kSize2 + kSize3, value);
|
|
|
|
|
|
|
|
// Check size after release.
|
|
|
|
block_cache->Release(item2);
|
|
|
|
block_cache->Release(item3);
|
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheCapacity, &value));
|
|
|
|
ASSERT_EQ(kCapacity, value);
|
|
|
|
ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kBlockCacheUsage, &value));
|
|
|
|
// item2 will be evicted, while item3 remains in cache after release.
|
|
|
|
ASSERT_EQ(kSize3, value);
|
|
|
|
ASSERT_TRUE(
|
|
|
|
db_->GetIntProperty(DB::Properties::kBlockCachePinnedUsage, &value));
|
|
|
|
ASSERT_EQ(0, value);
|
|
|
|
}
|
|
|
|
|
2021-10-20 20:15:33 +00:00
|
|
|
TEST_F(DBPropertiesTest, GetMapPropertyDbStats) {
|
|
|
|
auto mock_clock = std::make_shared<MockSystemClock>(env_->GetSystemClock());
|
|
|
|
CompositeEnvWrapper env(env_, mock_clock);
|
|
|
|
|
|
|
|
Options opts = CurrentOptions();
|
|
|
|
opts.env = &env;
|
|
|
|
Reopen(opts);
|
|
|
|
|
|
|
|
{
|
|
|
|
std::map<std::string, std::string> db_stats;
|
|
|
|
ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
|
|
|
|
AssertDbStats(db_stats, 0.0 /* expected_uptime */,
|
|
|
|
0 /* expected_user_bytes_written */,
|
|
|
|
0 /* expected_wal_bytes_written */,
|
|
|
|
0 /* expected_user_writes_by_self */,
|
|
|
|
0 /* expected_user_writes_with_wal */);
|
|
|
|
}
|
|
|
|
|
|
|
|
{
|
|
|
|
mock_clock->SleepForMicroseconds(1500000);
|
|
|
|
|
|
|
|
std::map<std::string, std::string> db_stats;
|
|
|
|
ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
|
|
|
|
AssertDbStats(db_stats, 1.5 /* expected_uptime */,
|
|
|
|
0 /* expected_user_bytes_written */,
|
|
|
|
0 /* expected_wal_bytes_written */,
|
|
|
|
0 /* expected_user_writes_by_self */,
|
|
|
|
0 /* expected_user_writes_with_wal */);
|
|
|
|
}
|
|
|
|
|
|
|
|
int expected_user_bytes_written = 0;
|
|
|
|
{
|
|
|
|
// Write with WAL disabled.
|
|
|
|
WriteOptions write_opts;
|
|
|
|
write_opts.disableWAL = true;
|
|
|
|
|
|
|
|
WriteBatch batch;
|
|
|
|
ASSERT_OK(batch.Put("key", "val"));
|
|
|
|
expected_user_bytes_written += static_cast<int>(batch.GetDataSize());
|
|
|
|
|
|
|
|
ASSERT_OK(db_->Write(write_opts, &batch));
|
|
|
|
|
|
|
|
std::map<std::string, std::string> db_stats;
|
|
|
|
ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
|
|
|
|
AssertDbStats(db_stats, 1.5 /* expected_uptime */,
|
|
|
|
expected_user_bytes_written,
|
|
|
|
0 /* expected_wal_bytes_written */,
|
|
|
|
1 /* expected_user_writes_by_self */,
|
|
|
|
0 /* expected_user_writes_with_wal */);
|
|
|
|
}
|
|
|
|
|
|
|
|
int expected_wal_bytes_written = 0;
|
|
|
|
{
|
|
|
|
// Write with WAL enabled.
|
|
|
|
WriteBatch batch;
|
|
|
|
ASSERT_OK(batch.Delete("key"));
|
|
|
|
expected_user_bytes_written += static_cast<int>(batch.GetDataSize());
|
|
|
|
expected_wal_bytes_written += static_cast<int>(batch.GetDataSize());
|
|
|
|
|
|
|
|
ASSERT_OK(db_->Write(WriteOptions(), &batch));
|
|
|
|
|
|
|
|
std::map<std::string, std::string> db_stats;
|
|
|
|
ASSERT_TRUE(db_->GetMapProperty(DB::Properties::kDBStats, &db_stats));
|
|
|
|
AssertDbStats(db_stats, 1.5 /* expected_uptime */,
|
|
|
|
expected_user_bytes_written, expected_wal_bytes_written,
|
|
|
|
2 /* expected_user_writes_by_self */,
|
|
|
|
1 /* expected_user_writes_with_wal */);
|
|
|
|
}
|
|
|
|
|
|
|
|
Close();
|
|
|
|
}
|
|
|
|
|
2022-04-14 16:38:55 +00:00
|
|
|
TEST_F(DBPropertiesTest, GetMapPropertyBlockCacheEntryStats) {
|
|
|
|
// Currently only verifies the expected properties are present
|
|
|
|
std::map<std::string, std::string> values;
|
|
|
|
ASSERT_TRUE(
|
|
|
|
db_->GetMapProperty(DB::Properties::kBlockCacheEntryStats, &values));
|
|
|
|
|
|
|
|
ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::CacheId()) !=
|
|
|
|
values.end());
|
|
|
|
ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::CacheCapacityBytes()) !=
|
|
|
|
values.end());
|
|
|
|
ASSERT_TRUE(
|
|
|
|
values.find(
|
|
|
|
BlockCacheEntryStatsMapKeys::LastCollectionDurationSeconds()) !=
|
|
|
|
values.end());
|
|
|
|
ASSERT_TRUE(
|
|
|
|
values.find(BlockCacheEntryStatsMapKeys::LastCollectionAgeSeconds()) !=
|
|
|
|
values.end());
|
|
|
|
for (size_t i = 0; i < kNumCacheEntryRoles; ++i) {
|
|
|
|
CacheEntryRole role = static_cast<CacheEntryRole>(i);
|
|
|
|
ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::EntryCount(role)) !=
|
|
|
|
values.end());
|
|
|
|
ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::UsedBytes(role)) !=
|
|
|
|
values.end());
|
|
|
|
ASSERT_TRUE(values.find(BlockCacheEntryStatsMapKeys::UsedPercent(role)) !=
|
|
|
|
values.end());
|
|
|
|
}
|
|
|
|
|
|
|
|
// There should be no extra values in the map.
|
|
|
|
ASSERT_EQ(3 * kNumCacheEntryRoles + 4, values.size());
|
|
|
|
}
|
|
|
|
|
2023-03-18 16:51:58 +00:00
|
|
|
TEST_F(DBPropertiesTest, WriteStallStatsSanityCheck) {
|
|
|
|
for (uint32_t i = 0; i < static_cast<uint32_t>(WriteStallCause::kNone); ++i) {
|
2023-04-05 21:42:31 +00:00
|
|
|
WriteStallCause cause = static_cast<WriteStallCause>(i);
|
|
|
|
const std::string& str = WriteStallCauseToHyphenString(cause);
|
2023-03-18 16:51:58 +00:00
|
|
|
ASSERT_TRUE(!str.empty())
|
|
|
|
<< "Please ensure mapping from `WriteStallCause` to "
|
2023-04-05 21:42:31 +00:00
|
|
|
"`WriteStallCauseToHyphenString` is complete";
|
2023-03-18 16:51:58 +00:00
|
|
|
if (cause == WriteStallCause::kCFScopeWriteStallCauseEnumMax ||
|
|
|
|
cause == WriteStallCause::kDBScopeWriteStallCauseEnumMax) {
|
2023-04-05 21:42:31 +00:00
|
|
|
ASSERT_EQ(str, InvalidWriteStallHyphenString())
|
|
|
|
<< "Please ensure order in `WriteStallCauseToHyphenString` is "
|
2023-03-18 16:51:58 +00:00
|
|
|
"consistent with `WriteStallCause`";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < static_cast<uint32_t>(WriteStallCondition::kNormal);
|
|
|
|
++i) {
|
2023-04-05 21:42:31 +00:00
|
|
|
WriteStallCondition condition = static_cast<WriteStallCondition>(i);
|
|
|
|
const std::string& str = WriteStallConditionToHyphenString(condition);
|
2023-03-18 16:51:58 +00:00
|
|
|
ASSERT_TRUE(!str.empty())
|
|
|
|
<< "Please ensure mapping from `WriteStallCondition` to "
|
2023-04-05 21:42:31 +00:00
|
|
|
"`WriteStallConditionToHyphenString` is complete";
|
2023-03-18 16:51:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < static_cast<uint32_t>(WriteStallCause::kNone); ++i) {
|
|
|
|
for (uint32_t j = 0;
|
|
|
|
j < static_cast<uint32_t>(WriteStallCondition::kNormal); ++j) {
|
|
|
|
WriteStallCause cause = static_cast<WriteStallCause>(i);
|
|
|
|
WriteStallCondition condition = static_cast<WriteStallCondition>(j);
|
|
|
|
|
|
|
|
if (isCFScopeWriteStallCause(cause)) {
|
|
|
|
ASSERT_TRUE(InternalCFStat(cause, condition) !=
|
|
|
|
InternalStats::INTERNAL_CF_STATS_ENUM_MAX)
|
|
|
|
<< "Please ensure the combination of WriteStallCause(" +
|
|
|
|
std::to_string(static_cast<uint32_t>(cause)) +
|
|
|
|
") + WriteStallCondition(" +
|
|
|
|
std::to_string(static_cast<uint32_t>(condition)) +
|
|
|
|
") is correctly mapped to a valid `InternalStats` or bypass "
|
|
|
|
"its check in this test";
|
|
|
|
} else if (isDBScopeWriteStallCause(cause)) {
|
|
|
|
InternalStats::InternalDBStatsType internal_db_stat =
|
|
|
|
InternalDBStat(cause, condition);
|
|
|
|
if (internal_db_stat == InternalStats::kIntStatsNumMax) {
|
|
|
|
ASSERT_TRUE(cause == WriteStallCause::kWriteBufferManagerLimit &&
|
|
|
|
condition == WriteStallCondition::kDelayed)
|
|
|
|
<< "Please ensure the combination of WriteStallCause(" +
|
|
|
|
std::to_string(static_cast<uint32_t>(cause)) +
|
|
|
|
") + WriteStallCondition(" +
|
|
|
|
std::to_string(static_cast<uint32_t>(condition)) +
|
|
|
|
") is correctly mapped to a valid `InternalStats` or "
|
|
|
|
"bypass its check in this test";
|
|
|
|
}
|
|
|
|
} else if (cause != WriteStallCause::kCFScopeWriteStallCauseEnumMax &&
|
|
|
|
cause != WriteStallCause::kDBScopeWriteStallCauseEnumMax) {
|
|
|
|
ASSERT_TRUE(false) << "Please ensure the WriteStallCause(" +
|
|
|
|
std::to_string(static_cast<uint32_t>(cause)) +
|
|
|
|
") is either CF-scope or DB-scope write "
|
|
|
|
"stall cause in enum `WriteStallCause`";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
TEST_F(DBPropertiesTest, GetMapPropertyWriteStallStats) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
CreateAndReopenWithCF({"heavy_write_cf"}, options);
|
|
|
|
|
|
|
|
for (auto test_cause : {WriteStallCause::kWriteBufferManagerLimit,
|
|
|
|
WriteStallCause::kMemtableLimit}) {
|
|
|
|
if (test_cause == WriteStallCause::kWriteBufferManagerLimit) {
|
|
|
|
options.write_buffer_manager.reset(
|
|
|
|
new WriteBufferManager(100000, nullptr, true));
|
|
|
|
} else if (test_cause == WriteStallCause::kMemtableLimit) {
|
|
|
|
options.max_write_buffer_number = 2;
|
|
|
|
options.disable_auto_compactions = true;
|
|
|
|
}
|
|
|
|
ReopenWithColumnFamilies({"default", "heavy_write_cf"}, options);
|
|
|
|
|
|
|
|
// Assert initial write stall stats are all 0
|
|
|
|
std::map<std::string, std::string> db_values;
|
|
|
|
ASSERT_TRUE(dbfull()->GetMapProperty(DB::Properties::kDBWriteStallStats,
|
|
|
|
&db_values));
|
|
|
|
ASSERT_EQ(std::stoi(db_values[WriteStallStatsMapKeys::CauseConditionCount(
|
|
|
|
WriteStallCause::kWriteBufferManagerLimit,
|
|
|
|
WriteStallCondition::kStopped)]),
|
|
|
|
0);
|
|
|
|
|
|
|
|
for (int cf = 0; cf <= 1; ++cf) {
|
|
|
|
std::map<std::string, std::string> cf_values;
|
|
|
|
ASSERT_TRUE(dbfull()->GetMapProperty(
|
|
|
|
handles_[cf], DB::Properties::kCFWriteStallStats, &cf_values));
|
|
|
|
ASSERT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalStops()]), 0);
|
|
|
|
ASSERT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalDelays()]), 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Pause flush thread to help coerce write stall
|
|
|
|
std::unique_ptr<test::SleepingBackgroundTask> sleeping_task(
|
|
|
|
new test::SleepingBackgroundTask());
|
|
|
|
env_->SetBackgroundThreads(1, Env::HIGH);
|
|
|
|
env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
|
|
|
|
sleeping_task.get(), Env::Priority::HIGH);
|
|
|
|
sleeping_task->WaitUntilSleeping();
|
|
|
|
|
|
|
|
// Coerce write stall
|
|
|
|
if (test_cause == WriteStallCause::kWriteBufferManagerLimit) {
|
|
|
|
ASSERT_OK(dbfull()->Put(
|
|
|
|
WriteOptions(), handles_[1], Key(1),
|
|
|
|
DummyString(options.write_buffer_manager->buffer_size())));
|
|
|
|
|
|
|
|
WriteOptions wo;
|
|
|
|
wo.no_slowdown = true;
|
|
|
|
Status s = dbfull()->Put(
|
|
|
|
wo, handles_[1], Key(2),
|
|
|
|
DummyString(options.write_buffer_manager->buffer_size()));
|
|
|
|
ASSERT_TRUE(s.IsIncomplete());
|
|
|
|
ASSERT_TRUE(s.ToString().find("Write stall") != std::string::npos);
|
|
|
|
} else if (test_cause == WriteStallCause::kMemtableLimit) {
|
|
|
|
FlushOptions fo;
|
|
|
|
fo.allow_write_stall = true;
|
|
|
|
fo.wait = false;
|
|
|
|
|
|
|
|
ASSERT_OK(
|
|
|
|
dbfull()->Put(WriteOptions(), handles_[1], Key(1), DummyString(1)));
|
|
|
|
ASSERT_OK(dbfull()->Flush(fo, handles_[1]));
|
|
|
|
|
|
|
|
ASSERT_OK(
|
|
|
|
dbfull()->Put(WriteOptions(), handles_[1], Key(2), DummyString(1)));
|
|
|
|
ASSERT_OK(dbfull()->Flush(fo, handles_[1]));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (test_cause == WriteStallCause::kWriteBufferManagerLimit) {
|
|
|
|
db_values.clear();
|
|
|
|
EXPECT_TRUE(dbfull()->GetMapProperty(DB::Properties::kDBWriteStallStats,
|
|
|
|
&db_values));
|
|
|
|
EXPECT_EQ(std::stoi(db_values[WriteStallStatsMapKeys::CauseConditionCount(
|
|
|
|
WriteStallCause::kWriteBufferManagerLimit,
|
|
|
|
WriteStallCondition::kStopped)]),
|
|
|
|
1);
|
|
|
|
// `WriteStallCause::kWriteBufferManagerLimit` should not result in any
|
|
|
|
// CF-scope write stall stats changes
|
|
|
|
for (int cf = 0; cf <= 1; ++cf) {
|
|
|
|
std::map<std::string, std::string> cf_values;
|
|
|
|
EXPECT_TRUE(dbfull()->GetMapProperty(
|
|
|
|
handles_[cf], DB::Properties::kCFWriteStallStats, &cf_values));
|
|
|
|
EXPECT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalStops()]),
|
|
|
|
0);
|
|
|
|
EXPECT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalDelays()]),
|
|
|
|
0);
|
|
|
|
}
|
|
|
|
} else if (test_cause == WriteStallCause::kMemtableLimit) {
|
|
|
|
for (int cf = 0; cf <= 1; ++cf) {
|
|
|
|
std::map<std::string, std::string> cf_values;
|
|
|
|
EXPECT_TRUE(dbfull()->GetMapProperty(
|
|
|
|
handles_[cf], DB::Properties::kCFWriteStallStats, &cf_values));
|
|
|
|
EXPECT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalStops()]),
|
|
|
|
cf == 1 ? 1 : 0);
|
|
|
|
EXPECT_EQ(
|
|
|
|
std::stoi(cf_values[WriteStallStatsMapKeys::CauseConditionCount(
|
|
|
|
WriteStallCause::kMemtableLimit,
|
|
|
|
WriteStallCondition::kStopped)]),
|
|
|
|
cf == 1 ? 1 : 0);
|
|
|
|
EXPECT_EQ(std::stoi(cf_values[WriteStallStatsMapKeys::TotalDelays()]),
|
|
|
|
0);
|
|
|
|
EXPECT_EQ(
|
|
|
|
std::stoi(cf_values[WriteStallStatsMapKeys::CauseConditionCount(
|
|
|
|
WriteStallCause::kMemtableLimit,
|
|
|
|
WriteStallCondition::kDelayed)]),
|
|
|
|
0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
sleeping_task->WakeUp();
|
|
|
|
sleeping_task->WaitUntilDone();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-06 22:54:40 +00:00
|
|
|
namespace {
|
|
|
|
std::string PopMetaIndexKey(InternalIterator* meta_iter) {
|
|
|
|
Status s = meta_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s.ToString();
|
|
|
|
} else if (meta_iter->Valid()) {
|
|
|
|
std::string rv = meta_iter->key().ToString();
|
|
|
|
meta_iter->Next();
|
|
|
|
return rv;
|
|
|
|
} else {
|
|
|
|
return "NOT_FOUND";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-02 21:34:24 +00:00
|
|
|
} // anonymous namespace
|
2022-04-06 22:54:40 +00:00
|
|
|
|
|
|
|
TEST_F(DBPropertiesTest, TableMetaIndexKeys) {
|
|
|
|
// This is to detect unexpected churn in metaindex block keys. This is more
|
|
|
|
// of a "table test" but table_test.cc doesn't depend on db_test_util.h and
|
|
|
|
// we need ChangeOptions() for broad coverage.
|
|
|
|
constexpr int kKeyCount = 100;
|
|
|
|
do {
|
|
|
|
Options options;
|
|
|
|
options = CurrentOptions(options);
|
|
|
|
DestroyAndReopen(options);
|
|
|
|
|
|
|
|
// Create an SST file
|
|
|
|
for (int key = 0; key < kKeyCount; key++) {
|
|
|
|
ASSERT_OK(Put(Key(key), "val"));
|
|
|
|
}
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
|
|
|
|
// Find its file number
|
|
|
|
std::vector<LiveFileMetaData> files;
|
|
|
|
db_->GetLiveFilesMetaData(&files);
|
|
|
|
// 1 SST file
|
|
|
|
ASSERT_EQ(1, files.size());
|
|
|
|
|
|
|
|
// Open it for inspection
|
|
|
|
std::string sst_file =
|
|
|
|
files[0].directory + "/" + files[0].relative_filename;
|
|
|
|
std::unique_ptr<FSRandomAccessFile> f;
|
|
|
|
ASSERT_OK(env_->GetFileSystem()->NewRandomAccessFile(
|
|
|
|
sst_file, FileOptions(), &f, nullptr));
|
|
|
|
std::unique_ptr<RandomAccessFileReader> r;
|
|
|
|
r.reset(new RandomAccessFileReader(std::move(f), sst_file));
|
|
|
|
uint64_t file_size = 0;
|
|
|
|
ASSERT_OK(env_->GetFileSize(sst_file, &file_size));
|
|
|
|
|
|
|
|
// Read metaindex
|
|
|
|
BlockContents bc;
|
2023-04-21 16:07:18 +00:00
|
|
|
const ReadOptions read_options;
|
|
|
|
ASSERT_OK(ReadMetaIndexBlockInFile(
|
|
|
|
r.get(), file_size, 0U, ImmutableOptions(options), read_options, &bc));
|
2022-04-06 22:54:40 +00:00
|
|
|
Block metaindex_block(std::move(bc));
|
|
|
|
std::unique_ptr<InternalIterator> meta_iter;
|
|
|
|
meta_iter.reset(metaindex_block.NewMetaIterator());
|
|
|
|
meta_iter->SeekToFirst();
|
|
|
|
|
|
|
|
if (strcmp(options.table_factory->Name(),
|
|
|
|
TableFactory::kBlockBasedTableName()) == 0) {
|
|
|
|
auto bbto = options.table_factory->GetOptions<BlockBasedTableOptions>();
|
|
|
|
if (bbto->filter_policy) {
|
|
|
|
if (bbto->partition_filters) {
|
|
|
|
// The key names are intentionally hard-coded here to detect
|
|
|
|
// accidental regressions in compatibility.
|
|
|
|
EXPECT_EQ("partitionedfilter.rocksdb.BuiltinBloomFilter",
|
|
|
|
PopMetaIndexKey(meta_iter.get()));
|
|
|
|
} else {
|
|
|
|
EXPECT_EQ("fullfilter.rocksdb.BuiltinBloomFilter",
|
|
|
|
PopMetaIndexKey(meta_iter.get()));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (bbto->index_type == BlockBasedTableOptions::kHashSearch) {
|
|
|
|
EXPECT_EQ("rocksdb.hashindex.metadata",
|
|
|
|
PopMetaIndexKey(meta_iter.get()));
|
|
|
|
EXPECT_EQ("rocksdb.hashindex.prefixes",
|
|
|
|
PopMetaIndexKey(meta_iter.get()));
|
|
|
|
}
|
format_version=6 and context-aware block checksums (#9058)
Summary:
## Context checksum
All RocksDB checksums currently use 32 bits of checking
power, which should give a 1 in 4 billion false negative (FN) probability of
failing to detect corruption. This holds for random corruptions, and in some
cases small corruptions are guaranteed to be detected. But some possible
corruptions, such as in storage metadata rather than storage payload data,
would have a much higher FN rate. For example:
* Data larger than one SST block is replaced by data from elsewhere in
the same or another SST file. Especially with block_align=true, the
probability of an exact block size match is probably around 1 in 100, making
the FN probability about the same. Without `block_align=true` the
probability of the same block start location is probably around 1 in 10,000,
for an FN probability around 1 in a million.
To solve this problem, in the new format_version=6 we add "context awareness"
to block checksum checks. The stored and expected checksum value is
modified based on the block's position in the file and which file it is in. The
modifications are cleverly chosen so that, for example:
* blocks within about 4GB of each other are guaranteed to use different context
* blocks that are offset by exactly some multiple of 4GiB are guaranteed to use
different context
* files generated by the same process are guaranteed to use different context
for the same offsets, until wrap-around after 2^32 - 1 files
Thus, with format_version=6, if a valid SST block and its checksum are misplaced,
the checksum FN probability should be essentially ideal, 1 in 4B.
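As a minimal sketch of the general idea only (not the actual format_version=6 algorithm, constants, or salt scheme), mixing a per-file salt and the block offset into the stored 32-bit checksum means the same bytes no longer verify if they appear at a different offset or in a different file:
```
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the real 32-bit block checksum function.
static uint32_t BaseChecksum(const char* data, size_t len) {
  uint32_t h = 2166136261u;  // FNV-1a, for the sketch only
  for (size_t i = 0; i < len; ++i) {
    h = (h ^ static_cast<unsigned char>(data[i])) * 16777619u;
  }
  return h;
}

// Mix "context" into the checksum: a per-file salt plus the block's offset
// (reduced mod 2^32). Note: the real format_version=6 scheme also guarantees
// different context for offsets that differ by exact multiples of 4GiB, which
// this simplification does not.
static uint32_t ContextChecksum(const char* data, size_t len,
                                uint32_t file_salt, uint64_t block_offset) {
  uint32_t context = file_salt + static_cast<uint32_t>(block_offset);
  return BaseChecksum(data, len) + context;
}

int main() {
  const char block[] = "same block bytes";
  // The same payload at a different offset, or in a different file, no longer
  // verifies against the same stored checksum, so misplaced blocks are caught.
  uint32_t same_file_off0 = ContextChecksum(block, sizeof(block) - 1, 123, 0);
  uint32_t same_file_off4k = ContextChecksum(block, sizeof(block) - 1, 123, 4096);
  uint32_t other_file_off0 = ContextChecksum(block, sizeof(block) - 1, 456, 0);
  std::printf("%u %u %u\n", same_file_off0, same_file_off4k, other_file_off0);
  return (same_file_off0 != same_file_off4k && same_file_off0 != other_file_off0)
             ? 0 : 1;
}
```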
## Footer checksum
This change also adds checksum protection to the SST footer (with
format_version=6), for the first time without relying on whole file checksum.
To prevent a corruption of the format_version in the footer (e.g. 6 -> 5)
from defeating the footer checksum, we change much of the footer data format,
including an "extended magic number" in format_version 6 that would be
interpreted as empty index and metaindex block handles in older footer
versions. We also change the encoding of handles to free up space for
other new data in the footer.
## More detail: making space in footer
In order to keep the footer the same size in format_version=6 (avoiding a
change to IO patterns), we have to free up some space for new data. We do this
in two ways:
* The metaindex block handle is encoded down to 4 bytes (from 10) by assuming
it immediately precedes the footer, and by assuming it is < 4GB.
* The index block handle is moved into the metaindex. (I don't know why it was
in the footer to begin with.)
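A minimal sketch of why 4 bytes suffice under those assumptions (illustrative encoding only, not RocksDB's actual footer format): if the metaindex block ends exactly where the footer begins and is smaller than 4GB, storing just its 32-bit size lets the reader reconstruct the full offset+size handle.
```
#include <cassert>
#include <cstdint>

// Illustrative only; this is not RocksDB's actual footer encoding.
struct BlockHandle {
  uint64_t offset;
  uint64_t size;
};

// Writer side: given that the metaindex block immediately precedes the footer
// and is < 4GB, its 32-bit size is all that needs to be stored.
static uint32_t EncodeMetaindexSize(const BlockHandle& metaindex) {
  assert(metaindex.size < (uint64_t{1} << 32));
  return static_cast<uint32_t>(metaindex.size);
}

// Reader side: reconstruct the full handle from the footer position alone.
static BlockHandle DecodeMetaindexHandle(uint64_t footer_offset,
                                         uint32_t encoded_size) {
  return BlockHandle{footer_offset - encoded_size, encoded_size};
}

int main() {
  BlockHandle metaindex{/*offset=*/1000000, /*size=*/512};
  uint64_t footer_offset = metaindex.offset + metaindex.size;  // adjacent
  uint32_t encoded = EncodeMetaindexSize(metaindex);
  BlockHandle decoded = DecodeMetaindexHandle(footer_offset, encoded);
  return (decoded.offset == metaindex.offset && decoded.size == metaindex.size)
             ? 0 : 1;
}
```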
## Performance
In case of a small performance penalty, I've made a "pay as you go" optimization
to compensate: replace `MutableCFOptions` in BlockBasedTableBuilder::Rep
with the only field used in that structure after construction: `prefix_extractor`.
This makes the PR an overall performance improvement (results below).
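A generic sketch of that "pay as you go" idea (hypothetical types, not the real MutableCFOptions or BlockBasedTableBuilder::Rep): keep only the field consulted after construction instead of copying the whole options object into long-lived builder state.
```
#include <memory>
#include <string>

// Hypothetical stand-ins; not the real MutableCFOptions or Rep types.
struct BigMutableOptions {
  std::shared_ptr<const std::string> prefix_extractor;
  int many_other_fields[64] = {};
};

// Before: the whole options object is copied into long-lived builder state,
// even though only one field is read after construction.
struct RepBefore {
  explicit RepBefore(const BigMutableOptions& opts) : options(opts) {}
  BigMutableOptions options;
};

// After: keep only the field actually consulted after construction.
struct RepAfter {
  explicit RepAfter(const BigMutableOptions& opts)
      : prefix_extractor(opts.prefix_extractor) {}
  std::shared_ptr<const std::string> prefix_extractor;
};

int main() {
  BigMutableOptions opts;
  opts.prefix_extractor = std::make_shared<const std::string>("capped:8");
  RepBefore before(opts);
  RepAfter after(opts);
  return before.options.prefix_extractor == after.prefix_extractor ? 0 : 1;
}
```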
Nevertheless I'm seeing essentially no difference going from fv=5 to fv=6,
even including that improvement for both. That's based on extreme-case table
write performance testing with many files and many blocks. This is relatively
checksum-intensive (small blocks) and salt-generation-intensive (small files).
```
(for I in `seq 1 100`; do TEST_TMPDIR=/dev/shm/dbbench2 ./db_bench -benchmarks=fillseq -memtablerep=vector -disable_wal=1 -allow_concurrent_memtable_write=false -num=3000000 -compaction_style=2 -fifo_compaction_max_table_files_size_mb=10000 -fifo_compaction_allow_compaction=0 -write_buffer_size=100000 -compression_type=none -block_size=1000; done) 2>&1 | grep micros/op | tee out
awk '{ tot += $5; n += 1; } END { print int(1.0 * tot / n) }' < out
```
Each value below is ops/s averaged over 100 runs, run simultaneously with the
competing configuration for load fairness:
Before -> after (both fv=5): 483530 -> 483673 (negligible)
Re-run 1: 480733 -> 485427 (1.0% faster)
Re-run 2: 483821 -> 484541 (0.1% faster)
Before (fv=5) -> after (fv=6): 482006 -> 485100 (0.6% faster)
Re-run 1: 482212 -> 485075 (0.6% faster)
Re-run 2: 483590 -> 484073 (0.1% faster)
After fv=5 -> after fv=6: 483878 -> 485542 (0.3% faster)
Re-run 1: 485331 -> 483385 (0.4% slower)
Re-run 2: 485283 -> 483435 (0.4% slower)
Re-run 3: 483647 -> 486109 (0.5% faster)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9058
Test Plan:
unit tests included (table_test, db_properties_test, salt in env_test). General DB tests
and crash test updated to test new format_version.
Also temporarily updated the default format version to 6 and saw some test failures. Almost all
were due to an inadvertent additional read in VerifyChecksum to verify the index block checksum,
though it's arguably a bug that VerifyChecksum does not appear to (re-)verify the index block
checksum, instead assuming it was verified when opening the index reader (probably
*usually* true, but not always). Some other concerns about VerifyChecksum are left in FIXME
comments. The only remaining test failure on change of default (in block_fetcher_test) now
has a comment about how to upgrade the test.
The format compatibility test does not need updating because we have not updated the default
format_version.
Reviewed By: ajkr, mrambacher
Differential Revision: D33100915
Pulled By: pdillinger
fbshipit-source-id: 8679e3e572fa580181a737fd6d113ed53c5422ee
2023-07-30 23:40:01 +00:00
|
|
|
if (bbto->format_version >= 6) {
|
|
|
|
EXPECT_EQ("rocksdb.index", PopMetaIndexKey(meta_iter.get()));
|
|
|
|
}
|
2022-04-06 22:54:40 +00:00
|
|
|
}
|
|
|
|
EXPECT_EQ("rocksdb.properties", PopMetaIndexKey(meta_iter.get()));
|
|
|
|
EXPECT_EQ("NOT_FOUND", PopMetaIndexKey(meta_iter.get()));
|
|
|
|
} while (ChangeOptions());
|
|
|
|
}
|
|
|
|
|
2021-10-20 20:15:33 +00:00
|
|
|
|
2020-02-20 20:07:53 +00:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2016-01-20 23:17:52 +00:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
|
2020-02-20 20:07:53 +00:00
|
|
|
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
|
2016-01-20 23:17:52 +00:00
|
|
|
::testing::InitGoogleTest(&argc, argv);
|
|
|
|
return RUN_ALL_TESTS();
|
|
|
|
}
|