// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE

#include <cstdint>
#include <map>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "db/db_impl/db_impl.h"
#include "rocksdb/cache.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/memory_util.h"
#include "rocksdb/utilities/stackable_db.h"
#include "table/block_based/block_based_table_factory.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/random.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {
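
// Tests for MemoryUtil::GetApproximateMemoryUsageByType(), which reports the
// approximate memory usage of a set of DBs broken down into memtable,
// table-reader, and cache components.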
class MemoryTest : public testing::Test {
 public:
  MemoryTest() : kDbDir(test::PerThreadDBPath("memory_test")), rnd_(301) {
    assert(Env::Default()->CreateDirIfMissing(kDbDir).ok());
  }

  std::string GetDBName(int id) { return kDbDir + "db_" + ToString(id); }
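
  // Takes a snapshot of the current memory usage of `dbs` and appends one
  // entry per MemoryUtil::UsageType to usage_history_, so tests can compare
  // consecutive snapshots.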
  void UpdateUsagesHistory(const std::vector<DB*>& dbs) {
    std::map<MemoryUtil::UsageType, uint64_t> usage_by_type;
    ASSERT_OK(GetApproximateMemoryUsageByType(dbs, &usage_by_type));
    for (int i = 0; i < MemoryUtil::kNumUsageTypes; ++i) {
      usage_history_[i].push_back(
          usage_by_type[static_cast<MemoryUtil::UsageType>(i)]);
    }
  }
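
  // Collects the block caches (regular and compressed) from `factory` if it
  // is a BlockBasedTableFactory; other table factories have no caches to
  // report.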
  void GetCachePointersFromTableFactory(
      const TableFactory* factory,
      std::unordered_set<const Cache*>* cache_set) {
    const BlockBasedTableFactory* bbtf =
        dynamic_cast<const BlockBasedTableFactory*>(factory);
    if (bbtf != nullptr) {
      const auto bbt_opts = bbtf->table_options();
      cache_set->insert(bbt_opts.block_cache.get());
      cache_set->insert(bbt_opts.block_cache_compressed.get());
    }
  }
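
  // Gathers every cache reachable from `dbs`: the per-DB table cache, the
  // row cache from DBOptions, and any block caches configured on the table
  // factory of each column family.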
  void GetCachePointers(const std::vector<DB*>& dbs,
                        std::unordered_set<const Cache*>* cache_set) {
    cache_set->clear();

    for (auto* db : dbs) {
      assert(db);

      // Cache from DBImpl
      StackableDB* sdb = dynamic_cast<StackableDB*>(db);
      DBImpl* db_impl = dynamic_cast<DBImpl*>(sdb ? sdb->GetBaseDB() : db);
      if (db_impl != nullptr) {
        cache_set->insert(db_impl->TEST_table_cache());
      }

      // Cache from DBOptions
      cache_set->insert(db->GetDBOptions().row_cache.get());

      // Cache from table factories
      std::unordered_map<std::string, const ImmutableCFOptions*> iopts_map;
      if (db_impl != nullptr) {
        ASSERT_OK(db_impl->TEST_GetAllImmutableCFOptions(&iopts_map));
      }
      for (const auto& pair : iopts_map) {
        GetCachePointersFromTableFactory(pair.second->table_factory,
                                         cache_set);
      }
    }
  }
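
  // Wraps MemoryUtil::GetApproximateMemoryUsageByType(), first discovering
  // the set of caches used by `dbs` so that memory in a shared cache is not
  // counted more than once.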
  Status GetApproximateMemoryUsageByType(
      const std::vector<DB*>& dbs,
      std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type) {
    std::unordered_set<const Cache*> cache_set;
    GetCachePointers(dbs, &cache_set);

    return MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
                                                       usage_by_type);
  }

  const std::string kDbDir;
  Random rnd_;
  std::vector<uint64_t> usage_history_[MemoryUtil::kNumUsageTypes];
};
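
// Opens several DBs that share one block cache, writes and flushes enough
// data to create table files, then reads everything back and verifies that
// the reported table-reader usage stays flat once flushing stops.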
TEST_F(MemoryTest, SharedBlockCacheTotal) {
  std::vector<DB*> dbs;
  const int kNumDBs = 10;
  const int kKeySize = 100;
  const int kValueSize = 500;
  Options opt;
  opt.create_if_missing = true;
  // A single Put roughly fills a memtable.
  opt.write_buffer_size = kKeySize + kValueSize;
  opt.max_write_buffer_number = 10;
  opt.min_write_buffer_number_to_merge = 10;
  opt.disable_auto_compactions = true;
  BlockBasedTableOptions bbt_opts;
  bbt_opts.block_cache = NewLRUCache(4096 * 1000 * 10);
  // Share one block cache across all DBs via a common table factory.
  opt.table_factory.reset(NewBlockBasedTableFactory(bbt_opts));
  for (int i = 0; i < kNumDBs; ++i) {
    ASSERT_OK(DestroyDB(GetDBName(i), opt));
    DB* db = nullptr;
    ASSERT_OK(DB::Open(opt, GetDBName(i), &db));
    dbs.push_back(db);
  }

  std::vector<std::string> keys_by_db[kNumDBs];

  // Fill one memtable per Put to make the memtables use more memory.
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (int j = 0; j < 100; ++j) {
        keys_by_db[i].emplace_back(rnd_.RandomString(kKeySize));
        ASSERT_OK(dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
                              rnd_.RandomString(kValueSize)));
      }
      ASSERT_OK(dbs[i]->Flush(FlushOptions()));
    }
  }
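
  // Read every key back so each DB loads its table readers and populates the
  // shared block cache before usage snapshots are compared.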
  for (int i = 0; i < kNumDBs; ++i) {
    for (auto& key : keys_by_db[i]) {
      std::string value;
      ASSERT_OK(dbs[i]->Get(ReadOptions(), key, &value));
    }
    UpdateUsagesHistory(dbs);
  }
  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
       ++i) {
    // Expect EQ as we didn't flush more memtables.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }
  for (int i = 0; i < kNumDBs; ++i) {
    delete dbs[i];
  }
}
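
// Opens several multi-column-family DBs and tracks how memtable,
// table-reader, and cache usage move as data is written, flushed, read,
// and as iterators pin and then release memtables.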
TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
  std::vector<DB*> dbs;
  std::vector<std::vector<ColumnFamilyHandle*>> vec_handles;
  const int kNumDBs = 10;
  const int kKeySize = 100;
  const int kValueSize = 500;
  Options opt;
  opt.create_if_missing = true;
  opt.create_missing_column_families = true;
  // A single Put roughly fills a memtable.
  opt.write_buffer_size = kKeySize + kValueSize;
  opt.max_write_buffer_number = 10;
  opt.min_write_buffer_number_to_merge = 10;
  opt.disable_auto_compactions = true;

  std::vector<ColumnFamilyDescriptor> cf_descs = {
      {kDefaultColumnFamilyName, ColumnFamilyOptions(opt)},
      {"one", ColumnFamilyOptions(opt)},
      {"two", ColumnFamilyOptions(opt)},
  };

  for (int i = 0; i < kNumDBs; ++i) {
    ASSERT_OK(DestroyDB(GetDBName(i), opt));
    dbs.emplace_back();
    vec_handles.emplace_back();
    ASSERT_OK(DB::Open(DBOptions(opt), GetDBName(i), cf_descs,
                       &vec_handles.back(), &dbs.back()));
  }

  // Fill one memtable per Put to make the memtables use more memory.
  for (int p = 0; p < opt.min_write_buffer_number_to_merge / 2; ++p) {
    for (int i = 0; i < kNumDBs; ++i) {
      for (auto* handle : vec_handles[i]) {
        ASSERT_OK(dbs[i]->Put(WriteOptions(), handle,
                              rnd_.RandomString(kKeySize),
                              rnd_.RandomString(kValueSize)));
        UpdateUsagesHistory(dbs);
      }
    }
  }
  // Expect the usage history to be monotonically increasing: every Put fills
  // another memtable, and nothing has been flushed yet.
  for (size_t i = 1; i < usage_history_[MemoryUtil::kMemTableTotal].size();
       ++i) {
    ASSERT_GT(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    ASSERT_GT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }

  size_t usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  std::vector<Iterator*> iters;

  // Create an iterator and flush all memtables for each db
  for (int i = 0; i < kNumDBs; ++i) {
    iters.push_back(dbs[i]->NewIterator(ReadOptions()));
    ASSERT_OK(dbs[i]->Flush(FlushOptions()));

    // Read random keys to load table readers and fill the block cache; the
    // statuses are intentionally ignored since most lookups return NotFound.
    for (int j = 0; j < 100; ++j) {
      std::string value;
      dbs[i]->Get(ReadOptions(), rnd_.RandomString(kKeySize), &value);
    }

    UpdateUsagesHistory(dbs);
  }
  for (size_t i = usage_check_point;
       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
    // The iterators pin the flushed memtables, so we don't expect the total
    // memtable usage to decrease even though the memtables were flushed.
    ASSERT_GE(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    // Expect the un-flushed memtable usage to decrease monotonically from
    // "usage_check_point" onward as each flush completes.
    ASSERT_LT(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // Expect the usage history of the table readers to increase
    // as we flush tables.
    ASSERT_GT(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
    ASSERT_GT(usage_history_[MemoryUtil::kCacheTotal][i],
              usage_history_[MemoryUtil::kCacheTotal][i - 1]);
  }

  usage_check_point = usage_history_[MemoryUtil::kMemTableTotal].size();
  for (int i = 0; i < kNumDBs; ++i) {
    delete iters[i];
    UpdateUsagesHistory(dbs);
  }
  for (size_t i = usage_check_point;
       i < usage_history_[MemoryUtil::kMemTableTotal].size(); ++i) {
    // Expect the usage of all memtables to decrease as we delete iterators
    // and release the memtables they pinned.
    ASSERT_LT(usage_history_[MemoryUtil::kMemTableTotal][i],
              usage_history_[MemoryUtil::kMemTableTotal][i - 1]);
    // Since the memory usage of un-flushed memtables is only affected
    // by Put and Flush, expect EQ here as we only delete iterators.
    ASSERT_EQ(usage_history_[MemoryUtil::kMemTableUnFlushed][i],
              usage_history_[MemoryUtil::kMemTableUnFlushed][i - 1]);
    // Expect EQ as we didn't flush more memtables.
    ASSERT_EQ(usage_history_[MemoryUtil::kTableReadersTotal][i],
              usage_history_[MemoryUtil::kTableReadersTotal][i - 1]);
  }

  for (int i = 0; i < kNumDBs; ++i) {
    for (auto* handle : vec_handles[i]) {
      delete handle;
    }
    delete dbs[i];
  }
}

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
#if !(defined NDEBUG) || !defined(OS_WIN)
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
#else
  return 0;
#endif
}

#else
#include <cstdio>

int main(int /*argc*/, char** /*argv*/) {
  printf("Skipped in RocksDBLite as utilities are not supported.\n");
  return 0;
}

#endif  // !ROCKSDB_LITE