// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "rocksdb/cache.h"
#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <vector>
#include "cache/lru_cache.h"
#include "port/stack_trace.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/string_util.h"

// HyperClockCache only supports 16-byte keys, so some of the tests
// originally written for LRUCache do not work on the other caches.
// Those tests were adapted to use 16-byte keys; we kept the original ones.
// TODO: Remove the original tests if they ever become unused.
namespace ROCKSDB_NAMESPACE {
namespace {

// Conversions between numeric keys/values and the types expected by Cache.
std::string EncodeKey16Bytes(int k) {
  std::string result;
  PutFixed32(&result, k);
  result.append(std::string(12, 'a'));  // Pad with 12 bytes to produce the
                                        // required 16-byte key.
  return result;
}

int DecodeKey16Bytes(const Slice& k) {
  assert(k.size() == 16);
  return DecodeFixed32(k.data());  // Decodes only the first 4 bytes of k.
}
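
// For illustration (assuming PutFixed32's little-endian fixed-width
// encoding): EncodeKey16Bytes(5) produces "\x05\x00\x00\x00" followed by
// twelve 'a' bytes, and DecodeKey16Bytes() recovers 5 from that key.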

std::string EncodeKey32Bits(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}

int DecodeKey32Bits(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}

void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }

int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}

void DumbDeleter(const Slice& /*key*/, void* /*value*/) {}
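
// Deleters that re-enter the cache by erasing another entry, used to
// exercise deleter re-entrancy in EraseFromDeleter below.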
void EraseDeleter1(const Slice& /*key*/, void* value) {
  Cache* cache = reinterpret_cast<Cache*>(value);
  cache->Erase("foo");
}

void EraseDeleter2(const Slice& /*key*/, void* value) {
  Cache* cache = reinterpret_cast<Cache*>(value);
  cache->Erase(EncodeKey16Bytes(1234));
}

const std::string kLRU = "lru";
const std::string kHyperClock = "hyper_clock";

}  // anonymous namespace

class CacheTest : public testing::TestWithParam<std::string> {
 public:
  static CacheTest* current_;
  static std::string type_;
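
  // Records each deleted entry's key and value so tests can verify that
  // deleters ran as expected.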
  static void Deleter(const Slice& key, void* v) {
    if (type_ == kHyperClock) {
      current_->deleted_keys_.push_back(DecodeKey16Bytes(key));
    } else {
      current_->deleted_keys_.push_back(DecodeKey32Bits(key));
    }
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<Cache> cache2_;
  size_t estimated_value_size_ = 1;

  CacheTest()
      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
    current_ = this;
    type_ = GetParam();
  }

  ~CacheTest() override {}

  std::shared_ptr<Cache> NewCache(size_t capacity) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity);
    }
    if (type == kHyperClock) {
      return HyperClockCacheOptions(
                 capacity, estimated_value_size_ /*estimated_value_size*/)
          .MakeSharedCache();
    }
    return nullptr;
  }

  std::shared_ptr<Cache> NewCache(
      size_t capacity, int num_shard_bits, bool strict_capacity_limit,
      CacheMetadataChargePolicy charge_policy = kDontChargeCacheMetadata) {
    auto type = GetParam();
    if (type == kLRU) {
      LRUCacheOptions co;
      co.capacity = capacity;
      co.num_shard_bits = num_shard_bits;
      co.strict_capacity_limit = strict_capacity_limit;
      co.high_pri_pool_ratio = 0;
      co.metadata_charge_policy = charge_policy;
      return NewLRUCache(co);
    }
    if (type == kHyperClock) {
      return HyperClockCacheOptions(capacity, 1 /*estimated_value_size*/,
                                    num_shard_bits, strict_capacity_limit,
                                    nullptr /*allocator*/, charge_policy)
          .MakeSharedCache();
    }
    return nullptr;
  }

  // These functions encode/decode keys in test cases that use int keys.
  // Currently, HyperClockCache requires keys to be 16B long, whereas
  // LRUCache doesn't, so the encoding depends on the cache type.
  std::string EncodeKey(int k) {
    auto type = GetParam();
    if (type == kHyperClock) {
      return EncodeKey16Bytes(k);
    } else {
      return EncodeKey32Bits(k);
    }
  }

  int DecodeKey(const Slice& k) {
    auto type = GetParam();
    if (type == kHyperClock) {
      return DecodeKey16Bytes(k);
    } else {
      return DecodeKey32Bits(k);
    }
  }
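
  // Returns the decoded value stored under `key`, or -1 on a cache miss.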
  int Lookup(std::shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  void Insert(std::shared_ptr<Cache> cache, int key, int value,
              int charge = 1) {
    EXPECT_OK(cache->Insert(EncodeKey(key), EncodeValue(value), charge,
                            &CacheTest::Deleter));
  }

  void Erase(std::shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  int Lookup(int key) { return Lookup(cache_, key); }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) { Erase(cache_, key); }

  int Lookup2(int key) { return Lookup(cache2_, key); }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) { Erase(cache2_, key); }
};

CacheTest* CacheTest::current_;
std::string CacheTest::type_;
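
// For tests that apply only to LRUCache; instantiated with kLRU only (see
// INSTANTIATE_TEST_CASE_P at the bottom of this file).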
class LRUCacheTest : public CacheTest {};

TEST_P(CacheTest, UsageTest) {
  auto type = GetParam();
  // cache is std::shared_ptr and will be automatically cleaned up.
  const size_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
  ASSERT_EQ(0, cache->GetUsage());
  size_t baseline_meta_usage = precise_cache->GetUsage();
  if (type != kHyperClock) {
    ASSERT_EQ(0, baseline_meta_usage);
  }

  size_t usage = 0;
  char value[10] = "abcdef";
  // Make sure everything will be cached.
  for (int i = 1; i < 100; ++i) {
    std::string key;
    if (type == kLRU) {
      key = std::string(i, 'a');
    } else {
      key = EncodeKey(i);
    }
    auto kv_size = key.size() + 5;
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                            DumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    kv_size, DumbDeleter));
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
    if (type == kHyperClock) {
      ASSERT_EQ(baseline_meta_usage + usage, precise_cache->GetUsage());
    } else {
      ASSERT_LT(usage, precise_cache->GetUsage());
    }
  }

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(baseline_meta_usage, precise_cache->GetUsage());

  // Make sure the cache will be overloaded.
  for (size_t i = 1; i < kCapacity; ++i) {
    std::string key;
    if (type == kLRU) {
      key = std::to_string(i);
    } else {
      key = EncodeKey(static_cast<int>(1000 + i));
    }
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                            DumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    key.size() + 5, DumbDeleter));
  }

  // The usage should be close to the capacity.
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_GT(kCapacity, precise_cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
  if (type != kHyperClock) {
    ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
  } else {
    // An estimated value size of 1 is weird for clock cache: almost all of
    // the capacity is used for metadata, and because table sizes are limited
    // to powers of 2, we might hit the strict occupancy limit before hitting
    // the capacity limit.
    ASSERT_LT(kCapacity * 0.80, precise_cache->GetUsage());
  }
}

// TODO: This test takes longer than expected on ClockCache because the
// value size estimate at construction is too sloppy. Fix this.
// Why is it so slow? The cache is constructed with an estimate of 1, but
// then each entry's charge is claimed to be 21. This makes the hash table
// extremely sparse, which in turn means clock needs to scan too many slots
// to find victims.
TEST_P(CacheTest, PinnedUsageTest) {
  auto type = GetParam();
  // cache is std::shared_ptr and will be automatically cleaned up.
  const size_t kCapacity = 200000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);
  size_t baseline_meta_usage = precise_cache->GetUsage();
  if (type != kHyperClock) {
    ASSERT_EQ(0, baseline_meta_usage);
  }

  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;
  std::forward_list<Cache::Handle*> unreleased_handles_in_precise_cache;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key;
    if (type == kLRU) {
      key = std::string(i, 'a');
    } else {
      key = EncodeKey(i);
    }
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    Cache::Handle* handle_in_precise_cache;
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                            DumbDeleter, &handle));
    assert(handle);
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    kv_size, DumbDeleter,
                                    &handle_in_precise_cache));
    assert(handle_in_precise_cache);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    if (i % 2 == 0) {
      cache->Release(handle);
      precise_cache->Release(handle_in_precise_cache);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
      unreleased_handles_in_precise_cache.push_front(handle_in_precise_cache);
    }
    if (i % 3 == 0) {
      unreleased_handles.push_front(cache->Lookup(key));
      auto x = precise_cache->Lookup(key);
      assert(x);
      unreleased_handles_in_precise_cache.push_front(x);
      // If i % 2 == 0, then the entry was unpinned before the Lookup, so
      // pinned usage increased.
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    }
  }
  auto precise_cache_pinned_usage = precise_cache->GetPinnedUsage();
  ASSERT_LT(pinned_usage, precise_cache_pinned_usage);

  // Check that overloading the cache does not change the pinned usage.
  for (size_t i = 1; i < 2 * kCapacity; ++i) {
    std::string key;
    if (type == kLRU) {
      key = std::to_string(i);
    } else {
      key = EncodeKey(static_cast<int>(1000 + i));
    }
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                            DumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    key.size() + 5, DumbDeleter));
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  // Release handles for pinned entries to prevent memory leaks.
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
  for (auto handle : unreleased_handles_in_precise_cache) {
    precise_cache->Release(handle);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  ASSERT_EQ(0, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(baseline_meta_usage, precise_cache->GetUsage());
}

TEST_P(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  if (GetParam() == kHyperClock) {
    // ClockCache usually doesn't overwrite on Insert.
    ASSERT_EQ(101, Lookup(100));
  } else {
    ASSERT_EQ(102, Lookup(100));
  }
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  if (GetParam() == kHyperClock) {
    ASSERT_EQ(102, deleted_values_[0]);
  } else {
    ASSERT_EQ(101, deleted_values_[0]);
  }
}

TEST_P(CacheTest, InsertSameKey) {
  if (GetParam() == kHyperClock) {
    ROCKSDB_GTEST_BYPASS(
        "ClockCache doesn't guarantee that Insert overwrites the same key.");
    return;
  }
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}

TEST_P(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, EntriesArePinned) {
  if (GetParam() == kHyperClock) {
    ROCKSDB_GTEST_BYPASS(
        "ClockCache doesn't guarantee that Insert overwrites the same key.");
    return;
  }
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}

TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);
  // Frequently used entry must be kept around.
  for (int i = 0; i < 2 * kCacheSize; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

TEST_P(CacheTest, ExternalRefPinsEntries) {
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the below insertions should push out the cache entry.
      cache_->Release(h);
    }
    // Double the cache size because the usage bit in block cache prevents 100
    // from being evicted in the first kCacheSize iterations.
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    // Clock cache is even more stateful and needs more churn to evict.
    if (GetParam() == kHyperClock) {
      for (int j = 0; j < kCacheSize; j++) {
        Insert(11000 + j, 11000 + j);
      }
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  ASSERT_EQ(-1, Lookup(100));
}

TEST_P(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert many more entries than the cache capacity.
  for (int i = 0; i < 100 * kCacheSize; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check whether the entries inserted in the beginning are evicted. The
  // ones without an extra ref are evicted; those with one are not.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Clean up all the handles.
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}

TEST_P(CacheTest, EvictEmptyCache) {
  auto type = GetParam();

  // Insert an item larger than the capacity to trigger eviction on an empty
  // cache.
  auto cache = NewCache(1, 0, false);
  if (type == kLRU) {
    ASSERT_OK(cache->Insert("foo", nullptr, 10, DumbDeleter));
  } else {
    ASSERT_OK(cache->Insert(EncodeKey(1000), nullptr, 10, DumbDeleter));
  }
}

TEST_P(CacheTest, EraseFromDeleter) {
  auto type = GetParam();

  // Use a deleter that erases another entry from the cache, re-entering the
  // cache from within the deleter.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  std::string foo, bar;
  Cache::DeleterFn erase_deleter;
  if (type == kLRU) {
    foo = "foo";
    bar = "bar";
    erase_deleter = EraseDeleter1;
  } else {
    foo = EncodeKey(1234);
    bar = EncodeKey(5678);
    erase_deleter = EraseDeleter2;
  }

  ASSERT_OK(cache->Insert(foo, nullptr, 1, DumbDeleter));
  ASSERT_OK(cache->Insert(bar, cache.get(), 1, erase_deleter));

  cache->Erase(bar);
  ASSERT_EQ(nullptr, cache->Lookup(foo));
  ASSERT_EQ(nullptr, cache->Lookup(bar));
}

TEST_P(CacheTest, ErasedHandleState) {
  // Insert a key and get two handles.
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // Delete the key from the cache.
  Erase(100);
  // The key can no longer be found in the cache.
  ASSERT_EQ(-1, Lookup(100));

  // Release one handle.
  cache_->Release(h1);
  // The key still can't be found in the cache.
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}

TEST_P(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
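
  // Allow the surviving weight to exceed the nominal capacity by up to 10%.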
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}

TEST_P(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}
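
// Simple heap-allocated value type for tests that exercise a real deleter.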
class Value {
 public:
  explicit Value(int v) : v_(v) {}

  int v_;
};

namespace {
void deleter(const Slice& /*key*/, void* value) {
  delete static_cast<Value*>(value);
}
}  // namespace

TEST_P(CacheTest, ReleaseAndErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
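  // Release with the second argument (erase-if-last-ref) set to true should
  // erase the entry, since this handle holds the only reference.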
  auto erased = cache->Release(handle, true);
  ASSERT_TRUE(erased);
  // This tests that the deleter has been called.
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, ReleaseWithoutErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle);
  ASSERT_FALSE(erased);
  // This tests that the deleter is not called. When the cache has free
  // capacity, it is not expected to erase released items immediately.
  ASSERT_EQ(0U, deleted_keys_.size());
}

TEST_P(CacheTest, SetCapacity) {
  auto type = GetParam();
  if (type == kHyperClock) {
    ROCKSDB_GTEST_BYPASS(
        "HyperClockCache doesn't support arbitrary capacity adjustments.");
    return;
  }
  // Test 1: increase capacity.
  // Create a cache with capacity 5, insert 5 entries, then increase the
  // capacity to 10. The returned capacity should be 10 and usage should
  // still be 5.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries, without releasing them.
  for (int i = 0; i < 5; i++) {
    std::string key = EncodeKey(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // Test 2: decrease capacity.
  // Insert 5 more entries into the cache, then release 5, then decrease the
  // capacity to 7. The final capacity should be 7 and usage should be 7.
  for (int i = 5; i < 10; i++) {
    std::string key = EncodeKey(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (int i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  cache->SetCapacity(7);
  ASSERT_EQ(7, cache->GetCapacity());
  ASSERT_EQ(7, cache->GetUsage());

  // Release the remaining 5 handles to keep valgrind happy.
  for (int i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // Make sure this doesn't crash or upset ASAN/valgrind.
  cache->DisownData();
}

TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
  // Test 1: set the flag to false. Insert more keys than capacity and check
  // that they all go through.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  Status s;
  for (int i = 0; i < 10; i++) {
    std::string key = EncodeKey(i + 1);
    s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  ASSERT_EQ(10, cache->GetUsage());

  // Test 2: set the flag to true. Insert and check that it fails.
  std::string extra_key = EncodeKey(100);
  Value* extra_value = new Value(0);
  cache->SetStrictCapacityLimit(true);
  Cache::Handle* handle;
  s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsMemoryLimit());
  ASSERT_EQ(nullptr, handle);
  ASSERT_EQ(10, cache->GetUsage());

  for (int i = 0; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // Test 3: initialize with the flag set to true.
  std::shared_ptr<Cache> cache2 = NewCache(5, 0, true);
  for (int i = 0; i < 5; i++) {
    std::string key = EncodeKey(i + 1);
    s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsMemoryLimit());
  ASSERT_EQ(nullptr, handle);
  // Test insert without a handle.
  s = cache2->Insert(extra_key, extra_value, 1, &deleter);
  // It is as if the key had been inserted into the cache and then evicted
  // immediately.
  ASSERT_OK(s);
  ASSERT_EQ(5, cache2->GetUsage());
  ASSERT_EQ(nullptr, cache2->Lookup(extra_key));

  for (int i = 0; i < 5; i++) {
    cache2->Release(handles[i]);
  }
}

TEST_P(CacheTest, OverCapacity) {
  size_t n = 10;

  // A cache with capacity n and a single shard.
  std::shared_ptr<Cache> cache = NewCache(n, 0, false);
  std::vector<Cache::Handle*> handles(n + 1);

  // Insert n+1 entries, without releasing them.
  for (int i = 0; i < static_cast<int>(n + 1); i++) {
    std::string key = EncodeKey(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  for (int i = 0; i < static_cast<int>(n + 1); i++) {
    std::string key = EncodeKey(i + 1);
    auto h = cache->Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // The cache is over capacity since nothing could be evicted.
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (int i = 0; i < static_cast<int>(n + 1); i++) {
    cache->Release(handles[i]);
  }

  if (GetParam() == kHyperClock) {
    // Make sure eviction is triggered.
    ASSERT_OK(cache->Insert(EncodeKey(-1), nullptr, 1, &deleter, &handles[0]));

    // The cache is under capacity now since elements were released.
    ASSERT_GE(n, cache->GetUsage());

    // Clean up.
    cache->Release(handles[0]);
  } else {
    // LRUCache checks for over-capacity in Release.
    // The cache is exactly at capacity now with minimal eviction.
    ASSERT_EQ(n, cache->GetUsage());

    // Element 0 was evicted and the rest are still there. This is consistent
    // with the LRU policy since element 0 was released first.
    for (int i = 0; i < static_cast<int>(n + 1); i++) {
      std::string key = EncodeKey(i + 1);
      auto h = cache->Lookup(key);
      if (h) {
        ASSERT_NE(static_cast<size_t>(i), 0U);
        cache->Release(h);
      } else {
        ASSERT_EQ(static_cast<size_t>(i), 0U);
      }
    }
  }
}

namespace {
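// Collects the (value, charge) pairs passed to the legacy
// ApplyToAllCacheEntries callback, for verification against what was
// inserted.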
std::vector<std::pair<int, int>> legacy_callback_state;
void legacy_callback(void* value, size_t charge) {
  legacy_callback_state.push_back(
      {DecodeValue(value), static_cast<int>(charge)});
}
}  // namespace

TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
  std::vector<std::pair<int, int>> inserted;
  legacy_callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(legacy_callback, true);

  std::sort(inserted.begin(), inserted.end());
  std::sort(legacy_callback_state.begin(), legacy_callback_state.end());
  ASSERT_EQ(inserted.size(), legacy_callback_state.size());
  for (int i = 0; i < static_cast<int>(inserted.size()); ++i) {
    EXPECT_EQ(inserted[i], legacy_callback_state[i]);
  }
}

TEST_P(CacheTest, ApplyToAllEntriesTest) {
  std::vector<std::string> callback_state;
  const auto callback = [&](const Slice& key, void* value, size_t charge,
                            Cache::DeleterFn deleter) {
    callback_state.push_back(std::to_string(DecodeKey(key)) + "," +
                             std::to_string(DecodeValue(value)) + "," +
                             std::to_string(charge));
    assert(deleter == &CacheTest::Deleter);
  };

  std::vector<std::string> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back(std::to_string(i) + "," + std::to_string(i * 2) + "," +
                       std::to_string(i + 1));
  }
  cache_->ApplyToAllEntries(callback, /*opts*/ {});

  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_EQ(inserted.size(), callback_state.size());
  for (int i = 0; i < static_cast<int>(inserted.size()); ++i) {
    EXPECT_EQ(inserted[i], callback_state[i]);
  }
}

TEST_P(CacheTest, ApplyToAllEntriesDuringResize) {
  // This is a mini-stress test of ApplyToAllEntries, to ensure
  // items in the cache that are neither added nor removed
  // during ApplyToAllEntries are counted exactly once.

  // Insert some entries that we expect to be seen exactly once
  // during iteration.
  constexpr int kSpecialCharge = 2;
  constexpr int kNotSpecialCharge = 1;
  constexpr int kSpecialCount = 100;
  size_t expected_usage = 0;
  for (int i = 0; i < kSpecialCount; ++i) {
    Insert(i, i * 2, kSpecialCharge);
    expected_usage += kSpecialCharge;
  }

  // For the callback.
  int special_count = 0;
  const auto callback = [&](const Slice&, void*, size_t charge,
                            Cache::DeleterFn) {
    if (charge == static_cast<size_t>(kSpecialCharge)) {
      ++special_count;
    }
  };

  // Start counting.
  std::thread apply_thread([&]() {
    // Use a small average_entries_per_lock to make the problem difficult.
    Cache::ApplyToAllEntriesOptions opts;
    opts.average_entries_per_lock = 2;
    cache_->ApplyToAllEntries(callback, opts);
  });

  // In parallel, add more entries, enough to cause a resize but not enough
  // to cause evictions. (Note: if any cache shard is over capacity, there
  // will be evictions.)
  for (int i = kSpecialCount * 1; i < kSpecialCount * 5; ++i) {
    Insert(i, i * 2, kNotSpecialCharge);
    expected_usage += kNotSpecialCharge;
  }

  apply_thread.join();
  // Verify no evictions.
  ASSERT_EQ(cache_->GetUsage(), expected_usage);
  // Verify that everything was seen in ApplyToAllEntries.
  ASSERT_EQ(special_count, kSpecialCount);
}

TEST_P(CacheTest, DefaultShardBits) {
  // Prevent excessive allocation (to save time & space).
  estimated_value_size_ = 100000;
  // Implementations use different minimum shard sizes.
  size_t min_shard_size =
      (GetParam() == kHyperClock ? 32U * 1024U : 512U) * 1024U;
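
  // With a capacity of 32 * min_shard_size, expect 2^5 shards, each of the
  // minimum shard size.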
  std::shared_ptr<Cache> cache = NewCache(32U * min_shard_size);
  ShardedCacheBase* sc = dynamic_cast<ShardedCacheBase*>(cache.get());
  ASSERT_EQ(5, sc->GetNumShardBits());

  cache = NewCache(min_shard_size / 1000U * 999U);
  sc = dynamic_cast<ShardedCacheBase*>(cache.get());
  ASSERT_EQ(0, sc->GetNumShardBits());

  cache = NewCache(3U * 1024U * 1024U * 1024U);
  sc = dynamic_cast<ShardedCacheBase*>(cache.get());
  // Current maximum of 6.
  ASSERT_EQ(6, sc->GetNumShardBits());

  if constexpr (sizeof(size_t) > 4) {
    cache = NewCache(128U * min_shard_size);
    sc = dynamic_cast<ShardedCacheBase*>(cache.get());
    // Current maximum of 6.
    ASSERT_EQ(6, sc->GetNumShardBits());
  }
}

TEST_P(CacheTest, GetChargeAndDeleter) {
  Insert(1, 2);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(1));
  ASSERT_EQ(2, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1, cache_->GetCharge(h1));
  ASSERT_EQ(&CacheTest::Deleter, cache_->GetDeleter(h1));
  cache_->Release(h1);
}

INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
                        testing::Values(kLRU, kHyperClock));
INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest, testing::Values(kLRU));

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}