mirror of https://github.com/facebook/rocksdb.git
Placeholder for AutoHyperClockCache, more (#11692)
Summary:

* The plan is for AutoHyperClockCache to be selected when
HyperClockCacheOptions::estimated_entry_charge == 0, and in that case to use a
new configuration option, min_avg_entry_charge, for determining an extreme-case
maximum size for the hash table. For the placeholder, a hack is in place in
HyperClockCacheOptions::MakeSharedCache() to make the unit tests happy despite
the new option not really making sense with the current implementation.
* Mostly updating and refactoring tests to test both the current HCC (internal
name FixedHyperClockCache) and a placeholder for the new version (internal
name AutoHyperClockCache).
* Simplify some existing tests not to depend directly on cache type.
* Type-parameterize the shard-level unit tests, which unfortunately requires
more syntax like `this->` in places for disambiguation.
* Added means of choosing auto_hyper_clock_cache in cache_bench, db_bench, and
db_stress, including adding it to the crash test.
* Add another templated class, BaseHyperClockCache, to reduce future
copy-paste.
* Added ReportProblems support to cache_bench.
* Added a DEBUG-level diagnostic to ReportProblems for the variance in load
factor throughout the table, which will become more of a concern with the
linear hashing to be used in the Auto implementation. Example with the current
Fixed HCC:

```
2023/08/10-13:41:41.602450 6ac36 [DEBUG] [che/clock_cache.cc:1507] Slot occupancy stats: Overall 49% (129008/262144), Min/Max/Window = 39%/60%/500, MaxRun{Pos/Neg} = 18/17
```

In other words, with overall occupancy of 49%, the lowest occupancy across any
window of 500 contiguous cells is 39% and the highest is 60%. The longest run
of occupied slots is 18 and the longest run of unoccupied slots is 17. This
seems consistent with random samples from a uniform distribution.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11692

Test Plan: Shouldn't be any meaningful changes yet to production code or to
what is tested, but there is temporary redundancy in testing until the new
implementation is plugged in.

Reviewed By: jowlyzhang

Differential Revision: D48247413

Pulled By: pdillinger

fbshipit-source-id: 11541f996d97af403c2e43c92fb67ff22dd0b5da
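For context, here is a minimal sketch (not part of the commit) of how the intended selection works from the public API once this lands. The capacity and charge values are arbitrary placeholders; the option and method names are the real ones shown in this diff:

```
#include <memory>

#include "rocksdb/cache.h"

using ROCKSDB_NAMESPACE::Cache;
using ROCKSDB_NAMESPACE::HyperClockCacheOptions;

int main() {
  // A nonzero estimated_entry_charge selects the existing implementation
  // (internal name FixedHyperClockCache).
  HyperClockCacheOptions fixed_opts(/*capacity=*/1 << 30,
                                    /*estimated_entry_charge=*/8 * 1024);
  std::shared_ptr<Cache> fixed = fixed_opts.MakeSharedCache();

  // estimated_entry_charge == 0 is planned to select AutoHyperClockCache;
  // min_avg_entry_charge (default 450 in this commit, not yet used for real)
  // bounds the extreme-case hash table size.
  HyperClockCacheOptions auto_opts(/*capacity=*/1 << 30,
                                   /*estimated_entry_charge=*/0);
  auto_opts.min_avg_entry_charge = 450;
  std::shared_ptr<Cache> auto_cache = auto_opts.MakeSharedCache();

  // Cache::Name() distinguishes the variants:
  //   fixed->Name()      => "FixedHyperClockCache"
  //   auto_cache->Name() => "AutoHyperClockCache"
  return 0;
}
```

The same choice is exposed in the tools via -cache_type=fixed_hyper_clock_cache or -cache_type=auto_hyper_clock_cache, as the cache_bench, db_bench, and db_stress hunks below show.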
This commit is contained in:
parent 38ecfabed2
commit ef6f025563
cache/cache_bench_tool.cc

@@ -31,6 +31,7 @@
 #include "util/hash.h"
 #include "util/mutexlock.h"
 #include "util/random.h"
+#include "util/stderr_logger.h"
 #include "util/stop_watch.h"
 #include "util/string_util.h"
 
@@ -49,6 +50,9 @@ DEFINE_double(resident_ratio, 0.25,
               "Ratio of keys fitting in cache to keyspace.");
 DEFINE_uint64(ops_per_thread, 2000000U, "Number of operations per thread.");
 DEFINE_uint32(value_bytes, 8 * KiB, "Size of each value added.");
+DEFINE_uint32(value_bytes_estimate, 0,
+              "If > 0, overrides estimated_entry_charge or "
+              "min_avg_entry_charge depending on cache_type.");
 
 DEFINE_uint32(skew, 5, "Degree of skew in key selection. 0 = no skew");
 DEFINE_bool(populate_cache, true, "Populate cache before operations");
 
@@ -83,6 +87,8 @@ DEFINE_bool(early_exit, false,
 DEFINE_bool(histograms, true,
             "Whether to track and print histogram statistics.");
 
+DEFINE_bool(report_problems, true, "Whether to ReportProblems() at the end.");
+
 DEFINE_uint32(seed, 0, "Hashing/random seed to use. 0 = choose at random");
 
 DEFINE_string(secondary_cache_uri, "",
 
@@ -299,11 +305,23 @@ class CacheBench {
    if (FLAGS_cache_type == "clock_cache") {
      fprintf(stderr, "Old clock cache implementation has been removed.\n");
      exit(1);
-    } else if (FLAGS_cache_type == "hyper_clock_cache" ||
-               FLAGS_cache_type == "fixed_hyper_clock_cache") {
-      HyperClockCacheOptions opts(FLAGS_cache_size, FLAGS_value_bytes,
-                                  FLAGS_num_shard_bits);
+    } else if (EndsWith(FLAGS_cache_type, "hyper_clock_cache")) {
+      HyperClockCacheOptions opts(
+          FLAGS_cache_size, /*estimated_entry_charge=*/0, FLAGS_num_shard_bits);
      opts.hash_seed = BitwiseAnd(FLAGS_seed, INT32_MAX);
+      if (FLAGS_cache_type == "fixed_hyper_clock_cache" ||
+          FLAGS_cache_type == "hyper_clock_cache") {
+        opts.estimated_entry_charge = FLAGS_value_bytes_estimate > 0
+                                          ? FLAGS_value_bytes_estimate
+                                          : FLAGS_value_bytes;
+      } else if (FLAGS_cache_type == "auto_hyper_clock_cache") {
+        if (FLAGS_value_bytes_estimate > 0) {
+          opts.min_avg_entry_charge = FLAGS_value_bytes_estimate;
+        }
+      } else {
+        fprintf(stderr, "Cache type not supported.");
+        exit(1);
+      }
      cache_ = opts.MakeSharedCache();
    } else if (FLAGS_cache_type == "lru_cache") {
      LRUCacheOptions opts(FLAGS_cache_size, FLAGS_num_shard_bits,
 
@@ -454,7 +472,14 @@ class CacheBench {
        printf("%s", stats_hist.ToString().c_str());
      }
    }
-    printf("\n%s", stats_report.c_str());
+
+    if (FLAGS_report_problems) {
+      printf("\n");
+      std::shared_ptr<Logger> logger =
+          std::make_shared<StderrLogger>(InfoLogLevel::DEBUG_LEVEL);
+      cache_->ReportProblems(logger);
+    }
+    printf("%s", stats_report.c_str());
 
    return true;
  }
 
@@ -499,7 +524,7 @@ class CacheBench {
    for (;;) {
      if (shared->AllDone()) {
        std::ostringstream ostr;
-        ostr << "Most recent cache entry stats:\n"
+        ostr << "\nMost recent cache entry stats:\n"
             << "Number of entries: " << total_entry_count << "\n"
             << "Table occupancy: " << table_occupancy << " / "
             << table_size << " = "
cache/cache_test.cc

@@ -70,18 +70,11 @@ const Cache::CacheItemHelper kDumbHelper{
    CacheEntryRole::kMisc,
    [](Cache::ObjectPtr /*value*/, MemoryAllocator* /*alloc*/) {}};
 
-const Cache::CacheItemHelper kEraseOnDeleteHelper1{
+const Cache::CacheItemHelper kInvokeOnDeleteHelper{
    CacheEntryRole::kMisc,
    [](Cache::ObjectPtr value, MemoryAllocator* /*alloc*/) {
-      Cache* cache = static_cast<Cache*>(value);
-      cache->Erase("foo");
-    }};
-
-const Cache::CacheItemHelper kEraseOnDeleteHelper2{
-    CacheEntryRole::kMisc,
-    [](Cache::ObjectPtr value, MemoryAllocator* /*alloc*/) {
-      Cache* cache = static_cast<Cache*>(value);
-      cache->Erase(EncodeKey16Bytes(1234));
+      auto& fn = *static_cast<std::function<void()>*>(value);
+      fn();
    }};
 }  // anonymous namespace
 
@@ -180,8 +173,6 @@ std::string CacheTest::type_;
 class LRUCacheTest : public CacheTest {};
 
 TEST_P(CacheTest, UsageTest) {
-  auto type = GetParam();
-
  // cache is std::shared_ptr and will be automatically cleaned up.
  const size_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
 
@@ -196,12 +187,7 @@ TEST_P(CacheTest, UsageTest) {
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
-    std::string key;
-    if (type == kLRU) {
-      key = std::string(i, 'a');
-    } else {
-      key = EncodeKey(i);
-    }
+    std::string key = EncodeKey(i);
    auto kv_size = key.size() + 5;
    ASSERT_OK(cache->Insert(key, value, &kDumbHelper, kv_size));
    ASSERT_OK(precise_cache->Insert(key, value, &kDumbHelper, kv_size));
 
@@ -221,12 +207,7 @@ TEST_P(CacheTest, UsageTest) {
 
  // make sure the cache will be overloaded
  for (size_t i = 1; i < kCapacity; ++i) {
-    std::string key;
-    if (type == kLRU) {
-      key = std::to_string(i);
-    } else {
-      key = EncodeKey(static_cast<int>(1000 + i));
-    }
+    std::string key = EncodeKey(static_cast<int>(1000 + i));
    ASSERT_OK(cache->Insert(key, value, &kDumbHelper, key.size() + 5));
    ASSERT_OK(precise_cache->Insert(key, value, &kDumbHelper, key.size() + 5));
  }
 
@@ -246,16 +227,14 @@ TEST_P(CacheTest, UsageTest) {
  }
 }
 
-// TODO: This test takes longer than expected on ClockCache. This is
-// because the values size estimate at construction is too sloppy.
+// TODO: This test takes longer than expected on FixedHyperClockCache.
+// This is because the values size estimate at construction is too sloppy.
 // Fix this.
 // Why is it so slow? The cache is constructed with an estimate of 1, but
 // then the charge is claimed to be 21. This will cause the hash table
 // to be extremely sparse, which in turn means clock needs to scan too
 // many slots to find victims.
 TEST_P(CacheTest, PinnedUsageTest) {
-  auto type = GetParam();
-
  // cache is std::shared_ptr and will be automatically cleaned up.
  const size_t kCapacity = 200000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
 
@@ -274,12 +253,7 @@ TEST_P(CacheTest, PinnedUsageTest) {
  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
-    std::string key;
-    if (type == kLRU) {
-      key = std::string(i, 'a');
-    } else {
-      key = EncodeKey(i);
-    }
+    std::string key = EncodeKey(i);
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    Cache::Handle* handle_in_precise_cache;
 
@@ -320,12 +294,7 @@ TEST_P(CacheTest, PinnedUsageTest) {
 
  // check that overloading the cache does not change the pinned usage
  for (size_t i = 1; i < 2 * kCapacity; ++i) {
-    std::string key;
-    if (type == kLRU) {
-      key = std::to_string(i);
-    } else {
-      key = EncodeKey(static_cast<int>(1000 + i));
-    }
+    std::string key = EncodeKey(static_cast<int>(1000 + i));
    ASSERT_OK(cache->Insert(key, value, &kDumbHelper, key.size() + 5));
    ASSERT_OK(precise_cache->Insert(key, value, &kDumbHelper, key.size() + 5));
  }
 
@@ -515,20 +484,20 @@ TEST_P(CacheTest, EvictionPolicyRef) {
  // Check whether the entries inserted in the beginning
  // are evicted. Ones without extra ref are evicted and
  // those with are not.
-  ASSERT_EQ(-1, Lookup(100));
-  ASSERT_EQ(-1, Lookup(101));
-  ASSERT_EQ(-1, Lookup(102));
-  ASSERT_EQ(-1, Lookup(103));
+  EXPECT_EQ(-1, Lookup(100));
+  EXPECT_EQ(-1, Lookup(101));
+  EXPECT_EQ(-1, Lookup(102));
+  EXPECT_EQ(-1, Lookup(103));
 
-  ASSERT_EQ(-1, Lookup(300));
-  ASSERT_EQ(-1, Lookup(301));
-  ASSERT_EQ(-1, Lookup(302));
-  ASSERT_EQ(-1, Lookup(303));
+  EXPECT_EQ(-1, Lookup(300));
+  EXPECT_EQ(-1, Lookup(301));
+  EXPECT_EQ(-1, Lookup(302));
+  EXPECT_EQ(-1, Lookup(303));
 
-  ASSERT_EQ(101, Lookup(200));
-  ASSERT_EQ(102, Lookup(201));
-  ASSERT_EQ(103, Lookup(202));
-  ASSERT_EQ(104, Lookup(203));
+  EXPECT_EQ(101, Lookup(200));
+  EXPECT_EQ(102, Lookup(201));
+  EXPECT_EQ(103, Lookup(202));
+  EXPECT_EQ(104, Lookup(203));
 
  // Cleaning up all the handles
  cache_->Release(h201);
 
@@ -538,37 +507,22 @@ TEST_P(CacheTest, EvictionPolicyRef) {
 }
 
 TEST_P(CacheTest, EvictEmptyCache) {
-  auto type = GetParam();
-
  // Insert item large than capacity to trigger eviction on empty cache.
  auto cache = NewCache(1, 0, false);
-  if (type == kLRU) {
-    ASSERT_OK(cache->Insert("foo", nullptr, &kDumbHelper, 10));
-  } else {
-    ASSERT_OK(cache->Insert(EncodeKey(1000), nullptr, &kDumbHelper, 10));
-  }
+  ASSERT_OK(cache->Insert(EncodeKey(1000), nullptr, &kDumbHelper, 10));
 }
 
 TEST_P(CacheTest, EraseFromDeleter) {
-  auto type = GetParam();
-
  // Have deleter which will erase item from cache, which will re-enter
  // the cache at that point.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
-  std::string foo, bar;
-  const Cache::CacheItemHelper* erase_helper;
-  if (type == kLRU) {
-    foo = "foo";
-    bar = "bar";
-    erase_helper = &kEraseOnDeleteHelper1;
-  } else {
-    foo = EncodeKey(1234);
-    bar = EncodeKey(5678);
-    erase_helper = &kEraseOnDeleteHelper2;
-  }
+  std::string foo = EncodeKey(1234);
+  std::string bar = EncodeKey(5678);
+
+  std::function<void()> erase_fn = [&]() { cache->Erase(foo); };
 
  ASSERT_OK(cache->Insert(foo, nullptr, &kDumbHelper, 1));
-  ASSERT_OK(cache->Insert(bar, cache.get(), erase_helper, 1));
+  ASSERT_OK(cache->Insert(bar, &erase_fn, &kInvokeOnDeleteHelper, 1));
 
  cache->Erase(bar);
  ASSERT_EQ(nullptr, cache->Lookup(foo));
 
@@ -676,10 +630,10 @@ using TypedHandle = SharedCache::TypedHandle;
 }  // namespace
 
 TEST_P(CacheTest, SetCapacity) {
  auto type = GetParam();
  if (IsHyperClock()) {
    // TODO: update test & code for limited supoort
    ROCKSDB_GTEST_BYPASS(
-        "FastLRUCache and HyperClockCache don't support arbitrary capacity "
+        "HyperClockCache doesn't support arbitrary capacity "
        "adjustments.");
    return;
  }
cache/clock_cache.cc

@@ -9,9 +9,18 @@
 
 #include "cache/clock_cache.h"
 
+#include <algorithm>
+#include <atomic>
+#include <bitset>
 #include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <exception>
 #include <functional>
+#include <numeric>
+#include <string>
 #include <thread>
+#include <type_traits>
 
 #include "cache/cache_key.h"
 #include "cache/secondary_cache_adapter.h"
 
@@ -92,8 +101,6 @@ inline bool ClockUpdate(ClockHandle& h) {
      (meta >> ClockHandle::kAcquireCounterShift) & ClockHandle::kCounterMask;
  uint64_t release_count =
      (meta >> ClockHandle::kReleaseCounterShift) & ClockHandle::kCounterMask;
-  // fprintf(stderr, "ClockUpdate @ %p: %lu %lu %u\n", &h, acquire_count,
-  //         release_count, (unsigned)(meta >> ClockHandle::kStateShift));
  if (acquire_count != release_count) {
    // Only clock update entries with no outstanding refs
    return false;
 
@@ -1361,35 +1368,39 @@ size_t ClockCacheShard<Table>::GetTableAddressCount() const {
 
 // Explicit instantiation
 template class ClockCacheShard<FixedHyperClockTable>;
+template class ClockCacheShard<AutoHyperClockTable>;
 
-FixedHyperClockCache::FixedHyperClockCache(const HyperClockCacheOptions& opts)
-    : ShardedCache(opts) {
-  assert(opts.estimated_entry_charge > 0 ||
-         opts.metadata_charge_policy != kDontChargeCacheMetadata);
+template <class Table>
+BaseHyperClockCache<Table>::BaseHyperClockCache(
+    const HyperClockCacheOptions& opts)
+    : ShardedCache<ClockCacheShard<Table>>(opts) {
  // TODO: should not need to go through two levels of pointer indirection to
  // get to table entries
-  size_t per_shard = GetPerShardCapacity();
+  size_t per_shard = this->GetPerShardCapacity();
  MemoryAllocator* alloc = this->memory_allocator();
-  InitShards([&](Shard* cs) {
-    FixedHyperClockTable::Opts table_opts;
-    table_opts.estimated_value_size = opts.estimated_entry_charge;
+  this->InitShards([&](Shard* cs) {
+    typename Table::Opts table_opts{opts};
    new (cs) Shard(per_shard, opts.strict_capacity_limit,
-                   opts.metadata_charge_policy, alloc, &eviction_callback_,
-                   &hash_seed_, table_opts);
+                   opts.metadata_charge_policy, alloc,
+                   &this->eviction_callback_, &this->hash_seed_, table_opts);
  });
 }
 
-Cache::ObjectPtr FixedHyperClockCache::Value(Handle* handle) {
-  return reinterpret_cast<const HandleImpl*>(handle)->value;
+template <class Table>
+Cache::ObjectPtr BaseHyperClockCache<Table>::Value(Handle* handle) {
+  return reinterpret_cast<const typename Table::HandleImpl*>(handle)->value;
 }
 
-size_t FixedHyperClockCache::GetCharge(Handle* handle) const {
-  return reinterpret_cast<const HandleImpl*>(handle)->GetTotalCharge();
+template <class Table>
+size_t BaseHyperClockCache<Table>::GetCharge(Handle* handle) const {
+  return reinterpret_cast<const typename Table::HandleImpl*>(handle)
+      ->GetTotalCharge();
 }
 
-const Cache::CacheItemHelper* FixedHyperClockCache::GetCacheItemHelper(
+template <class Table>
+const Cache::CacheItemHelper* BaseHyperClockCache<Table>::GetCacheItemHelper(
    Handle* handle) const {
-  auto h = reinterpret_cast<const HandleImpl*>(handle);
+  auto h = reinterpret_cast<const typename Table::HandleImpl*>(handle);
  return h->helper;
 }
 
@@ -1428,17 +1439,87 @@ void AddShardEvaluation(const FixedHyperClockCache::Shard& shard,
  min_recommendation = std::min(min_recommendation, recommendation);
 }
 
+bool IsSlotOccupied(const ClockHandle& h) {
+  return (h.meta.load(std::memory_order_relaxed) >> ClockHandle::kStateShift) !=
+         0;
+}
 }  // namespace
 
+// NOTE: GCC might warn about subobject linkage if this is in anon namespace
+template <size_t N = 500>
+class LoadVarianceStats {
+ public:
+  std::string Report() const {
+    return "Overall " + PercentStr(positive_count_, samples_) + " (" +
+           std::to_string(positive_count_) + "/" + std::to_string(samples_) +
+           "), Min/Max/Window = " + PercentStr(min_, N) + "/" +
+           PercentStr(max_, N) + "/" + std::to_string(N) +
+           ", MaxRun{Pos/Neg} = " + std::to_string(max_pos_run_) + "/" +
+           std::to_string(max_neg_run_) + "\n";
+  }
+
+  void Add(bool positive) {
+    recent_[samples_ % N] = positive;
+    if (positive) {
+      ++positive_count_;
+      ++cur_pos_run_;
+      max_pos_run_ = std::max(max_pos_run_, cur_pos_run_);
+      cur_neg_run_ = 0;
+    } else {
+      ++cur_neg_run_;
+      max_neg_run_ = std::max(max_neg_run_, cur_neg_run_);
+      cur_pos_run_ = 0;
+    }
+    ++samples_;
+    if (samples_ >= N) {
+      size_t count_set = recent_.count();
+      max_ = std::max(max_, count_set);
+      min_ = std::min(min_, count_set);
+    }
+  }
+
+ private:
+  size_t max_ = 0;
+  size_t min_ = N;
+  size_t positive_count_ = 0;
+  size_t samples_ = 0;
+  size_t max_pos_run_ = 0;
+  size_t cur_pos_run_ = 0;
+  size_t max_neg_run_ = 0;
+  size_t cur_neg_run_ = 0;
+  std::bitset<N> recent_;
+
+  static std::string PercentStr(size_t a, size_t b) {
+    return std::to_string(uint64_t{100} * a / b) + "%";
+  }
+};
+
+template <class Table>
+void BaseHyperClockCache<Table>::ReportProblems(
+    const std::shared_ptr<Logger>& info_log) const {
+  if (info_log->GetInfoLogLevel() <= InfoLogLevel::DEBUG_LEVEL) {
+    LoadVarianceStats slot_stats;
+    this->ForEachShard([&](const BaseHyperClockCache<Table>::Shard* shard) {
+      size_t count = shard->GetTableAddressCount();
+      for (size_t i = 0; i < count; ++i) {
+        slot_stats.Add(IsSlotOccupied(*shard->GetTable().HandlePtr(i)));
+      }
+    });
+    ROCKS_LOG_AT_LEVEL(info_log, InfoLogLevel::DEBUG_LEVEL,
+                       "Slot occupancy stats: %s", slot_stats.Report().c_str());
+  }
+}
+
 void FixedHyperClockCache::ReportProblems(
    const std::shared_ptr<Logger>& info_log) const {
+  BaseHyperClockCache::ReportProblems(info_log);
+
  uint32_t shard_count = GetNumShards();
  std::vector<double> predicted_load_factors;
  size_t min_recommendation = SIZE_MAX;
-  const_cast<FixedHyperClockCache*>(this)->ForEachShard(
-      [&](FixedHyperClockCache::Shard* shard) {
-        AddShardEvaluation(*shard, predicted_load_factors, min_recommendation);
-      });
+  ForEachShard([&](const FixedHyperClockCache::Shard* shard) {
+    AddShardEvaluation(*shard, predicted_load_factors, min_recommendation);
+  });
 
  if (predicted_load_factors.empty()) {
    // None operating "at limit" -> nothing to report
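As a side note on the diagnostic added above: LoadVarianceStats maintains a sliding window of the last N occupancy samples and tracks the window's minimum/maximum popcount plus the longest runs of occupied and unoccupied slots. The following self-contained sketch (a window of 8 and a made-up sample pattern, purely for illustration) restates the same bookkeeping outside the cache:

```
#include <algorithm>
#include <bitset>
#include <cstddef>
#include <cstdio>

// Simplified mirror of LoadVarianceStats; N = 8 here for demonstration.
template <size_t N>
struct WindowStats {
  std::bitset<N> recent;  // last N samples, overwritten circularly
  size_t samples = 0, positives = 0;
  size_t min_window = N, max_window = 0;
  size_t cur_run[2] = {0, 0}, max_run[2] = {0, 0};  // [0]=neg, [1]=pos

  void Add(bool occupied) {
    recent[samples % N] = occupied;
    positives += occupied;
    cur_run[occupied]++;
    max_run[occupied] = std::max(max_run[occupied], cur_run[occupied]);
    cur_run[!occupied] = 0;
    ++samples;
    if (samples >= N) {
      // Popcount of the window = occupied slots among the last N samples
      size_t set = recent.count();
      max_window = std::max(max_window, set);
      min_window = std::min(min_window, set);
    }
  }
};

int main() {
  WindowStats<8> stats;
  // Example slot occupancy pattern: 1 = occupied, 0 = empty
  for (bool b : {true, true, false, true, false, false, true, true,
                 true, false, true, false}) {
    stats.Add(b);
  }
  std::printf("Overall %zu/%zu, Min/Max/Window = %zu/%zu/8, "
              "MaxRun{Pos/Neg} = %zu/%zu\n",
              stats.positives, stats.samples, stats.min_window,
              stats.max_window, stats.max_run[1], stats.max_run[0]);
  return 0;
}
```

With uniformly random occupancy, the window min and max stay close to the overall rate, as in the example log line in the summary; long runs or a wide min/max spread would indicate clustering.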
@@ -1549,8 +1630,17 @@ std::shared_ptr<Cache> HyperClockCacheOptions::MakeSharedCache() const {
    opts.num_shard_bits =
        GetDefaultCacheShardBits(opts.capacity, min_shard_size);
  }
-  std::shared_ptr<Cache> cache =
-      std::make_shared<clock_cache::FixedHyperClockCache>(opts);
+  std::shared_ptr<Cache> cache;
+  if (opts.estimated_entry_charge == 0) {
+    // BEGIN placeholder logic to be removed
+    // This is sufficient to get the placeholder Auto working in unit tests
+    // much like the Fixed version.
+    opts.estimated_entry_charge = opts.min_avg_entry_charge;
+    // END placeholder logic to be removed
+    cache = std::make_shared<clock_cache::AutoHyperClockCache>(opts);
+  } else {
+    cache = std::make_shared<clock_cache::FixedHyperClockCache>(opts);
+  }
  if (opts.secondary_cache) {
    cache = std::make_shared<CacheWithSecondaryAdapter>(cache,
                                                        opts.secondary_cache);
cache/clock_cache.h

@@ -31,6 +31,7 @@ namespace ROCKSDB_NAMESPACE {
 namespace clock_cache {
 
 // Forward declaration of friend class.
+template <class ClockCache>
 class ClockCacheTest;
 
 // HyperClockCache is an alternative to LRUCache specifically tailored for
 
@@ -488,6 +489,12 @@ class FixedHyperClockTable : public BaseClockTable {
  };  // struct HandleImpl
 
  struct Opts {
+    explicit Opts(size_t _estimated_value_size)
+        : estimated_value_size(_estimated_value_size) {}
+    explicit Opts(const HyperClockCacheOptions& opts) {
+      assert(opts.estimated_entry_charge > 0);
+      estimated_value_size = opts.estimated_entry_charge;
+    }
    size_t estimated_value_size;
  };
 
@@ -530,7 +537,7 @@ class FixedHyperClockTable : public BaseClockTable {
  const HandleImpl* HandlePtr(size_t idx) const { return &array_[idx]; }
 
 #ifndef NDEBUG
-  size_t& TEST_MutableOccupancyLimit() const {
+  size_t& TEST_MutableOccupancyLimit() {
    return const_cast<size_t&>(occupancy_limit_);
  }
 
@@ -614,10 +621,18 @@ class FixedHyperClockTable : public BaseClockTable {
  const std::unique_ptr<HandleImpl[]> array_;
 };  // class FixedHyperClockTable
 
+// Placeholder for future automatic table variant
+// For now, just use FixedHyperClockTable.
+class AutoHyperClockTable : public FixedHyperClockTable {
+ public:
+  using FixedHyperClockTable::FixedHyperClockTable;
+};  // class AutoHyperClockTable
+
 // A single shard of sharded cache.
-template <class Table>
+template <class TableT>
 class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
 public:
+  using Table = TableT;
  ClockCacheShard(size_t capacity, bool strict_capacity_limit,
                  CacheMetadataChargePolicy metadata_charge_policy,
                  MemoryAllocator* allocator,
 
@@ -710,8 +725,11 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
    return Lookup(key, hashed_key);
  }
 
+  Table& GetTable() { return table_; }
+  const Table& GetTable() const { return table_; }
+
 #ifndef NDEBUG
-  size_t& TEST_MutableOccupancyLimit() const {
+  size_t& TEST_MutableOccupancyLimit() {
    return table_.TEST_MutableOccupancyLimit();
  }
  // Acquire/release N references
 
@@ -729,17 +747,14 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
  std::atomic<bool> strict_capacity_limit_;
 };  // class ClockCacheShard
 
-class FixedHyperClockCache
-#ifdef NDEBUG
-    final
-#endif
-    : public ShardedCache<ClockCacheShard<FixedHyperClockTable>> {
+template <class Table>
+class BaseHyperClockCache : public ShardedCache<ClockCacheShard<Table>> {
 public:
-  using Shard = ClockCacheShard<FixedHyperClockTable>;
+  using Shard = ClockCacheShard<Table>;
  using Handle = Cache::Handle;
  using CacheItemHelper = Cache::CacheItemHelper;
 
-  explicit FixedHyperClockCache(const HyperClockCacheOptions& opts);
-
-  const char* Name() const override { return "FixedHyperClockCache"; }
+  explicit BaseHyperClockCache(const HyperClockCacheOptions& opts);
 
  Cache::ObjectPtr Value(Handle* handle) override;
 
@@ -747,10 +762,36 @@ class FixedHyperClockCache
 
  const CacheItemHelper* GetCacheItemHelper(Handle* handle) const override;
 
  void ReportProblems(
      const std::shared_ptr<Logger>& /*info_log*/) const override;
 };
+
+class FixedHyperClockCache
+#ifdef NDEBUG
+    final
+#endif
+    : public BaseHyperClockCache<FixedHyperClockTable> {
+ public:
+  using BaseHyperClockCache::BaseHyperClockCache;
+
+  const char* Name() const override { return "FixedHyperClockCache"; }
+
+  void ReportProblems(
+      const std::shared_ptr<Logger>& /*info_log*/) const override;
+};  // class FixedHyperClockCache
+
+// Placeholder for future automatic HCC variant
+class AutoHyperClockCache
+#ifdef NDEBUG
+    final
+#endif
+    : public BaseHyperClockCache<AutoHyperClockTable> {
+ public:
+  using BaseHyperClockCache::BaseHyperClockCache;
+
+  const char* Name() const override { return "AutoHyperClockCache"; }
+};  // class AutoHyperClockCache
 
 }  // namespace clock_cache
 
 }  // namespace ROCKSDB_NAMESPACE
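One note on the test refactoring that follows: the `this->` churn in the typed tests is forced by C++ two-phase name lookup, since members of a base class that depends on a template parameter are not visible to unqualified lookup. A tiny, hypothetical gtest sketch (none of these names are from RocksDB) illustrates the rule:

```
#include <gtest/gtest.h>

template <class CacheType>
class ExampleShardTest : public testing::Test {
 public:
  void NewShard(int capacity) { capacity_ = capacity; }
  int capacity_ = 0;
};

using ExampleTypes = ::testing::Types<int, long>;  // stand-ins for cache types
TYPED_TEST_CASE(ExampleShardTest, ExampleTypes);

TYPED_TEST(ExampleShardTest, Example) {
  // NewShard(3);      // would not compile: base depends on TypeParam
  this->NewShard(3);   // OK: `this->` defers lookup to instantiation time
  EXPECT_EQ(this->capacity_, 3);
}
```

TYPED_TEST_CASE is the older spelling used by this diff; newer googletest calls the same macro TYPED_TEST_SUITE.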
cache/lru_cache_test.cc

@@ -371,11 +371,12 @@ TEST_F(LRUCacheTest, EntriesWithPriority) {
 
 namespace clock_cache {
 
+template <class ClockCache>
 class ClockCacheTest : public testing::Test {
 public:
-  using Shard = FixedHyperClockCache::Shard;
-  using Table = FixedHyperClockTable;
-  using HandleImpl = Shard::HandleImpl;
+  using Shard = typename ClockCache::Shard;
+  using Table = typename Shard::Table;
+  using TableOpts = typename Table::Opts;
 
  ClockCacheTest() {}
  ~ClockCacheTest() override { DeleteShard(); }
 
@@ -393,8 +394,7 @@ class ClockCacheTest : public testing::Test {
    shard_ =
        reinterpret_cast<Shard*>(port::cacheline_aligned_alloc(sizeof(Shard)));
 
-    Table::Opts opts;
-    opts.estimated_value_size = 1;
+    TableOpts opts{1 /*value_size*/};
    new (shard_)
        Shard(capacity, strict_capacity_limit, kDontChargeCacheMetadata,
              /*allocator*/ nullptr, &eviction_callback_, &hash_seed_, opts);
 
@@ -458,43 +458,53 @@ class ClockCacheTest : public testing::Test {
  uint32_t hash_seed_ = 0;
 };
 
-TEST_F(ClockCacheTest, Misc) {
-  NewShard(3);
+using ClockCacheTypes =
+    ::testing::Types<AutoHyperClockCache, FixedHyperClockCache>;
+TYPED_TEST_CASE(ClockCacheTest, ClockCacheTypes);
+
+TYPED_TEST(ClockCacheTest, Misc) {
+  this->NewShard(3);
+  // NOTE: templated base class prevents simple naming of inherited members,
+  // so lots of `this->`
+  auto& shard = *this->shard_;
 
  // Key size stuff
-  EXPECT_OK(InsertWithLen('a', 16));
-  EXPECT_NOK(InsertWithLen('b', 15));
-  EXPECT_OK(InsertWithLen('b', 16));
-  EXPECT_NOK(InsertWithLen('c', 17));
-  EXPECT_NOK(InsertWithLen('d', 1000));
-  EXPECT_NOK(InsertWithLen('e', 11));
-  EXPECT_NOK(InsertWithLen('f', 0));
+  EXPECT_OK(this->InsertWithLen('a', 16));
+  EXPECT_NOK(this->InsertWithLen('b', 15));
+  EXPECT_OK(this->InsertWithLen('b', 16));
+  EXPECT_NOK(this->InsertWithLen('c', 17));
+  EXPECT_NOK(this->InsertWithLen('d', 1000));
+  EXPECT_NOK(this->InsertWithLen('e', 11));
+  EXPECT_NOK(this->InsertWithLen('f', 0));
 
  // Some of this is motivated by code coverage
  std::string wrong_size_key(15, 'x');
-  EXPECT_FALSE(Lookup(wrong_size_key, TestHashedKey('x')));
-  EXPECT_FALSE(shard_->Ref(nullptr));
-  EXPECT_FALSE(shard_->Release(nullptr));
-  shard_->Erase(wrong_size_key, TestHashedKey('x'));  // no-op
+  EXPECT_FALSE(this->Lookup(wrong_size_key, this->TestHashedKey('x')));
+  EXPECT_FALSE(shard.Ref(nullptr));
+  EXPECT_FALSE(shard.Release(nullptr));
+  shard.Erase(wrong_size_key, this->TestHashedKey('x'));  // no-op
 }
 
-TEST_F(ClockCacheTest, Limits) {
-  constexpr size_t kCapacity = 3;
-  NewShard(kCapacity, false /*strict_capacity_limit*/);
+TYPED_TEST(ClockCacheTest, Limits) {
+  constexpr size_t kCapacity = 64;
+  this->NewShard(kCapacity, false /*strict_capacity_limit*/);
+  auto& shard = *this->shard_;
+  using HandleImpl = typename ClockCacheTest<TypeParam>::Shard::HandleImpl;
+
  for (bool strict_capacity_limit : {false, true, false}) {
    SCOPED_TRACE("strict_capacity_limit = " +
                 std::to_string(strict_capacity_limit));
 
    // Also tests switching between strict limit and not
-    shard_->SetStrictCapacityLimit(strict_capacity_limit);
+    shard.SetStrictCapacityLimit(strict_capacity_limit);
 
-    UniqueId64x2 hkey = TestHashedKey('x');
+    UniqueId64x2 hkey = this->TestHashedKey('x');
 
    // Single entry charge beyond capacity
    {
-      Status s = shard_->Insert(TestKey(hkey), hkey, nullptr /*value*/,
-                                &kNoopCacheItemHelper, 5 /*charge*/,
-                                nullptr /*handle*/, Cache::Priority::LOW);
+      Status s = shard.Insert(this->TestKey(hkey), hkey, nullptr /*value*/,
+                              &kNoopCacheItemHelper, kCapacity + 2 /*charge*/,
+                              nullptr /*handle*/, Cache::Priority::LOW);
      if (strict_capacity_limit) {
        EXPECT_TRUE(s.IsMemoryLimit());
      } else {
 
@@ -505,11 +515,11 @@ TEST_F(ClockCacheTest, Limits) {
    // Single entry fills capacity
    {
      HandleImpl* h;
-      ASSERT_OK(shard_->Insert(TestKey(hkey), hkey, nullptr /*value*/,
-                               &kNoopCacheItemHelper, 3 /*charge*/, &h,
-                               Cache::Priority::LOW));
+      ASSERT_OK(shard.Insert(this->TestKey(hkey), hkey, nullptr /*value*/,
+                             &kNoopCacheItemHelper, kCapacity /*charge*/, &h,
+                             Cache::Priority::LOW));
      // Try to insert more
-      Status s = Insert('a');
+      Status s = this->Insert('a');
      if (strict_capacity_limit) {
        EXPECT_TRUE(s.IsMemoryLimit());
      } else {
 
@@ -517,22 +527,22 @@ TEST_F(ClockCacheTest, Limits) {
      }
      // Release entry filling capacity.
      // Cover useful = false case.
-      shard_->Release(h, false /*useful*/, false /*erase_if_last_ref*/);
+      shard.Release(h, false /*useful*/, false /*erase_if_last_ref*/);
    }
 
    // Insert more than table size can handle to exceed occupancy limit.
    // (Cleverly using mostly zero-charge entries, but some non-zero to
    // verify usage tracking on detached entries.)
    {
-      size_t n = shard_->GetTableAddressCount() + 1;
+      size_t n = shard.GetTableAddressCount() + 1;
      std::unique_ptr<HandleImpl* []> ha { new HandleImpl* [n] {} };
      Status s;
      for (size_t i = 0; i < n && s.ok(); ++i) {
        hkey[1] = i;
-        s = shard_->Insert(TestKey(hkey), hkey, nullptr /*value*/,
-                           &kNoopCacheItemHelper,
-                           (i + kCapacity < n) ? 0 : 1 /*charge*/, &ha[i],
-                           Cache::Priority::LOW);
+        s = shard.Insert(this->TestKey(hkey), hkey, nullptr /*value*/,
+                         &kNoopCacheItemHelper,
+                         (i + kCapacity < n) ? 0 : 1 /*charge*/, &ha[i],
+                         Cache::Priority::LOW);
        if (i == 0) {
          EXPECT_OK(s);
        }
 
@@ -543,7 +553,7 @@ TEST_F(ClockCacheTest, Limits) {
        EXPECT_OK(s);
      }
      // Same result if not keeping a reference
-      s = Insert('a');
+      s = this->Insert('a');
      if (strict_capacity_limit) {
        EXPECT_TRUE(s.IsMemoryLimit());
      } else {
 
@@ -551,122 +561,123 @@ TEST_F(ClockCacheTest, Limits) {
      }
 
      // Regardless, we didn't allow table to actually get full
-      EXPECT_LT(shard_->GetOccupancyCount(), shard_->GetTableAddressCount());
+      EXPECT_LT(shard.GetOccupancyCount(), shard.GetTableAddressCount());
 
      // Release handles
      for (size_t i = 0; i < n; ++i) {
        if (ha[i]) {
-          shard_->Release(ha[i]);
+          shard.Release(ha[i]);
        }
      }
    }
  }
 }
 
-TEST_F(ClockCacheTest, ClockEvictionTest) {
+TYPED_TEST(ClockCacheTest, ClockEvictionTest) {
  for (bool strict_capacity_limit : {false, true}) {
    SCOPED_TRACE("strict_capacity_limit = " +
                 std::to_string(strict_capacity_limit));
 
-    NewShard(6, strict_capacity_limit);
-    EXPECT_OK(Insert('a', Cache::Priority::BOTTOM));
-    EXPECT_OK(Insert('b', Cache::Priority::LOW));
-    EXPECT_OK(Insert('c', Cache::Priority::HIGH));
-    EXPECT_OK(Insert('d', Cache::Priority::BOTTOM));
-    EXPECT_OK(Insert('e', Cache::Priority::LOW));
-    EXPECT_OK(Insert('f', Cache::Priority::HIGH));
+    this->NewShard(6, strict_capacity_limit);
+    auto& shard = *this->shard_;
+    EXPECT_OK(this->Insert('a', Cache::Priority::BOTTOM));
+    EXPECT_OK(this->Insert('b', Cache::Priority::LOW));
+    EXPECT_OK(this->Insert('c', Cache::Priority::HIGH));
+    EXPECT_OK(this->Insert('d', Cache::Priority::BOTTOM));
+    EXPECT_OK(this->Insert('e', Cache::Priority::LOW));
+    EXPECT_OK(this->Insert('f', Cache::Priority::HIGH));
 
-    EXPECT_TRUE(Lookup('a', /*use*/ false));
-    EXPECT_TRUE(Lookup('b', /*use*/ false));
-    EXPECT_TRUE(Lookup('c', /*use*/ false));
-    EXPECT_TRUE(Lookup('d', /*use*/ false));
-    EXPECT_TRUE(Lookup('e', /*use*/ false));
-    EXPECT_TRUE(Lookup('f', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('a', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('b', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('c', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('d', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('e', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('f', /*use*/ false));
 
    // Ensure bottom are evicted first, even if new entries are low
-    EXPECT_OK(Insert('g', Cache::Priority::LOW));
-    EXPECT_OK(Insert('h', Cache::Priority::LOW));
+    EXPECT_OK(this->Insert('g', Cache::Priority::LOW));
+    EXPECT_OK(this->Insert('h', Cache::Priority::LOW));
 
-    EXPECT_FALSE(Lookup('a', /*use*/ false));
-    EXPECT_TRUE(Lookup('b', /*use*/ false));
-    EXPECT_TRUE(Lookup('c', /*use*/ false));
-    EXPECT_FALSE(Lookup('d', /*use*/ false));
-    EXPECT_TRUE(Lookup('e', /*use*/ false));
-    EXPECT_TRUE(Lookup('f', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('a', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('b', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('c', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('d', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('e', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('f', /*use*/ false));
    // Mark g & h useful
-    EXPECT_TRUE(Lookup('g', /*use*/ true));
-    EXPECT_TRUE(Lookup('h', /*use*/ true));
+    EXPECT_TRUE(this->Lookup('g', /*use*/ true));
+    EXPECT_TRUE(this->Lookup('h', /*use*/ true));
 
    // Then old LOW entries
-    EXPECT_OK(Insert('i', Cache::Priority::LOW));
-    EXPECT_OK(Insert('j', Cache::Priority::LOW));
+    EXPECT_OK(this->Insert('i', Cache::Priority::LOW));
+    EXPECT_OK(this->Insert('j', Cache::Priority::LOW));
 
-    EXPECT_FALSE(Lookup('b', /*use*/ false));
-    EXPECT_TRUE(Lookup('c', /*use*/ false));
-    EXPECT_FALSE(Lookup('e', /*use*/ false));
-    EXPECT_TRUE(Lookup('f', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('b', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('c', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('e', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('f', /*use*/ false));
    // Mark g & h useful once again
-    EXPECT_TRUE(Lookup('g', /*use*/ true));
-    EXPECT_TRUE(Lookup('h', /*use*/ true));
-    EXPECT_TRUE(Lookup('i', /*use*/ false));
-    EXPECT_TRUE(Lookup('j', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('g', /*use*/ true));
+    EXPECT_TRUE(this->Lookup('h', /*use*/ true));
+    EXPECT_TRUE(this->Lookup('i', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('j', /*use*/ false));
 
    // Then old HIGH entries
-    EXPECT_OK(Insert('k', Cache::Priority::LOW));
-    EXPECT_OK(Insert('l', Cache::Priority::LOW));
+    EXPECT_OK(this->Insert('k', Cache::Priority::LOW));
+    EXPECT_OK(this->Insert('l', Cache::Priority::LOW));
 
-    EXPECT_FALSE(Lookup('c', /*use*/ false));
-    EXPECT_FALSE(Lookup('f', /*use*/ false));
-    EXPECT_TRUE(Lookup('g', /*use*/ false));
-    EXPECT_TRUE(Lookup('h', /*use*/ false));
-    EXPECT_TRUE(Lookup('i', /*use*/ false));
-    EXPECT_TRUE(Lookup('j', /*use*/ false));
-    EXPECT_TRUE(Lookup('k', /*use*/ false));
-    EXPECT_TRUE(Lookup('l', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('c', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('f', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('g', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('h', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('i', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('j', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('k', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('l', /*use*/ false));
 
    // Then the (roughly) least recently useful
-    EXPECT_OK(Insert('m', Cache::Priority::HIGH));
-    EXPECT_OK(Insert('n', Cache::Priority::HIGH));
+    EXPECT_OK(this->Insert('m', Cache::Priority::HIGH));
+    EXPECT_OK(this->Insert('n', Cache::Priority::HIGH));
 
-    EXPECT_TRUE(Lookup('g', /*use*/ false));
-    EXPECT_TRUE(Lookup('h', /*use*/ false));
-    EXPECT_FALSE(Lookup('i', /*use*/ false));
-    EXPECT_FALSE(Lookup('j', /*use*/ false));
-    EXPECT_TRUE(Lookup('k', /*use*/ false));
-    EXPECT_TRUE(Lookup('l', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('g', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('h', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('i', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('j', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('k', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('l', /*use*/ false));
 
    // Now try changing capacity down
-    shard_->SetCapacity(4);
+    shard.SetCapacity(4);
    // Insert to ensure evictions happen
-    EXPECT_OK(Insert('o', Cache::Priority::LOW));
-    EXPECT_OK(Insert('p', Cache::Priority::LOW));
+    EXPECT_OK(this->Insert('o', Cache::Priority::LOW));
+    EXPECT_OK(this->Insert('p', Cache::Priority::LOW));
 
-    EXPECT_FALSE(Lookup('g', /*use*/ false));
-    EXPECT_FALSE(Lookup('h', /*use*/ false));
-    EXPECT_FALSE(Lookup('k', /*use*/ false));
-    EXPECT_FALSE(Lookup('l', /*use*/ false));
-    EXPECT_TRUE(Lookup('m', /*use*/ false));
-    EXPECT_TRUE(Lookup('n', /*use*/ false));
-    EXPECT_TRUE(Lookup('o', /*use*/ false));
-    EXPECT_TRUE(Lookup('p', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('g', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('h', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('k', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('l', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('m', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('n', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('o', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('p', /*use*/ false));
 
    // Now try changing capacity up
-    EXPECT_TRUE(Lookup('m', /*use*/ true));
-    EXPECT_TRUE(Lookup('n', /*use*/ true));
-    shard_->SetCapacity(6);
-    EXPECT_OK(Insert('q', Cache::Priority::HIGH));
-    EXPECT_OK(Insert('r', Cache::Priority::HIGH));
-    EXPECT_OK(Insert('s', Cache::Priority::HIGH));
-    EXPECT_OK(Insert('t', Cache::Priority::HIGH));
+    EXPECT_TRUE(this->Lookup('m', /*use*/ true));
+    EXPECT_TRUE(this->Lookup('n', /*use*/ true));
+    shard.SetCapacity(6);
+    EXPECT_OK(this->Insert('q', Cache::Priority::HIGH));
+    EXPECT_OK(this->Insert('r', Cache::Priority::HIGH));
+    EXPECT_OK(this->Insert('s', Cache::Priority::HIGH));
+    EXPECT_OK(this->Insert('t', Cache::Priority::HIGH));
 
-    EXPECT_FALSE(Lookup('o', /*use*/ false));
-    EXPECT_FALSE(Lookup('p', /*use*/ false));
-    EXPECT_TRUE(Lookup('m', /*use*/ false));
-    EXPECT_TRUE(Lookup('n', /*use*/ false));
-    EXPECT_TRUE(Lookup('q', /*use*/ false));
-    EXPECT_TRUE(Lookup('r', /*use*/ false));
-    EXPECT_TRUE(Lookup('s', /*use*/ false));
-    EXPECT_TRUE(Lookup('t', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('o', /*use*/ false));
+    EXPECT_FALSE(this->Lookup('p', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('m', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('n', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('q', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('r', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('s', /*use*/ false));
+    EXPECT_TRUE(this->Lookup('t', /*use*/ false));
  }
 }
 
@@ -682,66 +693,72 @@ const Cache::CacheItemHelper kDeleteCounterHelper{
 }  // namespace
 
 // Testing calls to CorrectNearOverflow in Release
-TEST_F(ClockCacheTest, ClockCounterOverflowTest) {
-  NewShard(6, /*strict_capacity_limit*/ false);
+TYPED_TEST(ClockCacheTest, ClockCounterOverflowTest) {
+  this->NewShard(6, /*strict_capacity_limit*/ false);
+  auto& shard = *this->shard_;
+  using HandleImpl = typename ClockCacheTest<TypeParam>::Shard::HandleImpl;
+
  HandleImpl* h;
  DeleteCounter val;
-  UniqueId64x2 hkey = TestHashedKey('x');
-  ASSERT_OK(shard_->Insert(TestKey(hkey), hkey, &val, &kDeleteCounterHelper, 1,
-                           &h, Cache::Priority::HIGH));
+  UniqueId64x2 hkey = this->TestHashedKey('x');
+  ASSERT_OK(shard.Insert(this->TestKey(hkey), hkey, &val, &kDeleteCounterHelper,
+                         1, &h, Cache::Priority::HIGH));
 
  // Some large number outstanding
-  shard_->TEST_RefN(h, 123456789);
+  shard.TEST_RefN(h, 123456789);
  // Simulate many lookup/ref + release, plenty to overflow counters
  for (int i = 0; i < 10000; ++i) {
-    shard_->TEST_RefN(h, 1234567);
-    shard_->TEST_ReleaseN(h, 1234567);
+    shard.TEST_RefN(h, 1234567);
+    shard.TEST_ReleaseN(h, 1234567);
  }
  // Mark it invisible (to reach a different CorrectNearOverflow() in Release)
-  shard_->Erase(TestKey(hkey), hkey);
+  shard.Erase(this->TestKey(hkey), hkey);
  // Simulate many more lookup/ref + release (one-by-one would be too
  // expensive for unit test)
  for (int i = 0; i < 10000; ++i) {
-    shard_->TEST_RefN(h, 1234567);
-    shard_->TEST_ReleaseN(h, 1234567);
+    shard.TEST_RefN(h, 1234567);
+    shard.TEST_ReleaseN(h, 1234567);
  }
  // Free all but last 1
-  shard_->TEST_ReleaseN(h, 123456789);
+  shard.TEST_ReleaseN(h, 123456789);
  // Still alive
  ASSERT_EQ(val.deleted, 0);
  // Free last ref, which will finalize erasure
-  shard_->Release(h);
+  shard.Release(h);
  // Deleted
  ASSERT_EQ(val.deleted, 1);
 }
 
-TEST_F(ClockCacheTest, ClockTableFull) {
+TYPED_TEST(ClockCacheTest, ClockTableFull) {
  // Force clock cache table to fill up (not usually allowed) in order
  // to test full probe sequence that is theoretically possible due to
  // parallel operations
-  NewShard(6, /*strict_capacity_limit*/ false);
-  size_t size = shard_->GetTableAddressCount();
+  this->NewShard(6, /*strict_capacity_limit*/ false);
+  auto& shard = *this->shard_;
+  using HandleImpl = typename ClockCacheTest<TypeParam>::Shard::HandleImpl;
+
+  size_t size = shard.GetTableAddressCount();
  ASSERT_LE(size + 3, 256);  // for using char keys
  // Modify occupancy and capacity limits to attempt insert on full
-  shard_->TEST_MutableOccupancyLimit() = size + 100;
-  shard_->SetCapacity(size + 100);
+  shard.TEST_MutableOccupancyLimit() = size + 100;
+  shard.SetCapacity(size + 100);
 
  DeleteCounter val;
  std::vector<HandleImpl*> handles;
  // NOTE: the three extra insertions should create standalone entries
  for (size_t i = 0; i < size + 3; ++i) {
-    UniqueId64x2 hkey = TestHashedKey(static_cast<char>(i));
-    ASSERT_OK(shard_->Insert(TestKey(hkey), hkey, &val, &kDeleteCounterHelper,
-                             1, &handles.emplace_back(),
-                             Cache::Priority::HIGH));
+    UniqueId64x2 hkey = this->TestHashedKey(static_cast<char>(i));
+    ASSERT_OK(shard.Insert(this->TestKey(hkey), hkey, &val,
+                           &kDeleteCounterHelper, 1, &handles.emplace_back(),
+                           Cache::Priority::HIGH));
  }
 
  for (size_t i = 0; i < size + 3; ++i) {
-    UniqueId64x2 hkey = TestHashedKey(static_cast<char>(i));
-    HandleImpl* h = shard_->Lookup(TestKey(hkey), hkey);
+    UniqueId64x2 hkey = this->TestHashedKey(static_cast<char>(i));
+    HandleImpl* h = shard.Lookup(this->TestKey(hkey), hkey);
    if (i < size) {
      ASSERT_NE(h, nullptr);
-      shard_->Release(h);
+      shard.Release(h);
    } else {
      // Standalone entries not visible by lookup
      ASSERT_EQ(h, nullptr);
 
@@ -750,7 +767,7 @@ TEST_F(ClockCacheTest, ClockTableFull) {
 
  for (size_t i = 0; i < size + 3; ++i) {
    ASSERT_NE(handles[i], nullptr);
-    shard_->Release(handles[i]);
+    shard.Release(handles[i]);
    if (i < size) {
      // Everything still in cache
      ASSERT_EQ(val.deleted, 0);
 
@@ -761,8 +778,8 @@ TEST_F(ClockCacheTest, ClockTableFull) {
  }
 
  for (size_t i = size + 3; i > 0; --i) {
-    UniqueId64x2 hkey = TestHashedKey(static_cast<char>(i - 1));
-    shard_->Erase(TestKey(hkey), hkey);
+    UniqueId64x2 hkey = this->TestHashedKey(static_cast<char>(i - 1));
+    shard.Erase(this->TestKey(hkey), hkey);
    if (i - 1 > size) {
      ASSERT_EQ(val.deleted, 3);
    } else {
 
@@ -773,78 +790,81 @@ TEST_F(ClockCacheTest, ClockTableFull) {
 
 // This test is mostly to exercise some corner case logic, by forcing two
 // keys to have the same hash, and more
-TEST_F(ClockCacheTest, CollidingInsertEraseTest) {
-  NewShard(6, /*strict_capacity_limit*/ false);
+TYPED_TEST(ClockCacheTest, CollidingInsertEraseTest) {
+  this->NewShard(6, /*strict_capacity_limit*/ false);
+  auto& shard = *this->shard_;
+  using HandleImpl = typename ClockCacheTest<TypeParam>::Shard::HandleImpl;
+
  DeleteCounter val;
-  UniqueId64x2 hkey1 = TestHashedKey('x');
-  Slice key1 = TestKey(hkey1);
-  UniqueId64x2 hkey2 = TestHashedKey('y');
-  Slice key2 = TestKey(hkey2);
-  UniqueId64x2 hkey3 = TestHashedKey('z');
-  Slice key3 = TestKey(hkey3);
+  UniqueId64x2 hkey1 = this->TestHashedKey('x');
+  Slice key1 = this->TestKey(hkey1);
+  UniqueId64x2 hkey2 = this->TestHashedKey('y');
+  Slice key2 = this->TestKey(hkey2);
+  UniqueId64x2 hkey3 = this->TestHashedKey('z');
+  Slice key3 = this->TestKey(hkey3);
  HandleImpl* h1;
-  ASSERT_OK(shard_->Insert(key1, hkey1, &val, &kDeleteCounterHelper, 1, &h1,
-                           Cache::Priority::HIGH));
+  ASSERT_OK(shard.Insert(key1, hkey1, &val, &kDeleteCounterHelper, 1, &h1,
+                         Cache::Priority::HIGH));
  HandleImpl* h2;
-  ASSERT_OK(shard_->Insert(key2, hkey2, &val, &kDeleteCounterHelper, 1, &h2,
-                           Cache::Priority::HIGH));
+  ASSERT_OK(shard.Insert(key2, hkey2, &val, &kDeleteCounterHelper, 1, &h2,
+                         Cache::Priority::HIGH));
  HandleImpl* h3;
-  ASSERT_OK(shard_->Insert(key3, hkey3, &val, &kDeleteCounterHelper, 1, &h3,
-                           Cache::Priority::HIGH));
+  ASSERT_OK(shard.Insert(key3, hkey3, &val, &kDeleteCounterHelper, 1, &h3,
+                         Cache::Priority::HIGH));
 
  // Can repeatedly lookup+release despite the hash collision
  HandleImpl* tmp_h;
  for (bool erase_if_last_ref : {true, false}) {  // but not last ref
-    tmp_h = shard_->Lookup(key1, hkey1);
+    tmp_h = shard.Lookup(key1, hkey1);
    ASSERT_EQ(h1, tmp_h);
-    ASSERT_FALSE(shard_->Release(tmp_h, erase_if_last_ref));
+    ASSERT_FALSE(shard.Release(tmp_h, erase_if_last_ref));
 
-    tmp_h = shard_->Lookup(key2, hkey2);
+    tmp_h = shard.Lookup(key2, hkey2);
    ASSERT_EQ(h2, tmp_h);
-    ASSERT_FALSE(shard_->Release(tmp_h, erase_if_last_ref));
+    ASSERT_FALSE(shard.Release(tmp_h, erase_if_last_ref));
 
-    tmp_h = shard_->Lookup(key3, hkey3);
+    tmp_h = shard.Lookup(key3, hkey3);
    ASSERT_EQ(h3, tmp_h);
-    ASSERT_FALSE(shard_->Release(tmp_h, erase_if_last_ref));
+    ASSERT_FALSE(shard.Release(tmp_h, erase_if_last_ref));
  }
 
  // Make h1 invisible
-  shard_->Erase(key1, hkey1);
+  shard.Erase(key1, hkey1);
  // Redundant erase
-  shard_->Erase(key1, hkey1);
+  shard.Erase(key1, hkey1);
 
  // All still alive
  ASSERT_EQ(val.deleted, 0);
 
  // Invisible to Lookup
-  tmp_h = shard_->Lookup(key1, hkey1);
+  tmp_h = shard.Lookup(key1, hkey1);
  ASSERT_EQ(nullptr, tmp_h);
 
  // Can still find h2, h3
  for (bool erase_if_last_ref : {true, false}) {  // but not last ref
-    tmp_h = shard_->Lookup(key2, hkey2);
+    tmp_h = shard.Lookup(key2, hkey2);
    ASSERT_EQ(h2, tmp_h);
-    ASSERT_FALSE(shard_->Release(tmp_h, erase_if_last_ref));
+    ASSERT_FALSE(shard.Release(tmp_h, erase_if_last_ref));
 
-    tmp_h = shard_->Lookup(key3, hkey3);
+    tmp_h = shard.Lookup(key3, hkey3);
    ASSERT_EQ(h3, tmp_h);
-    ASSERT_FALSE(shard_->Release(tmp_h, erase_if_last_ref));
+    ASSERT_FALSE(shard.Release(tmp_h, erase_if_last_ref));
  }
 
  // Also Insert with invisible entry there
-  ASSERT_OK(shard_->Insert(key1, hkey1, &val, &kDeleteCounterHelper, 1, nullptr,
-                           Cache::Priority::HIGH));
-  tmp_h = shard_->Lookup(key1, hkey1);
+  ASSERT_OK(shard.Insert(key1, hkey1, &val, &kDeleteCounterHelper, 1, nullptr,
+                         Cache::Priority::HIGH));
+  tmp_h = shard.Lookup(key1, hkey1);
  // Found but distinct handle
  ASSERT_NE(nullptr, tmp_h);
  ASSERT_NE(h1, tmp_h);
-  ASSERT_TRUE(shard_->Release(tmp_h, /*erase_if_last_ref*/ true));
+  ASSERT_TRUE(shard.Release(tmp_h, /*erase_if_last_ref*/ true));
 
  // tmp_h deleted
  ASSERT_EQ(val.deleted--, 1);
 
  // Release last ref on h1 (already invisible)
-  ASSERT_TRUE(shard_->Release(h1, /*erase_if_last_ref*/ false));
+  ASSERT_TRUE(shard.Release(h1, /*erase_if_last_ref*/ false));
 
  // h1 deleted
  ASSERT_EQ(val.deleted--, 1);
 
@@ -852,57 +872,57 @@ TEST_F(ClockCacheTest, CollidingInsertEraseTest) {
 
  // Can still find h2, h3
  for (bool erase_if_last_ref : {true, false}) {  // but not last ref
-    tmp_h = shard_->Lookup(key2, hkey2);
+    tmp_h = shard.Lookup(key2, hkey2);
    ASSERT_EQ(h2, tmp_h);
-    ASSERT_FALSE(shard_->Release(tmp_h, erase_if_last_ref));
+    ASSERT_FALSE(shard.Release(tmp_h, erase_if_last_ref));
 
-    tmp_h = shard_->Lookup(key3, hkey3);
+    tmp_h = shard.Lookup(key3, hkey3);
    ASSERT_EQ(h3, tmp_h);
-    ASSERT_FALSE(shard_->Release(tmp_h, erase_if_last_ref));
+    ASSERT_FALSE(shard.Release(tmp_h, erase_if_last_ref));
  }
 
  // Release last ref on h2
-  ASSERT_FALSE(shard_->Release(h2, /*erase_if_last_ref*/ false));
+  ASSERT_FALSE(shard.Release(h2, /*erase_if_last_ref*/ false));
 
  // h2 still not deleted (unreferenced in cache)
  ASSERT_EQ(val.deleted, 0);
 
  // Can still find it
-  tmp_h = shard_->Lookup(key2, hkey2);
+  tmp_h = shard.Lookup(key2, hkey2);
  ASSERT_EQ(h2, tmp_h);
 
  // Release last ref on h2, with erase
-  ASSERT_TRUE(shard_->Release(h2, /*erase_if_last_ref*/ true));
+  ASSERT_TRUE(shard.Release(h2, /*erase_if_last_ref*/ true));
 
  // h2 deleted
  ASSERT_EQ(val.deleted--, 1);
-  tmp_h = shard_->Lookup(key2, hkey2);
+  tmp_h = shard.Lookup(key2, hkey2);
  ASSERT_EQ(nullptr, tmp_h);
 
  // Can still find h3
  for (bool erase_if_last_ref : {true, false}) {  // but not last ref
-    tmp_h = shard_->Lookup(key3, hkey3);
+    tmp_h = shard.Lookup(key3, hkey3);
    ASSERT_EQ(h3, tmp_h);
-    ASSERT_FALSE(shard_->Release(tmp_h, erase_if_last_ref));
+    ASSERT_FALSE(shard.Release(tmp_h, erase_if_last_ref));
  }
 
  // Release last ref on h3, without erase
-  ASSERT_FALSE(shard_->Release(h3, /*erase_if_last_ref*/ false));
+  ASSERT_FALSE(shard.Release(h3, /*erase_if_last_ref*/ false));
 
  // h3 still not deleted (unreferenced in cache)
  ASSERT_EQ(val.deleted, 0);
 
  // Explicit erase
-  shard_->Erase(key3, hkey3);
+  shard.Erase(key3, hkey3);
 
  // h3 deleted
  ASSERT_EQ(val.deleted--, 1);
-  tmp_h = shard_->Lookup(key3, hkey3);
+  tmp_h = shard.Lookup(key3, hkey3);
  ASSERT_EQ(nullptr, tmp_h);
 }
 
 // This uses the public API to effectively test CalcHashBits etc.
-TEST_F(ClockCacheTest, TableSizesTest) {
+TYPED_TEST(ClockCacheTest, TableSizesTest) {
  for (size_t est_val_size : {1U, 5U, 123U, 2345U, 345678U}) {
    SCOPED_TRACE("est_val_size = " + std::to_string(est_val_size));
    for (double est_count : {1.1, 2.2, 511.9, 512.1, 2345.0}) {
cache/sharded_cache.h

@@ -273,6 +273,14 @@ class ShardedCache : public ShardedCacheBase {
    }
  }
 
+  inline void ForEachShard(
+      const std::function<void(const CacheShard*)>& fn) const {
+    uint32_t num_shards = GetNumShards();
+    for (uint32_t i = 0; i < num_shards; i++) {
+      fn(shards_ + i);
+    }
+  }
+
  inline size_t SumOverShards(
      const std::function<size_t(CacheShard&)>& fn) const {
    uint32_t num_shards = GetNumShards();
db/db_block_cache_test.cc

@@ -741,10 +741,15 @@ TEST_F(DBBlockCacheTest, AddRedundantStats) {
  int iterations_tested = 0;
  for (std::shared_ptr<Cache> base_cache :
       {NewLRUCache(capacity, num_shard_bits),
+        // FixedHyperClockCache
        HyperClockCacheOptions(
            capacity,
            BlockBasedTableOptions().block_size /*estimated_value_size*/,
            num_shard_bits)
-            .MakeSharedCache()}) {
+            .MakeSharedCache(),
+        // AutoHyperClockCache
+        HyperClockCacheOptions(capacity, 0 /*estimated_value_size*/,
+                               num_shard_bits)
+            .MakeSharedCache()}) {
    if (!base_cache) {
      // Skip clock cache when not supported
db_stress_tool/db_stress_test_base.cc

@@ -129,12 +129,20 @@ std::shared_ptr<Cache> StressTest::NewCache(size_t capacity,
  if (FLAGS_cache_type == "clock_cache") {
    fprintf(stderr, "Old clock cache implementation has been removed.\n");
    exit(1);
-  } else if (FLAGS_cache_type == "hyper_clock_cache" ||
-             FLAGS_cache_type == "fixed_hyper_clock_cache") {
-    HyperClockCacheOptions opts(static_cast<size_t>(capacity),
-                                FLAGS_block_size /*estimated_entry_charge*/,
+  } else if (EndsWith(FLAGS_cache_type, "hyper_clock_cache")) {
+    size_t estimated_entry_charge;
+    if (FLAGS_cache_type == "fixed_hyper_clock_cache" ||
+        FLAGS_cache_type == "hyper_clock_cache") {
+      estimated_entry_charge = FLAGS_block_size;
+    } else if (FLAGS_cache_type == "auto_hyper_clock_cache") {
+      estimated_entry_charge = 0;
+    } else {
+      fprintf(stderr, "Cache type not supported.");
+      exit(1);
+    }
+    HyperClockCacheOptions opts(FLAGS_cache_size, estimated_entry_charge,
                                num_shard_bits);
    opts.secondary_cache = std::move(secondary_cache);
    opts.hash_seed = BitwiseAnd(FLAGS_seed, INT32_MAX);
    return opts.MakeSharedCache();
  } else if (FLAGS_cache_type == "lru_cache") {
    LRUCacheOptions opts;
include/rocksdb/cache.h

@@ -416,6 +416,9 @@ struct HyperClockCacheOptions : public ShardedCacheOptions {
  // to estimate toward the lower side than the higher side.
  size_t estimated_entry_charge;
 
+  // FOR A FUTURE FEATURE (NOT YET USED)
+  size_t min_avg_entry_charge = 450;
+
  HyperClockCacheOptions(
      size_t _capacity, size_t _estimated_entry_charge,
      int _num_shard_bits = -1, bool _strict_capacity_limit = false,
cache/secondary_cache_test_util.h

@@ -43,13 +43,18 @@ class WithCacheType : public TestCreateContext {
 
  static constexpr auto kLRU = "lru";
  static constexpr auto kFixedHyperClock = "fixed_hyper_clock";
+  static constexpr auto kAutoHyperClock = "auto_hyper_clock";
 
  // For options other than capacity
  size_t estimated_value_size_ = 1;
 
  virtual const std::string& Type() const = 0;
 
-  bool IsHyperClock() const { return Type() == kFixedHyperClock; }
+  static bool IsHyperClock(const std::string& type) {
+    return type == kFixedHyperClock || type == kAutoHyperClock;
+  }
+
+  bool IsHyperClock() const { return IsHyperClock(Type()); }
 
  std::shared_ptr<Cache> NewCache(
      size_t capacity,
 
@@ -64,8 +69,11 @@ class WithCacheType : public TestCreateContext {
      }
      return lru_opts.MakeSharedCache();
    }
-    if (type == kFixedHyperClock) {
-      HyperClockCacheOptions hc_opts{capacity, estimated_value_size_};
+    if (IsHyperClock(type)) {
+      HyperClockCacheOptions hc_opts{
+          capacity, type == kFixedHyperClock ? estimated_value_size_ : 0};
+      hc_opts.min_avg_entry_charge =
+          std::max(size_t{1}, estimated_value_size_ / 2);
      hc_opts.hash_seed = 0;  // deterministic tests
      if (modify_opts_fn) {
        modify_opts_fn(hc_opts);
 
@@ -112,9 +120,11 @@ class WithCacheTypeParam : public WithCacheType,
 
 constexpr auto kLRU = WithCacheType::kLRU;
 constexpr auto kFixedHyperClock = WithCacheType::kFixedHyperClock;
+constexpr auto kAutoHyperClock = WithCacheType::kAutoHyperClock;
 
 inline auto GetTestingCacheTypes() {
-  return testing::Values(std::string(kLRU), std::string(kFixedHyperClock));
+  return testing::Values(std::string(kLRU), std::string(kFixedHyperClock),
+                         std::string(kAutoHyperClock));
 }
 
 }  // namespace secondary_cache_test_util
tools/db_bench_tool.cc

@@ -3039,22 +3039,29 @@ class Benchmark {
    if (FLAGS_cache_type == "clock_cache") {
      fprintf(stderr, "Old clock cache implementation has been removed.\n");
      exit(1);
-    } else if (FLAGS_cache_type == "hyper_clock_cache" ||
-               FLAGS_cache_type == "fixed_hyper_clock_cache") {
-      HyperClockCacheOptions hcco{
-          static_cast<size_t>(capacity),
-          static_cast<size_t>(FLAGS_block_size) /*estimated_entry_charge*/,
-          FLAGS_cache_numshardbits};
-      hcco.hash_seed = GetCacheHashSeed();
+    } else if (EndsWith(FLAGS_cache_type, "hyper_clock_cache")) {
+      size_t estimated_entry_charge;
+      if (FLAGS_cache_type == "fixed_hyper_clock_cache" ||
+          FLAGS_cache_type == "hyper_clock_cache") {
+        estimated_entry_charge = FLAGS_block_size;
+      } else if (FLAGS_cache_type == "auto_hyper_clock_cache") {
+        estimated_entry_charge = 0;
+      } else {
+        fprintf(stderr, "Cache type not supported.");
+        exit(1);
+      }
+      HyperClockCacheOptions opts(FLAGS_cache_size, estimated_entry_charge,
+                                  FLAGS_cache_numshardbits);
+      opts.hash_seed = GetCacheHashSeed();
      if (use_tiered_cache) {
-        TieredVolatileCacheOptions opts;
-        hcco.capacity += secondary_cache_opts.capacity;
-        opts.cache_type = PrimaryCacheType::kCacheTypeHCC;
-        opts.cache_opts = &hcco;
-        opts.comp_cache_opts = secondary_cache_opts;
-        return NewTieredVolatileCache(opts);
+        TieredVolatileCacheOptions tiered_opts;
+        opts.capacity += secondary_cache_opts.capacity;
+        tiered_opts.cache_type = PrimaryCacheType::kCacheTypeHCC;
+        tiered_opts.cache_opts = &opts;
+        tiered_opts.comp_cache_opts = secondary_cache_opts;
+        return NewTieredVolatileCache(tiered_opts);
      } else {
-        return hcco.MakeSharedCache();
+        return opts.MakeSharedCache();
      }
    } else if (FLAGS_cache_type == "lru_cache") {
      LRUCacheOptions opts(
tools/db_crashtest.py

@@ -123,7 +123,10 @@ default_params = {
    "use_direct_reads": lambda: random.randint(0, 1),
    "use_direct_io_for_flush_and_compaction": lambda: random.randint(0, 1),
    "mock_direct_io": False,
-    "cache_type": lambda: random.choice(["lru_cache", "hyper_clock_cache"]),
+    "cache_type": lambda: random.choice(
+        ["lru_cache", "fixed_hyper_clock_cache", "auto_hyper_clock_cache",
+         "auto_hyper_clock_cache"]
+    ),
    "use_full_merge_v1": lambda: random.randint(0, 1),
    "use_merge": lambda: random.randint(0, 1),
    # use_put_entity_one_in has to be the same across invocations for verification to work, hence no lambda