Prepare tests for new HCC naming (#11676)

Summary:
I'm anticipating using the public name HyperClockCache for both the current version with a fixed-size table and the upcoming version with an automatically growing table. However, for simplicity of testing them as substantially distinct implementations, I want to give them distinct internal names, like FixedHyperClockCache and AutoHyperClockCache.

This change anticipates that future by renaming to FixedHyperClockCache and assuming for now that all the unit tests that run on HCC will run and behave similarly for the automatic HCC. Updates will obviously be needed, but I'm trying to avoid uninteresting find & replace churn in what will be a large and engineering-heavy PR for AutoHCC.
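For illustration, a minimal sketch of the unchanged public construction path. The helper name and the capacity/charge values are made up for the example; HyperClockCacheOptions and MakeSharedCache are the real public API, and MakeSharedCache now returns the internally renamed FixedHyperClockCache (see the clock_cache.cc diff below):

  #include <memory>

  #include "rocksdb/cache.h"

  // Example only: construct a HyperClockCache exactly as before this change.
  // The rename is internal; no caller code needs to be updated.
  std::shared_ptr<rocksdb::Cache> MakeExampleCache() {
    rocksdb::HyperClockCacheOptions opts(
        /*_capacity=*/64 << 20, /*_estimated_entry_charge=*/4096);
    return opts.MakeSharedCache();
  }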

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11676

Test Plan: no behavior change intended, except logging will now use the name FixedHyperClockCache

Reviewed By: ajkr

Differential Revision: D48103165

Pulled By: pdillinger

fbshipit-source-id: a33f1901488fea102164c2318e2f2b156aaba736
Author: Peter Dillinger, 2023-08-07 18:17:12 -07:00 (committed by Facebook GitHub Bot)
parent 6d1effaf01
commit 99daea3481
9 changed files with 109 additions and 105 deletions

@@ -299,7 +299,8 @@ class CacheBench {
   if (FLAGS_cache_type == "clock_cache") {
     fprintf(stderr, "Old clock cache implementation has been removed.\n");
     exit(1);
-  } else if (FLAGS_cache_type == "hyper_clock_cache") {
+  } else if (FLAGS_cache_type == "hyper_clock_cache" ||
+             FLAGS_cache_type == "fixed_hyper_clock_cache") {
     HyperClockCacheOptions opts(FLAGS_cache_size, FLAGS_value_bytes,
                                 FLAGS_num_shard_bits);
     opts.hash_seed = BitwiseAnd(FLAGS_seed, INT32_MAX);

cache/cache_test.cc (31 lines changed)

@@ -120,8 +120,7 @@ class CacheTest : public testing::Test,
   // Currently, HyperClockCache requires keys to be 16B long, whereas
   // LRUCache doesn't, so the encoding depends on the cache type.
   std::string EncodeKey(int k) {
-    auto type = GetParam();
-    if (type == kHyperClock) {
+    if (IsHyperClock()) {
       return EncodeKey16Bytes(k);
     } else {
       return EncodeKey32Bits(k);
@@ -129,8 +128,7 @@ class CacheTest : public testing::Test,
     }
   }
   int DecodeKey(const Slice& k) {
-    auto type = GetParam();
-    if (type == kHyperClock) {
+    if (IsHyperClock()) {
       return DecodeKey16Bytes(k);
     } else {
       return DecodeKey32Bits(k);
@@ -190,7 +188,7 @@ TEST_P(CacheTest, UsageTest) {
   auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
   ASSERT_EQ(0, cache->GetUsage());
   size_t baseline_meta_usage = precise_cache->GetUsage();
-  if (type != kHyperClock) {
+  if (!IsHyperClock()) {
     ASSERT_EQ(0, baseline_meta_usage);
   }
@@ -209,7 +207,7 @@ TEST_P(CacheTest, UsageTest) {
   ASSERT_OK(precise_cache->Insert(key, value, &kDumbHelper, kv_size));
   usage += kv_size;
   ASSERT_EQ(usage, cache->GetUsage());
-  if (type == kHyperClock) {
+  if (IsHyperClock()) {
     ASSERT_EQ(baseline_meta_usage + usage, precise_cache->GetUsage());
   } else {
     ASSERT_LT(usage, precise_cache->GetUsage());
@@ -237,7 +235,7 @@ TEST_P(CacheTest, UsageTest) {
   ASSERT_GT(kCapacity, cache->GetUsage());
   ASSERT_GT(kCapacity, precise_cache->GetUsage());
   ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
-  if (type != kHyperClock) {
+  if (!IsHyperClock()) {
     ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
   } else {
     // estimated value size of 1 is weird for clock cache, because
@@ -263,7 +261,7 @@ TEST_P(CacheTest, PinnedUsageTest) {
   auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
   auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);
   size_t baseline_meta_usage = precise_cache->GetUsage();
-  if (type != kHyperClock) {
+  if (!IsHyperClock()) {
     ASSERT_EQ(0, baseline_meta_usage);
   }
@@ -368,7 +366,7 @@ TEST_P(CacheTest, HitAndMiss) {
   ASSERT_EQ(-1, Lookup(300));
   Insert(100, 102);
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     // ClockCache usually doesn't overwrite on Insert
     ASSERT_EQ(101, Lookup(100));
   } else {
@@ -378,7 +376,7 @@ TEST_P(CacheTest, HitAndMiss) {
   ASSERT_EQ(-1, Lookup(300));
   ASSERT_EQ(1U, deleted_values_.size());
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ASSERT_EQ(102, deleted_values_[0]);
   } else {
     ASSERT_EQ(101, deleted_values_[0]);
@@ -386,7 +384,7 @@ TEST_P(CacheTest, HitAndMiss) {
 }
 TEST_P(CacheTest, InsertSameKey) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS(
         "ClockCache doesn't guarantee Insert overwrite same key.");
     return;
@@ -415,7 +413,7 @@ TEST_P(CacheTest, Erase) {
 }
 TEST_P(CacheTest, EntriesArePinned) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS(
         "ClockCache doesn't guarantee Insert overwrite same key.");
     return;
@@ -479,7 +477,7 @@ TEST_P(CacheTest, ExternalRefPinsEntries) {
       Insert(1000 + j, 2000 + j);
     }
     // Clock cache is even more stateful and needs more churn to evict
-    if (GetParam() == kHyperClock) {
+    if (IsHyperClock()) {
       for (int j = 0; j < kCacheSize; j++) {
         Insert(11000 + j, 11000 + j);
       }
@@ -679,7 +677,7 @@ using TypedHandle = SharedCache::TypedHandle;
 TEST_P(CacheTest, SetCapacity) {
   auto type = GetParam();
-  if (type == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS(
         "FastLRUCache and HyperClockCache don't support arbitrary capacity "
         "adjustments.");
@@ -811,7 +809,7 @@ TEST_P(CacheTest, OverCapacity) {
     cache.Release(handles[i]);
   }
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     // Make sure eviction is triggered.
     ASSERT_OK(cache.Insert(EncodeKey(-1), nullptr, 1, &handles[0]));
@@ -923,8 +921,7 @@ TEST_P(CacheTest, DefaultShardBits) {
   // Prevent excessive allocation (to save time & space)
   estimated_value_size_ = 100000;
   // Implementations use different minimum shard sizes
-  size_t min_shard_size =
-      (GetParam() == kHyperClock ? 32U * 1024U : 512U) * 1024U;
+  size_t min_shard_size = (IsHyperClock() ? 32U * 1024U : 512U) * 1024U;
   std::shared_ptr<Cache> cache = NewCache(32U * min_shard_size);
   ShardedCacheBase* sc = dynamic_cast<ShardedCacheBase*>(cache.get());
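As context for the EncodeKey changes above, a hedged sketch of what a 16-byte key encoding can look like. This is an approximation under my own assumptions, not the test suite's actual EncodeKey16Bytes:

  #include <cstring>
  #include <string>

  // Approximation only: pad an int key out to the 16 bytes HyperClockCache
  // requires for keys. RocksDB's real EncodeKey16Bytes may differ in layout.
  std::string ExampleEncodeKey16Bytes(int k) {
    std::string r(16, '\0');
    std::memcpy(&r[0], &k, sizeof(k));  // remaining bytes stay zero
    return r;
  }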

cache/clock_cache.cc (85 lines changed)

@@ -522,7 +522,7 @@ void BaseClockTable::TrackAndReleaseEvictedEntry(
     // For key reconstructed from hash
     UniqueId64x2 unhashed;
     took_value_ownership =
-        eviction_callback_(ClockCacheShard<HyperClockTable>::ReverseHash(
+        eviction_callback_(ClockCacheShard<FixedHyperClockTable>::ReverseHash(
                                h->GetHash(), &unhashed, hash_seed_),
                            reinterpret_cast<Cache::Handle*>(h));
   }
@@ -670,7 +670,7 @@ void BaseClockTable::TEST_ReleaseNMinus1(ClockHandle* h, size_t n) {
 }
 #endif

-HyperClockTable::HyperClockTable(
+FixedHyperClockTable::FixedHyperClockTable(
     size_t capacity, bool /*strict_capacity_limit*/,
     CacheMetadataChargePolicy metadata_charge_policy,
     MemoryAllocator* allocator,
@@ -693,7 +693,7 @@ HyperClockTable::HyperClockTable(
       "Expecting size / alignment with common cache line size");
 }

-HyperClockTable::~HyperClockTable() {
+FixedHyperClockTable::~FixedHyperClockTable() {
   // Assumes there are no references or active operations on any slot/element
   // in the table.
   for (size_t i = 0; i < GetTableSize(); i++) {
@@ -729,13 +729,13 @@ HyperClockTable::~HyperClockTable() {
   assert(occupancy_ == 0);
 }

-void HyperClockTable::StartInsert(InsertState&) {}
+void FixedHyperClockTable::StartInsert(InsertState&) {}

-bool HyperClockTable::GrowIfNeeded(size_t new_occupancy, InsertState&) {
+bool FixedHyperClockTable::GrowIfNeeded(size_t new_occupancy, InsertState&) {
   return new_occupancy <= occupancy_limit_;
 }

-HyperClockTable::HandleImpl* HyperClockTable::DoInsert(
+FixedHyperClockTable::HandleImpl* FixedHyperClockTable::DoInsert(
     const ClockHandleBasicData& proto, uint64_t initial_countdown,
     bool keep_ref, InsertState&) {
   bool already_matches = false;
@@ -782,7 +782,7 @@ HyperClockTable::HandleImpl* HyperClockTable::DoInsert(
   return nullptr;
 }

-HyperClockTable::HandleImpl* HyperClockTable::Lookup(
+FixedHyperClockTable::HandleImpl* FixedHyperClockTable::Lookup(
     const UniqueId64x2& hashed_key) {
   HandleImpl* e = FindSlot(
       hashed_key,
@@ -843,8 +843,8 @@ HyperClockTable::HandleImpl* HyperClockTable::Lookup(
   return e;
 }

-bool HyperClockTable::Release(HandleImpl* h, bool useful,
-                              bool erase_if_last_ref) {
+bool FixedHyperClockTable::Release(HandleImpl* h, bool useful,
+                                   bool erase_if_last_ref) {
   // In contrast with LRUCache's Release, this function won't delete the handle
   // when the cache is above capacity and the reference is the last one. Space
   // is only freed up by EvictFromClock (called by Insert when space is needed)
@@ -919,7 +919,7 @@ bool HyperClockTable::Release(HandleImpl* h, bool useful,
 }

 #ifndef NDEBUG
-void HyperClockTable::TEST_ReleaseN(HandleImpl* h, size_t n) {
+void FixedHyperClockTable::TEST_ReleaseN(HandleImpl* h, size_t n) {
   if (n > 0) {
     // Do n-1 simple releases first
     TEST_ReleaseNMinus1(h, n);
@@ -930,7 +930,7 @@ void HyperClockTable::TEST_ReleaseN(HandleImpl* h, size_t n) {
 }
 #endif

-void HyperClockTable::Erase(const UniqueId64x2& hashed_key) {
+void FixedHyperClockTable::Erase(const UniqueId64x2& hashed_key) {
   (void)FindSlot(
       hashed_key,
       [&](HandleImpl* h) {
@@ -995,7 +995,7 @@ void HyperClockTable::Erase(const UniqueId64x2& hashed_key) {
       [&](HandleImpl* /*h*/, bool /*is_last*/) {});
 }

-void HyperClockTable::EraseUnRefEntries() {
+void FixedHyperClockTable::EraseUnRefEntries() {
   for (size_t i = 0; i <= this->length_bits_mask_; i++) {
     HandleImpl& h = array_[i];
@@ -1017,7 +1017,7 @@ void HyperClockTable::EraseUnRefEntries() {
 }

 template <typename MatchFn, typename AbortFn, typename UpdateFn>
-inline HyperClockTable::HandleImpl* HyperClockTable::FindSlot(
+inline FixedHyperClockTable::HandleImpl* FixedHyperClockTable::FindSlot(
     const UniqueId64x2& hashed_key, const MatchFn& match_fn,
     const AbortFn& abort_fn, const UpdateFn& update_fn) {
   // NOTE: upper 32 bits of hashed_key[0] is used for sharding
@@ -1052,8 +1052,8 @@ inline HyperClockTable::HandleImpl* HyperClockTable::FindSlot(
   return nullptr;
 }

-inline void HyperClockTable::Rollback(const UniqueId64x2& hashed_key,
-                                      const HandleImpl* h) {
+inline void FixedHyperClockTable::Rollback(const UniqueId64x2& hashed_key,
+                                           const HandleImpl* h) {
   size_t current = ModTableSize(hashed_key[1]);
   size_t increment = static_cast<size_t>(hashed_key[0]) | 1U;
   while (&array_[current] != h) {
@@ -1062,7 +1062,7 @@ inline void HyperClockTable::Rollback(const UniqueId64x2& hashed_key,
   }
 }

-inline void HyperClockTable::ReclaimEntryUsage(size_t total_charge) {
+inline void FixedHyperClockTable::ReclaimEntryUsage(size_t total_charge) {
   auto old_occupancy = occupancy_.fetch_sub(1U, std::memory_order_release);
   (void)old_occupancy;
   // No underflow
@@ -1073,8 +1073,8 @@ inline void HyperClockTable::ReclaimEntryUsage(size_t total_charge) {
   assert(old_usage >= total_charge);
 }

-inline void HyperClockTable::Evict(size_t requested_charge, InsertState&,
-                                   EvictionData* data) {
+inline void FixedHyperClockTable::Evict(size_t requested_charge, InsertState&,
+                                        EvictionData* data) {
   // precondition
   assert(requested_charge > 0);
@@ -1172,7 +1172,7 @@ void ClockCacheShard<Table>::ApplyToSomeEntries(
       table_.HandlePtr(index_begin), table_.HandlePtr(index_end), false);
 }

-int HyperClockTable::CalcHashBits(
+int FixedHyperClockTable::CalcHashBits(
     size_t capacity, size_t estimated_value_size,
     CacheMetadataChargePolicy metadata_charge_policy) {
   double average_slot_charge = estimated_value_size * kLoadFactor;
@@ -1360,9 +1360,9 @@ size_t ClockCacheShard<Table>::GetTableAddressCount() const {
 }

 // Explicit instantiation
-template class ClockCacheShard<HyperClockTable>;
+template class ClockCacheShard<FixedHyperClockTable>;

-HyperClockCache::HyperClockCache(const HyperClockCacheOptions& opts)
+FixedHyperClockCache::FixedHyperClockCache(const HyperClockCacheOptions& opts)
     : ShardedCache(opts) {
   assert(opts.estimated_entry_charge > 0 ||
          opts.metadata_charge_policy != kDontChargeCacheMetadata);
@@ -1371,7 +1371,7 @@ HyperClockCache::HyperClockCache(const HyperClockCacheOptions& opts)
   size_t per_shard = GetPerShardCapacity();
   MemoryAllocator* alloc = this->memory_allocator();
   InitShards([&](Shard* cs) {
-    HyperClockTable::Opts table_opts;
+    FixedHyperClockTable::Opts table_opts;
     table_opts.estimated_value_size = opts.estimated_entry_charge;
     new (cs) Shard(per_shard, opts.strict_capacity_limit,
                    opts.metadata_charge_policy, alloc, &eviction_callback_,
@@ -1379,15 +1379,15 @@ HyperClockCache::HyperClockCache(const HyperClockCacheOptions& opts)
   });
 }

-Cache::ObjectPtr HyperClockCache::Value(Handle* handle) {
+Cache::ObjectPtr FixedHyperClockCache::Value(Handle* handle) {
   return reinterpret_cast<const HandleImpl*>(handle)->value;
 }

-size_t HyperClockCache::GetCharge(Handle* handle) const {
+size_t FixedHyperClockCache::GetCharge(Handle* handle) const {
   return reinterpret_cast<const HandleImpl*>(handle)->GetTotalCharge();
 }

-const Cache::CacheItemHelper* HyperClockCache::GetCacheItemHelper(
+const Cache::CacheItemHelper* FixedHyperClockCache::GetCacheItemHelper(
     Handle* handle) const {
   auto h = reinterpret_cast<const HandleImpl*>(handle);
   return h->helper;
@@ -1402,7 +1402,7 @@ namespace {
 // or actual occupancy very close to limit (>95% of limit).
 // Also, for each shard compute the recommended estimated_entry_charge,
 // and keep the minimum one for use as overall recommendation.
-void AddShardEvaluation(const HyperClockCache::Shard& shard,
+void AddShardEvaluation(const FixedHyperClockCache::Shard& shard,
                         std::vector<double>& predicted_load_factors,
                         size_t& min_recommendation) {
   size_t usage = shard.GetUsage() - shard.GetStandaloneUsage();
@@ -1420,7 +1420,7 @@ void AddShardEvaluation(const HyperClockCache::Shard& shard,
   // If filled to capacity, what would the occupancy ratio be?
   double ratio = occ_ratio / usage_ratio;
   // Given max load factor, what would that load factor be?
-  double lf = ratio * HyperClockTable::kStrictLoadFactor;
+  double lf = ratio * FixedHyperClockTable::kStrictLoadFactor;
   predicted_load_factors.push_back(lf);

   // Update min_recommendation also
@@ -1430,13 +1430,13 @@ void AddShardEvaluation(const HyperClockCache::Shard& shard,
 }  // namespace

-void HyperClockCache::ReportProblems(
+void FixedHyperClockCache::ReportProblems(
     const std::shared_ptr<Logger>& info_log) const {
   uint32_t shard_count = GetNumShards();
   std::vector<double> predicted_load_factors;
   size_t min_recommendation = SIZE_MAX;
-  const_cast<HyperClockCache*>(this)->ForEachShard(
-      [&](HyperClockCache::Shard* shard) {
+  const_cast<FixedHyperClockCache*>(this)->ForEachShard(
+      [&](FixedHyperClockCache::Shard* shard) {
         AddShardEvaluation(*shard, predicted_load_factors, min_recommendation);
       });
@@ -1459,18 +1459,19 @@ void HyperClockCache::ReportProblems(
                           predicted_load_factors.end(), 0.0) /
       shard_count;

-  constexpr double kLowSpecLoadFactor = HyperClockTable::kLoadFactor / 2;
-  constexpr double kMidSpecLoadFactor = HyperClockTable::kLoadFactor / 1.414;
-  if (average_load_factor > HyperClockTable::kLoadFactor) {
+  constexpr double kLowSpecLoadFactor = FixedHyperClockTable::kLoadFactor / 2;
+  constexpr double kMidSpecLoadFactor =
+      FixedHyperClockTable::kLoadFactor / 1.414;
+  if (average_load_factor > FixedHyperClockTable::kLoadFactor) {
     // Out of spec => Consider reporting load factor too high
     // Estimate effective overall capacity loss due to enforcing occupancy limit
     double lost_portion = 0.0;
     int over_count = 0;
     for (double lf : predicted_load_factors) {
-      if (lf > HyperClockTable::kStrictLoadFactor) {
+      if (lf > FixedHyperClockTable::kStrictLoadFactor) {
         ++over_count;
         lost_portion +=
-            (lf - HyperClockTable::kStrictLoadFactor) / lf / shard_count;
+            (lf - FixedHyperClockTable::kStrictLoadFactor) / lf / shard_count;
       }
     }
     // >= 20% loss -> error
@@ -1494,10 +1495,10 @@ void HyperClockCache::ReportProblems(
     if (report) {
       ROCKS_LOG_AT_LEVEL(
           info_log, level,
-          "HyperClockCache@%p unable to use estimated %.1f%% capacity because "
-          "of "
-          "full occupancy in %d/%u cache shards (estimated_entry_charge too "
-          "high). Recommend estimated_entry_charge=%zu",
+          "FixedHyperClockCache@%p unable to use estimated %.1f%% capacity "
+          "because of full occupancy in %d/%u cache shards "
+          "(estimated_entry_charge too high). "
+          "Recommend estimated_entry_charge=%zu",
           this, lost_portion * 100.0, over_count, (unsigned)shard_count,
           min_recommendation);
     }
@@ -1515,8 +1516,8 @@ void HyperClockCache::ReportProblems(
     }
     ROCKS_LOG_AT_LEVEL(
         info_log, level,
-        "HyperClockCache@%p table has low occupancy at full capacity. Higher "
-        "estimated_entry_charge (about %.1fx) would likely improve "
+        "FixedHyperClockCache@%p table has low occupancy at full capacity. "
+        "Higher estimated_entry_charge (about %.1fx) would likely improve "
        "performance. Recommend estimated_entry_charge=%zu",
        this, kMidSpecLoadFactor / average_load_factor, min_recommendation);
   }
@@ -1549,7 +1550,7 @@ std::shared_ptr<Cache> HyperClockCacheOptions::MakeSharedCache() const {
         GetDefaultCacheShardBits(opts.capacity, min_shard_size);
   }
   std::shared_ptr<Cache> cache =
-      std::make_shared<clock_cache::HyperClockCache>(opts);
+      std::make_shared<clock_cache::FixedHyperClockCache>(opts);
   if (opts.secondary_cache) {
     cache = std::make_shared<CacheWithSecondaryAdapter>(cache,
                                                         opts.secondary_cache);
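To make the ReportProblems projection above concrete, a small self-contained illustration with made-up shard numbers; the load factor constant below is a placeholder, not the real FixedHyperClockTable::kStrictLoadFactor:

  #include <cstdio>

  int main() {
    // Placeholder constant; the real value lives in FixedHyperClockTable.
    constexpr double kStrictLoadFactor = 0.84;
    double usage_ratio = 0.5;  // example: shard usage is half of capacity
    double occ_ratio = 0.4;    // example: occupancy is 40% of its limit
    // Projected to full capacity, occupancy scales by 1 / usage_ratio:
    double lf = (occ_ratio / usage_ratio) * kStrictLoadFactor;
    std::printf("predicted load factor: %.3f\n", lf);  // prints 0.672
  }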

cache/clock_cache.h (28 lines changed)

@@ -466,7 +466,7 @@ class BaseClockTable {
   const uint32_t& hash_seed_;
 };

-class HyperClockTable : public BaseClockTable {
+class FixedHyperClockTable : public BaseClockTable {
  public:
   // Target size to be exactly a common cache line size (see static_assert in
   // clock_cache.cc)
@@ -491,12 +491,12 @@ class HyperClockTable : public BaseClockTable {
     size_t estimated_value_size;
   };

-  HyperClockTable(size_t capacity, bool strict_capacity_limit,
-                  CacheMetadataChargePolicy metadata_charge_policy,
-                  MemoryAllocator* allocator,
-                  const Cache::EvictionCallback* eviction_callback,
-                  const uint32_t* hash_seed, const Opts& opts);
-  ~HyperClockTable();
+  FixedHyperClockTable(size_t capacity, bool strict_capacity_limit,
+                       CacheMetadataChargePolicy metadata_charge_policy,
+                       MemoryAllocator* allocator,
+                       const Cache::EvictionCallback* eviction_callback,
+                       const uint32_t* hash_seed, const Opts& opts);
+  ~FixedHyperClockTable();

   // For BaseClockTable::Insert
   struct InsertState {};
@@ -612,7 +612,7 @@ class HyperClockTable : public BaseClockTable {
   // Array of slots comprising the hash table.
   const std::unique_ptr<HandleImpl[]> array_;
-};  // class HyperClockTable
+};  // class FixedHyperClockTable

 // A single shard of sharded cache.
 template <class Table>
@@ -729,17 +729,17 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
   std::atomic<bool> strict_capacity_limit_;
 };  // class ClockCacheShard

-class HyperClockCache
+class FixedHyperClockCache
 #ifdef NDEBUG
     final
 #endif
-    : public ShardedCache<ClockCacheShard<HyperClockTable>> {
+    : public ShardedCache<ClockCacheShard<FixedHyperClockTable>> {
  public:
-  using Shard = ClockCacheShard<HyperClockTable>;
+  using Shard = ClockCacheShard<FixedHyperClockTable>;

-  explicit HyperClockCache(const HyperClockCacheOptions& opts);
+  explicit FixedHyperClockCache(const HyperClockCacheOptions& opts);

-  const char* Name() const override { return "HyperClockCache"; }
+  const char* Name() const override { return "FixedHyperClockCache"; }

   Cache::ObjectPtr Value(Handle* handle) override;
@@ -749,7 +749,7 @@ class HyperClockCache
   void ReportProblems(
       const std::shared_ptr<Logger>& /*info_log*/) const override;
-};  // class HyperClockCache
+};  // class FixedHyperClockCache

 }  // namespace clock_cache
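The reason the header keeps ClockCacheShard templated on the table type is so the anticipated AutoHyperClockTable can slot in beside FixedHyperClockTable. A toy, self-contained sketch of that design choice; all names below are illustrative stand-ins, not RocksDB's interfaces:

  #include <cstddef>
  #include <iostream>

  struct ToyFixedTable {
    static constexpr const char* kName = "FixedHyperClock";
    // A fixed-size table refuses growth beyond its occupancy limit.
    bool GrowIfNeeded(size_t new_occupancy) { return new_occupancy <= 100; }
  };

  struct ToyAutoTable {
    static constexpr const char* kName = "AutoHyperClock";
    // An automatically growing table can always make room.
    bool GrowIfNeeded(size_t) { return true; }
  };

  // The shard is written once and reused for any table implementation.
  template <class Table>
  struct ToyShard {
    Table table;
    void Report() const { std::cout << Table::kName << "\n"; }
  };

  int main() {
    ToyShard<ToyFixedTable>().Report();
    ToyShard<ToyAutoTable>().Report();
  }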

@@ -737,7 +737,7 @@ class CompressedSecondaryCacheTestBase : public testing::Test,
 class CompressedSecondaryCacheTest
     : public CompressedSecondaryCacheTestBase,
       public testing::WithParamInterface<std::string> {
-  const std::string& Type() override { return GetParam(); }
+  const std::string& Type() const override { return GetParam(); }
 };

 INSTANTIATE_TEST_CASE_P(CompressedSecondaryCacheTest,
@@ -752,7 +752,7 @@ class CompressedSecCacheTestWithCompressAndAllocatorParam
     sec_cache_is_compressed_ = std::get<0>(GetParam());
     use_jemalloc_ = std::get<1>(GetParam());
   }
-  const std::string& Type() override { return std::get<2>(GetParam()); }
+  const std::string& Type() const override { return std::get<2>(GetParam()); }
   bool sec_cache_is_compressed_;
   bool use_jemalloc_;
 };
@@ -773,7 +773,7 @@ class CompressedSecondaryCacheTestWithCompressionParam
   CompressedSecondaryCacheTestWithCompressionParam() {
     sec_cache_is_compressed_ = std::get<0>(GetParam());
   }
-  const std::string& Type() override { return std::get<1>(GetParam()); }
+  const std::string& Type() const override { return std::get<1>(GetParam()); }
   bool sec_cache_is_compressed_;
 };
@@ -950,7 +950,7 @@ class CompressedSecCacheTestWithCompressAndSplitParam
     sec_cache_is_compressed_ = std::get<0>(GetParam());
     enable_custom_split_merge_ = std::get<1>(GetParam());
   }
-  const std::string& Type() override { return std::get<2>(GetParam()); }
+  const std::string& Type() const override { return std::get<2>(GetParam()); }
   bool sec_cache_is_compressed_;
   bool enable_custom_split_merge_;
 };

@@ -373,8 +373,8 @@ namespace clock_cache {
 class ClockCacheTest : public testing::Test {
  public:
-  using Shard = HyperClockCache::Shard;
-  using Table = HyperClockTable;
+  using Shard = FixedHyperClockCache::Shard;
+  using Table = FixedHyperClockTable;
   using HandleImpl = Shard::HandleImpl;

   ClockCacheTest() {}
@@ -916,9 +916,9 @@ TEST_F(ClockCacheTest, TableSizesTest) {
             .MakeSharedCache();
     // Table sizes are currently only powers of two
     EXPECT_GE(cache->GetTableAddressCount(),
-              est_count / HyperClockTable::kLoadFactor);
+              est_count / FixedHyperClockTable::kLoadFactor);
     EXPECT_LE(cache->GetTableAddressCount(),
-              est_count / HyperClockTable::kLoadFactor * 2.0);
+              est_count / FixedHyperClockTable::kLoadFactor * 2.0);
     EXPECT_EQ(cache->GetUsage(), 0);

     // kFullChargeMetaData
@@ -935,9 +935,10 @@ TEST_F(ClockCacheTest, TableSizesTest) {
       double est_count_after_meta =
           (capacity - cache->GetUsage()) * 1.0 / est_val_size;
       EXPECT_GE(cache->GetTableAddressCount(),
-                est_count_after_meta / HyperClockTable::kLoadFactor);
-      EXPECT_LE(cache->GetTableAddressCount(),
-                est_count_after_meta / HyperClockTable::kLoadFactor * 2.0);
+                est_count_after_meta / FixedHyperClockTable::kLoadFactor);
+      EXPECT_LE(
+          cache->GetTableAddressCount(),
+          est_count_after_meta / FixedHyperClockTable::kLoadFactor * 2.0);
     }
   }
 }
@@ -1425,7 +1426,7 @@ TEST_P(BasicSecondaryCacheTest, FullCapacityTest) {
       k2.AsSlice(),
       GetHelper(CacheEntryRole::kDataBlock, /*secondary_compatible=*/false),
       /*context*/ this, Cache::Priority::LOW);
-  if (strict_capacity_limit || GetParam() == kHyperClock) {
+  if (strict_capacity_limit || IsHyperClock()) {
     ASSERT_NE(handle2, nullptr);
     cache->Release(handle2);
     ASSERT_EQ(secondary_cache->num_inserts(), 1u);
@@ -1450,12 +1451,12 @@ TEST_P(BasicSecondaryCacheTest, FullCapacityTest) {
 // CORRECTION: this is not quite right. block_1 can be inserted into the block
 // cache because strict_capacity_limit=false, but it is removed from the cache
 // in Release() because of being over-capacity, without demoting to secondary
-// cache. HyperClockCache doesn't check capacity on release (for efficiency)
-// so can demote the over-capacity item to secondary cache. Also, we intend to
-// add support for demotion in Release, but that currently causes too much
-// unit test churn.
+// cache. FixedHyperClockCache doesn't check capacity on release (for
+// efficiency) so can demote the over-capacity item to secondary cache. Also, we
+// intend to add support for demotion in Release, but that currently causes too
+// much unit test churn.
 TEST_P(DBSecondaryCacheTest, TestSecondaryCacheCorrectness1) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     // See CORRECTION above
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
@@ -1553,7 +1554,7 @@ TEST_P(DBSecondaryCacheTest, TestSecondaryCacheCorrectness1) {
 // insert and cache block_1 in the block cache (this is the different place
 // from TestSecondaryCacheCorrectness1)
 TEST_P(DBSecondaryCacheTest, TestSecondaryCacheCorrectness2) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
   }
@@ -1741,7 +1742,7 @@ TEST_P(DBSecondaryCacheTest, SecondaryCacheIntensiveTesting) {
 // if we try to insert block_1 to the block cache, it will always fail. Only
 // block_2 will be successfully inserted into the block cache.
 TEST_P(DBSecondaryCacheTest, SecondaryCacheFailureTest) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
   }
@@ -1851,7 +1852,7 @@ TEST_P(BasicSecondaryCacheTest, BasicWaitAllTest) {
                             str.length()));
   }
   // Force all entries to be evicted to the secondary cache
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     // HCC doesn't respond immediately to SetCapacity
     for (int i = 9000; i < 9030; ++i) {
       ASSERT_OK(cache->Insert(ock.WithOffset(i).AsSlice(), nullptr,
@@ -1906,7 +1907,7 @@ TEST_P(BasicSecondaryCacheTest, BasicWaitAllTest) {
 // a sync point callback in TestSecondaryCache::Lookup. We then control the
 // lookup result by setting the ResultMap.
 TEST_P(DBSecondaryCacheTest, TestSecondaryCacheMultiGet) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
   }
@@ -2407,7 +2408,7 @@ TEST_P(DBSecondaryCacheTest, TestSecondaryCacheOptionBasic) {
 // with new options, which set the lowest_used_cache_tier to
 // kNonVolatileBlockTier. So secondary cache will be used.
 TEST_P(DBSecondaryCacheTest, TestSecondaryCacheOptionChange) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
   }
@@ -2502,7 +2503,7 @@ TEST_P(DBSecondaryCacheTest, TestSecondaryCacheOptionChange) {
 // Two DB test. We create 2 DBs sharing the same block cache and secondary
 // cache. We disable the secondary cache option for DB2.
 TEST_P(DBSecondaryCacheTest, TestSecondaryCacheOptionTwoDB) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
   }

@@ -129,7 +129,8 @@ std::shared_ptr<Cache> StressTest::NewCache(size_t capacity,
   if (FLAGS_cache_type == "clock_cache") {
     fprintf(stderr, "Old clock cache implementation has been removed.\n");
     exit(1);
-  } else if (FLAGS_cache_type == "hyper_clock_cache") {
+  } else if (FLAGS_cache_type == "hyper_clock_cache" ||
+             FLAGS_cache_type == "fixed_hyper_clock_cache") {
     HyperClockCacheOptions opts(static_cast<size_t>(capacity),
                                 FLAGS_block_size /*estimated_entry_charge*/,
                                 num_shard_bits);

@@ -42,12 +42,14 @@ class WithCacheType : public TestCreateContext {
   };

   static constexpr auto kLRU = "lru";
-  static constexpr auto kHyperClock = "hyper_clock";
+  static constexpr auto kFixedHyperClock = "fixed_hyper_clock";

   // For options other than capacity
   size_t estimated_value_size_ = 1;

-  virtual const std::string& Type() = 0;
+  virtual const std::string& Type() const = 0;
+
+  bool IsHyperClock() const { return Type() == kFixedHyperClock; }

   std::shared_ptr<Cache> NewCache(
       size_t capacity,
@@ -62,7 +64,7 @@ class WithCacheType : public TestCreateContext {
     }
     return lru_opts.MakeSharedCache();
   }
-  if (type == kHyperClock) {
+  if (type == kFixedHyperClock) {
     HyperClockCacheOptions hc_opts{capacity, estimated_value_size_};
     hc_opts.hash_seed = 0;  // deterministic tests
     if (modify_opts_fn) {
@@ -105,14 +107,14 @@ class WithCacheType : public TestCreateContext {
 class WithCacheTypeParam : public WithCacheType,
                            public testing::WithParamInterface<std::string> {
-  const std::string& Type() override { return GetParam(); }
+  const std::string& Type() const override { return GetParam(); }
 };

 constexpr auto kLRU = WithCacheType::kLRU;
-constexpr auto kHyperClock = WithCacheType::kHyperClock;
+constexpr auto kFixedHyperClock = WithCacheType::kFixedHyperClock;

 inline auto GetTestingCacheTypes() {
-  return testing::Values(std::string(kLRU), std::string(kHyperClock));
+  return testing::Values(std::string(kLRU), std::string(kFixedHyperClock));
 }

 }  // namespace secondary_cache_test_util
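A sketch of how a parameterized test now reads with the IsHyperClock() helper, assuming the usual fixtures from this header; the test name is invented for the example:

  // Example only: guard an LRUCache-specific test using the new helper,
  // instead of comparing GetParam() against the old kHyperClock constant.
  TEST_P(CacheTest, ExampleLruOnlyBehavior) {
    if (IsHyperClock()) {
      ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
      return;
    }
    // ... LRUCache-specific assertions go here ...
  }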

@@ -3039,7 +3039,8 @@ class Benchmark {
   if (FLAGS_cache_type == "clock_cache") {
     fprintf(stderr, "Old clock cache implementation has been removed.\n");
     exit(1);
-  } else if (FLAGS_cache_type == "hyper_clock_cache") {
+  } else if (FLAGS_cache_type == "hyper_clock_cache" ||
+             FLAGS_cache_type == "fixed_hyper_clock_cache") {
     HyperClockCacheOptions hcco{
         static_cast<size_t>(capacity),
         static_cast<size_t>(FLAGS_block_size) /*estimated_entry_charge*/,