mirror of https://github.com/facebook/rocksdb.git
Prepare tests for new HCC naming (#11676)
Summary: I'm anticipating using the public name HyperClockCache for both the current version with a fixed-size table and the upcoming version with an automatically growing table. However, for simplicity of testing them as substantially distinct implementations, I want to give them distinct internal names, like FixedHyperClockCache and AutoHyperClockCache. This change anticipates that by renaming to FixedHyperClockCache and assuming for now that all the unit tests that run on HCC will run and behave similarly for the automatic HCC. Obviously updates will need to be made, but I'm trying to avoid uninteresting find & replace updates in what will be a large and engineering-heavy PR for AutoHCC.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11676

Test Plan: no behavior change intended, except logging will now use the name FixedHyperClockCache

Reviewed By: ajkr

Differential Revision: D48103165

Pulled By: pdillinger

fbshipit-source-id: a33f1901488fea102164c2318e2f2b156aaba736
This commit is contained in:
parent 6d1effaf01
commit 99daea3481
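Since the public name stays the same while only the internal class is renamed, a minimal sketch of the unchanged caller-facing API may help (capacity and entry-charge values here are arbitrary examples):

#include <memory>

#include "rocksdb/cache.h"

// The options type keeps its public HyperClockCache naming; only the object
// it builds internally is now clock_cache::FixedHyperClockCache (see the
// MakeSharedCache() hunk below).
std::shared_ptr<ROCKSDB_NAMESPACE::Cache> MakeExampleHyperClockCache() {
  ROCKSDB_NAMESPACE::HyperClockCacheOptions opts(
      /*capacity=*/64 << 20, /*estimated_entry_charge=*/8 * 1024);
  return opts.MakeSharedCache();
}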
@@ -299,7 +299,8 @@ class CacheBench {
   if (FLAGS_cache_type == "clock_cache") {
     fprintf(stderr, "Old clock cache implementation has been removed.\n");
     exit(1);
-  } else if (FLAGS_cache_type == "hyper_clock_cache") {
+  } else if (FLAGS_cache_type == "hyper_clock_cache" ||
+             FLAGS_cache_type == "fixed_hyper_clock_cache") {
     HyperClockCacheOptions opts(FLAGS_cache_size, FLAGS_value_bytes,
                                 FLAGS_num_shard_bits);
     opts.hash_seed = BitwiseAnd(FLAGS_seed, INT32_MAX);
@@ -120,8 +120,7 @@ class CacheTest : public testing::Test,
   // Currently, HyperClockCache requires keys to be 16B long, whereas
   // LRUCache doesn't, so the encoding depends on the cache type.
   std::string EncodeKey(int k) {
-    auto type = GetParam();
-    if (type == kHyperClock) {
+    if (IsHyperClock()) {
       return EncodeKey16Bytes(k);
     } else {
       return EncodeKey32Bits(k);
@@ -129,8 +128,7 @@ class CacheTest : public testing::Test,
   }
 
   int DecodeKey(const Slice& k) {
-    auto type = GetParam();
-    if (type == kHyperClock) {
+    if (IsHyperClock()) {
       return DecodeKey16Bytes(k);
     } else {
       return DecodeKey32Bits(k);
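As the comment in the hunk above notes, HyperClockCache currently requires 16-byte keys, which is why the test encodes keys differently per cache type. A hedged sketch of what helpers like EncodeKey16Bytes/DecodeKey16Bytes might look like (the real ones live in the test file and may differ; these stand-ins are hypothetical):

#include <cstring>
#include <string>

std::string ExampleEncodeKey16Bytes(int k) {
  std::string result(16, '\0');            // fixed-width, zero-padded key
  std::memcpy(&result[0], &k, sizeof(k));  // low bytes carry the key value
  return result;
}

int ExampleDecodeKey16Bytes(const std::string& key) {
  int k = 0;
  std::memcpy(&k, key.data(), sizeof(k));  // recover the low bytes
  return k;
}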
@@ -190,7 +188,7 @@ TEST_P(CacheTest, UsageTest) {
   auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
   ASSERT_EQ(0, cache->GetUsage());
   size_t baseline_meta_usage = precise_cache->GetUsage();
-  if (type != kHyperClock) {
+  if (!IsHyperClock()) {
     ASSERT_EQ(0, baseline_meta_usage);
   }
 
@@ -209,7 +207,7 @@ TEST_P(CacheTest, UsageTest) {
     ASSERT_OK(precise_cache->Insert(key, value, &kDumbHelper, kv_size));
     usage += kv_size;
     ASSERT_EQ(usage, cache->GetUsage());
-    if (type == kHyperClock) {
+    if (IsHyperClock()) {
       ASSERT_EQ(baseline_meta_usage + usage, precise_cache->GetUsage());
     } else {
       ASSERT_LT(usage, precise_cache->GetUsage());
@@ -237,7 +235,7 @@ TEST_P(CacheTest, UsageTest) {
   ASSERT_GT(kCapacity, cache->GetUsage());
   ASSERT_GT(kCapacity, precise_cache->GetUsage());
   ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
-  if (type != kHyperClock) {
+  if (!IsHyperClock()) {
     ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
   } else {
     // estimated value size of 1 is weird for clock cache, because
@@ -263,7 +261,7 @@ TEST_P(CacheTest, PinnedUsageTest) {
   auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
   auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);
   size_t baseline_meta_usage = precise_cache->GetUsage();
-  if (type != kHyperClock) {
+  if (!IsHyperClock()) {
     ASSERT_EQ(0, baseline_meta_usage);
   }
 
@@ -368,7 +366,7 @@ TEST_P(CacheTest, HitAndMiss) {
   ASSERT_EQ(-1, Lookup(300));
 
   Insert(100, 102);
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     // ClockCache usually doesn't overwrite on Insert
     ASSERT_EQ(101, Lookup(100));
   } else {
@@ -378,7 +376,7 @@ TEST_P(CacheTest, HitAndMiss) {
   ASSERT_EQ(-1, Lookup(300));
 
   ASSERT_EQ(1U, deleted_values_.size());
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ASSERT_EQ(102, deleted_values_[0]);
   } else {
     ASSERT_EQ(101, deleted_values_[0]);
@@ -386,7 +384,7 @@ TEST_P(CacheTest, HitAndMiss) {
 }
 
 TEST_P(CacheTest, InsertSameKey) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS(
         "ClockCache doesn't guarantee Insert overwrite same key.");
     return;
@@ -415,7 +413,7 @@ TEST_P(CacheTest, Erase) {
 }
 
 TEST_P(CacheTest, EntriesArePinned) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS(
         "ClockCache doesn't guarantee Insert overwrite same key.");
     return;
@@ -479,7 +477,7 @@ TEST_P(CacheTest, ExternalRefPinsEntries) {
       Insert(1000 + j, 2000 + j);
     }
     // Clock cache is even more stateful and needs more churn to evict
-    if (GetParam() == kHyperClock) {
+    if (IsHyperClock()) {
       for (int j = 0; j < kCacheSize; j++) {
         Insert(11000 + j, 11000 + j);
       }
@@ -679,7 +677,7 @@ using TypedHandle = SharedCache::TypedHandle;
 
 TEST_P(CacheTest, SetCapacity) {
   auto type = GetParam();
-  if (type == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS(
         "FastLRUCache and HyperClockCache don't support arbitrary capacity "
         "adjustments.");
@@ -811,7 +809,7 @@ TEST_P(CacheTest, OverCapacity) {
     cache.Release(handles[i]);
   }
 
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     // Make sure eviction is triggered.
     ASSERT_OK(cache.Insert(EncodeKey(-1), nullptr, 1, &handles[0]));
 
@@ -923,8 +921,7 @@ TEST_P(CacheTest, DefaultShardBits) {
   // Prevent excessive allocation (to save time & space)
   estimated_value_size_ = 100000;
   // Implementations use different minimum shard sizes
-  size_t min_shard_size =
-      (GetParam() == kHyperClock ? 32U * 1024U : 512U) * 1024U;
+  size_t min_shard_size = (IsHyperClock() ? 32U * 1024U : 512U) * 1024U;
 
   std::shared_ptr<Cache> cache = NewCache(32U * min_shard_size);
   ShardedCacheBase* sc = dynamic_cast<ShardedCacheBase*>(cache.get());
@@ -522,7 +522,7 @@ void BaseClockTable::TrackAndReleaseEvictedEntry(
     // For key reconstructed from hash
     UniqueId64x2 unhashed;
     took_value_ownership =
-        eviction_callback_(ClockCacheShard<HyperClockTable>::ReverseHash(
+        eviction_callback_(ClockCacheShard<FixedHyperClockTable>::ReverseHash(
                                h->GetHash(), &unhashed, hash_seed_),
                            reinterpret_cast<Cache::Handle*>(h));
   }
@@ -670,7 +670,7 @@ void BaseClockTable::TEST_ReleaseNMinus1(ClockHandle* h, size_t n) {
 }
 #endif
 
-HyperClockTable::HyperClockTable(
+FixedHyperClockTable::FixedHyperClockTable(
     size_t capacity, bool /*strict_capacity_limit*/,
     CacheMetadataChargePolicy metadata_charge_policy,
     MemoryAllocator* allocator,
@@ -693,7 +693,7 @@ HyperClockTable::HyperClockTable(
                 "Expecting size / alignment with common cache line size");
 }
 
-HyperClockTable::~HyperClockTable() {
+FixedHyperClockTable::~FixedHyperClockTable() {
   // Assumes there are no references or active operations on any slot/element
   // in the table.
   for (size_t i = 0; i < GetTableSize(); i++) {
@@ -729,13 +729,13 @@ HyperClockTable::~HyperClockTable() {
   assert(occupancy_ == 0);
 }
 
-void HyperClockTable::StartInsert(InsertState&) {}
+void FixedHyperClockTable::StartInsert(InsertState&) {}
 
-bool HyperClockTable::GrowIfNeeded(size_t new_occupancy, InsertState&) {
+bool FixedHyperClockTable::GrowIfNeeded(size_t new_occupancy, InsertState&) {
   return new_occupancy <= occupancy_limit_;
 }
 
-HyperClockTable::HandleImpl* HyperClockTable::DoInsert(
+FixedHyperClockTable::HandleImpl* FixedHyperClockTable::DoInsert(
     const ClockHandleBasicData& proto, uint64_t initial_countdown,
     bool keep_ref, InsertState&) {
   bool already_matches = false;
@@ -782,7 +782,7 @@ HyperClockTable::HandleImpl* HyperClockTable::DoInsert(
   return nullptr;
 }
 
-HyperClockTable::HandleImpl* HyperClockTable::Lookup(
+FixedHyperClockTable::HandleImpl* FixedHyperClockTable::Lookup(
     const UniqueId64x2& hashed_key) {
   HandleImpl* e = FindSlot(
       hashed_key,
@@ -843,8 +843,8 @@ HyperClockTable::HandleImpl* HyperClockTable::Lookup(
   return e;
 }
 
-bool HyperClockTable::Release(HandleImpl* h, bool useful,
-                              bool erase_if_last_ref) {
+bool FixedHyperClockTable::Release(HandleImpl* h, bool useful,
+                                   bool erase_if_last_ref) {
   // In contrast with LRUCache's Release, this function won't delete the handle
   // when the cache is above capacity and the reference is the last one. Space
   // is only freed up by EvictFromClock (called by Insert when space is needed)
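The comment in this hunk states a contract that is easy to miss: unlike LRUCache, releasing the last reference does not free space. A simplified, illustrative sketch of that shape (not the real implementation, which packs reference and clock state into a single atomic word):

#include <atomic>
#include <cstdint>

struct ExampleClockSlot {
  std::atomic<uint32_t> refs{1};
  std::atomic<uint32_t> countdown{1};  // clock "keep me" counter
};

void ExampleRelease(ExampleClockSlot& slot, bool useful) {
  if (useful) {
    // Reward a useful reference so the clock sweep spares this entry longer.
    slot.countdown.fetch_add(1, std::memory_order_relaxed);
  }
  // Drop the reference with deliberately no capacity check: space is only
  // reclaimed later, by the eviction sweep Insert runs when it needs room.
  slot.refs.fetch_sub(1, std::memory_order_release);
}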
@@ -919,7 +919,7 @@ bool HyperClockTable::Release(HandleImpl* h, bool useful,
 }
 
 #ifndef NDEBUG
-void HyperClockTable::TEST_ReleaseN(HandleImpl* h, size_t n) {
+void FixedHyperClockTable::TEST_ReleaseN(HandleImpl* h, size_t n) {
   if (n > 0) {
     // Do n-1 simple releases first
     TEST_ReleaseNMinus1(h, n);
@@ -930,7 +930,7 @@ void HyperClockTable::TEST_ReleaseN(HandleImpl* h, size_t n) {
 }
 #endif
 
-void HyperClockTable::Erase(const UniqueId64x2& hashed_key) {
+void FixedHyperClockTable::Erase(const UniqueId64x2& hashed_key) {
   (void)FindSlot(
       hashed_key,
       [&](HandleImpl* h) {
@@ -995,7 +995,7 @@ void HyperClockTable::Erase(const UniqueId64x2& hashed_key) {
       [&](HandleImpl* /*h*/, bool /*is_last*/) {});
 }
 
-void HyperClockTable::EraseUnRefEntries() {
+void FixedHyperClockTable::EraseUnRefEntries() {
   for (size_t i = 0; i <= this->length_bits_mask_; i++) {
     HandleImpl& h = array_[i];
 
@@ -1017,7 +1017,7 @@ void HyperClockTable::EraseUnRefEntries() {
 }
 
 template <typename MatchFn, typename AbortFn, typename UpdateFn>
-inline HyperClockTable::HandleImpl* HyperClockTable::FindSlot(
+inline FixedHyperClockTable::HandleImpl* FixedHyperClockTable::FindSlot(
     const UniqueId64x2& hashed_key, const MatchFn& match_fn,
     const AbortFn& abort_fn, const UpdateFn& update_fn) {
   // NOTE: upper 32 bits of hashed_key[0] is used for sharding
@@ -1052,8 +1052,8 @@ inline HyperClockTable::HandleImpl* HyperClockTable::FindSlot(
   return nullptr;
 }
 
-inline void HyperClockTable::Rollback(const UniqueId64x2& hashed_key,
-                                      const HandleImpl* h) {
+inline void FixedHyperClockTable::Rollback(const UniqueId64x2& hashed_key,
+                                           const HandleImpl* h) {
   size_t current = ModTableSize(hashed_key[1]);
   size_t increment = static_cast<size_t>(hashed_key[0]) | 1U;
   while (&array_[current] != h) {
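The probe arithmetic visible above is worth a note: the table size is a power of two and the increment is forced odd, so the increment is coprime with the table size and the double-hash probe visits every slot exactly once before cycling, which is what lets Rollback retrace FindSlot's path. A standalone sketch of the step function under that assumption:

#include <cstddef>

// One step of the double-hash probe. table_size must be a power of two so
// the mask is equivalent to the ModTableSize() used above.
size_t ExampleNextProbe(size_t current, size_t odd_increment,
                        size_t table_size) {
  return (current + odd_increment) & (table_size - 1);
}

// Example: table_size = 8 and odd_increment = 3, starting at 0, yields
// 0, 3, 6, 1, 4, 7, 2, 5 -- all eight slots, no repeats.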
@@ -1062,7 +1062,7 @@ inline void HyperClockTable::Rollback(const UniqueId64x2& hashed_key,
   }
 }
 
-inline void HyperClockTable::ReclaimEntryUsage(size_t total_charge) {
+inline void FixedHyperClockTable::ReclaimEntryUsage(size_t total_charge) {
   auto old_occupancy = occupancy_.fetch_sub(1U, std::memory_order_release);
   (void)old_occupancy;
   // No underflow
@@ -1073,8 +1073,8 @@ inline void HyperClockTable::ReclaimEntryUsage(size_t total_charge) {
   assert(old_usage >= total_charge);
 }
 
-inline void HyperClockTable::Evict(size_t requested_charge, InsertState&,
-                                   EvictionData* data) {
+inline void FixedHyperClockTable::Evict(size_t requested_charge, InsertState&,
+                                        EvictionData* data) {
   // precondition
   assert(requested_charge > 0);
 
@@ -1172,7 +1172,7 @@ void ClockCacheShard<Table>::ApplyToSomeEntries(
       table_.HandlePtr(index_begin), table_.HandlePtr(index_end), false);
 }
 
-int HyperClockTable::CalcHashBits(
+int FixedHyperClockTable::CalcHashBits(
     size_t capacity, size_t estimated_value_size,
     CacheMetadataChargePolicy metadata_charge_policy) {
   double average_slot_charge = estimated_value_size * kLoadFactor;
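A hedged reconstruction of the sizing idea CalcHashBits starts from, as seen in its first line above (the real function also accounts for the metadata charge policy, which is why it takes that parameter): estimate how many slots the capacity can fund at the target load factor, then take enough hash bits for the next power of two.

#include <algorithm>
#include <cmath>
#include <cstddef>

int ExampleCalcHashBits(size_t capacity, size_t estimated_value_size,
                        double load_factor) {
  // At the target load factor, each slot carries this much charge on average.
  double average_slot_charge = estimated_value_size * load_factor;
  double num_slots = static_cast<double>(capacity) / average_slot_charge;
  // The table address space is 2^hash_bits, rounded up to cover num_slots.
  return static_cast<int>(std::ceil(std::log2(std::max(num_slots, 1.0))));
}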
@@ -1360,9 +1360,9 @@ size_t ClockCacheShard<Table>::GetTableAddressCount() const {
 }
 
 // Explicit instantiation
-template class ClockCacheShard<HyperClockTable>;
+template class ClockCacheShard<FixedHyperClockTable>;
 
-HyperClockCache::HyperClockCache(const HyperClockCacheOptions& opts)
+FixedHyperClockCache::FixedHyperClockCache(const HyperClockCacheOptions& opts)
     : ShardedCache(opts) {
   assert(opts.estimated_entry_charge > 0 ||
          opts.metadata_charge_policy != kDontChargeCacheMetadata);
@@ -1371,7 +1371,7 @@ HyperClockCache::HyperClockCache(const HyperClockCacheOptions& opts)
   size_t per_shard = GetPerShardCapacity();
   MemoryAllocator* alloc = this->memory_allocator();
   InitShards([&](Shard* cs) {
-    HyperClockTable::Opts table_opts;
+    FixedHyperClockTable::Opts table_opts;
     table_opts.estimated_value_size = opts.estimated_entry_charge;
     new (cs) Shard(per_shard, opts.strict_capacity_limit,
                    opts.metadata_charge_policy, alloc, &eviction_callback_,
@@ -1379,15 +1379,15 @@ HyperClockCache::HyperClockCache(const HyperClockCacheOptions& opts)
   });
 }
 
-Cache::ObjectPtr HyperClockCache::Value(Handle* handle) {
+Cache::ObjectPtr FixedHyperClockCache::Value(Handle* handle) {
   return reinterpret_cast<const HandleImpl*>(handle)->value;
 }
 
-size_t HyperClockCache::GetCharge(Handle* handle) const {
+size_t FixedHyperClockCache::GetCharge(Handle* handle) const {
   return reinterpret_cast<const HandleImpl*>(handle)->GetTotalCharge();
 }
 
-const Cache::CacheItemHelper* HyperClockCache::GetCacheItemHelper(
+const Cache::CacheItemHelper* FixedHyperClockCache::GetCacheItemHelper(
     Handle* handle) const {
   auto h = reinterpret_cast<const HandleImpl*>(handle);
   return h->helper;
@@ -1402,7 +1402,7 @@ namespace {
 // or actual occupancy very close to limit (>95% of limit).
 // Also, for each shard compute the recommended estimated_entry_charge,
 // and keep the minimum one for use as overall recommendation.
-void AddShardEvaluation(const HyperClockCache::Shard& shard,
+void AddShardEvaluation(const FixedHyperClockCache::Shard& shard,
                         std::vector<double>& predicted_load_factors,
                         size_t& min_recommendation) {
   size_t usage = shard.GetUsage() - shard.GetStandaloneUsage();
@@ -1420,7 +1420,7 @@ void AddShardEvaluation(const HyperClockCache::Shard& shard,
   // If filled to capacity, what would the occupancy ratio be?
   double ratio = occ_ratio / usage_ratio;
   // Given max load factor, what that load factor be?
-  double lf = ratio * HyperClockTable::kStrictLoadFactor;
+  double lf = ratio * FixedHyperClockTable::kStrictLoadFactor;
   predicted_load_factors.push_back(lf);
 
   // Update min_recommendation also
@@ -1430,13 +1430,13 @@ void AddShardEvaluation(const HyperClockCache::Shard& shard,
 
 }  // namespace
 
-void HyperClockCache::ReportProblems(
+void FixedHyperClockCache::ReportProblems(
     const std::shared_ptr<Logger>& info_log) const {
   uint32_t shard_count = GetNumShards();
   std::vector<double> predicted_load_factors;
   size_t min_recommendation = SIZE_MAX;
-  const_cast<HyperClockCache*>(this)->ForEachShard(
-      [&](HyperClockCache::Shard* shard) {
+  const_cast<FixedHyperClockCache*>(this)->ForEachShard(
+      [&](FixedHyperClockCache::Shard* shard) {
         AddShardEvaluation(*shard, predicted_load_factors, min_recommendation);
       });
 
@@ -1459,18 +1459,19 @@ void HyperClockCache::ReportProblems(
                                    predicted_load_factors.end(), 0.0) /
                                shard_count;
 
-  constexpr double kLowSpecLoadFactor = HyperClockTable::kLoadFactor / 2;
-  constexpr double kMidSpecLoadFactor = HyperClockTable::kLoadFactor / 1.414;
-  if (average_load_factor > HyperClockTable::kLoadFactor) {
+  constexpr double kLowSpecLoadFactor = FixedHyperClockTable::kLoadFactor / 2;
+  constexpr double kMidSpecLoadFactor =
+      FixedHyperClockTable::kLoadFactor / 1.414;
+  if (average_load_factor > FixedHyperClockTable::kLoadFactor) {
     // Out of spec => Consider reporting load factor too high
    // Estimate effective overall capacity loss due to enforcing occupancy limit
     double lost_portion = 0.0;
     int over_count = 0;
     for (double lf : predicted_load_factors) {
-      if (lf > HyperClockTable::kStrictLoadFactor) {
+      if (lf > FixedHyperClockTable::kStrictLoadFactor) {
         ++over_count;
         lost_portion +=
-            (lf - HyperClockTable::kStrictLoadFactor) / lf / shard_count;
+            (lf - FixedHyperClockTable::kStrictLoadFactor) / lf / shard_count;
       }
     }
     // >= 20% loss -> error
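To make the loss estimate in this hunk concrete, a worked example with illustrative numbers (the real kStrictLoadFactor constant is defined in clock_cache.h):

// Suppose one of four shards predicts load factor lf = 1.0 against a strict
// limit of 0.84 (illustrative value, not the actual constant). That shard
// realizes only 84% of the entries its capacity implies, losing
// (1.0 - 0.84) / 1.0 = 16% of its own capacity, i.e. 16% / 4 = 4% of the
// whole cache once divided by shard_count.
double ExampleLostPortion() {
  constexpr double kIllustrativeStrictLoadFactor = 0.84;
  constexpr int shard_count = 4;
  double lf = 1.0;  // predicted load factor of the single over-limit shard
  return (lf - kIllustrativeStrictLoadFactor) / lf / shard_count;  // 0.04
}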
@@ -1494,10 +1495,10 @@ void HyperClockCache::ReportProblems(
     if (report) {
       ROCKS_LOG_AT_LEVEL(
           info_log, level,
-          "HyperClockCache@%p unable to use estimated %.1f%% capacity because "
-          "of "
-          "full occupancy in %d/%u cache shards (estimated_entry_charge too "
-          "high). Recommend estimated_entry_charge=%zu",
+          "FixedHyperClockCache@%p unable to use estimated %.1f%% capacity "
+          "because of full occupancy in %d/%u cache shards "
+          "(estimated_entry_charge too high). "
+          "Recommend estimated_entry_charge=%zu",
           this, lost_portion * 100.0, over_count, (unsigned)shard_count,
           min_recommendation);
     }
@@ -1515,8 +1516,8 @@ void HyperClockCache::ReportProblems(
     }
     ROCKS_LOG_AT_LEVEL(
         info_log, level,
-        "HyperClockCache@%p table has low occupancy at full capacity. Higher "
-        "estimated_entry_charge (about %.1fx) would likely improve "
+        "FixedHyperClockCache@%p table has low occupancy at full capacity. "
+        "Higher estimated_entry_charge (about %.1fx) would likely improve "
         "performance. Recommend estimated_entry_charge=%zu",
         this, kMidSpecLoadFactor / average_load_factor, min_recommendation);
   }
@@ -1549,7 +1550,7 @@ std::shared_ptr<Cache> HyperClockCacheOptions::MakeSharedCache() const {
         GetDefaultCacheShardBits(opts.capacity, min_shard_size);
   }
   std::shared_ptr<Cache> cache =
-      std::make_shared<clock_cache::HyperClockCache>(opts);
+      std::make_shared<clock_cache::FixedHyperClockCache>(opts);
   if (opts.secondary_cache) {
     cache = std::make_shared<CacheWithSecondaryAdapter>(cache,
                                                         opts.secondary_cache);
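The tail of MakeSharedCache() above shows that a configured secondary cache wraps the new FixedHyperClockCache in a CacheWithSecondaryAdapter. A hedged sketch of how a caller reaches that path (sizes arbitrary):

#include <memory>
#include <utility>

#include "rocksdb/cache.h"
#include "rocksdb/secondary_cache.h"

std::shared_ptr<ROCKSDB_NAMESPACE::Cache> MakeTieredExample(
    std::shared_ptr<ROCKSDB_NAMESPACE::SecondaryCache> secondary) {
  ROCKSDB_NAMESPACE::HyperClockCacheOptions opts(
      /*capacity=*/128 << 20, /*estimated_entry_charge=*/4 * 1024);
  opts.secondary_cache = std::move(secondary);  // triggers the adapter path
  return opts.MakeSharedCache();
}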
@@ -466,7 +466,7 @@ class BaseClockTable {
   const uint32_t& hash_seed_;
 };
 
-class HyperClockTable : public BaseClockTable {
+class FixedHyperClockTable : public BaseClockTable {
  public:
   // Target size to be exactly a common cache line size (see static_assert in
   // clock_cache.cc)
@@ -491,12 +491,12 @@ class HyperClockTable : public BaseClockTable {
     size_t estimated_value_size;
   };
 
-  HyperClockTable(size_t capacity, bool strict_capacity_limit,
-                  CacheMetadataChargePolicy metadata_charge_policy,
-                  MemoryAllocator* allocator,
-                  const Cache::EvictionCallback* eviction_callback,
-                  const uint32_t* hash_seed, const Opts& opts);
-  ~HyperClockTable();
+  FixedHyperClockTable(size_t capacity, bool strict_capacity_limit,
+                       CacheMetadataChargePolicy metadata_charge_policy,
+                       MemoryAllocator* allocator,
+                       const Cache::EvictionCallback* eviction_callback,
+                       const uint32_t* hash_seed, const Opts& opts);
+  ~FixedHyperClockTable();
 
   // For BaseClockTable::Insert
   struct InsertState {};
@@ -612,7 +612,7 @@ class HyperClockTable : public BaseClockTable {
 
   // Array of slots comprising the hash table.
   const std::unique_ptr<HandleImpl[]> array_;
-};  // class HyperClockTable
+};  // class FixedHyperClockTable
 
 // A single shard of sharded cache.
 template <class Table>
@@ -729,17 +729,17 @@ class ALIGN_AS(CACHE_LINE_SIZE) ClockCacheShard final : public CacheShardBase {
   std::atomic<bool> strict_capacity_limit_;
 };  // class ClockCacheShard
 
-class HyperClockCache
+class FixedHyperClockCache
 #ifdef NDEBUG
     final
 #endif
-    : public ShardedCache<ClockCacheShard<HyperClockTable>> {
+    : public ShardedCache<ClockCacheShard<FixedHyperClockTable>> {
  public:
-  using Shard = ClockCacheShard<HyperClockTable>;
+  using Shard = ClockCacheShard<FixedHyperClockTable>;
 
-  explicit HyperClockCache(const HyperClockCacheOptions& opts);
+  explicit FixedHyperClockCache(const HyperClockCacheOptions& opts);
 
-  const char* Name() const override { return "HyperClockCache"; }
+  const char* Name() const override { return "FixedHyperClockCache"; }
 
   Cache::ObjectPtr Value(Handle* handle) override;
 
@@ -749,7 +749,7 @@ class HyperClockCache
 
   void ReportProblems(
       const std::shared_ptr<Logger>& /*info_log*/) const override;
-};  // class HyperClockCache
+};  // class FixedHyperClockCache
 
 }  // namespace clock_cache
 
|
@ -737,7 +737,7 @@ class CompressedSecondaryCacheTestBase : public testing::Test,
|
|||
class CompressedSecondaryCacheTest
|
||||
: public CompressedSecondaryCacheTestBase,
|
||||
public testing::WithParamInterface<std::string> {
|
||||
const std::string& Type() override { return GetParam(); }
|
||||
const std::string& Type() const override { return GetParam(); }
|
||||
};
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(CompressedSecondaryCacheTest,
|
||||
|
@@ -752,7 +752,7 @@ class CompressedSecCacheTestWithCompressAndAllocatorParam
     sec_cache_is_compressed_ = std::get<0>(GetParam());
     use_jemalloc_ = std::get<1>(GetParam());
   }
-  const std::string& Type() override { return std::get<2>(GetParam()); }
+  const std::string& Type() const override { return std::get<2>(GetParam()); }
   bool sec_cache_is_compressed_;
   bool use_jemalloc_;
 };
@@ -773,7 +773,7 @@ class CompressedSecondaryCacheTestWithCompressionParam
   CompressedSecondaryCacheTestWithCompressionParam() {
     sec_cache_is_compressed_ = std::get<0>(GetParam());
   }
-  const std::string& Type() override { return std::get<1>(GetParam()); }
+  const std::string& Type() const override { return std::get<1>(GetParam()); }
   bool sec_cache_is_compressed_;
 };
 
@@ -950,7 +950,7 @@ class CompressedSecCacheTestWithCompressAndSplitParam
     sec_cache_is_compressed_ = std::get<0>(GetParam());
     enable_custom_split_merge_ = std::get<1>(GetParam());
   }
-  const std::string& Type() override { return std::get<2>(GetParam()); }
+  const std::string& Type() const override { return std::get<2>(GetParam()); }
   bool sec_cache_is_compressed_;
   bool enable_custom_split_merge_;
 };
@@ -373,8 +373,8 @@ namespace clock_cache {
 
 class ClockCacheTest : public testing::Test {
  public:
-  using Shard = HyperClockCache::Shard;
-  using Table = HyperClockTable;
+  using Shard = FixedHyperClockCache::Shard;
+  using Table = FixedHyperClockTable;
   using HandleImpl = Shard::HandleImpl;
 
   ClockCacheTest() {}
@@ -916,9 +916,9 @@ TEST_F(ClockCacheTest, TableSizesTest) {
             .MakeSharedCache();
     // Table sizes are currently only powers of two
     EXPECT_GE(cache->GetTableAddressCount(),
-              est_count / HyperClockTable::kLoadFactor);
+              est_count / FixedHyperClockTable::kLoadFactor);
     EXPECT_LE(cache->GetTableAddressCount(),
-              est_count / HyperClockTable::kLoadFactor * 2.0);
+              est_count / FixedHyperClockTable::kLoadFactor * 2.0);
     EXPECT_EQ(cache->GetUsage(), 0);
 
     // kFullChargeMetaData
@@ -935,9 +935,10 @@ TEST_F(ClockCacheTest, TableSizesTest) {
       double est_count_after_meta =
           (capacity - cache->GetUsage()) * 1.0 / est_val_size;
       EXPECT_GE(cache->GetTableAddressCount(),
-                est_count_after_meta / HyperClockTable::kLoadFactor);
-      EXPECT_LE(cache->GetTableAddressCount(),
-                est_count_after_meta / HyperClockTable::kLoadFactor * 2.0);
+                est_count_after_meta / FixedHyperClockTable::kLoadFactor);
+      EXPECT_LE(
+          cache->GetTableAddressCount(),
+          est_count_after_meta / FixedHyperClockTable::kLoadFactor * 2.0);
     }
   }
 }
@@ -1425,7 +1426,7 @@ TEST_P(BasicSecondaryCacheTest, FullCapacityTest) {
       k2.AsSlice(),
       GetHelper(CacheEntryRole::kDataBlock, /*secondary_compatible=*/false),
       /*context*/ this, Cache::Priority::LOW);
-  if (strict_capacity_limit || GetParam() == kHyperClock) {
+  if (strict_capacity_limit || IsHyperClock()) {
    ASSERT_NE(handle2, nullptr);
    cache->Release(handle2);
    ASSERT_EQ(secondary_cache->num_inserts(), 1u);
@@ -1450,12 +1451,12 @@ TEST_P(BasicSecondaryCacheTest, FullCapacityTest) {
 // CORRECTION: this is not quite right. block_1 can be inserted into the block
 // cache because strict_capacity_limit=false, but it is removed from the cache
 // in Release() because of being over-capacity, without demoting to secondary
-// cache. HyperClockCache doesn't check capacity on release (for efficiency)
-// so can demote the over-capacity item to secondary cache. Also, we intend to
-// add support for demotion in Release, but that currently causes too much
-// unit test churn.
+// cache. FixedHyperClockCache doesn't check capacity on release (for
+// efficiency) so can demote the over-capacity item to secondary cache. Also, we
+// intend to add support for demotion in Release, but that currently causes too
+// much unit test churn.
 TEST_P(DBSecondaryCacheTest, TestSecondaryCacheCorrectness1) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     // See CORRECTION above
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
@@ -1553,7 +1554,7 @@ TEST_P(DBSecondaryCacheTest, TestSecondaryCacheCorrectness1) {
 // insert and cache block_1 in the block cache (this is the different place
 // from TestSecondaryCacheCorrectness1)
 TEST_P(DBSecondaryCacheTest, TestSecondaryCacheCorrectness2) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
   }
@@ -1741,7 +1742,7 @@ TEST_P(DBSecondaryCacheTest, SecondaryCacheIntensiveTesting) {
 // if we try to insert block_1 to the block cache, it will always fails. Only
 // block_2 will be successfully inserted into the block cache.
 TEST_P(DBSecondaryCacheTest, SecondaryCacheFailureTest) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
   }
@@ -1851,7 +1852,7 @@ TEST_P(BasicSecondaryCacheTest, BasicWaitAllTest) {
                             str.length()));
   }
   // Force all entries to be evicted to the secondary cache
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     // HCC doesn't respond immediately to SetCapacity
     for (int i = 9000; i < 9030; ++i) {
       ASSERT_OK(cache->Insert(ock.WithOffset(i).AsSlice(), nullptr,
@@ -1906,7 +1907,7 @@ TEST_P(BasicSecondaryCacheTest, BasicWaitAllTest) {
 // a sync point callback in TestSecondaryCache::Lookup. We then control the
 // lookup result by setting the ResultMap.
 TEST_P(DBSecondaryCacheTest, TestSecondaryCacheMultiGet) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
   }
@@ -2407,7 +2408,7 @@ TEST_P(DBSecondaryCacheTest, TestSecondaryCacheOptionBasic) {
 // with new options, which set the lowest_used_cache_tier to
 // kNonVolatileBlockTier. So secondary cache will be used.
 TEST_P(DBSecondaryCacheTest, TestSecondaryCacheOptionChange) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
   }
@@ -2502,7 +2503,7 @@ TEST_P(DBSecondaryCacheTest, TestSecondaryCacheOptionChange) {
 // Two DB test. We create 2 DBs sharing the same block cache and secondary
 // cache. We diable the secondary cache option for DB2.
 TEST_P(DBSecondaryCacheTest, TestSecondaryCacheOptionTwoDB) {
-  if (GetParam() == kHyperClock) {
+  if (IsHyperClock()) {
     ROCKSDB_GTEST_BYPASS("Test depends on LRUCache-specific behaviors");
     return;
   }
@@ -129,7 +129,8 @@ std::shared_ptr<Cache> StressTest::NewCache(size_t capacity,
   if (FLAGS_cache_type == "clock_cache") {
     fprintf(stderr, "Old clock cache implementation has been removed.\n");
     exit(1);
-  } else if (FLAGS_cache_type == "hyper_clock_cache") {
+  } else if (FLAGS_cache_type == "hyper_clock_cache" ||
+             FLAGS_cache_type == "fixed_hyper_clock_cache") {
     HyperClockCacheOptions opts(static_cast<size_t>(capacity),
                                 FLAGS_block_size /*estimated_entry_charge*/,
                                 num_shard_bits);
@@ -42,12 +42,14 @@ class WithCacheType : public TestCreateContext {
   };
 
   static constexpr auto kLRU = "lru";
-  static constexpr auto kHyperClock = "hyper_clock";
+  static constexpr auto kFixedHyperClock = "fixed_hyper_clock";
 
   // For options other than capacity
   size_t estimated_value_size_ = 1;
 
-  virtual const std::string& Type() = 0;
+  virtual const std::string& Type() const = 0;
+
+  bool IsHyperClock() const { return Type() == kFixedHyperClock; }
 
   std::shared_ptr<Cache> NewCache(
       size_t capacity,
@@ -62,7 +64,7 @@ class WithCacheType : public TestCreateContext {
       }
       return lru_opts.MakeSharedCache();
     }
-    if (type == kHyperClock) {
+    if (type == kFixedHyperClock) {
       HyperClockCacheOptions hc_opts{capacity, estimated_value_size_};
       hc_opts.hash_seed = 0;  // deterministic tests
       if (modify_opts_fn) {
@@ -105,14 +107,14 @@ class WithCacheType : public TestCreateContext {
 
 class WithCacheTypeParam : public WithCacheType,
                            public testing::WithParamInterface<std::string> {
-  const std::string& Type() override { return GetParam(); }
+  const std::string& Type() const override { return GetParam(); }
 };
 
 constexpr auto kLRU = WithCacheType::kLRU;
-constexpr auto kHyperClock = WithCacheType::kHyperClock;
+constexpr auto kFixedHyperClock = WithCacheType::kFixedHyperClock;
 
 inline auto GetTestingCacheTypes() {
-  return testing::Values(std::string(kLRU), std::string(kHyperClock));
+  return testing::Values(std::string(kLRU), std::string(kFixedHyperClock));
 }
 
 }  // namespace secondary_cache_test_util
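A hedged sketch (suite and test names hypothetical, include path assumed) of how the helpers above are consumed by the parameterized tests throughout this diff: suites instantiate over the cache-type strings and branch on IsHyperClock() rather than comparing GetParam() against a constant.

#include "gtest/gtest.h"
#include "test_util/secondary_cache_test_util.h"  // include path assumed

using namespace ROCKSDB_NAMESPACE::secondary_cache_test_util;

class ExampleCacheTypeTest : public WithCacheTypeParam {};

INSTANTIATE_TEST_CASE_P(ExampleCacheTypeTest, ExampleCacheTypeTest,
                        GetTestingCacheTypes());

TEST_P(ExampleCacheTypeTest, BranchesPerImplementation) {
  std::shared_ptr<ROCKSDB_NAMESPACE::Cache> cache = NewCache(1024 * 1024);
  if (IsHyperClock()) {
    // Expectations specific to FixedHyperClockCache go here (e.g. Insert
    // not overwriting an existing key).
  } else {
    // LRUCache-specific expectations go here.
  }
}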
@@ -3039,7 +3039,8 @@ class Benchmark {
     if (FLAGS_cache_type == "clock_cache") {
       fprintf(stderr, "Old clock cache implementation has been removed.\n");
       exit(1);
-    } else if (FLAGS_cache_type == "hyper_clock_cache") {
+    } else if (FLAGS_cache_type == "hyper_clock_cache" ||
+               FLAGS_cache_type == "fixed_hyper_clock_cache") {
       HyperClockCacheOptions hcco{
           static_cast<size_t>(capacity),
           static_cast<size_t>(FLAGS_block_size) /*estimated_entry_charge*/,