mirror of https://github.com/facebook/rocksdb.git
Fix updating the capacity of a tiered cache (#11873)
Summary: Updating the tiered cache (cache allocated using `NewTieredCache()`) by calling `SetCapacity()` on it was not working properly. The initial creation would set the primary cache capacity to the combined primary and compressed secondary cache capacity, but `SetCapacity()` would only set the primary cache capacity, with no way to change the secondary cache capacity. Additionally, the API was confusing, since the primary and compressed secondary capacities were specified separately during creation, while `SetCapacity()` took the combined capacity.

With this fix, the user always specifies the total budget and the compressed secondary cache ratio on creation. Subsequently, `SetCapacity()` distributes the new capacity across the two caches by the same ratio. The `NewTieredCache` API has been changed to take the total cache capacity (inclusive of both the primary and the compressed secondary cache) and the ratio of total capacity to allocate to the compressed cache. These are specified in `TieredCacheOptions`. Any capacity specified in `LRUCacheOptions`, `HyperClockCacheOptions` and `CompressedSecondaryCacheOptions` is ignored. A new API, `UpdateTieredCache`, is provided to dynamically update the total capacity, the ratio of compressed cache, and the admission policy.

Tests: New unit tests

Pull Request resolved: https://github.com/facebook/rocksdb/pull/11873

Reviewed By: akankshamahajan15

Differential Revision: D49562250

Pulled By: anand1976

fbshipit-source-id: 57033bc713b68d5da6292207765a6b3dbe539ddf
This commit is contained in:
parent 552bc01669
commit 48589b961f
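Before the diff, here is how the reworked API is meant to be used. This is a minimal usage sketch, not code from this commit; the `rocksdb/cache.h` header and the `rocksdb::` namespace are the usual RocksDB ones and are assumed here, while the option fields and the `NewTieredCache`/`UpdateTieredCache` signatures follow the declarations in the diff below.

```cpp
// Sketch: size a tiered cache with one total budget and resize it later.
#include <cassert>
#include <memory>

#include "rocksdb/cache.h"  // TieredCacheOptions, NewTieredCache, UpdateTieredCache (header assumed)

int main() {
  rocksdb::LRUCacheOptions lru_opts;
  lru_opts.capacity = 0;        // ignored; the budget comes from total_capacity
  lru_opts.num_shard_bits = 0;

  rocksdb::TieredCacheOptions opts;
  opts.cache_opts = &lru_opts;
  opts.cache_type = rocksdb::PrimaryCacheType::kCacheTypeLRU;
  opts.adm_policy = rocksdb::TieredAdmissionPolicy::kAdmPolicyAuto;
  opts.total_capacity = 100 << 20;        // 100MB for both tiers combined
  opts.compressed_secondary_ratio = 0.3;  // 30MB compressed secondary, 70MB primary

  std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewTieredCache(opts);
  assert(cache != nullptr);

  // Grow the total budget to 130MB; the 70/30 split is preserved, so the
  // compressed secondary capacity becomes 39MB.
  rocksdb::Status s = rocksdb::UpdateTieredCache(cache, 130 << 20);
  assert(s.ok());

  // Change only the ratio; -1 leaves the total capacity unchanged.
  s = rocksdb::UpdateTieredCache(cache, -1, /*compressed_secondary_ratio=*/0.4);
  assert(s.ok());
  return 0;
}
```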
@@ -273,9 +273,10 @@ class ConcurrentCacheReservationManager
     std::size_t total_mem_used = cache_res_mgr_->GetTotalMemoryUsed();
     Status s;
     if (!increase) {
-      assert(total_mem_used >= memory_used_delta);
-      s = cache_res_mgr_->UpdateCacheReservation(total_mem_used -
-                                                 memory_used_delta);
+      s = cache_res_mgr_->UpdateCacheReservation(
+          (total_mem_used > memory_used_delta)
+              ? (total_mem_used - memory_used_delta)
+              : 0);
     } else {
       s = cache_res_mgr_->UpdateCacheReservation(total_mem_used +
                                                  memory_used_delta);
@@ -26,9 +26,7 @@ CompressedSecondaryCache::CompressedSecondaryCache(
           cache_))),
       disable_cache_(opts.capacity == 0) {}

-CompressedSecondaryCache::~CompressedSecondaryCache() {
-  assert(cache_res_mgr_->GetTotalReservedCacheSize() == 0);
-}
+CompressedSecondaryCache::~CompressedSecondaryCache() {}

 std::unique_ptr<SecondaryCacheResultHandle> CompressedSecondaryCache::Lookup(
     const Slice& key, const Cache::CacheItemHelper* helper,
@@ -989,11 +989,11 @@ class CompressedSecCacheTestWithTiered
   CompressedSecCacheTestWithTiered() {
     LRUCacheOptions lru_opts;
     HyperClockCacheOptions hcc_opts(
-        /*_capacity=*/70 << 20,
+        /*_capacity=*/0,
         /*_estimated_entry_charge=*/256 << 10,
         /*_num_shard_bits=*/0);
     TieredCacheOptions opts;
-    lru_opts.capacity = 70 << 20;
+    lru_opts.capacity = 0;
     lru_opts.num_shard_bits = 0;
     lru_opts.high_pri_pool_ratio = 0;
     opts.cache_type = std::get<0>(GetParam());
@@ -1004,8 +1004,10 @@ class CompressedSecCacheTestWithTiered
     }
     opts.adm_policy = std::get<1>(GetParam());
     ;
-    opts.comp_cache_opts.capacity = 30 << 20;
+    opts.comp_cache_opts.capacity = 0;
     opts.comp_cache_opts.num_shard_bits = 0;
+    opts.total_capacity = 100 << 20;
+    opts.compressed_secondary_ratio = 0.3;
     cache_ = NewTieredCache(opts);
     cache_res_mgr_ =
         std::make_shared<CacheReservationManagerImpl<CacheEntryRole::kMisc>>(
@@ -1023,7 +1025,7 @@ class CompressedSecCacheTestWithTiered
  protected:
   CacheReservationManager* cache_res_mgr() { return cache_res_mgr_.get(); }

-  Cache* GetTieredCache() { return cache_.get(); }
+  std::shared_ptr<Cache> GetTieredCache() { return cache_; }

   Cache* GetCache() {
     return static_cast_with_check<CacheWithSecondaryAdapter, Cache>(
@@ -1110,7 +1112,7 @@ TEST_P(CompressedSecCacheTestWithTiered, AdmissionPolicy) {
     return;
   }

-  Cache* tiered_cache = GetTieredCache();
+  Cache* tiered_cache = GetTieredCache().get();
   Cache* cache = GetCache();
   std::vector<CacheKey> keys;
   std::vector<std::string> vals;
@@ -1165,6 +1167,151 @@ TEST_P(CompressedSecCacheTestWithTiered, AdmissionPolicy) {
   ASSERT_EQ(handle1, nullptr);
 }

+TEST_P(CompressedSecCacheTestWithTiered, DynamicUpdate) {
+  CompressedSecondaryCache* sec_cache =
+      reinterpret_cast<CompressedSecondaryCache*>(GetSecondaryCache());
+  std::shared_ptr<Cache> tiered_cache = GetTieredCache();
+
+  // Use EXPECT_PRED3 instead of EXPECT_NEAR to void too many size_t to
+  // double explicit casts
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
+               GetPercent(30 << 20, 1));
+  size_t sec_capacity;
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (30 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, 130 << 20));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (39 << 20),
+               GetPercent(39 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (39 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, 70 << 20));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (21 << 20),
+               GetPercent(21 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (21 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, 100 << 20));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (30 << 20),
+               GetPercent(30 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (30 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.4));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (40 << 20),
+               GetPercent(40 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (40 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.2));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (20 << 20),
+               GetPercent(20 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (20 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 1.0));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (100 << 20),
+               GetPercent(100 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, 100 << 20);
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.0));
+  // Only check usage for LRU cache. HCC shows a 64KB usage for some reason
+  if (std::get<0>(GetParam()) == PrimaryCacheType::kCacheTypeLRU) {
+    ASSERT_EQ(GetCache()->GetUsage(), 0);
+  }
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, 0);
+
+  ASSERT_NOK(UpdateTieredCache(tiered_cache, -1, 0.3));
+  // Only check usage for LRU cache. HCC shows a 64KB usage for some reason
+  if (std::get<0>(GetParam()) == PrimaryCacheType::kCacheTypeLRU) {
+    ASSERT_EQ(GetCache()->GetUsage(), 0);
+  }
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, 0);
+}
+
+TEST_P(CompressedSecCacheTestWithTiered, DynamicUpdateWithReservation) {
+  CompressedSecondaryCache* sec_cache =
+      reinterpret_cast<CompressedSecondaryCache*>(GetSecondaryCache());
+  std::shared_ptr<Cache> tiered_cache = GetTieredCache();
+
+  ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(10 << 20));
+  // Use EXPECT_PRED3 instead of EXPECT_NEAR to void too many size_t to
+  // double explicit casts
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (37 << 20),
+               GetPercent(37 << 20, 1));
+  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
+               GetPercent(3 << 20, 1));
+  size_t sec_capacity;
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (30 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, 70 << 20));
+  // Only check usage for LRU cache. HCC is slightly off for some reason
+  if (std::get<0>(GetParam()) == PrimaryCacheType::kCacheTypeLRU) {
+    EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (28 << 20),
+                 GetPercent(28 << 20, 1));
+  }
+  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
+               GetPercent(3 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (21 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, 130 << 20));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (46 << 20),
+               GetPercent(46 << 20, 1));
+  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
+               GetPercent(3 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (39 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, 100 << 20));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (37 << 20),
+               GetPercent(37 << 20, 1));
+  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (3 << 20),
+               GetPercent(3 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (30 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.39));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (45 << 20),
+               GetPercent(45 << 20, 1));
+  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (4 << 20),
+               GetPercent(4 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (39 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.2));
+  // Only check usage for LRU cache. HCC is slightly off for some reason
+  if (std::get<0>(GetParam()) == PrimaryCacheType::kCacheTypeLRU) {
+    EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (28 << 20),
+                 GetPercent(28 << 20, 1));
+  }
+  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (2 << 20),
+               GetPercent(2 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, (20 << 20));
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 1.0));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (100 << 20),
+               GetPercent(100 << 20, 1));
+  EXPECT_PRED3(CacheUsageWithinBounds, sec_cache->TEST_GetUsage(), (10 << 20),
+               GetPercent(10 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, 100 << 20);
+
+  ASSERT_OK(UpdateTieredCache(tiered_cache, -1, 0.0));
+  EXPECT_PRED3(CacheUsageWithinBounds, GetCache()->GetUsage(), (10 << 20),
+               GetPercent(10 << 20, 1));
+  ASSERT_OK(sec_cache->GetCapacity(sec_capacity));
+  ASSERT_EQ(sec_capacity, 0);
+
+  ASSERT_OK(cache_res_mgr()->UpdateCacheReservation(0));
+}
+
 INSTANTIATE_TEST_CASE_P(
     CompressedSecCacheTests, CompressedSecCacheTestWithTiered,
     ::testing::Values(
@@ -18,6 +18,7 @@ struct Dummy {
 };
 const Dummy kDummy{};
 Cache::ObjectPtr const kDummyObj = const_cast<Dummy*>(&kDummy);
+const char* kTieredCacheName = "TieredCache";
 }  // namespace

 // When CacheWithSecondaryAdapter is constructed with the distribute_cache_res
@@ -108,7 +109,7 @@ CacheWithSecondaryAdapter::~CacheWithSecondaryAdapter() {
   // use after free
   target_->SetEvictionCallback({});
 #ifndef NDEBUG
-  if (distribute_cache_res_) {
+  if (distribute_cache_res_ && !ratio_changed_) {
     size_t sec_capacity = 0;
     Status s = secondary_cache_->GetCapacity(sec_capacity);
     assert(s.ok());
@@ -416,37 +417,188 @@ std::string CacheWithSecondaryAdapter::GetPrintableOptions() const {
 }

 const char* CacheWithSecondaryAdapter::Name() const {
-  // To the user, at least for now, configure the underlying cache with
-  // a secondary cache. So we pretend to be that cache
-  return target_->Name();
+  if (distribute_cache_res_) {
+    return kTieredCacheName;
+  } else {
+    // To the user, at least for now, configure the underlying cache with
+    // a secondary cache. So we pretend to be that cache
+    return target_->Name();
+  }
 }

-std::shared_ptr<Cache> NewTieredCache(TieredCacheOptions& opts) {
-  if (!opts.cache_opts) {
+// Update the total cache capacity. If we're distributing cache reservations
+// to both primary and secondary, then update the pri_cache_res_reservation
+// as well. At the moment, we don't have a good way of handling the case
+// where the new capacity < total cache reservations.
+void CacheWithSecondaryAdapter::SetCapacity(size_t capacity) {
+  size_t sec_capacity = static_cast<size_t>(
+      capacity * (distribute_cache_res_ ? sec_cache_res_ratio_ : 0.0));
+  size_t old_sec_capacity = 0;
+
+  if (distribute_cache_res_) {
+    MutexLock m(&mutex_);
+
+    Status s = secondary_cache_->GetCapacity(old_sec_capacity);
+    if (!s.ok()) {
+      return;
+    }
+    if (old_sec_capacity > sec_capacity) {
+      // We're shrinking the cache. We do things in the following order to
+      // avoid a temporary spike in usage over the configured capacity -
+      // 1. Lower the secondary cache capacity
+      // 2. Credit an equal amount (by decreasing pri_cache_res_) to the
+      //    primary cache
+      // 3. Decrease the primary cache capacity to the total budget
+      s = secondary_cache_->SetCapacity(sec_capacity);
+      if (s.ok()) {
+        s = pri_cache_res_->UpdateCacheReservation(
+            old_sec_capacity - sec_capacity,
+            /*increase=*/false);
+        assert(s.ok());
+        target_->SetCapacity(capacity);
+      }
+    } else {
+      // We're expanding the cache. Do it in the following order to avoid
+      // unnecessary evictions -
+      // 1. Increase the primary cache capacity to total budget
+      // 2. Reserve additional memory in primary on behalf of secondary (by
+      //    increasing pri_cache_res_ reservation)
+      // 3. Increase secondary cache capacity
+      target_->SetCapacity(capacity);
+      s = pri_cache_res_->UpdateCacheReservation(
+          sec_capacity - old_sec_capacity,
+          /*increase=*/true);
+      assert(s.ok());
+      s = secondary_cache_->SetCapacity(sec_capacity);
+      assert(s.ok());
+    }
+  } else {
+    // No cache reservation distribution. Just set the primary cache capacity.
+    target_->SetCapacity(capacity);
+  }
+}
+
+// Update the secondary/primary allocation ratio (remember, the primary
+// capacity is the total memory budget when distribute_cache_res_ is true).
+// When the ratio changes, we may accumulate some error in the calculations
+// for secondary cache inflate/deflate and pri_cache_res_ reservations.
+// This is due to the rounding of the reservation amount.
+//
+// We rely on the current pri_cache_res_ total memory used to estimate the
+// new secondary cache reservation after the ratio change. For this reason,
+// once the ratio is lowered to 0.0 (effectively disabling the secondary
+// cache and pri_cache_res_ total mem used going down to 0), we cannot
+// increase the ratio and re-enable it, We might remove this limitation
+// in the future.
+Status CacheWithSecondaryAdapter::UpdateCacheReservationRatio(
+    double compressed_secondary_ratio) {
+  if (!distribute_cache_res_ || sec_cache_res_ratio_ == 0.0) {
+    return Status::NotSupported();
+  }
+
+  MutexLock m(&mutex_);
+  size_t pri_capacity = target_->GetCapacity();
+  size_t sec_capacity =
+      static_cast<size_t>(pri_capacity * compressed_secondary_ratio);
+  size_t old_sec_capacity;
+  Status s = secondary_cache_->GetCapacity(old_sec_capacity);
+  if (!s.ok()) {
+    return s;
+  }
+
+  assert(old_sec_capacity >= pri_cache_res_->GetTotalMemoryUsed());
+  size_t old_sec_reserved =
+      old_sec_capacity - pri_cache_res_->GetTotalMemoryUsed();
+  // Calculate the new secondary cache reservation
+  size_t sec_reserved = static_cast<size_t>(
+      old_sec_reserved *
+      (double)(compressed_secondary_ratio / sec_cache_res_ratio_));
+  sec_cache_res_ratio_ = compressed_secondary_ratio;
+  if (sec_capacity > old_sec_capacity) {
+    // We're increasing the ratio, thus ending up with a larger secondary
+    // cache and a smaller usable primary cache capacity. Similar to
+    // SetCapacity(), we try to avoid a temporary increase in total usage
+    // beyond teh configured capacity -
+    // 1. A higher secondary cache ratio means it gets a higher share of
+    //    cache reservations. So first account for that by deflating the
+    //    secondary cache
+    // 2. Increase pri_cache_res_ reservation to reflect the new secondary
+    //    cache utilization (increase in capacity - increase in share of cache
+    //    reservation)
+    // 3. Increase secondary cache capacity
+    assert(sec_reserved > old_sec_reserved || sec_reserved == 0);
+    s = secondary_cache_->Deflate(sec_reserved - old_sec_reserved);
+    assert(s.ok());
+    s = pri_cache_res_->UpdateCacheReservation(
+        (sec_capacity - old_sec_capacity) - (sec_reserved - old_sec_reserved),
+        /*increase=*/true);
+    assert(s.ok());
+    s = secondary_cache_->SetCapacity(sec_capacity);
+    assert(s.ok());
+  } else {
+    // We're shrinking the ratio. Try to avoid unnecessary evictions -
+    // 1. Lower the secondary cache capacity
+    // 2. Decrease pri_cache_res_ reservation to relect lower secondary
+    //    cache utilization (decrease in capacity - decrease in share of cache
+    //    reservations)
+    // 3. Inflate the secondary cache to give it back the reduction in its
+    //    share of cache reservations
+    assert(old_sec_reserved > sec_reserved || sec_reserved == 0);
+    s = secondary_cache_->SetCapacity(sec_capacity);
+    if (s.ok()) {
+      s = pri_cache_res_->UpdateCacheReservation(
+          (old_sec_capacity - sec_capacity) - (old_sec_reserved - sec_reserved),
+          /*increase=*/false);
+      assert(s.ok());
+      s = secondary_cache_->Inflate(old_sec_reserved - sec_reserved);
+      assert(s.ok());
+    }
+  }
+
+#ifndef NDEBUG
+  // As mentioned in the function comments, we may accumulate some erros when
+  // the ratio is changed. We set a flag here which disables some assertions
+  // in the destructor
+  ratio_changed_ = true;
+#endif
+  return s;
+}
+
+Status CacheWithSecondaryAdapter::UpdateAdmissionPolicy(
+    TieredAdmissionPolicy adm_policy) {
+  adm_policy_ = adm_policy;
+  return Status::OK();
+}
+
+std::shared_ptr<Cache> NewTieredCache(const TieredCacheOptions& _opts) {
+  if (!_opts.cache_opts) {
     return nullptr;
   }

-  if (opts.adm_policy >= TieredAdmissionPolicy::kAdmPolicyMax) {
+  if (_opts.adm_policy >= TieredAdmissionPolicy::kAdmPolicyMax) {
     return nullptr;
   }

+  TieredCacheOptions opts = _opts;
   std::shared_ptr<Cache> cache;
   if (opts.cache_type == PrimaryCacheType::kCacheTypeLRU) {
     LRUCacheOptions cache_opts =
         *(static_cast_with_check<LRUCacheOptions, ShardedCacheOptions>(
             opts.cache_opts));
-    cache_opts.capacity += opts.comp_cache_opts.capacity;
+    cache_opts.capacity = opts.total_capacity;
     cache = cache_opts.MakeSharedCache();
   } else if (opts.cache_type == PrimaryCacheType::kCacheTypeHCC) {
     HyperClockCacheOptions cache_opts =
         *(static_cast_with_check<HyperClockCacheOptions, ShardedCacheOptions>(
             opts.cache_opts));
-    cache_opts.capacity += opts.comp_cache_opts.capacity;
+    cache_opts.capacity = opts.total_capacity;
     cache = cache_opts.MakeSharedCache();
   } else {
     return nullptr;
   }
   std::shared_ptr<SecondaryCache> sec_cache;
+  opts.comp_cache_opts.capacity = static_cast<size_t>(
+      opts.total_capacity * opts.compressed_secondary_ratio);
   sec_cache = NewCompressedSecondaryCache(opts.comp_cache_opts);

   if (opts.nvm_sec_cache) {
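To make the arithmetic in `SetCapacity()` and `UpdateCacheReservationRatio()` above concrete: with `distribute_cache_res_` set, the primary cache is sized to the whole budget and a reservation equal to the compressed secondary capacity is charged against it, which is what the new unit tests check. The following stand-alone sketch (plain C++, not RocksDB code) reproduces those numbers under that assumption.

```cpp
// Sketch of the budget split performed by the adapter above.
#include <cstddef>
#include <cstdio>

int main() {
  const double ratio = 0.3;        // compressed_secondary_ratio
  std::size_t total = 100u << 20;  // total budget given to the primary cache

  // Secondary capacity is a fixed fraction of the budget; the same amount is
  // reserved in the primary cache on the secondary's behalf.
  std::size_t sec_capacity = static_cast<std::size_t>(total * ratio);  // 30MB
  std::printf("secondary=%zuMB usable primary=%zuMB\n",
              sec_capacity >> 20, (total - sec_capacity) >> 20);       // 30 / 70

  // SetCapacity(130MB) keeps the ratio: secondary 39MB, usable primary 91MB.
  total = 130u << 20;
  sec_capacity = static_cast<std::size_t>(total * ratio);
  std::printf("after resize: secondary=%zuMB usable primary=%zuMB\n",
              sec_capacity >> 20, (total - sec_capacity) >> 20);
  return 0;
}
```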
@@ -463,4 +615,27 @@ std::shared_ptr<Cache> NewTieredCache(TieredCacheOptions& opts) {
   return std::make_shared<CacheWithSecondaryAdapter>(
       cache, sec_cache, opts.adm_policy, /*distribute_cache_res=*/true);
 }
+
+Status UpdateTieredCache(const std::shared_ptr<Cache>& cache,
+                         int64_t total_capacity,
+                         double compressed_secondary_ratio,
+                         TieredAdmissionPolicy adm_policy) {
+  if (!cache || strcmp(cache->Name(), kTieredCacheName)) {
+    return Status::InvalidArgument();
+  }
+  CacheWithSecondaryAdapter* tiered_cache =
+      static_cast<CacheWithSecondaryAdapter*>(cache.get());
+
+  Status s;
+  if (total_capacity > 0) {
+    tiered_cache->SetCapacity(total_capacity);
+  }
+  if (compressed_secondary_ratio >= 0.0 && compressed_secondary_ratio <= 1.0) {
+    s = tiered_cache->UpdateCacheReservationRatio(compressed_secondary_ratio);
+  }
+  if (adm_policy < TieredAdmissionPolicy::kAdmPolicyMax) {
+    s = tiered_cache->UpdateAdmissionPolicy(adm_policy);
+  }
+  return s;
+}
 }  // namespace ROCKSDB_NAMESPACE
@@ -45,6 +45,12 @@ class CacheWithSecondaryAdapter : public CacheWrapper {

   const char* Name() const override;

+  void SetCapacity(size_t capacity) override;
+
+  Status UpdateCacheReservationRatio(double ratio);
+
+  Status UpdateAdmissionPolicy(TieredAdmissionPolicy adm_policy);
+
   Cache* TEST_GetCache() { return target_.get(); }

   SecondaryCache* TEST_GetSecondaryCache() { return secondary_cache_.get(); }
@@ -75,6 +81,10 @@ class CacheWithSecondaryAdapter : public CacheWrapper {
   // Fraction of a cache memory reservation to be assigned to the secondary
   // cache
   double sec_cache_res_ratio_;
+  port::Mutex mutex_;
+#ifndef NDEBUG
+  bool ratio_changed_ = false;
+#endif
 };

 }  // namespace ROCKSDB_NAMESPACE
@@ -182,14 +182,17 @@ class DBTieredSecondaryCacheTest : public DBTestBase {
                         size_t nvm_capacity) {
     LRUCacheOptions lru_opts;
     TieredCacheOptions opts;
-    lru_opts.capacity = pri_capacity;
+    lru_opts.capacity = 0;
     lru_opts.num_shard_bits = 0;
     lru_opts.high_pri_pool_ratio = 0;
     opts.cache_opts = &lru_opts;
     opts.cache_type = PrimaryCacheType::kCacheTypeLRU;
     opts.adm_policy = TieredAdmissionPolicy::kAdmPolicyThreeQueue;
-    opts.comp_cache_opts.capacity = compressed_capacity;
+    opts.comp_cache_opts.capacity = 0;
     opts.comp_cache_opts.num_shard_bits = 0;
+    opts.total_capacity = pri_capacity + compressed_capacity;
+    opts.compressed_secondary_ratio =
+        (double)compressed_capacity / opts.total_capacity;
     nvm_sec_cache_.reset(new TestSecondaryCache(nvm_capacity));
     opts.nvm_sec_cache = nvm_sec_cache_;
     cache_ = NewTieredCache(opts);
@@ -12,6 +12,7 @@
 #pragma once

 #include <cstdint>
+#include <limits>
 #include <memory>
 #include <string>

@@ -498,16 +499,36 @@ enum TieredAdmissionPolicy
 // allocations costed to the block cache, will be distributed
 // proportionally across both the primary and secondary.
 struct TieredCacheOptions {
-  ShardedCacheOptions* cache_opts;
-  PrimaryCacheType cache_type;
-  TieredAdmissionPolicy adm_policy;
+  ShardedCacheOptions* cache_opts = nullptr;
+  PrimaryCacheType cache_type = PrimaryCacheType::kCacheTypeLRU;
+  TieredAdmissionPolicy adm_policy = TieredAdmissionPolicy::kAdmPolicyAuto;
   CompressedSecondaryCacheOptions comp_cache_opts;
+  // Any capacity specified in LRUCacheOptions, HyperClockCacheOptions and
+  // CompressedSecondaryCacheOptions is ignored
+  // The total_capacity specified here is taken as the memory budget and
+  // divided between the primary block cache and compressed secondary cache
+  size_t total_capacity = 0;
+  double compressed_secondary_ratio = 0.0;
   // An optional secondary cache that will serve as the persistent cache
   // tier. If present, compressed blocks will be written to this
   // secondary cache.
   std::shared_ptr<SecondaryCache> nvm_sec_cache;
 };

+extern std::shared_ptr<Cache> NewTieredCache(
+    const TieredCacheOptions& cache_opts);
+
 // EXPERIMENTAL
-extern std::shared_ptr<Cache> NewTieredCache(TieredCacheOptions& cache_opts);
+// Dynamically update some of the parameters of a TieredCache. The input
+// cache shared_ptr should have been allocated using NewTieredVolatileCache.
+// At the moment, there are a couple of limitations -
+// 1. The total_capacity should be > the WriteBufferManager max size, if
+//    using the block cache charging feature
+// 2. Once the compressed secondary cache is disabled by setting the
+//    compressed_secondary_ratio to 0.0, it cannot be dynamically re-enabled
+//    again
+extern Status UpdateTieredCache(
+    const std::shared_ptr<Cache>& cache, int64_t total_capacity = -1,
+    double compressed_secondary_ratio = std::numeric_limits<double>::max(),
+    TieredAdmissionPolicy adm_policy = TieredAdmissionPolicy::kAdmPolicyMax);
 }  // namespace ROCKSDB_NAMESPACE
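The second limitation documented above (once the compressed secondary cache is disabled via a 0.0 ratio it cannot be re-enabled) is the case the new `DynamicUpdate` test covers with `ASSERT_NOK`. A hedged sketch of what that looks like from application code, with the same assumed header and namespace as the earlier example:

```cpp
// Sketch: disabling the compressed secondary tier is a one-way operation for now.
#include <cassert>
#include <memory>

#include "rocksdb/cache.h"  // header assumed

void DisableCompressedTier(const std::shared_ptr<rocksdb::Cache>& tiered_cache) {
  // Drop the compressed secondary ratio to 0.0; its capacity goes to 0.
  rocksdb::Status s = rocksdb::UpdateTieredCache(tiered_cache, -1, 0.0);
  assert(s.ok());

  // Attempting to re-enable it is rejected (Status::NotSupported per
  // CacheWithSecondaryAdapter::UpdateCacheReservationRatio in this diff).
  s = rocksdb::UpdateTieredCache(tiered_cache, -1, 0.3);
  assert(!s.ok());
}
```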
@@ -0,0 +1 @@
+Updating the tiered cache (cache allocated using NewTieredCache()) by calling SetCapacity() on it was not working properly. The initial creation would set the primary cache capacity to the combined primary and compressed secondary cache capacity. But SetCapacity() would just set the primary cache capacity. With this fix, the user always specifies the total budget and compressed secondary cache ratio on creation. Subsequently, SetCapacity() will distribute the new capacity across the two caches by the same ratio.
@@ -0,0 +1 @@
+The `NewTieredCache` API has been changed to take the total cache capacity (inclusive of both the primary and the compressed secondary cache) and the ratio of total capacity to allocate to the compressed cache. These are specified in `TieredCacheOptions`. Any capacity specified in `LRUCacheOptions`, `HyperClockCacheOptions` and `CompressedSecondaryCacheOptions` is ignored. A new API, `UpdateTieredCache` is provided to dynamically update the total capacity, ratio of compressed cache, and admission policy.