// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "cache/clock_cache.h"

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <functional>

#include "monitoring/perf_context_imp.h"
#include "monitoring/statistics.h"
#include "port/lang.h"
#include "util/distributed_mutex.h"
#include "util/hash.h"
#include "util/math.h"
#include "util/random.h"

namespace ROCKSDB_NAMESPACE {

namespace clock_cache {

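// ClockHandleTable is an open-addressed hash table with a fixed,
// power-of-two number of slots (1 << hash_bits) and an occupancy cap of
// kStrictLoadFactor times the table size. Collisions are resolved with
// double hashing (see FindSlot below).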
ClockHandleTable::ClockHandleTable(int hash_bits)
    : length_bits_(hash_bits),
      length_bits_mask_((uint32_t{1} << length_bits_) - 1),
      occupancy_(0),
      occupancy_limit_(static_cast<uint32_t>((uint32_t{1} << length_bits_) *
                                             kStrictLoadFactor)),
      array_(new ClockHandle[size_t{1} << length_bits_]) {
  assert(hash_bits <= 32);
}

ClockHandleTable::~ClockHandleTable() {
  ApplyToEntriesRange([](ClockHandle* h) { h->FreeData(); }, 0, GetTableSize());
}

ClockHandle* ClockHandleTable::Lookup(const Slice& key) {
  int probe = 0;
  int slot = FindVisibleElement(key, probe, 0);
  return (slot == -1) ? nullptr : &array_[slot];
}

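// Inserts a copy of *h into the table. Returns a pointer to the inserted
// handle, or nullptr if no slot is available. If a visible entry with the
// same key already exists, *old is set to point at it (the caller decides
// how to retire it); otherwise *old is set to nullptr.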
ClockHandle* ClockHandleTable::Insert(ClockHandle* h, ClockHandle** old) {
  int probe = 0;
  int slot =
      FindVisibleElementOrAvailableSlot(h->key(), probe, 1 /*displacement*/);
  *old = nullptr;
  if (slot == -1) {
    return nullptr;
  }

  if (array_[slot].IsEmpty() || array_[slot].IsTombstone()) {
    bool empty = array_[slot].IsEmpty();
    Assign(slot, h);
    ClockHandle* new_entry = &array_[slot];
    if (empty) {
      // This used to be an empty slot.
      return new_entry;
    }
    // It used to be a tombstone, so there may already be a copy of the
    // key in the table.
    slot = FindVisibleElement(h->key(), probe, 0 /*displacement*/);
    if (slot == -1) {
      // No existing copy of the key.
      return new_entry;
    }
    *old = &array_[slot];
    return new_entry;
  } else {
    // There is an existing copy of the key.
    *old = &array_[slot];
    // Find an available slot for the new element.
    array_[slot].displacements++;
    slot = FindAvailableSlot(h->key(), probe, 1 /*displacement*/);
    if (slot == -1) {
      // No available slots. Roll back displacements.
      probe = 0;
      slot = FindVisibleElement(h->key(), probe, -1);
      array_[slot].displacements--;
      FindAvailableSlot(h->key(), probe, -1);
      return nullptr;
    }
    Assign(slot, h);
    return &array_[slot];
  }
}

void ClockHandleTable::Remove(ClockHandle* h) {
  assert(!h->IsInClockList());  // Already off the clock list.
  int probe = 0;
  FindSlot(
      h->key(), [&h](ClockHandle* e) { return e == h; }, probe,
      -1 /*displacement*/);
  h->SetIsVisible(false);
  h->SetIsElement(false);
  occupancy_--;
}

void ClockHandleTable::Assign(int slot, ClockHandle* h) {
  ClockHandle* dst = &array_[slot];
  uint32_t disp = dst->displacements;
  *dst = *h;
  dst->displacements = disp;
  dst->SetIsVisible(true);
  dst->SetIsElement(true);
  dst->SetPriority(ClockHandle::ClockPriority::NONE);
  occupancy_++;
}

void ClockHandleTable::Exclude(ClockHandle* h) { h->SetIsVisible(false); }

int ClockHandleTable::FindVisibleElement(const Slice& key, int& probe,
                                         int displacement) {
  return FindSlot(
      key, [&](ClockHandle* h) { return h->Matches(key) && h->IsVisible(); },
      probe, displacement);
}

int ClockHandleTable::FindAvailableSlot(const Slice& key, int& probe,
                                        int displacement) {
  return FindSlot(
      key, [](ClockHandle* h) { return h->IsEmpty() || h->IsTombstone(); },
      probe, displacement);
}

int ClockHandleTable::FindVisibleElementOrAvailableSlot(const Slice& key,
                                                        int& probe,
                                                        int displacement) {
  return FindSlot(
      key,
      [&](ClockHandle* h) {
        return h->IsEmpty() || h->IsTombstone() ||
               (h->Matches(key) && h->IsVisible());
      },
      probe, displacement);
}

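// FindSlot probes the table with double hashing: the start slot comes from
// one hash of the key, and the step between probes from a second hash that
// is forced odd via ((h2 << 1) | 1). Because the table size is a power of
// two, an odd step is coprime with it, so the probe sequence visits every
// slot before looping back to the start. Illustrative example (not taken
// from the code): with 8 slots, base = 3 and increment = 5, the probes are
// 3, 0, 5, 2, 7, 4, 1, 6 and then back to 3, at which point -1 is returned.
// Every slot the probe passes over (but does not stop at) gets
// `displacement` added to its displacements counter, which is how inserts
// (+1) and removals (-1) track probe sequences crossing a slot.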
inline int ClockHandleTable::FindSlot(const Slice& key,
                                      std::function<bool(ClockHandle*)> cond,
                                      int& probe, int displacement) {
  uint32_t base = ModTableSize(Hash(key.data(), key.size(), kProbingSeed1));
  uint32_t increment =
      ModTableSize((Hash(key.data(), key.size(), kProbingSeed2) << 1) | 1);
  uint32_t current = ModTableSize(base + probe * increment);
  while (true) {
    ClockHandle* h = &array_[current];
    probe++;
    if (current == base && probe > 1) {
      // We looped back.
      return -1;
    }
    if (cond(h)) {
      return current;
    }
    if (h->IsEmpty()) {
      // We check emptiness after the condition, because
      // the condition may be emptiness.
      return -1;
    }
    h->displacements += displacement;
    current = ModTableSize(current + increment);
  }
}

ClockCacheShard::ClockCacheShard(
    size_t capacity, size_t estimated_value_size, bool strict_capacity_limit,
    CacheMetadataChargePolicy metadata_charge_policy)
    : capacity_(capacity),
      strict_capacity_limit_(strict_capacity_limit),
      clock_pointer_(0),
      table_(
          CalcHashBits(capacity, estimated_value_size, metadata_charge_policy)),
      usage_(0),
      clock_usage_(0) {
  set_metadata_charge_policy(metadata_charge_policy);
}

void ClockCacheShard::EraseUnRefEntries() {
  autovector<ClockHandle> last_reference_list;
  {
    DMutexLock l(mutex_);
    uint32_t slot = 0;
    do {
      ClockHandle* old = &(table_.array_[slot]);
      if (!old->IsInClockList()) {
        // Not evictable; advance the slot before continuing so the sweep
        // still visits every slot and terminates.
        slot = table_.ModTableSize(slot + 1);
        continue;
      }
      ClockRemove(old);
      table_.Remove(old);
      assert(usage_ >= old->total_charge);
      usage_ -= old->total_charge;
      last_reference_list.push_back(*old);
      slot = table_.ModTableSize(slot + 1);
    } while (slot != 0);
  }

  // Free the entries here outside of mutex for performance reasons.
  for (auto& h : last_reference_list) {
    h.FreeData();
  }
}

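// ApplyToSomeEntries processes one chunk of the table per call and encodes
// the next starting index in *state (UINT32_MAX once the whole table has
// been covered). A caller could drive a full scan roughly like this
// (illustrative sketch only, not the actual ShardedCache driver code):
//
//   uint32_t state = 0;
//   do {
//     shard->ApplyToSomeEntries(callback, /*average_entries_per_lock=*/256,
//                               &state);
//   } while (state != UINT32_MAX);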
void ClockCacheShard::ApplyToSomeEntries(
    const std::function<void(const Slice& key, void* value, size_t charge,
                             DeleterFn deleter)>& callback,
    uint32_t average_entries_per_lock, uint32_t* state) {
  // The state is essentially going to be the starting hash, which works
  // nicely even if we resize between calls because we use upper-most
  // hash bits for table indexes.
  DMutexLock l(mutex_);
  uint32_t length_bits = table_.GetLengthBits();
  uint32_t length = table_.GetTableSize();

  assert(average_entries_per_lock > 0);
  // Assuming we are called with same average_entries_per_lock repeatedly,
  // this simplifies some logic (index_end will not overflow).
  assert(average_entries_per_lock < length || *state == 0);

  uint32_t index_begin = *state >> (32 - length_bits);
  uint32_t index_end = index_begin + average_entries_per_lock;
  if (index_end >= length) {
    // Going to end
    index_end = length;
    *state = UINT32_MAX;
  } else {
    *state = index_end << (32 - length_bits);
  }

  table_.ApplyToEntriesRange(
      [callback,
       metadata_charge_policy = metadata_charge_policy_](ClockHandle* h) {
        callback(h->key(), h->value, h->GetCharge(metadata_charge_policy),
                 h->deleter);
      },
      index_begin, index_end);
}

void ClockCacheShard::ClockRemove(ClockHandle* h) {
  assert(h->IsInClockList());
  h->SetPriority(ClockHandle::ClockPriority::NONE);
  assert(clock_usage_ >= h->total_charge);
  clock_usage_ -= h->total_charge;
}

void ClockCacheShard::ClockInsert(ClockHandle* h) {
  assert(!h->IsInClockList());
  h->SetPriority(ClockHandle::ClockPriority::HIGH);
  clock_usage_ += h->total_charge;
}

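// EvictFromClock implements the CLOCK sweep: the hand (clock_pointer_)
// walks the slot array in a circle, skipping slots that are not in the
// clock list. Entries join the clock list at HIGH priority (ClockInsert
// above); each time the hand passes an entry its priority is decreased,
// and an entry found at LOW priority is removed from the table, appended
// to *deleted, and the sweep stops.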
void ClockCacheShard::EvictFromClock(size_t charge,
                                     autovector<ClockHandle>* deleted) {
  assert(charge <= capacity_);
  while (clock_usage_ > 0 && (usage_ + charge) > capacity_) {
    ClockHandle* old = &table_.array_[clock_pointer_];
    clock_pointer_ = table_.ModTableSize(clock_pointer_ + 1);
    // Clock list contains only elements which can be evicted.
    if (!old->IsInClockList()) {
      continue;
    }
    if (old->GetPriority() == ClockHandle::ClockPriority::LOW) {
      ClockRemove(old);
      table_.Remove(old);
      assert(usage_ >= old->total_charge);
      usage_ -= old->total_charge;
      deleted->push_back(*old);
      return;
    }
    old->DecreasePriority();
  }
}

size_t ClockCacheShard::CalcEstimatedHandleCharge(
    size_t estimated_value_size,
    CacheMetadataChargePolicy metadata_charge_policy) {
  ClockHandle h;
  h.CalcTotalCharge(estimated_value_size, metadata_charge_policy);
  return h.total_charge;
}

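// CalcHashBits sizes the table so that the estimated number of entries the
// capacity can hold stays below kLoadFactor of the slots:
//   num_entries = capacity / (kLoadFactor * handle_charge) + 1
//   hash_bits   = ceil(log2(num_entries))   (FloorLog2((n << 1) - 1) below)
// Illustrative numbers only (kLoadFactor is defined in clock_cache.h): with
// capacity = 32 MiB, handle_charge = 8 KiB, and a load factor of 0.5, that
// gives num_entries = 8193 and hash_bits = 14, i.e. a table of 16384 slots.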
int ClockCacheShard::CalcHashBits(
    size_t capacity, size_t estimated_value_size,
    CacheMetadataChargePolicy metadata_charge_policy) {
  size_t handle_charge =
      CalcEstimatedHandleCharge(estimated_value_size, metadata_charge_policy);
  assert(handle_charge > 0);
  uint32_t num_entries =
      static_cast<uint32_t>(capacity / (kLoadFactor * handle_charge)) + 1;
  assert(num_entries <= uint32_t{1} << 31);
  return FloorLog2((num_entries << 1) - 1);
}

void ClockCacheShard::SetCapacity(size_t capacity) {
  assert(false);  // Not supported. TODO(Guido) Support it?
  autovector<ClockHandle> last_reference_list;
  {
    DMutexLock l(mutex_);
    capacity_ = capacity;
    EvictFromClock(0, &last_reference_list);
  }

  // Free the entries here outside of mutex for performance reasons.
  for (auto& h : last_reference_list) {
    h.FreeData();
  }
}

void ClockCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
  DMutexLock l(mutex_);
  strict_capacity_limit_ = strict_capacity_limit;
}

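// Insert first tries to free space via the clock sweep, then either rejects
// the insertion (table at its occupancy limit, or over capacity with a
// strict limit / no output handle) or places the entry in the table,
// retiring any previous entry with the same key. Entries that end up
// unreferenced go onto the clock list; entries handed back to the caller
// stay off it until released.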
Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
                               size_t charge, Cache::DeleterFn deleter,
                               Cache::Handle** handle,
                               Cache::Priority /*priority*/) {
  if (key.size() != kCacheKeySize) {
    return Status::NotSupported("ClockCache only supports key size " +
                                std::to_string(kCacheKeySize) + "B");
  }

  ClockHandle tmp;
  tmp.value = value;
  tmp.deleter = deleter;
  tmp.hash = hash;
  tmp.CalcTotalCharge(charge, metadata_charge_policy_);
  for (int i = 0; i < kCacheKeySize; i++) {
    tmp.key_data[i] = key.data()[i];
  }

  Status s = Status::OK();
  autovector<ClockHandle> last_reference_list;
  {
    DMutexLock l(mutex_);
    assert(table_.GetOccupancy() <= table_.GetOccupancyLimit());
    // Free the space following strict clock policy until enough space
    // is freed or the clock list is empty.
    EvictFromClock(tmp.total_charge, &last_reference_list);
    if ((usage_ + tmp.total_charge > capacity_ &&
         (strict_capacity_limit_ || handle == nullptr)) ||
        table_.GetOccupancy() == table_.GetOccupancyLimit()) {
      if (handle == nullptr) {
        // Don't insert the entry but still return ok, as if the entry were
        // inserted into the cache and evicted immediately.
        last_reference_list.push_back(tmp);
      } else {
        if (table_.GetOccupancy() == table_.GetOccupancyLimit()) {
          s = Status::Incomplete(
              "Insert failed because all slots in the hash table are full.");
          // TODO(Guido) Use the correct statuses.
        } else {
          s = Status::Incomplete(
              "Insert failed because the total charge has exceeded the "
              "capacity.");
        }
      }
    } else {
      // Insert into the cache. Note that the cache might get larger than its
      // capacity if not enough space was freed up.
      ClockHandle* old;
      ClockHandle* h = table_.Insert(&tmp, &old);
      assert(h != nullptr);  // We're below occupancy, so this insertion should
                             // never fail.
      usage_ += h->total_charge;
      if (old != nullptr) {
        s = Status::OkOverwritten();
        assert(old->IsVisible());
        table_.Exclude(old);
        if (!old->HasRefs()) {
          // old is in clock because it's in cache and its reference count is 0.
          ClockRemove(old);
          table_.Remove(old);
          assert(usage_ >= old->total_charge);
          usage_ -= old->total_charge;
          last_reference_list.push_back(*old);
        }
      }
      if (handle == nullptr) {
        ClockInsert(h);
      } else {
        // If caller already holds a ref, no need to take one here.
        if (!h->HasRefs()) {
          h->Ref();
        }
        *handle = reinterpret_cast<Cache::Handle*>(h);
      }
    }
  }

  // Free the entries here outside of mutex for performance reasons.
  for (auto& h : last_reference_list) {
    h.FreeData();
  }

  return s;
}

Cache::Handle* ClockCacheShard::Lookup(const Slice& key, uint32_t /* hash */) {
  ClockHandle* h = nullptr;
  {
    DMutexLock l(mutex_);
    h = table_.Lookup(key);
    if (h != nullptr) {
      assert(h->IsVisible());
      if (!h->HasRefs()) {
        // The entry is in clock since it's in the hash table and has no
        // external references.
        ClockRemove(h);
      }
      h->Ref();
    }
  }
  return reinterpret_cast<Cache::Handle*>(h);
}

bool ClockCacheShard::Ref(Cache::Handle* h) {
  ClockHandle* e = reinterpret_cast<ClockHandle*>(h);
  DMutexLock l(mutex_);
  // To create another reference, the entry must already be externally
  // referenced.
  assert(e->HasRefs());
  e->Ref();
  return true;
}

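// Release drops one external reference. If that was the last reference and
// the entry is still visible in the table, the entry either gets erased
// right away (when the cache is over capacity or erase_if_last_ref is set)
// or goes back onto the clock list so the sweep can reclaim it later. The
// value is freed outside the mutex, from a copy of the handle.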
bool ClockCacheShard::Release(Cache::Handle* handle, bool erase_if_last_ref) {
  if (handle == nullptr) {
    return false;
  }
  ClockHandle* h = reinterpret_cast<ClockHandle*>(handle);
  ClockHandle copy;
  bool last_reference = false;
  assert(!h->IsInClockList());
  {
    DMutexLock l(mutex_);
    last_reference = h->Unref();
    if (last_reference && h->IsVisible()) {
      // The item is still in cache, and nobody else holds a reference to it.
      if (usage_ > capacity_ || erase_if_last_ref) {
        // The clock list must be empty since the cache is full.
        assert(clock_usage_ == 0 || erase_if_last_ref);
        // Take this opportunity and remove the item.
        table_.Remove(h);
      } else {
        // Put the item back on the clock list, and don't free it.
        ClockInsert(h);
        last_reference = false;
      }
    }
    // If it was the last reference, then decrement the cache usage.
    if (last_reference) {
      assert(usage_ >= h->total_charge);
      usage_ -= h->total_charge;
      copy = *h;
    }
  }

  // Free the entry here outside of mutex for performance reasons.
  if (last_reference) {
    copy.FreeData();
  }
  return last_reference;
}

void ClockCacheShard::Erase(const Slice& key, uint32_t /* hash */) {
  ClockHandle copy;
  bool last_reference = false;
  {
    DMutexLock l(mutex_);
    ClockHandle* h = table_.Lookup(key);
    if (h != nullptr) {
      table_.Exclude(h);
      if (!h->HasRefs()) {
        // The entry is in Clock since it's in cache and has no external
        // references.
        ClockRemove(h);
        table_.Remove(h);
        assert(usage_ >= h->total_charge);
        usage_ -= h->total_charge;
        last_reference = true;
        copy = *h;
      }
    }
  }
  // Free the entry here outside of mutex for performance reasons.
  // last_reference will only be true if h != nullptr.
  if (last_reference) {
    copy.FreeData();
  }
}

size_t ClockCacheShard::GetUsage() const {
  DMutexLock l(mutex_);
  return usage_;
}

size_t ClockCacheShard::GetPinnedUsage() const {
  DMutexLock l(mutex_);
  assert(usage_ >= clock_usage_);
  return usage_ - clock_usage_;
}

std::string ClockCacheShard::GetPrintableOptions() const {
  return std::string{};
}

ClockCache::ClockCache(size_t capacity, size_t estimated_value_size,
                       int num_shard_bits, bool strict_capacity_limit,
                       CacheMetadataChargePolicy metadata_charge_policy)
    : ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
  assert(estimated_value_size > 0 ||
         metadata_charge_policy != kDontChargeCacheMetadata);
  num_shards_ = 1 << num_shard_bits;
  shards_ = reinterpret_cast<ClockCacheShard*>(
      port::cacheline_aligned_alloc(sizeof(ClockCacheShard) * num_shards_));
  size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_;
  for (int i = 0; i < num_shards_; i++) {
    new (&shards_[i])
        ClockCacheShard(per_shard, estimated_value_size, strict_capacity_limit,
                        metadata_charge_policy);
  }
}

ClockCache::~ClockCache() {
  if (shards_ != nullptr) {
    assert(num_shards_ > 0);
    for (int i = 0; i < num_shards_; i++) {
      shards_[i].~ClockCacheShard();
    }
    port::cacheline_aligned_free(shards_);
  }
}

CacheShard* ClockCache::GetShard(uint32_t shard) {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

const CacheShard* ClockCache::GetShard(uint32_t shard) const {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

void* ClockCache::Value(Handle* handle) {
  return reinterpret_cast<const ClockHandle*>(handle)->value;
}

size_t ClockCache::GetCharge(Handle* handle) const {
  CacheMetadataChargePolicy metadata_charge_policy = kDontChargeCacheMetadata;
  if (num_shards_ > 0) {
    metadata_charge_policy = shards_[0].metadata_charge_policy_;
  }
  return reinterpret_cast<const ClockHandle*>(handle)->GetCharge(
      metadata_charge_policy);
}

Cache::DeleterFn ClockCache::GetDeleter(Handle* handle) const {
  auto h = reinterpret_cast<const ClockHandle*>(handle);
  return h->deleter;
}

uint32_t ClockCache::GetHash(Handle* handle) const {
  return reinterpret_cast<const ClockHandle*>(handle)->hash;
}

void ClockCache::DisownData() {
  // Leak data only if that won't generate an ASAN/valgrind warning.
  if (!kMustFreeHeapAllocations) {
    shards_ = nullptr;
    num_shards_ = 0;
  }
}

}  // namespace clock_cache

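// NewClockCache is the public entry point. A typical call might look like
// the following sketch (the parameter values are illustrative, not
// recommendations):
//
//   std::shared_ptr<Cache> cache = NewClockCache(
//       /*capacity=*/64 << 20, /*estimated_value_size=*/8 << 10,
//       /*num_shard_bits=*/-1, /*strict_capacity_limit=*/false,
//       kFullChargeCacheMetadata);
//
// A negative num_shard_bits asks for a default shard count derived from the
// capacity (see the num_shard_bits < 0 branch below).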
std::shared_ptr<Cache> NewClockCache(
    size_t capacity, size_t estimated_value_size, int num_shard_bits,
    bool strict_capacity_limit,
    CacheMetadataChargePolicy metadata_charge_policy) {
  if (num_shard_bits >= 20) {
    return nullptr;  // The cache cannot be sharded into too many fine pieces.
  }
  if (num_shard_bits < 0) {
    num_shard_bits = GetDefaultCacheShardBits(capacity);
  }
  return std::make_shared<clock_cache::ClockCache>(
      capacity, estimated_value_size, num_shard_bits, strict_capacity_limit,
      metadata_charge_policy);
}

}  // namespace ROCKSDB_NAMESPACE